1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Greybus interface code
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright 2014 Google Inc.
6*4882a593Smuzhiyun * Copyright 2014 Linaro Ltd.
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/delay.h>
10*4882a593Smuzhiyun #include <linux/greybus.h>
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include "greybus_trace.h"
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #define GB_INTERFACE_DEVICE_ID_BAD 0xff
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #define GB_INTERFACE_AUTOSUSPEND_MS 3000
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /* Time required for interface to enter standby before disabling REFCLK */
21*4882a593Smuzhiyun #define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun /* Don't-care selector index */
24*4882a593Smuzhiyun #define DME_SELECTOR_INDEX_NULL 0
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /* DME attributes */
27*4882a593Smuzhiyun /* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
28*4882a593Smuzhiyun #define DME_T_TST_SRC_INCREMENT 0x4083
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun #define DME_DDBL1_MANUFACTURERID 0x5003
31*4882a593Smuzhiyun #define DME_DDBL1_PRODUCTID 0x5004
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun #define DME_TOSHIBA_GMP_VID 0x6000
34*4882a593Smuzhiyun #define DME_TOSHIBA_GMP_PID 0x6001
35*4882a593Smuzhiyun #define DME_TOSHIBA_GMP_SN0 0x6002
36*4882a593Smuzhiyun #define DME_TOSHIBA_GMP_SN1 0x6003
37*4882a593Smuzhiyun #define DME_TOSHIBA_GMP_INIT_STATUS 0x6101
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun /* DDBL1 Manufacturer and Product ids */
40*4882a593Smuzhiyun #define TOSHIBA_DMID 0x0126
41*4882a593Smuzhiyun #define TOSHIBA_ES2_BRIDGE_DPID 0x1000
42*4882a593Smuzhiyun #define TOSHIBA_ES3_APBRIDGE_DPID 0x1001
43*4882a593Smuzhiyun #define TOSHIBA_ES3_GBPHY_DPID 0x1002
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun static int gb_interface_hibernate_link(struct gb_interface *intf);
46*4882a593Smuzhiyun static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
47*4882a593Smuzhiyun
/*
 * Read a single DME attribute from this interface's peer via the SVC,
 * using the don't-care selector index.  Returns 0 on success or a
 * negative errno from the SVC operation.
 */
static int gb_interface_dme_attr_get(struct gb_interface *intf,
				     u16 attr, u32 *val)
{
	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
				   attr, DME_SELECTOR_INDEX_NULL, val);
}
54*4882a593Smuzhiyun
gb_interface_read_ara_dme(struct gb_interface * intf)55*4882a593Smuzhiyun static int gb_interface_read_ara_dme(struct gb_interface *intf)
56*4882a593Smuzhiyun {
57*4882a593Smuzhiyun u32 sn0, sn1;
58*4882a593Smuzhiyun int ret;
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun /*
61*4882a593Smuzhiyun * Unless this is a Toshiba bridge, bail out until we have defined
62*4882a593Smuzhiyun * standard GMP attributes.
63*4882a593Smuzhiyun */
64*4882a593Smuzhiyun if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
65*4882a593Smuzhiyun dev_err(&intf->dev, "unknown manufacturer %08x\n",
66*4882a593Smuzhiyun intf->ddbl1_manufacturer_id);
67*4882a593Smuzhiyun return -ENODEV;
68*4882a593Smuzhiyun }
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
71*4882a593Smuzhiyun &intf->vendor_id);
72*4882a593Smuzhiyun if (ret)
73*4882a593Smuzhiyun return ret;
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
76*4882a593Smuzhiyun &intf->product_id);
77*4882a593Smuzhiyun if (ret)
78*4882a593Smuzhiyun return ret;
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
81*4882a593Smuzhiyun if (ret)
82*4882a593Smuzhiyun return ret;
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
85*4882a593Smuzhiyun if (ret)
86*4882a593Smuzhiyun return ret;
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun intf->serial_number = (u64)sn1 << 32 | sn0;
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun return 0;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun
gb_interface_read_dme(struct gb_interface * intf)93*4882a593Smuzhiyun static int gb_interface_read_dme(struct gb_interface *intf)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun int ret;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun /* DME attributes have already been read */
98*4882a593Smuzhiyun if (intf->dme_read)
99*4882a593Smuzhiyun return 0;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
102*4882a593Smuzhiyun &intf->ddbl1_manufacturer_id);
103*4882a593Smuzhiyun if (ret)
104*4882a593Smuzhiyun return ret;
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
107*4882a593Smuzhiyun &intf->ddbl1_product_id);
108*4882a593Smuzhiyun if (ret)
109*4882a593Smuzhiyun return ret;
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
112*4882a593Smuzhiyun intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
113*4882a593Smuzhiyun intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
114*4882a593Smuzhiyun intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun ret = gb_interface_read_ara_dme(intf);
118*4882a593Smuzhiyun if (ret)
119*4882a593Smuzhiyun return ret;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun intf->dme_read = true;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun return 0;
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun
gb_interface_route_create(struct gb_interface * intf)126*4882a593Smuzhiyun static int gb_interface_route_create(struct gb_interface *intf)
127*4882a593Smuzhiyun {
128*4882a593Smuzhiyun struct gb_svc *svc = intf->hd->svc;
129*4882a593Smuzhiyun u8 intf_id = intf->interface_id;
130*4882a593Smuzhiyun u8 device_id;
131*4882a593Smuzhiyun int ret;
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun /* Allocate an interface device id. */
134*4882a593Smuzhiyun ret = ida_simple_get(&svc->device_id_map,
135*4882a593Smuzhiyun GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
136*4882a593Smuzhiyun GFP_KERNEL);
137*4882a593Smuzhiyun if (ret < 0) {
138*4882a593Smuzhiyun dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
139*4882a593Smuzhiyun return ret;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun device_id = ret;
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun ret = gb_svc_intf_device_id(svc, intf_id, device_id);
144*4882a593Smuzhiyun if (ret) {
145*4882a593Smuzhiyun dev_err(&intf->dev, "failed to set device id %u: %d\n",
146*4882a593Smuzhiyun device_id, ret);
147*4882a593Smuzhiyun goto err_ida_remove;
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun /* FIXME: Hard-coded AP device id. */
151*4882a593Smuzhiyun ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
152*4882a593Smuzhiyun intf_id, device_id);
153*4882a593Smuzhiyun if (ret) {
154*4882a593Smuzhiyun dev_err(&intf->dev, "failed to create route: %d\n", ret);
155*4882a593Smuzhiyun goto err_svc_id_free;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun intf->device_id = device_id;
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun return 0;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun err_svc_id_free:
163*4882a593Smuzhiyun /*
164*4882a593Smuzhiyun * XXX Should we tell SVC that this id doesn't belong to interface
165*4882a593Smuzhiyun * XXX anymore.
166*4882a593Smuzhiyun */
167*4882a593Smuzhiyun err_ida_remove:
168*4882a593Smuzhiyun ida_simple_remove(&svc->device_id_map, device_id);
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun return ret;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
gb_interface_route_destroy(struct gb_interface * intf)173*4882a593Smuzhiyun static void gb_interface_route_destroy(struct gb_interface *intf)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun struct gb_svc *svc = intf->hd->svc;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
178*4882a593Smuzhiyun return;
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
181*4882a593Smuzhiyun ida_simple_remove(&svc->device_id_map, intf->device_id);
182*4882a593Smuzhiyun intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun /* Locking: Caller holds the interface mutex. */
gb_interface_legacy_mode_switch(struct gb_interface * intf)186*4882a593Smuzhiyun static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun int ret;
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun dev_info(&intf->dev, "legacy mode switch detected\n");
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun /* Mark as disconnected to prevent I/O during disable. */
193*4882a593Smuzhiyun intf->disconnected = true;
194*4882a593Smuzhiyun gb_interface_disable(intf);
195*4882a593Smuzhiyun intf->disconnected = false;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun ret = gb_interface_enable(intf);
198*4882a593Smuzhiyun if (ret) {
199*4882a593Smuzhiyun dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
200*4882a593Smuzhiyun gb_interface_deactivate(intf);
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun return ret;
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun
/*
 * Handle a mailbox event reported by the SVC for this interface.
 *
 * A mailbox write is either the completion signal for an ongoing mode
 * switch or, on quirky modules, the trigger for a legacy mode switch.
 * Anything else (a UniPro error, an unexpected mailbox value, or an event
 * with no mode switch in progress) is treated as fatal: the interface is
 * disabled and deactivated.
 */
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	/* Quirky modules signal mode switches directly via the mailbox. */
	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	/* Wake the mode-switch worker waiting for this event. */
	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}
250*4882a593Smuzhiyun
/*
 * Worker carrying out a requested mode switch.
 *
 * The interface is disabled while the control connection is kept alive
 * via an extra reference, then we wait for the module to signal
 * completion through the SVC mailbox before re-enumerating the interface.
 * The interface reference taken in gb_interface_request_mode_switch() is
 * dropped on every exit path.
 */
static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);
	/* Make sure interface is still enabled. */
	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control device for mode switch and make sure to get an
	 * extra reference before it goes away during interface disable.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	/* Wait for the mailbox event signalling switch completion. */
	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	/* Drop the reference taken when the switch was requested. */
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}
320*4882a593Smuzhiyun
gb_interface_request_mode_switch(struct gb_interface * intf)321*4882a593Smuzhiyun int gb_interface_request_mode_switch(struct gb_interface *intf)
322*4882a593Smuzhiyun {
323*4882a593Smuzhiyun int ret = 0;
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun mutex_lock(&intf->mutex);
326*4882a593Smuzhiyun if (intf->mode_switch) {
327*4882a593Smuzhiyun ret = -EBUSY;
328*4882a593Smuzhiyun goto out_unlock;
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun intf->mode_switch = true;
332*4882a593Smuzhiyun reinit_completion(&intf->mode_switch_completion);
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun /*
335*4882a593Smuzhiyun * Get a reference to the interface device, which will be put once the
336*4882a593Smuzhiyun * mode switch is complete.
337*4882a593Smuzhiyun */
338*4882a593Smuzhiyun get_device(&intf->dev);
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
341*4882a593Smuzhiyun put_device(&intf->dev);
342*4882a593Smuzhiyun ret = -EBUSY;
343*4882a593Smuzhiyun goto out_unlock;
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun out_unlock:
347*4882a593Smuzhiyun mutex_unlock(&intf->mutex);
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun return ret;
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun /*
354*4882a593Smuzhiyun * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
355*4882a593Smuzhiyun * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and
356*4882a593Smuzhiyun * clear it after reading a non-zero value from it.
357*4882a593Smuzhiyun *
358*4882a593Smuzhiyun * FIXME: This is module-hardware dependent and needs to be extended for every
359*4882a593Smuzhiyun * type of module we want to support.
360*4882a593Smuzhiyun */
/*
 * Read the module's init-status DME attribute, derive the boot-stage
 * quirks from it and write the attribute back to zero.
 *
 * Returns 0 on success, -ENODEV if the init status is zero (module not
 * done initializing), or a negative errno from the DME accesses.
 */
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * ES2 bridges use T_TstSrcIncrement for the init status.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

	/*
	 * A nonzero init status indicates the module has finished
	 * initializing.
	 */
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

	/*
	 * Extract the init status.
	 *
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

	/*
	 * Check if the interface is executing the quirky ES3 bootrom that,
	 * for example, requires E2EFC, CSD and CSV to be disabled.
	 */
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
			 GB_INTERFACE_QUIRK_FORCED_DISABLE |
			 GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
			 GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	/* Set or clear the boot-stage quirks based on what is running. */
	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 Loader doesn't support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

	/* Clear the init status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}
438*4882a593Smuzhiyun
439*4882a593Smuzhiyun /* interface sysfs attributes */
/*
 * Generate a read-only sysfs show function (and DEVICE_ATTR_RO) for a
 * struct gb_interface field, formatted with the given printf conversion.
 * No comments inside the macro body: a comment would break the
 * line-continuation backslashes.
 */
#define gb_interface_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_interface *intf = to_gb_interface(dev);		\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}									\
static DEVICE_ATTR_RO(field)

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");
456*4882a593Smuzhiyun
voltage_now_show(struct device * dev,struct device_attribute * attr,char * buf)457*4882a593Smuzhiyun static ssize_t voltage_now_show(struct device *dev,
458*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
459*4882a593Smuzhiyun {
460*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
461*4882a593Smuzhiyun int ret;
462*4882a593Smuzhiyun u32 measurement;
463*4882a593Smuzhiyun
464*4882a593Smuzhiyun ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
465*4882a593Smuzhiyun GB_SVC_PWRMON_TYPE_VOL,
466*4882a593Smuzhiyun &measurement);
467*4882a593Smuzhiyun if (ret) {
468*4882a593Smuzhiyun dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
469*4882a593Smuzhiyun return ret;
470*4882a593Smuzhiyun }
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun return sprintf(buf, "%u\n", measurement);
473*4882a593Smuzhiyun }
474*4882a593Smuzhiyun static DEVICE_ATTR_RO(voltage_now);
475*4882a593Smuzhiyun
current_now_show(struct device * dev,struct device_attribute * attr,char * buf)476*4882a593Smuzhiyun static ssize_t current_now_show(struct device *dev,
477*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
478*4882a593Smuzhiyun {
479*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
480*4882a593Smuzhiyun int ret;
481*4882a593Smuzhiyun u32 measurement;
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
484*4882a593Smuzhiyun GB_SVC_PWRMON_TYPE_CURR,
485*4882a593Smuzhiyun &measurement);
486*4882a593Smuzhiyun if (ret) {
487*4882a593Smuzhiyun dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
488*4882a593Smuzhiyun return ret;
489*4882a593Smuzhiyun }
490*4882a593Smuzhiyun
491*4882a593Smuzhiyun return sprintf(buf, "%u\n", measurement);
492*4882a593Smuzhiyun }
493*4882a593Smuzhiyun static DEVICE_ATTR_RO(current_now);
494*4882a593Smuzhiyun
power_now_show(struct device * dev,struct device_attribute * attr,char * buf)495*4882a593Smuzhiyun static ssize_t power_now_show(struct device *dev,
496*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
497*4882a593Smuzhiyun {
498*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
499*4882a593Smuzhiyun int ret;
500*4882a593Smuzhiyun u32 measurement;
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
503*4882a593Smuzhiyun GB_SVC_PWRMON_TYPE_PWR,
504*4882a593Smuzhiyun &measurement);
505*4882a593Smuzhiyun if (ret) {
506*4882a593Smuzhiyun dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
507*4882a593Smuzhiyun return ret;
508*4882a593Smuzhiyun }
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun return sprintf(buf, "%u\n", measurement);
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun static DEVICE_ATTR_RO(power_now);
513*4882a593Smuzhiyun
power_state_show(struct device * dev,struct device_attribute * attr,char * buf)514*4882a593Smuzhiyun static ssize_t power_state_show(struct device *dev,
515*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
516*4882a593Smuzhiyun {
517*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun if (intf->active)
520*4882a593Smuzhiyun return scnprintf(buf, PAGE_SIZE, "on\n");
521*4882a593Smuzhiyun else
522*4882a593Smuzhiyun return scnprintf(buf, PAGE_SIZE, "off\n");
523*4882a593Smuzhiyun }
524*4882a593Smuzhiyun
power_state_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)525*4882a593Smuzhiyun static ssize_t power_state_store(struct device *dev,
526*4882a593Smuzhiyun struct device_attribute *attr, const char *buf,
527*4882a593Smuzhiyun size_t len)
528*4882a593Smuzhiyun {
529*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
530*4882a593Smuzhiyun bool activate;
531*4882a593Smuzhiyun int ret = 0;
532*4882a593Smuzhiyun
533*4882a593Smuzhiyun if (kstrtobool(buf, &activate))
534*4882a593Smuzhiyun return -EINVAL;
535*4882a593Smuzhiyun
536*4882a593Smuzhiyun mutex_lock(&intf->mutex);
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun if (activate == intf->active)
539*4882a593Smuzhiyun goto unlock;
540*4882a593Smuzhiyun
541*4882a593Smuzhiyun if (activate) {
542*4882a593Smuzhiyun ret = gb_interface_activate(intf);
543*4882a593Smuzhiyun if (ret) {
544*4882a593Smuzhiyun dev_err(&intf->dev,
545*4882a593Smuzhiyun "failed to activate interface: %d\n", ret);
546*4882a593Smuzhiyun goto unlock;
547*4882a593Smuzhiyun }
548*4882a593Smuzhiyun
549*4882a593Smuzhiyun ret = gb_interface_enable(intf);
550*4882a593Smuzhiyun if (ret) {
551*4882a593Smuzhiyun dev_err(&intf->dev,
552*4882a593Smuzhiyun "failed to enable interface: %d\n", ret);
553*4882a593Smuzhiyun gb_interface_deactivate(intf);
554*4882a593Smuzhiyun goto unlock;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun } else {
557*4882a593Smuzhiyun gb_interface_disable(intf);
558*4882a593Smuzhiyun gb_interface_deactivate(intf);
559*4882a593Smuzhiyun }
560*4882a593Smuzhiyun
561*4882a593Smuzhiyun unlock:
562*4882a593Smuzhiyun mutex_unlock(&intf->mutex);
563*4882a593Smuzhiyun
564*4882a593Smuzhiyun if (ret)
565*4882a593Smuzhiyun return ret;
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun return len;
568*4882a593Smuzhiyun }
569*4882a593Smuzhiyun static DEVICE_ATTR_RW(power_state);
570*4882a593Smuzhiyun
gb_interface_type_string(struct gb_interface * intf)571*4882a593Smuzhiyun static const char *gb_interface_type_string(struct gb_interface *intf)
572*4882a593Smuzhiyun {
573*4882a593Smuzhiyun static const char * const types[] = {
574*4882a593Smuzhiyun [GB_INTERFACE_TYPE_INVALID] = "invalid",
575*4882a593Smuzhiyun [GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
576*4882a593Smuzhiyun [GB_INTERFACE_TYPE_DUMMY] = "dummy",
577*4882a593Smuzhiyun [GB_INTERFACE_TYPE_UNIPRO] = "unipro",
578*4882a593Smuzhiyun [GB_INTERFACE_TYPE_GREYBUS] = "greybus",
579*4882a593Smuzhiyun };
580*4882a593Smuzhiyun
581*4882a593Smuzhiyun return types[intf->type];
582*4882a593Smuzhiyun }
583*4882a593Smuzhiyun
interface_type_show(struct device * dev,struct device_attribute * attr,char * buf)584*4882a593Smuzhiyun static ssize_t interface_type_show(struct device *dev,
585*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
586*4882a593Smuzhiyun {
587*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun return sprintf(buf, "%s\n", gb_interface_type_string(intf));
590*4882a593Smuzhiyun }
591*4882a593Smuzhiyun static DEVICE_ATTR_RO(interface_type);
592*4882a593Smuzhiyun
/* DDBL1 ids: shown for unipro and greybus interface types. */
static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

/* GMP ids and serial number: shown for greybus interfaces only. */
static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

/* Power-monitor samples and power-state control. */
static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

/* Attributes present for every interface type. */
static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};
619*4882a593Smuzhiyun
interface_unipro_is_visible(struct kobject * kobj,struct attribute * attr,int n)620*4882a593Smuzhiyun static umode_t interface_unipro_is_visible(struct kobject *kobj,
621*4882a593Smuzhiyun struct attribute *attr, int n)
622*4882a593Smuzhiyun {
623*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
624*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
625*4882a593Smuzhiyun
626*4882a593Smuzhiyun switch (intf->type) {
627*4882a593Smuzhiyun case GB_INTERFACE_TYPE_UNIPRO:
628*4882a593Smuzhiyun case GB_INTERFACE_TYPE_GREYBUS:
629*4882a593Smuzhiyun return attr->mode;
630*4882a593Smuzhiyun default:
631*4882a593Smuzhiyun return 0;
632*4882a593Smuzhiyun }
633*4882a593Smuzhiyun }
634*4882a593Smuzhiyun
interface_greybus_is_visible(struct kobject * kobj,struct attribute * attr,int n)635*4882a593Smuzhiyun static umode_t interface_greybus_is_visible(struct kobject *kobj,
636*4882a593Smuzhiyun struct attribute *attr, int n)
637*4882a593Smuzhiyun {
638*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
639*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
640*4882a593Smuzhiyun
641*4882a593Smuzhiyun switch (intf->type) {
642*4882a593Smuzhiyun case GB_INTERFACE_TYPE_GREYBUS:
643*4882a593Smuzhiyun return attr->mode;
644*4882a593Smuzhiyun default:
645*4882a593Smuzhiyun return 0;
646*4882a593Smuzhiyun }
647*4882a593Smuzhiyun }
648*4882a593Smuzhiyun
interface_power_is_visible(struct kobject * kobj,struct attribute * attr,int n)649*4882a593Smuzhiyun static umode_t interface_power_is_visible(struct kobject *kobj,
650*4882a593Smuzhiyun struct attribute *attr, int n)
651*4882a593Smuzhiyun {
652*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
653*4882a593Smuzhiyun struct gb_interface *intf = to_gb_interface(dev);
654*4882a593Smuzhiyun
655*4882a593Smuzhiyun switch (intf->type) {
656*4882a593Smuzhiyun case GB_INTERFACE_TYPE_UNIPRO:
657*4882a593Smuzhiyun case GB_INTERFACE_TYPE_GREYBUS:
658*4882a593Smuzhiyun return attr->mode;
659*4882a593Smuzhiyun default:
660*4882a593Smuzhiyun return 0;
661*4882a593Smuzhiyun }
662*4882a593Smuzhiyun }
663*4882a593Smuzhiyun
static const struct attribute_group interface_unipro_group = {
	.is_visible	= interface_unipro_is_visible,
	.attrs		= interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible	= interface_greybus_is_visible,
	.attrs		= interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible	= interface_power_is_visible,
	.attrs		= interface_power_attrs,
};

/* Common attributes are always visible (no .is_visible callback). */
static const struct attribute_group interface_common_group = {
	.attrs		= interface_common_attrs,
};

/* All interface attribute groups, registered with the interface device. */
static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};
690*4882a593Smuzhiyun
/* Device-model release callback: free the interface structure. */
static void gb_interface_release(struct device *dev)
{
	struct gb_interface *iface = to_gb_interface(dev);

	trace_gb_interface_release(iface);
	kfree(iface);
}
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun #ifdef CONFIG_PM
/*
 * Runtime-suspend callback: quiesce the interface, hibernate its UniPro
 * link and finally gate its reference clock.
 */
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;

	/* Tell the interface to prepare for the upcoming suspend. */
	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow interface to enter standby before disabling refclk */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	/* Undo the suspend preparation since the suspend request failed. */
	gb_control_interface_hibernate_abort(intf->control);

	return ret;
}
732*4882a593Smuzhiyun
/*
 * Runtime-resume callback: ungate the reference clock, resume the
 * interface through the SVC and then resume the control connection.
 */
static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	return 0;
}
753*4882a593Smuzhiyun
/*
 * Runtime-idle callback: don't suspend immediately, (re)arm the
 * autosuspend timer instead.
 */
static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
761*4882a593Smuzhiyun #endif
762*4882a593Smuzhiyun
/* Runtime PM only; no system-sleep callbacks are provided. */
static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};
767*4882a593Smuzhiyun
/* Device type for interface devices created by gb_interface_create(). */
struct device_type greybus_interface_type = {
	.name = "greybus_interface",
	.release = gb_interface_release,
	.pm = &gb_interface_pm_ops,
};
773*4882a593Smuzhiyun
/*
 * A Greybus module represents a user-replaceable component on a GMP
 * phone. An interface is the physical connection on that module. A
 * module may have more than one interface.
 *
 * Create a gb_interface structure to represent a discovered interface.
 * The position of interface within the Endo is encoded in "interface_id"
 * argument.
 *
 * Returns a pointer to the new interface or a null pointer if a
 * failure occurs due to memory exhaustion.
 */
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id)
{
	struct gb_host_device *hd = module->hd;
	struct gb_interface *intf;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return NULL;

	intf->hd = hd;		/* XXX refcount? */
	intf->module = module;
	intf->interface_id = interface_id;
	INIT_LIST_HEAD(&intf->bundles);
	INIT_LIST_HEAD(&intf->manifest_descs);
	mutex_init(&intf->mutex);
	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
	init_completion(&intf->mode_switch_completion);

	/* Invalid device id to start with */
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;

	/* Set up the device before device_initialize() takes a reference. */
	intf->dev.parent = &module->dev;
	intf->dev.bus = &greybus_bus_type;
	intf->dev.type = &greybus_interface_type;
	intf->dev.groups = interface_groups;
	intf->dev.dma_mask = module->dev.dma_mask;
	device_initialize(&intf->dev);
	/* Name the interface after its parent module with the id appended. */
	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
		     interface_id);

	pm_runtime_set_autosuspend_delay(&intf->dev,
					 GB_INTERFACE_AUTOSUSPEND_MS);

	trace_gb_interface_create(intf);

	return intf;
}
824*4882a593Smuzhiyun
/* Ask the SVC to enable or disable the interface's V_SYS supply. */
static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
{
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_vsys_set(intf->hd->svc, intf->interface_id, enable);
	if (!ret)
		return 0;

	dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
	return ret;
}
840*4882a593Smuzhiyun
/* Ask the SVC to enable or disable the interface's reference clock. */
static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
{
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_refclk_set(intf->hd->svc, intf->interface_id,
				     enable);
	if (!ret)
		return 0;

	dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
	return ret;
}
856*4882a593Smuzhiyun
/* Ask the SVC to enable or disable the interface's UniPro port. */
static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
{
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_unipro_set(intf->hd->svc, intf->interface_id,
				     enable);
	if (!ret)
		return 0;

	dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
	return ret;
}
872*4882a593Smuzhiyun
/*
 * Request interface activation from the SVC and translate the reported
 * type to the corresponding gb_interface_type.
 *
 * Only Greybus-type interfaces are currently supported; dummy, bare
 * UniPro and unknown types are all reported as -ENODEV, with *intf_type
 * set so callers can tell them apart.
 */
static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}
909*4882a593Smuzhiyun
gb_interface_hibernate_link(struct gb_interface * intf)910*4882a593Smuzhiyun static int gb_interface_hibernate_link(struct gb_interface *intf)
911*4882a593Smuzhiyun {
912*4882a593Smuzhiyun struct gb_svc *svc = intf->hd->svc;
913*4882a593Smuzhiyun
914*4882a593Smuzhiyun return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
915*4882a593Smuzhiyun }
916*4882a593Smuzhiyun
/*
 * Power up and activate an interface, determining its type.
 *
 * On success the interface is marked active; on failure everything
 * brought up so far is torn down again in reverse order.
 */
static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	/* Don't try to power up an interface that is gone. */
	if (intf->ejected || intf->removed)
		return -ENODEV;

	/* Power-up sequence: V_SYS, then REFCLK, then UniPro. */
	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		/*
		 * Hibernate the link as part of the unwind only for types
		 * that brought the UniPro link up.
		 */
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}
975*4882a593Smuzhiyun
976*4882a593Smuzhiyun /*
977*4882a593Smuzhiyun * At present, we assume a UniPro-only module to be a Greybus module that
978*4882a593Smuzhiyun * failed to send its mailbox poke. There is some reason to believe that this
979*4882a593Smuzhiyun * is because of a bug in the ES3 bootrom.
980*4882a593Smuzhiyun *
981*4882a593Smuzhiyun * FIXME: Check if this is a Toshiba bridge before retrying?
982*4882a593Smuzhiyun */
_gb_interface_activate_es3_hack(struct gb_interface * intf,enum gb_interface_type * type)983*4882a593Smuzhiyun static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
984*4882a593Smuzhiyun enum gb_interface_type *type)
985*4882a593Smuzhiyun {
986*4882a593Smuzhiyun int retries = 3;
987*4882a593Smuzhiyun int ret;
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun while (retries--) {
990*4882a593Smuzhiyun ret = _gb_interface_activate(intf, type);
991*4882a593Smuzhiyun if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
992*4882a593Smuzhiyun continue;
993*4882a593Smuzhiyun
994*4882a593Smuzhiyun break;
995*4882a593Smuzhiyun }
996*4882a593Smuzhiyun
997*4882a593Smuzhiyun return ret;
998*4882a593Smuzhiyun }
999*4882a593Smuzhiyun
/*
 * Activate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_activate(struct gb_interface *intf)
{
	enum gb_interface_type type;
	int ret;

	switch (intf->type) {
	case GB_INTERFACE_TYPE_INVALID:
	case GB_INTERFACE_TYPE_GREYBUS:
		/* Use the retrying activation to work around the ES3 bug. */
		ret = _gb_interface_activate_es3_hack(intf, &type);
		break;
	default:
		ret = _gb_interface_activate(intf, &type);
	}

	/* Make sure type is detected correctly during reactivation. */
	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
		if (type != intf->type) {
			dev_err(&intf->dev, "failed to detect interface type\n");

			/* Deactivate again if activation had succeeded. */
			if (!ret)
				gb_interface_deactivate(intf);

			return -EIO;
		}
	} else {
		/* First activation determines the interface type. */
		intf->type = type;
	}

	return ret;
}
1035*4882a593Smuzhiyun
/*
 * Deactivate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_deactivate(struct gb_interface *intf)
{
	if (!intf->active)
		return;

	trace_gb_interface_deactivate(intf);

	/* Abort any ongoing mode switch. */
	if (intf->mode_switch)
		complete(&intf->mode_switch_completion);

	/* Tear down in reverse order of _gb_interface_activate(). */
	gb_interface_route_destroy(intf);
	gb_interface_hibernate_link(intf);
	gb_interface_unipro_set(intf, false);
	gb_interface_refclk_set(intf, false);
	gb_interface_vsys_set(intf, false);

	intf->active = false;
}
1060*4882a593Smuzhiyun
/*
 * Enable an interface by enabling its control connection, fetching the
 * manifest and other information over it, and finally registering its child
 * devices.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_enable(struct gb_interface *intf)
{
	struct gb_control *control;
	struct gb_bundle *bundle, *tmp;
	int ret, size;
	void *manifest;

	ret = gb_interface_read_and_clear_init_status(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
		return ret;
	}

	/* Establish control connection */
	control = gb_control_create(intf);
	if (IS_ERR(control)) {
		dev_err(&intf->dev, "failed to create control device: %ld\n",
			PTR_ERR(control));
		return PTR_ERR(control);
	}
	intf->control = control;

	ret = gb_control_enable(intf->control);
	if (ret)
		goto err_put_control;

	/* Get manifest size using control protocol on CPort */
	size = gb_control_get_manifest_size_operation(intf);
	if (size <= 0) {
		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);

		/* A negative size is an error code; zero maps to -EINVAL. */
		if (size)
			ret = size;
		else
			ret = -EINVAL;

		goto err_disable_control;
	}

	manifest = kmalloc(size, GFP_KERNEL);
	if (!manifest) {
		ret = -ENOMEM;
		goto err_disable_control;
	}

	/* Get manifest using control protocol on CPort */
	ret = gb_control_get_manifest_operation(intf, manifest, size);
	if (ret) {
		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
		goto err_free_manifest;
	}

	/*
	 * Parse the manifest and build up our data structures representing
	 * what's in it.
	 */
	if (!gb_manifest_parse(intf, manifest, size)) {
		dev_err(&intf->dev, "failed to parse manifest\n");
		ret = -EINVAL;
		goto err_destroy_bundles;
	}

	ret = gb_control_get_bundle_versions(intf->control);
	if (ret)
		goto err_destroy_bundles;

	/* Register the control device and any bundles */
	ret = gb_control_add(intf->control);
	if (ret)
		goto err_destroy_bundles;

	/* Hold the interface active until enable is complete. */
	pm_runtime_use_autosuspend(&intf->dev);
	pm_runtime_get_noresume(&intf->dev);
	pm_runtime_set_active(&intf->dev);
	pm_runtime_enable(&intf->dev);

	/*
	 * A bundle that fails to register is destroyed, but this does not
	 * abort enabling the interface.
	 */
	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
		ret = gb_bundle_add(bundle);
		if (ret) {
			gb_bundle_destroy(bundle);
			continue;
		}
	}

	kfree(manifest);

	intf->enabled = true;

	/* Balances the get_noresume above; may trigger autosuspend. */
	pm_runtime_put(&intf->dev);

	trace_gb_interface_enable(intf);

	return 0;

err_destroy_bundles:
	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
		gb_bundle_destroy(bundle);
err_free_manifest:
	kfree(manifest);
err_disable_control:
	gb_control_disable(intf->control);
err_put_control:
	gb_control_put(intf->control);
	intf->control = NULL;

	return ret;
}
1175*4882a593Smuzhiyun
/*
 * Disable an interface and destroy its bundles.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_disable(struct gb_interface *intf)
{
	struct gb_bundle *bundle;
	struct gb_bundle *next;

	if (!intf->enabled)
		return;

	trace_gb_interface_disable(intf);

	/* Keep the interface resumed while tearing it down. */
	pm_runtime_get_sync(&intf->dev);

	/* Set disconnected flag to avoid I/O during connection tear down. */
	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
		intf->disconnected = true;

	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
		gb_bundle_destroy(bundle);

	/*
	 * Notify the interface of the impending deactivation, unless it is
	 * mode switching or already disconnected.
	 */
	if (!intf->mode_switch && !intf->disconnected)
		gb_control_interface_deactivate_prepare(intf->control);

	gb_control_del(intf->control);
	gb_control_disable(intf->control);
	gb_control_put(intf->control);
	intf->control = NULL;

	intf->enabled = false;

	/* Undo the runtime-PM state set up in gb_interface_enable(). */
	pm_runtime_disable(&intf->dev);
	pm_runtime_set_suspended(&intf->dev);
	pm_runtime_dont_use_autosuspend(&intf->dev);
	pm_runtime_put_noidle(&intf->dev);
}
1215*4882a593Smuzhiyun
/* Register an interface with the driver core. */
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));

	/* Log the interface identifiers; Greybus also reports GMP VID/PID. */
	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		fallthrough;
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun /* Deregister an interface. */
gb_interface_del(struct gb_interface * intf)1250*4882a593Smuzhiyun void gb_interface_del(struct gb_interface *intf)
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun if (device_is_registered(&intf->dev)) {
1253*4882a593Smuzhiyun trace_gb_interface_del(intf);
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun device_del(&intf->dev);
1256*4882a593Smuzhiyun dev_info(&intf->dev, "Interface removed\n");
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun
/*
 * Drop a reference to the interface device; the last put frees it via
 * gb_interface_release().
 */
void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}
1264