// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus "AP" USB driver for "ES2" controller chips
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
#include <linux/kthread.h>
#include <linux/sizes.h>
#include <linux/usb.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/greybus.h>
#include <asm/unaligned.h>

#include "arpc.h"
#include "greybus_trace.h"


/* Default timeout for USB vendor requests. */
#define ES2_USB_CTRL_TIMEOUT 500

/* Default timeout for ARPC CPort requests */
#define ES2_ARPC_CPORT_TIMEOUT 500

/* Fixed CPort numbers */
#define ES2_CPORT_CDSI0 16
#define ES2_CPORT_CDSI1 17

/* Memory sizes for the buffers sent to/from the ES2 controller */
#define ES2_GBUF_MSG_SIZE_MAX 2048

/* Memory sizes for the ARPC buffers */
#define ARPC_OUT_SIZE_MAX U16_MAX
#define ARPC_IN_SIZE_MAX 128

static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);

#define APB1_LOG_SIZE SZ_16K

/*
 * Number of CPort IN urbs in flight at any point in time.
 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
 * flight.
 */
#define NUM_CPORT_IN_URB 4

/*
 * Number of CPort OUT urbs in flight at any point in time.
 * Adjust if we get messages saying we are out of urbs in the system log.
 */
#define NUM_CPORT_OUT_URB 8

/*
 * Number of ARPC in urbs in flight at any point in time.
 */
#define NUM_ARPC_IN_URB 2

/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};
/**
 * struct es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are attached to.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: true if the CDSI1 CPort is currently allocated
 *
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives a unique id to ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	int arpc_id_cycle;
	spinlock_t arpc_lock;
	struct list_head arpcs;
};

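/*
 * struct arpc - outstanding APBridgeA RPC tracking
 * @list: entry on the es2_ap_dev arpcs list while the RPC is active
 * @req: request message sent to the bridge
 * @resp: response copied back by arpc_in_callback()
 * @response_received: completed when the matching response arrives
 * @active: true while the RPC is linked on the arpcs list
 */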
struct arpc {
	struct list_head list;
	struct arpc_request_message *req;
	struct arpc_response_message *resp;
	struct completion response_received;
	bool active;
};

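/* The es2 state lives in the host device's hd_priv area (see hd_priv_size). */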
static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}

static void cport_out_callback(struct urb *urb);
static void usb_log_enable(struct es2_ap_dev *es2);
static void usb_log_disable(struct es2_ap_dev *es2);
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout);

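/*
 * Send a vendor control request to the bridge synchronously. The payload is
 * copied into a kmalloc'ed bounce buffer since usb_control_msg() needs a
 * DMA-able buffer (not stack or constant data).
 */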
static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	u8 *data;
	int retval;

	data = kmemdup(req, size, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 cmd,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
	else
		retval = 0;

	kfree(data);
	return retval;
}

static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}

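/*
 * Asynchronous variant: the setup packet and payload share one allocation,
 * freed together with the urb in ap_urb_complete(), so this can be used from
 * atomic context.
 */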
static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}

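/*
 * gb_hd_driver ->output callback: route an AP -> APBridgeA request to the
 * asynchronous or synchronous sender depending on the caller's context.
 */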
static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
		  bool async)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	if (async)
		return output_async(es2, req, size, cmd);

	return output_sync(es2, req, size, cmd);
}

static int es2_cport_in_enable(struct es2_ap_dev *es2,
			       struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_cport_in_disable(struct es2_ap_dev *es2,
				 struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}
}

static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}

static void es2_arpc_in_disable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}
}

static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
{
	struct urb *urb = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);

	/* Look in our pool of allocated urbs first, as that's the "fastest" */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (!es2->cport_out_urb_busy[i] &&
		    !es2->cport_out_urb_cancelled[i]) {
			es2->cport_out_urb_busy[i] = true;
			urb = es2->cport_out_urb[i];
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
	if (urb)
		return urb;

	/*
	 * Crap, pool is empty, log a debug message and go allocate one
	 * dynamically as we have to succeed.
	 */
	dev_dbg(&es2->usb_dev->dev,
		"No free CPort OUT urbs, having to dynamically allocate one!\n");
	return usb_alloc_urb(0, gfp_mask);
}

static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}

/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead. Only pad[0] is used, which is
 * why apb_get_cport_count() clamps the CPort count to U8_MAX.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}

/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent. Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}

/*
 * Cannot be called in atomic context: usb_kill_urb() may sleep.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	usb_kill_urb(urb);

	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}

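/*
 * CPort allocation: CDSI0 and CDSI1 are fixed CPorts reserved at probe time.
 * Offloaded connections flagged for CDSI1 get that CPort directly; everything
 * else comes out of the host device's CPort id map.
 */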
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);

	switch (cport_id) {
	case ES2_CPORT_CDSI1:
		es2->cdsi1_in_use = false;
		return;
	}

	ida_simple_remove(&hd->cport_id_map, cport_id);
}

static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret != sizeof(*req)) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		if (ret >= 0)
			ret = -EIO;

		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}

static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_connected_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to set connected state for cport %u: %d\n",
			cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_flush_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
			      u8 phase, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_shutdown_req req;
	int result;
	int ret;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.timeout = cpu_to_le16(timeout);
	req.phase = phase;
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
			     size_t peer_space, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_quiesce_req req;
	int result;
	int ret;

	if (peer_space > U16_MAX)
		return -EINVAL;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.peer_space = cpu_to_le16(peer_space);
	req.timeout = cpu_to_le16(timeout);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}

static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_clear_req req;
	int ret;

	req.cport_id = cpu_to_le16(cport_id);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
			NULL, ES2_ARPC_CPORT_TIMEOUT);
	if (ret) {
		dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
		return ret;
	}

	return 0;
}

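/*
 * Enable/disable latency-tag tracking for a CPort on the bridge via a vendor
 * control request.
 */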
static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}

static struct gb_hd_driver es2_driver = {
	.hd_priv_size = sizeof(struct es2_ap_dev),
	.message_send = message_send,
	.message_cancel = message_cancel,
	.cport_allocate = es2_cport_allocate,
	.cport_release = es2_cport_release,
	.cport_enable = cport_enable,
	.cport_connected = es2_cport_connected,
	.cport_flush = es2_cport_flush,
	.cport_shutdown = es2_cport_shutdown,
	.cport_quiesce = es2_cport_quiesce,
	.cport_clear = es2_cport_clear,
	.latency_tag_enable = latency_tag_enable,
	.latency_tag_disable = latency_tag_disable,
	.output = output,
};

/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		fallthrough;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}

static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}

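/*
 * Completion handler for CPort bulk-in urbs: unpack the CPort id from the
 * message header, hand the data to greybus core, and resubmit the urb.
 */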
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
				  urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}

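/*
 * Completion handler for CPort bulk-out urbs: report the send status to
 * greybus core and return the urb to the pool (or free it if it was
 * dynamically allocated).
 */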
static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}

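/*
 * APBridgeA RPC (ARPC) flow: arpc_alloc() builds the request and response
 * buffers, arpc_add() assigns a cycling id and links the RPC on the arpcs
 * list, arpc_send() issues the request as a vendor control transfer, and the
 * response arrives on the ARPC bulk-in endpoint where arpc_in_callback()
 * matches it by id and completes @response_received.
 */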
static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
{
	struct arpc *rpc;

	if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
		return NULL;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	INIT_LIST_HEAD(&rpc->list);
	rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
	if (!rpc->req)
		goto err_free_rpc;

	rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
	if (!rpc->resp)
		goto err_free_req;

	rpc->req->type = type;
	rpc->req->size = cpu_to_le16(sizeof(*rpc->req) + size);
	memcpy(rpc->req->data, payload, size);

	init_completion(&rpc->response_received);

	return rpc;

err_free_req:
	kfree(rpc->req);
err_free_rpc:
	kfree(rpc);

	return NULL;
}

static void arpc_free(struct arpc *rpc)
{
	kfree(rpc->req);
	kfree(rpc->resp);
	kfree(rpc);
}

static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
{
	struct arpc *rpc;

	list_for_each_entry(rpc, &es2->arpcs, list) {
		if (rpc->req->id == id)
			return rpc;
	}

	return NULL;
}

static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
{
	rpc->active = true;
	rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
	list_add_tail(&rpc->list, &es2->arpcs);
}

static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
{
	if (rpc->active) {
		rpc->active = false;
		list_del(&rpc->list);
	}
}

static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval != le16_to_cpu(rpc->req->size)) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		if (retval > 0)
			retval = -EIO;
		return retval;
	}

	return 0;
}

static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);
	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}

static void arpc_in_callback(struct urb *urb)
{
	struct es2_ap_dev *es2 = urb->context;
	struct device *dev = &urb->dev->dev;
	int status = check_urb_status(urb);
	struct arpc *rpc;
	struct arpc_response_message *resp;
	unsigned long flags;
	int retval;

	if (status) {
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*resp)) {
		dev_err(dev, "short arpc response received\n");
		goto exit;
	}

	resp = urb->transfer_buffer;
	spin_lock_irqsave(&es2->arpc_lock, flags);
	rpc = arpc_find(es2, resp->id);
	if (!rpc) {
		dev_err(dev, "invalid arpc response id received: %u\n",
			le16_to_cpu(resp->id));
		spin_unlock_irqrestore(&es2->arpc_lock, flags);
		goto exit;
	}

	arpc_del(es2, rpc);
	memcpy(rpc->resp, resp, sizeof(*resp));
	complete(&rpc->response_received);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
}

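/*
 * APBridgeA log support: a kthread polls the bridge log over a vendor control
 * request once a second, buffers the output in a kfifo, and exposes it through
 * the "apb_log" debugfs file; "apb_log_enable" starts and stops the thread.
 */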
1078*4882a593Smuzhiyun #define APB1_LOG_MSG_SIZE 64
apb_log_get(struct es2_ap_dev * es2,char * buf)1079*4882a593Smuzhiyun static void apb_log_get(struct es2_ap_dev *es2, char *buf)
1080*4882a593Smuzhiyun {
1081*4882a593Smuzhiyun int retval;
1082*4882a593Smuzhiyun
1083*4882a593Smuzhiyun do {
1084*4882a593Smuzhiyun retval = usb_control_msg(es2->usb_dev,
1085*4882a593Smuzhiyun usb_rcvctrlpipe(es2->usb_dev, 0),
1086*4882a593Smuzhiyun GB_APB_REQUEST_LOG,
1087*4882a593Smuzhiyun USB_DIR_IN | USB_TYPE_VENDOR |
1088*4882a593Smuzhiyun USB_RECIP_INTERFACE,
1089*4882a593Smuzhiyun 0x00, 0x00,
1090*4882a593Smuzhiyun buf,
1091*4882a593Smuzhiyun APB1_LOG_MSG_SIZE,
1092*4882a593Smuzhiyun ES2_USB_CTRL_TIMEOUT);
1093*4882a593Smuzhiyun if (retval > 0)
1094*4882a593Smuzhiyun kfifo_in(&es2->apb_log_fifo, buf, retval);
1095*4882a593Smuzhiyun } while (retval > 0);
1096*4882a593Smuzhiyun }
1097*4882a593Smuzhiyun
apb_log_poll(void * data)1098*4882a593Smuzhiyun static int apb_log_poll(void *data)
1099*4882a593Smuzhiyun {
1100*4882a593Smuzhiyun struct es2_ap_dev *es2 = data;
1101*4882a593Smuzhiyun char *buf;
1102*4882a593Smuzhiyun
1103*4882a593Smuzhiyun buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
1104*4882a593Smuzhiyun if (!buf)
1105*4882a593Smuzhiyun return -ENOMEM;
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun while (!kthread_should_stop()) {
1108*4882a593Smuzhiyun msleep(1000);
1109*4882a593Smuzhiyun apb_log_get(es2, buf);
1110*4882a593Smuzhiyun }
1111*4882a593Smuzhiyun
1112*4882a593Smuzhiyun kfree(buf);
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun return 0;
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun
apb_log_read(struct file * f,char __user * buf,size_t count,loff_t * ppos)1117*4882a593Smuzhiyun static ssize_t apb_log_read(struct file *f, char __user *buf,
1118*4882a593Smuzhiyun size_t count, loff_t *ppos)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun struct es2_ap_dev *es2 = file_inode(f)->i_private;
1121*4882a593Smuzhiyun ssize_t ret;
1122*4882a593Smuzhiyun size_t copied;
1123*4882a593Smuzhiyun char *tmp_buf;
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun if (count > APB1_LOG_SIZE)
1126*4882a593Smuzhiyun count = APB1_LOG_SIZE;
1127*4882a593Smuzhiyun
1128*4882a593Smuzhiyun tmp_buf = kmalloc(count, GFP_KERNEL);
1129*4882a593Smuzhiyun if (!tmp_buf)
1130*4882a593Smuzhiyun return -ENOMEM;
1131*4882a593Smuzhiyun
1132*4882a593Smuzhiyun copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
1133*4882a593Smuzhiyun ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun kfree(tmp_buf);
1136*4882a593Smuzhiyun
1137*4882a593Smuzhiyun return ret;
1138*4882a593Smuzhiyun }
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun static const struct file_operations apb_log_fops = {
1141*4882a593Smuzhiyun .read = apb_log_read,
1142*4882a593Smuzhiyun };
1143*4882a593Smuzhiyun
usb_log_enable(struct es2_ap_dev * es2)1144*4882a593Smuzhiyun static void usb_log_enable(struct es2_ap_dev *es2)
1145*4882a593Smuzhiyun {
1146*4882a593Smuzhiyun if (!IS_ERR_OR_NULL(es2->apb_log_task))
1147*4882a593Smuzhiyun return;
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun /* get log from APB1 */
1150*4882a593Smuzhiyun es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
1151*4882a593Smuzhiyun if (IS_ERR(es2->apb_log_task))
1152*4882a593Smuzhiyun return;
1153*4882a593Smuzhiyun /* XXX We will need to rename this per APB */
1154*4882a593Smuzhiyun es2->apb_log_dentry = debugfs_create_file("apb_log", 0444,
1155*4882a593Smuzhiyun gb_debugfs_get(), es2,
1156*4882a593Smuzhiyun &apb_log_fops);
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun
usb_log_disable(struct es2_ap_dev * es2)1159*4882a593Smuzhiyun static void usb_log_disable(struct es2_ap_dev *es2)
1160*4882a593Smuzhiyun {
1161*4882a593Smuzhiyun if (IS_ERR_OR_NULL(es2->apb_log_task))
1162*4882a593Smuzhiyun return;
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun debugfs_remove(es2->apb_log_dentry);
1165*4882a593Smuzhiyun es2->apb_log_dentry = NULL;
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun kthread_stop(es2->apb_log_task);
1168*4882a593Smuzhiyun es2->apb_log_task = NULL;
1169*4882a593Smuzhiyun }
1170*4882a593Smuzhiyun
apb_log_enable_read(struct file * f,char __user * buf,size_t count,loff_t * ppos)1171*4882a593Smuzhiyun static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
1172*4882a593Smuzhiyun size_t count, loff_t *ppos)
1173*4882a593Smuzhiyun {
1174*4882a593Smuzhiyun struct es2_ap_dev *es2 = file_inode(f)->i_private;
1175*4882a593Smuzhiyun int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
1176*4882a593Smuzhiyun char tmp_buf[3];
1177*4882a593Smuzhiyun
1178*4882a593Smuzhiyun sprintf(tmp_buf, "%d\n", enable);
1179*4882a593Smuzhiyun return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun
apb_log_enable_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1182*4882a593Smuzhiyun static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
1183*4882a593Smuzhiyun size_t count, loff_t *ppos)
1184*4882a593Smuzhiyun {
1185*4882a593Smuzhiyun int enable;
1186*4882a593Smuzhiyun ssize_t retval;
1187*4882a593Smuzhiyun struct es2_ap_dev *es2 = file_inode(f)->i_private;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun retval = kstrtoint_from_user(buf, count, 10, &enable);
1190*4882a593Smuzhiyun if (retval)
1191*4882a593Smuzhiyun return retval;
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun if (enable)
1194*4882a593Smuzhiyun usb_log_enable(es2);
1195*4882a593Smuzhiyun else
1196*4882a593Smuzhiyun usb_log_disable(es2);
1197*4882a593Smuzhiyun
1198*4882a593Smuzhiyun return count;
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun static const struct file_operations apb_log_enable_fops = {
1202*4882a593Smuzhiyun .read = apb_log_enable_read,
1203*4882a593Smuzhiyun .write = apb_log_enable_write,
1204*4882a593Smuzhiyun };
1205*4882a593Smuzhiyun
apb_get_cport_count(struct usb_device * udev)1206*4882a593Smuzhiyun static int apb_get_cport_count(struct usb_device *udev)
1207*4882a593Smuzhiyun {
1208*4882a593Smuzhiyun int retval;
1209*4882a593Smuzhiyun __le16 *cport_count;
1210*4882a593Smuzhiyun
1211*4882a593Smuzhiyun cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
1212*4882a593Smuzhiyun if (!cport_count)
1213*4882a593Smuzhiyun return -ENOMEM;
1214*4882a593Smuzhiyun
	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}

/*
 * The ES2 USB Bridge device has 15 endpoints
 * 1 Control - usual USB stuff + AP -> APBridgeA messages
 * 7 Bulk IN - CPort data in
 * 7 Bulk OUT - CPort data out
 */
static int ap_probe(struct usb_interface *interface,
		    const struct usb_device_id *id)
{
	struct es2_ap_dev *es2;
	struct gb_host_device *hd;
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	__u8 ep_addr;
	int retval;
	int i;
	int num_cports;
	bool bulk_out_found = false;
	bool bulk_in_found = false;
	bool arpc_in_found = false;

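	/*
	 * Take a reference on the underlying USB device for the es2 state;
	 * the early error paths below drop it again explicitly.
	 */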
	udev = usb_get_dev(interface_to_usbdev(interface));

	num_cports = apb_get_cport_count(udev);
	if (num_cports < 0) {
		usb_put_dev(udev);
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			num_cports);
		return num_cports;
	}

	hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
			  num_cports);
	if (IS_ERR(hd)) {
		usb_put_dev(udev);
		return PTR_ERR(hd);
	}

	es2 = hd_to_es2(hd);
	es2->hd = hd;
	es2->usb_intf = interface;
	es2->usb_dev = udev;
	spin_lock_init(&es2->cport_out_urb_lock);
	INIT_KFIFO(es2->apb_log_fifo);
	usb_set_intfdata(interface, es2);

	/*
	 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
	 * dynamically.
	 */
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
	if (retval)
		goto error;
	retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
	if (retval)
		goto error;

	/* find all bulk endpoints */
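	/*
	 * The first bulk IN endpoint found carries CPort data, the second
	 * carries ARPC messages, and the first bulk OUT endpoint carries
	 * CPort data towards the bridge.  Any further bulk endpoints are
	 * reported and left unused.
	 */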
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		ep_addr = endpoint->bEndpointAddress;

		if (usb_endpoint_is_bulk_in(endpoint)) {
			if (!bulk_in_found) {
				es2->cport_in.endpoint = ep_addr;
				bulk_in_found = true;
			} else if (!arpc_in_found) {
				es2->arpc_endpoint_in = ep_addr;
				arpc_in_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk IN endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		if (usb_endpoint_is_bulk_out(endpoint)) {
			if (!bulk_out_found) {
				es2->cport_out_endpoint = ep_addr;
				bulk_out_found = true;
			} else {
				dev_warn(&udev->dev,
					 "Unused bulk OUT endpoint found: 0x%02x\n",
					 ep_addr);
			}
			continue;
		}
		dev_warn(&udev->dev,
			 "Unknown endpoint type found, address 0x%02x\n",
			 ep_addr);
	}
	if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
		dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
		retval = -ENODEV;
		goto error;
	}

	/* Allocate buffers for our cport in messages */
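	/*
	 * The IN urbs are only allocated and filled here; they are submitted
	 * later, once CPort IN traffic is enabled at the end of probe.
	 */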
	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->cport_in.urb[i] = urb;

		buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
				  buffer, ES2_GBUF_MSG_SIZE_MAX,
				  cport_in_callback, hd);

		es2->cport_in.buffer[i] = buffer;
	}

	/* Allocate buffers for ARPC in messages */
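	/*
	 * ARPC IN messages use a fixed ARPC_IN_SIZE_MAX buffer per urb.
	 * These urbs are submitted by es2_arpc_in_enable() before the host
	 * device is added below, so ARPC is usable as soon as the host
	 * device registers.
	 */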
	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		struct urb *urb;
		u8 *buffer;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}
		es2->arpc_urb[i] = urb;

		buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
		if (!buffer) {
			retval = -ENOMEM;
			goto error;
		}

		usb_fill_bulk_urb(urb, udev,
				  usb_rcvbulkpipe(udev,
						  es2->arpc_endpoint_in),
				  buffer, ARPC_IN_SIZE_MAX,
				  arpc_in_callback, es2);

		es2->arpc_buffer[i] = buffer;
	}

	/* Allocate urbs for our CPort OUT messages */
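	/*
	 * No transfer buffers are allocated for the OUT urbs; each outgoing
	 * message supplies its own buffer when an urb is filled for
	 * submission.  The busy flags track which urbs are in flight.
	 */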
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			retval = -ENOMEM;
			goto error;
		}

		es2->cport_out_urb[i] = urb;
		es2->cport_out_urb_busy[i] = false;	/* just to be anal */
	}

	/* XXX We will need to rename this per APB */
	es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
							 0644,
							 gb_debugfs_get(), es2,
							 &apb_log_enable_fops);

	INIT_LIST_HEAD(&es2->arpcs);
	spin_lock_init(&es2->arpc_lock);

	retval = es2_arpc_in_enable(es2);
	if (retval)
		goto error;

	retval = gb_hd_add(hd);
	if (retval)
		goto err_disable_arpc_in;

	retval = es2_cport_in_enable(es2, &es2->cport_in);
	if (retval)
		goto err_hd_del;

	return 0;

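/*
 * Unwind in reverse order of setup: remove the host device, disable ARPC IN
 * traffic, then release the remaining resources via es2_destroy().
 */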
err_hd_del:
	gb_hd_del(hd);
err_disable_arpc_in:
	es2_arpc_in_disable(es2);
error:
	es2_destroy(es2);

	return retval;
}

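/*
 * Tear down in the reverse order of probe: remove the host device first so
 * no new traffic is started, then stop CPort and ARPC IN traffic, and
 * finally release the es2 state via es2_destroy().
 */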
static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}

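/*
 * soft_unbind tells the USB core not to kill outstanding urbs and disable
 * endpoints before calling ->disconnect(), so the driver can quiesce CPort
 * and ARPC traffic in its own order.
 */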
static struct usb_driver es2_ap_driver = {
	.name = "es2_ap_driver",
	.probe = ap_probe,
	.disconnect = ap_disconnect,
	.id_table = id_table,
	.soft_unbind = 1,
};

module_usb_driver(es2_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");