// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * https://www.gaisler.com/products/grlib/grip.pdf
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl)					      \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	 GR_EPCTRL_BUFSZ_SCALER)
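
/*
 * For illustration, assuming a BUFSZ field value of 128 and a scaler of 8
 * (example numbers only; the real constants live in gr_udc.h), the macro
 * above evaluates as:
 *
 *	GR_BUFFER_SIZE(epctrl) == 128 * 8 == 1024 bytes
 */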

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNKNOWN";

	return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
	seq_printf(seq, "  stopped = %d\n", ep->stopped);
	seq_printf(seq, "  wedged = %d\n", ep->wedged);
	seq_printf(seq, "  callback = %d\n", ep->callback);
	seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	if (mode == 1 || mode == 3)
		seq_printf(seq, "  nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

	seq_printf(seq, "  Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, "  Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, "  Queue: empty\n\n");
		return;
	}

	seq_puts(seq, "  Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, "    %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}
	seq_puts(seq, "\n");
}
181*4882a593Smuzhiyun
gr_dfs_show(struct seq_file * seq,void * v)182*4882a593Smuzhiyun static int gr_dfs_show(struct seq_file *seq, void *v)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun struct gr_udc *dev = seq->private;
185*4882a593Smuzhiyun u32 control = gr_read32(&dev->regs->control);
186*4882a593Smuzhiyun u32 status = gr_read32(&dev->regs->status);
187*4882a593Smuzhiyun struct gr_ep *ep;
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun seq_printf(seq, "usb state = %s\n",
190*4882a593Smuzhiyun usb_state_string(dev->gadget.state));
191*4882a593Smuzhiyun seq_printf(seq, "address = %d\n",
192*4882a593Smuzhiyun (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
193*4882a593Smuzhiyun seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
194*4882a593Smuzhiyun seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
195*4882a593Smuzhiyun seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
196*4882a593Smuzhiyun seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
197*4882a593Smuzhiyun seq_printf(seq, "test_mode = %d\n", dev->test_mode);
198*4882a593Smuzhiyun seq_puts(seq, "\n");
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun list_for_each_entry(ep, &dev->ep_list, ep_list)
201*4882a593Smuzhiyun gr_seq_ep_show(seq, ep);
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun return 0;
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(gr_dfs);
206*4882a593Smuzhiyun
gr_dfs_create(struct gr_udc * dev)207*4882a593Smuzhiyun static void gr_dfs_create(struct gr_udc *dev)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun const char *name = "gr_udc_state";
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
212*4882a593Smuzhiyun debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun
gr_dfs_delete(struct gr_udc * dev)215*4882a593Smuzhiyun static void gr_dfs_delete(struct gr_udc *dev)
216*4882a593Smuzhiyun {
217*4882a593Smuzhiyun debugfs_remove_recursive(dev->dfs_root);
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun #else /* !CONFIG_USB_GADGET_DEBUG_FS */
221*4882a593Smuzhiyun
gr_dfs_create(struct gr_udc * dev)222*4882a593Smuzhiyun static void gr_dfs_create(struct gr_udc *dev) {}
gr_dfs_delete(struct gr_udc * dev)223*4882a593Smuzhiyun static void gr_dfs_delete(struct gr_udc *dev) {}
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun #endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	dma_addr_t paddr;
	struct gr_dma_desc *dma_desc;

	dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (!dma_desc) {
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
		return NULL;
	}

	dma_desc->paddr = paddr;

	return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc;
	struct gr_dma_desc *next;

	next = req->first_desc;
	if (!next)
		return;

	do {
		desc = next;
		next = desc->next_desc;
		gr_free_dma_desc(dev, desc);
	} while (desc != req->last_desc);

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function or
 * setup packet handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
		req->req.actual = req->req.length;
	} else if (req->oddlen && req->req.actual > req->evenlen) {
		/*
		 * Copy to user buffer in this case where length was not evenly
		 * divisible by ep->ep.maxpacket and the last descriptor was
		 * actually used.
		 */
		char *buftail = ((char *)req->req.buf + req->evenlen);

		memcpy(buftail, ep->tailbuf, req->oddlen);

		if (req->req.actual > req->req.length) {
			/* We got more data than was requested */
			dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
				ep->ep.name);
			gr_dbgprint_request("OVFL", ep, req);
			req->req.status = -EOVERFLOW;
		}
	}

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		spin_unlock(&dev->lock);

		usb_gadget_giveback_request(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}
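
/*
 * A worked example of the bounce-buffer copy above (illustrative numbers):
 * an OUT request of length 1000 on an endpoint with bytes_per_buffer == 512
 * is set up with req->evenlen == 512 and req->oddlen == 488. If the last
 * descriptor was actually used, its 488 tail bytes arrive in ep->tailbuf
 * and are copied here to (char *)req->req.buf + req->evenlen.
 */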

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	/*
	 * The DMA controller can not handle smaller OUT buffers than
	 * ep->ep.maxpacket. It could lead to buffer overruns if an
	 * unexpectedly long packet is received. Therefore an internal bounce
	 * buffer gets used when such a request gets enabled.
	 */
	if (!ep->is_in && req->oddlen)
		req->last_desc->data = ep->tailbuf_paddr;

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, req, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	u32 dmactrl;

	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * a smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}
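
/*
 * Sketch of the resulting chain after three calls to the function above:
 *
 *	req->first_desc --> d0 --> d1 --> d2 <-- req->last_desc
 *
 * where each link exists both as a virtual next_desc pointer and as the
 * physical paddr of the following descriptor, and req->curr_desc still
 * points at d0.
 */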

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		if (size < ep->bytes_per_buffer) {
			/* Prepare using bounce buffer */
			req->evenlen = req->req.length - bytes_left;
			req->oddlen = size;
		}

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
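
/*
 * Worked example (illustrative numbers): an OUT request with
 * req->req.length == 3000 on an endpoint with bytes_per_buffer == 1024
 * gets three descriptors covering 1024 + 1024 + 952 bytes. The last one is
 * short, so req->evenlen == 2048 and req->oddlen == 952, and gr_start_dma()
 * will redirect that descriptor to the bounce buffer. Only the first
 * descriptor leaves this function enabled.
 */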

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware
 * splits this up into several payloads automatically. Moreover,
 * ep->bytes_per_buffer is always set to a multiple of the maximum payload
 * (restricted to the valid number of maximum payloads during high bandwidth
 * isochronous or interrupt transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been
 * pushed to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is an even
	 * multiple of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
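
/*
 * Worked example (illustrative numbers): an IN request with
 * req->req.length == 1024 on an endpoint with bytes_per_buffer == 512,
 * maxpacket == 512 and req->req.zero set gets two 512-byte descriptors
 * plus one zero-length descriptor, with GR_DESC_IN_CTRL_PI set only on
 * the last one.
 */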

/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;

	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}
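
/*
 * The three gadget-side uses of the function above (sketch; with
 * fromhost == 1 the same calls back SET_FEATURE/CLEAR_FEATURE of
 * ENDPOINT_HALT, see gr_endpoint_request()):
 *
 *	gr_ep_halt_wedge(ep, 1, 0, 0);	- halt; the host may clear it
 *	gr_ep_halt_wedge(ep, 1, 1, 0);	- wedge; a host CLEAR_FEATURE is
 *					  refused while ep->wedged is set
 *	gr_ep_halt_wedge(ep, 0, 0, 0);	- clear halt and restart DMA if
 *					  requests were queued meanwhile
 */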

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (dev->ep0state != value)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));
	dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_nuke(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *reqbuf = dev->ep0reqi->req.buf;
	int status;
	int i;

	for (i = 0; i < length; i++)
		reqbuf[i] = buf[i];
	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (status < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", status);

	return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 le_response = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&le_response, 2,
			      gr_ep0_dummy_complete);
}
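
/*
 * Example: a GET_STATUS(device) reply with remote wakeup enabled would be
 * queued as
 *
 *	gr_ep0_respond_u16(dev, 0x0003);
 *
 * which places the bytes 0x03 0x00 (little-endian) in the ep0in buffer.
 */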

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. It writes the new
 * address to the control register, which is updated internally when the
 * next IN packet is ACKed.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}
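
/*
 * Sketch of the update above for address 5: the UA field is cleared and
 * rewritten as (5 << GR_CONTROL_UA_POS), and GR_CONTROL_SU arms the
 * deferred update so the core starts answering to the new address only
 * once the next IN packet (the status stage) has been ACKed.
 */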

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support USB_TEST_FORCE_ENABLE */
			test = index >> 8;
			if (test >= USB_TEST_J && test <= USB_TEST_PACKET) {
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}
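
/*
 * Example exchange handled above (illustrative): GET_STATUS(device) with
 * dev->remote_wakeup == 1 answers 0x0003 (self powered | remote wakeup),
 * and SET_FEATURE(TEST_MODE) with wIndex == (USB_TEST_J << 8) stores the
 * test selector and defers entering test mode to
 * gr_ep0_testmode_complete(), i.e. until the status response has been sent.
 */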

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but udc driver does not
	 * know anything about that. However, many gadget drivers do not handle
	 * GET_STATUS so we need to take care of that.
	 */

	switch (request) {
	case USB_REQ_GET_STATUS:
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (ret)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless the gadget accepts it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}
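
/*
 * Typical ep0state sequences through the handler above (sketch):
 *
 *	control read:	SETUP -> IDATA -> ISTATUS -> SETUP
 *	control write:	SETUP -> ODATA -> OSTATUS -> SETUP
 *	no data stage:	SETUP -> SETUP
 *
 * A negative status from the request handlers or from the gadget's setup
 * callback stalls both ep0 directions via gr_control_stall() instead.
 */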

/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case where we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

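	/*
	 * An IN request is complete only once the DMA engine has consumed
	 * its last descriptor (EN bit cleared by hardware) and the data has
	 * actually left the endpoint's two internal buffers for the bus
	 * (B0/B1 cleared), which is what the checks below verify in turn.
	 */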
	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	ctrl = READ_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
		/* Short packet or >= expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;
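
		/*
		 * Kick the DMA engine so that it picks up the re-enabled
		 * descriptor and continues receiving.
		 */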
		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);
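
	/*
	 * powstate reflects whether VBUS has previously been seen as valid:
	 * it is false only in the NOTATTACHED and ATTACHED states, i.e.
	 * before USB_STATE_POWERED has been reached.
	 */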

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}

/* Non-interrupt (threaded) context irq handler */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse a request that might already be outstanding and
	 * need it to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler that just wakes the threaded handler */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	if (!dev->irq_enabled)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

/* ---------------------------------------------------------------------- */
/* USB ep ops */

/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
	 */
	max = usb_endpoint_maxp(desc);
	nt = usb_endpoint_maxp_mult(desc) - 1;
	buffer_size = GR_BUFFER_SIZE(epctrl);
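	/*
	 * Example: a high-bandwidth isochronous endpoint descriptor with
	 * wMaxPacketSize 0x0c00 yields max = 1024 and nt = 1, i.e. up to
	 * two transactions per microframe.
	 */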
	if (nt && (mode == 0 || mode == 2)) {
		dev_err(dev->dev,
			"%s mode: multiple trans./microframe not valid\n",
			(mode == 2 ? "Bulk" : "Control"));
		return -EINVAL;
	} else if (nt == 0x3) {
		dev_err(dev->dev,
			"Invalid value 0x3 for additional trans./microframe\n");
		return -EINVAL;
	} else if ((nt + 1) * max > buffer_size) {
		dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
			buffer_size, (nt + 1), max);
		return -EINVAL;
	} else if (max == 0) {
		dev_err(dev->dev, "Max payload cannot be set to 0\n");
		return -EINVAL;
	} else if (max > ep->ep.maxpacket_limit) {
		dev_err(dev->dev, "Requested max payload %d > limit %d\n",
			max, ep->ep.maxpacket_limit);
		return -EINVAL;
	}

	spin_lock(&ep->dev->lock);

	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed in the buffers in the
		 * OUT direction.
		 */
		ep->bytes_per_buffer = max;
	}
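
	/*
	 * Example: with a 2048 byte hardware buffer, an interrupt endpoint
	 * with max = 1024 and nt = 1 gets bytes_per_buffer = 2048, a bulk
	 * IN endpoint with max = 512 gets (2048 / 512) * 512 = 2048, and a
	 * bulk OUT endpoint with max = 512 gets just 512.
	 */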

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}

/* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}

/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);

	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static const struct usb_ep_ops gr_ep_ops = {
	.enable = gr_ep_enable,
	.disable = gr_ep_disable,

	.alloc_request = gr_alloc_request,
	.free_request = gr_free_request,

	.queue = gr_queue_ext,
	.dequeue = gr_dequeue,

	.set_halt = gr_set_halt,
	.set_wedge = gr_set_wedge,
	.fifo_status = gr_fifo_status,
	.fifo_flush = gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame = gr_get_frame,
	.wakeup = gr_wakeup,
	.pullup = gr_pullup,
	.udc_start = gr_udc_start,
	.udc_stop = gr_udc_stop,
	/* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		if (!_req)
			return -ENOMEM;

		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!buf) {
			gr_free_request(&ep->ep, _req);
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

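	/*
	 * Every endpoint gets a small coherent tail buffer. For OUT
	 * endpoints this gives the DMA engine a safe place to park a final
	 * packet that would otherwise run past the end of a request buffer.
	 */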
	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}

/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);

	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}
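
	/*
	 * The optional "epobufsizes"/"epibufsizes" properties above override
	 * the default 1024 byte hardware buffer size per endpoint. As an
	 * illustration only, a device tree fragment could contain:
	 *
	 *	epobufsizes = <1024 512 512>;
	 *	epibufsizes = <1024 1024>;
	 */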

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}

static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}

static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}

static int gr_request_irq(struct gr_udc *dev, int irq)
{
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}

static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0)
		return -ENODEV;

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0)
			return -ENODEV;
	} else {
		dev->irqi = 0;
	}
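
	/*
	 * Either a single shared irq handles all events, or three irqs are
	 * used, where dev->irqi and dev->irqo presumably signal IN and OUT
	 * endpoint events separately; all of them get the same handler.
	 */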

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;

	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	/* Inside lock so that no gadget can use this udc until probe is done */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	spin_lock(&dev->lock);

	retval = gr_udc_init(dev);
	if (retval) {
		spin_unlock(&dev->lock);
		goto out;
	}

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	spin_unlock(&dev->lock);

	gr_dfs_create(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	if (retval)
		gr_remove(pdev);

	return retval;
}

static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");