/*
 * MUSB OTG peripheral driver ep0 handling
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#else
#include <common.h>
#include "linux-compat.h"
#include <asm/processor.h>
#endif

#include "musb_core.h"

/* ep0 is always musb->endpoints[0].ep_in */
#define	next_ep0_request(musb)	next_in_request(&(musb)->endpoints[0])

/*
 * locking note: we use only the controller lock, for simpler correctness.
 * It's always held with IRQs blocked.
 *
 * It protects the ep0 request queue as well as ep0_state, not just the
 * controller and indexed registers.  And that lock stays held unless it
 * needs to be dropped to allow reentering this driver ... like upcalls to
 * the gadget driver, or adjusting endpoint halt status.
 */

static char *decode_ep0stage(u8 stage)
{
	switch (stage) {
	case MUSB_EP0_STAGE_IDLE:	return "idle";
	case MUSB_EP0_STAGE_SETUP:	return "setup";
	case MUSB_EP0_STAGE_TX:		return "in";
	case MUSB_EP0_STAGE_RX:		return "out";
	case MUSB_EP0_STAGE_ACKWAIT:	return "wait";
	case MUSB_EP0_STAGE_STATUSIN:	return "in/status";
	case MUSB_EP0_STAGE_STATUSOUT:	return "out/status";
	default:			return "?";
	}
}

/* handle a standard GET_STATUS request
 * Context: caller holds controller lock
 */
static int service_tx_status_request(
	struct musb *musb,
	const struct usb_ctrlrequest *ctrlrequest)
{
	void __iomem	*mbase = musb->mregs;
	int handled = 1;
	u8 result[2], epnum = 0;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	result[1] = 0;

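	/*
	 * GET_STATUS replies with two bytes of status, sent LSB first; all
	 * of the standard flags live in the low byte, so result[1] stays 0.
	 */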
	switch (recip) {
	case USB_RECIP_DEVICE:
		result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
		result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
		if (musb->g.is_otg) {
			result[0] |= musb->g.b_hnp_enable
				<< USB_DEVICE_B_HNP_ENABLE;
			result[0] |= musb->g.a_alt_hnp_support
				<< USB_DEVICE_A_ALT_HNP_SUPPORT;
			result[0] |= musb->g.a_hnp_support
				<< USB_DEVICE_A_HNP_SUPPORT;
		}
		break;

	case USB_RECIP_INTERFACE:
		result[0] = 0;
		break;

	case USB_RECIP_ENDPOINT: {
		int		is_in;
		struct musb_ep	*ep;
		u16		tmp;
		void __iomem	*regs;

		epnum = (u8) ctrlrequest->wIndex;
		if (!epnum) {
			result[0] = 0;
			break;
		}

		is_in = epnum & USB_DIR_IN;
		if (is_in) {
			epnum &= 0x0f;
			ep = &musb->endpoints[epnum].ep_in;
		} else {
			ep = &musb->endpoints[epnum].ep_out;
		}
		regs = musb->endpoints[epnum].regs;

		if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
			handled = -EINVAL;
			break;
		}

		musb_ep_select(mbase, epnum);
		if (is_in)
			tmp = musb_readw(regs, MUSB_TXCSR)
					& MUSB_TXCSR_P_SENDSTALL;
		else
			tmp = musb_readw(regs, MUSB_RXCSR)
					& MUSB_RXCSR_P_SENDSTALL;
		musb_ep_select(mbase, 0);

		result[0] = tmp ? 1 : 0;
		} break;

	default:
		/* class, vendor, etc ... delegate */
		handled = 0;
		break;
	}

	/* fill up the fifo; caller updates csr0 */
	if (handled > 0) {
		u16	len = le16_to_cpu(ctrlrequest->wLength);

		if (len > 2)
			len = 2;
		musb_write_fifo(&musb->endpoints[0], len, result);
	}

	return handled;
}

/*
 * Handle a control-IN request: the ep0 buffer contains the current request,
 * which is expected to be a standard control request.  Assumes the FIFO is
 * at least 2 bytes long.
 *
 * @return 0 if the request was NOT HANDLED,
 *	< 0 when error
 *	> 0 when the request is processed
 *
 * Context: caller holds controller lock
 */
static int
service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
{
	int handled = 0;	/* not handled */

	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
			== USB_TYPE_STANDARD) {
		switch (ctrlrequest->bRequest) {
		case USB_REQ_GET_STATUS:
			handled = service_tx_status_request(musb,
					ctrlrequest);
			break;

		/* case USB_REQ_SYNC_FRAME: */

		default:
			break;
		}
	}
	return handled;
}

/*
 * Context: caller holds controller lock
 */
static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
{
	musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
}

/*
 * Tries to start B-device HNP negotiation if enabled via sysfs
 */
static inline void musb_try_b_hnp_enable(struct musb *musb)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl;

	dev_dbg(musb->controller, "HNP: Setting HR\n");
	devctl = musb_readb(mbase, MUSB_DEVCTL);
	musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
}

/*
 * Handle all control requests with no DATA stage, including standard
 * requests such as:
 * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
 *	always delegated to the gadget driver
 * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
 *	always handled here, except for class/vendor/... features
 *
 * Context: caller holds controller lock
 */
static int
service_zero_data_request(struct musb *musb,
		struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int handled = -EINVAL;
	void __iomem *mbase = musb->mregs;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	/* the gadget driver handles everything except what we MUST handle */
	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
			== USB_TYPE_STANDARD) {
		switch (ctrlrequest->bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* change it after the status stage */
			musb->set_address = true;
			musb->address = (u8) (ctrlrequest->wValue & 0x7f);
			handled = 1;
			break;

		case USB_REQ_CLEAR_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				if (ctrlrequest->wValue
						!= USB_DEVICE_REMOTE_WAKEUP)
					break;
				musb->may_wakeup = 0;
				handled = 1;
				break;
			case USB_RECIP_INTERFACE:
				break;
			case USB_RECIP_ENDPOINT: {
				const u8		epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep		*musb_ep;
				struct musb_hw_ep	*ep;
				struct musb_request	*request;
				void __iomem		*regs;
				int			is_in;
				u16			csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				handled = 1;
				/* Ignore request if endpoint is wedged */
				if (musb_ep->wedged)
					break;

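				/*
				 * Clearing ENDPOINT_HALT also resets the data
				 * toggle and drops any pending or already-sent
				 * STALL on the selected endpoint.
				 */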
				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					csr |= MUSB_TXCSR_CLRDATATOG |
					       MUSB_TXCSR_P_WZC_BITS;
					csr &= ~(MUSB_TXCSR_P_SENDSTALL |
						 MUSB_TXCSR_P_SENTSTALL |
						 MUSB_TXCSR_TXPKTRDY);
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_CLRDATATOG |
					       MUSB_RXCSR_P_WZC_BITS;
					csr &= ~(MUSB_RXCSR_P_SENDSTALL |
						 MUSB_RXCSR_P_SENTSTALL);
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* Maybe start the first request in the queue */
				request = next_request(musb_ep);
				if (!musb_ep->busy && request) {
					dev_dbg(musb->controller, "restarting the request\n");
					musb_ep_restart(musb, request);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				} break;
			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;

		case USB_REQ_SET_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				handled = 1;
				switch (ctrlrequest->wValue) {
				case USB_DEVICE_REMOTE_WAKEUP:
					musb->may_wakeup = 1;
					break;
				case USB_DEVICE_TEST_MODE:
					if (musb->g.speed != USB_SPEED_HIGH)
						goto stall;
					if (ctrlrequest->wIndex & 0xff)
						goto stall;

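					/*
					 * Test selector rides in the high byte
					 * of wIndex: 1-4 are the standard USB
					 * 2.0 test modes, 0xc0 and up map to
					 * MUSB-specific TESTMODE features.
					 */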
					switch (ctrlrequest->wIndex >> 8) {
					case 1:
						pr_debug("TEST_J\n");
						/* TEST_J */
						musb->test_mode_nr =
							MUSB_TEST_J;
						break;
					case 2:
						/* TEST_K */
						pr_debug("TEST_K\n");
						musb->test_mode_nr =
							MUSB_TEST_K;
						break;
					case 3:
						/* TEST_SE0_NAK */
						pr_debug("TEST_SE0_NAK\n");
						musb->test_mode_nr =
							MUSB_TEST_SE0_NAK;
						break;
					case 4:
						/* TEST_PACKET */
						pr_debug("TEST_PACKET\n");
						musb->test_mode_nr =
							MUSB_TEST_PACKET;
						break;

					case 0xc0:
						/* TEST_FORCE_HS */
						pr_debug("TEST_FORCE_HS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HS;
						break;
					case 0xc1:
						/* TEST_FORCE_FS */
						pr_debug("TEST_FORCE_FS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_FS;
						break;
					case 0xc2:
						/* TEST_FIFO_ACCESS */
						pr_debug("TEST_FIFO_ACCESS\n");
						musb->test_mode_nr =
							MUSB_TEST_FIFO_ACCESS;
						break;
					case 0xc3:
						/* TEST_FORCE_HOST */
						pr_debug("TEST_FORCE_HOST\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HOST;
						break;
					default:
						goto stall;
					}

					/* enter test mode after irq */
					if (handled > 0)
						musb->test_mode = true;
					break;
				case USB_DEVICE_B_HNP_ENABLE:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.b_hnp_enable = 1;
					musb_try_b_hnp_enable(musb);
					break;
				case USB_DEVICE_A_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_hnp_support = 1;
					break;
				case USB_DEVICE_A_ALT_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_alt_hnp_support = 1;
					break;
				case USB_DEVICE_DEBUG_MODE:
					handled = 0;
					break;
stall:
				default:
					handled = -EINVAL;
					break;
				}
				break;

			case USB_RECIP_INTERFACE:
				break;

			case USB_RECIP_ENDPOINT: {
				const u8		epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep		*musb_ep;
				struct musb_hw_ep	*ep;
				void __iomem		*regs;
				int			is_in;
				u16			csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					if (csr & MUSB_TXCSR_FIFONOTEMPTY)
						csr |= MUSB_TXCSR_FLUSHFIFO;
					csr |= MUSB_TXCSR_P_SENDSTALL
						| MUSB_TXCSR_CLRDATATOG
						| MUSB_TXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_P_SENDSTALL
						| MUSB_RXCSR_FLUSHFIFO
						| MUSB_RXCSR_CLRDATATOG
						| MUSB_RXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				handled = 1;
				} break;

			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;
		default:
			/* delegate SET_CONFIGURATION, etc */
			handled = 0;
		}
	} else
		handled = 0;
	return handled;
}

/* we have an ep0out data packet
 * Context: caller holds controller lock
 */
static void ep0_rxstate(struct musb *musb)
{
	void __iomem		*regs = musb->control_ep->regs;
	struct musb_request	*request;
	struct usb_request	*req;
	u16			count, csr;

	request = next_ep0_request(musb);
	req = &request->request;

	/* read packet and ack; or stall because of gadget driver bug:
	 * should have provided the rx buffer before setup() returned.
	 */
	if (req) {
		void *buf = req->buf + req->actual;
		unsigned len = req->length - req->actual;

		/* read the buffer */
		count = musb_readb(regs, MUSB_COUNT0);
		if (count > len) {
			req->status = -EOVERFLOW;
			count = len;
		}
		musb_read_fifo(&musb->endpoints[0], count, buf);
		req->actual += count;
		csr = MUSB_CSR0_P_SVDRXPKTRDY;
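		/*
		 * A short packet (less than the 64 byte ep0 maxpacket) or a
		 * full buffer ends the OUT data stage; flag DATAEND so the
		 * status stage can start once the packet is acked.
		 */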
		if (count < 64 || req->actual == req->length) {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			csr |= MUSB_CSR0_P_DATAEND;
		} else
			req = NULL;
	} else
		csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;

	/* Completion handler may choose to stall, e.g. because the
	 * message just received holds invalid data.
	 */
	if (req) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, req);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}

/*
 * Transmit to the host (IN); this code may be called both from IRQ context
 * and from a kernel thread.
 *
 * Context: caller holds controller lock
 */
static void ep0_txstate(struct musb *musb)
{
	void __iomem		*regs = musb->control_ep->regs;
	struct musb_request	*req = next_ep0_request(musb);
	struct usb_request	*request;
	u16			csr = MUSB_CSR0_TXPKTRDY;
	u8			*fifo_src;
	u8			fifo_count;

	if (!req) {
		/* WARN_ON(1); */
		dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
		return;
	}

	request = &req->request;

	/* load the data */
	fifo_src = (u8 *) request->buf + request->actual;
	fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
		request->length - request->actual);
	musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
	request->actual += fifo_count;

	/*
	 * Update the flags: a short packet, or the last full-length packet
	 * when request->zero is clear, ends the IN data stage; otherwise the
	 * request stays queued so the next irq loads more data (or a ZLP).
	 */
	if (fifo_count < MUSB_MAX_END0_PACKET
			|| (request->actual == request->length
				&& !request->zero)) {
		musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
		csr |= MUSB_CSR0_P_DATAEND;
	} else
		request = NULL;

	/* send it out, triggering a "txpktrdy cleared" irq */
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);

	/* report completions as soon as the fifo's loaded; there's no
	 * win in waiting till this last packet gets acked.  (other than
	 * very precise fault reporting, needed by USB TMC; possible with
	 * this hardware, but not usable from portable gadget drivers.)
	 */
	if (request) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, request);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}
}

/*
 * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
 * Fields are left in USB byte-order.
 *
 * Context: caller holds controller lock.
 */
static void
musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
{
	struct musb_request	*r;
	void __iomem		*regs = musb->control_ep->regs;

	musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);

	/* NOTE: earlier 2.6 versions changed setup packets to host
	 * order, but now USB packets always stay in USB byte order.
	 */
	dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n",
		req->bRequestType,
		req->bRequest,
		le16_to_cpu(req->wValue),
		le16_to_cpu(req->wIndex),
		le16_to_cpu(req->wLength));

	/* clean up any leftover transfers */
	r = next_ep0_request(musb);
	if (r)
		musb_g_ep0_giveback(musb, &r->request);

	/* For zero-data requests we want to delay the STATUS stage to
	 * avoid SETUPEND errors.  If we read data (OUT), delay accepting
	 * packets until there's a buffer to store them in.
	 *
	 * If we write data, the controller acts happier if we enable
	 * the TX FIFO right away, and give the controller a moment
	 * to switch modes...
	 */
	musb->set_address = false;
	musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
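	/*
	 * CSR0 ack bits are parked in musb->ackpend rather than written
	 * immediately, so the SETUP decoding (and possibly the gadget
	 * driver's ep0 queue) can fold in DATAEND, TXPKTRDY or a stall
	 * before the register write happens.
	 */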
	if (req->wLength == 0) {
		if (req->bRequestType & USB_DIR_IN)
			musb->ackpend |= MUSB_CSR0_TXPKTRDY;
		musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
	} else if (req->bRequestType & USB_DIR_IN) {
		musb->ep0_state = MUSB_EP0_STAGE_TX;
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
		while ((musb_readw(regs, MUSB_CSR0)
				& MUSB_CSR0_RXPKTRDY) != 0)
			cpu_relax();
		musb->ackpend = 0;
	} else
		musb->ep0_state = MUSB_EP0_STAGE_RX;
}

static int
forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int retval;
	if (!musb->gadget_driver)
		return -EOPNOTSUPP;
	spin_unlock(&musb->lock);
	retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
	spin_lock(&musb->lock);
	return retval;
}

/*
 * Handle peripheral ep0 interrupt
 *
 * Context: irq handler; we won't re-enter the driver that way.
 */
irqreturn_t musb_g_ep0_irq(struct musb *musb)
{
	u16		csr;
	u16		len;
	void __iomem	*mbase = musb->mregs;
	void __iomem	*regs = musb->endpoints[0].regs;
	irqreturn_t	retval = IRQ_NONE;

	musb_ep_select(mbase, 0);	/* select ep0 */
	csr = musb_readw(regs, MUSB_CSR0);
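	/* COUNT0 holds the number of bytes waiting in the ep0 FIFO (8 for a SETUP packet) */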
	len = musb_readb(regs, MUSB_COUNT0);

	dev_dbg(musb->controller, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
		csr, len,
		musb_readb(mbase, MUSB_FADDR),
		decode_ep0stage(musb->ep0_state));

	if (csr & MUSB_CSR0_P_DATAEND) {
		/*
		 * If DATAEND is still set we must not call the callback,
		 * as the status stage is not yet complete.
		 */
		return IRQ_HANDLED;
	}

	/* we sent a stall ... acknowledge it now */
	if (csr & MUSB_CSR0_P_SENTSTALL) {
		musb_writew(regs, MUSB_CSR0,
				csr & ~MUSB_CSR0_P_SENTSTALL);
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		csr = musb_readw(regs, MUSB_CSR0);
	}

	/* request ended "early" */
	if (csr & MUSB_CSR0_P_SETUPEND) {
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
		retval = IRQ_HANDLED;
		/* Transition into the early status phase */
		switch (musb->ep0_state) {
		case MUSB_EP0_STAGE_TX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
			break;
		case MUSB_EP0_STAGE_RX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			break;
		default:
			ERR("SetupEnd came in a wrong ep0stage %s\n",
				decode_ep0stage(musb->ep0_state));
		}
		csr = musb_readw(regs, MUSB_CSR0);
		/* NOTE: request may need completion */
	}

	/* docs from Mentor only describe tx, rx, and idle/setup states.
	 * we need to handle nuances around status stages, and also the
	 * case where status and setup stages come back-to-back ...
	 */
	switch (musb->ep0_state) {

	case MUSB_EP0_STAGE_TX:
		/* irq on clearing txpktrdy */
		if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
			ep0_txstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_RX:
		/* irq on set rxpktrdy */
		if (csr & MUSB_CSR0_RXPKTRDY) {
			ep0_rxstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_STATUSIN:
		/* end of sequence #2 (OUT/RX state) or #3 (no data) */

		/* update address (if needed) only @ the end of the
		 * status phase per usb spec, which also guarantees
		 * we get 10 msec to receive this irq... until this
		 * is done we won't see the next packet.
		 */
		if (musb->set_address) {
			musb->set_address = false;
			musb_writeb(mbase, MUSB_FADDR, musb->address);
		}

		/* enter test mode if needed (exit by reset) */
		else if (musb->test_mode) {
			dev_dbg(musb->controller, "entering TESTMODE\n");

			if (MUSB_TEST_PACKET == musb->test_mode_nr)
				musb_load_testpacket(musb);

			musb_writeb(mbase, MUSB_TESTMODE,
					musb->test_mode_nr);
		}
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_STATUSOUT:
		/* end of sequence #1: write to host (TX state) */
		{
			struct musb_request	*req;

			req = next_ep0_request(musb);
			if (req)
				musb_g_ep0_giveback(musb, &req->request);
		}

		/*
		 * In case several interrupts got coalesced, check whether
		 * we've already received a SETUP packet...
		 */
		if (csr & MUSB_CSR0_RXPKTRDY)
			goto setup;

		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;

	case MUSB_EP0_STAGE_IDLE:
		/*
		 * This state is typically (but not always) indiscernible
		 * from the status states since the corresponding interrupts
		 * tend to happen within too short a period of time (with only
		 * a zero-length packet in between) and so get coalesced...
		 */
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		/* FALLTHROUGH */

	case MUSB_EP0_STAGE_SETUP:
setup:
		if (csr & MUSB_CSR0_RXPKTRDY) {
			struct usb_ctrlrequest	setup;
			int			handled = 0;

			if (len != 8) {
				ERR("SETUP packet len %d != 8 ?\n", len);
				break;
			}
			musb_read_setup(musb, &setup);
			retval = IRQ_HANDLED;

			/* sometimes the RESET won't be reported */
			if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
				u8	power;

				printk(KERN_NOTICE "%s: peripheral reset "
						"irq lost!\n",
						musb_driver_name);
				power = musb_readb(mbase, MUSB_POWER);
				musb->g.speed = (power & MUSB_POWER_HSMODE)
					? USB_SPEED_HIGH : USB_SPEED_FULL;

			}

			switch (musb->ep0_state) {

			/* sequence #3 (no data stage), includes requests
			 * we can't forward (notably SET_ADDRESS and the
			 * device/endpoint feature set/clear operations)
			 * plus SET_CONFIGURATION and others we must
			 */
			case MUSB_EP0_STAGE_ACKWAIT:
				handled = service_zero_data_request(
						musb, &setup);

				/*
				 * We're expecting no data in any case, so
				 * always set the DATAEND bit -- doing this
				 * here helps avoid SetupEnd interrupt coming
				 * in the idle stage when we're stalling...
				 */
				musb->ackpend |= MUSB_CSR0_P_DATAEND;

				/* status stage might be immediate */
				if (handled > 0)
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSIN;
				break;

			/* sequence #1 (IN to host), includes GET_STATUS
			 * requests that we can't forward, GET_DESCRIPTOR
			 * and others that we must
			 */
			case MUSB_EP0_STAGE_TX:
				handled = service_in_request(musb, &setup);
				if (handled > 0) {
					musb->ackpend = MUSB_CSR0_TXPKTRDY
						| MUSB_CSR0_P_DATAEND;
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSOUT;
				}
				break;

			/* sequence #2 (OUT from host), always forward */
			default:		/* MUSB_EP0_STAGE_RX */
				break;
			}

			dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n",
				handled, csr,
				decode_ep0stage(musb->ep0_state));

			/* unless we need to delegate this to the gadget
			 * driver, we know how to wrap this up: csr0 has
			 * not yet been written.
			 */
			if (handled < 0)
				goto stall;
			else if (handled > 0)
				goto finish;

			handled = forward_to_driver(musb, &setup);
			if (handled < 0) {
				musb_ep_select(mbase, 0);
stall:
				dev_dbg(musb->controller, "stall (%d)\n", handled);
				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
				musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
				musb_writew(regs, MUSB_CSR0,
						musb->ackpend);
				musb->ackpend = 0;
			}
		}
		break;

	case MUSB_EP0_STAGE_ACKWAIT:
		/* This should not happen. But happens with tusb6010 with
		 * g_file_storage and high speed. Do nothing.
		 */
		retval = IRQ_HANDLED;
		break;

	default:
		/* "can't happen" */
		WARN_ON(1);
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;
	}

	return retval;
}

static int
musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
{
	/* always enabled */
	return -EINVAL;
}

static int musb_g_ep0_disable(struct usb_ep *e)
{
	/* always enabled */
	return -EINVAL;
}

static int
musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
{
	struct musb_ep		*ep;
	struct musb_request	*req;
	struct musb		*musb;
	int			status;
	unsigned long		lockflags;
	void __iomem		*regs;

	if (!e || !r)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	regs = musb->control_ep->regs;

	req = to_musb_request(r);
	req->musb = musb;
	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->tx = ep->is_in;

	spin_lock_irqsave(&musb->lock, lockflags);

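	/* ep0 handles only one request at a time */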
	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

	switch (musb->ep0_state) {
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* zero-length data */
		status = 0;
		break;
	default:
		dev_dbg(musb->controller, "ep0 request queued in state %d\n",
			musb->ep0_state);
		status = -EINVAL;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&req->list, &ep->req_list);

	dev_dbg(musb->controller, "queue to %s (%s), length=%d\n",
		ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
		req->request.length);

	musb_ep_select(musb->mregs, 0);

	/* sequence #1, IN ... start writing the data */
	if (musb->ep0_state == MUSB_EP0_STAGE_TX)
		ep0_txstate(musb);

	/* sequence #3, no-data ... issue IN status */
	else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
		if (req->request.length)
			status = -EINVAL;
		else {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			musb_writew(regs, MUSB_CSR0,
					musb->ackpend | MUSB_CSR0_P_DATAEND);
			musb->ackpend = 0;
			musb_g_ep0_giveback(ep->musb, r);
		}

	/* else for sequence #2 (OUT), caller provides a buffer
	 * before the next packet arrives.  deferred responses
	 * (after SETUP is acked) are racey.
	 */
	} else if (musb->ackpend) {
		musb_writew(regs, MUSB_CSR0, musb->ackpend);
		musb->ackpend = 0;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	/* we just won't support this */
	return -EINVAL;
}

static int musb_g_ep0_halt(struct usb_ep *e, int value)
{
	struct musb_ep		*ep;
	struct musb		*musb;
	void __iomem		*base, *regs;
	unsigned long		flags;
	int			status;
	u16			csr;

	if (!e || !value)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	base = musb->mregs;
	regs = musb->control_ep->regs;
	status = 0;

	spin_lock_irqsave(&musb->lock, flags);

	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

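	/*
	 * Stalling ep0 is a protocol stall: it aborts only the control
	 * transfer in progress, and the hardware recovers automatically
	 * when the next SETUP packet arrives.
	 */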
	musb_ep_select(base, 0);
	csr = musb->ackpend;

	switch (musb->ep0_state) {

	/* Stalls are usually issued after parsing SETUP packet, either
	 * directly in irq context from setup() or else later.
	 */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* STALL for zero-length data */
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
		csr = musb_readw(regs, MUSB_CSR0);
		/* FALLTHROUGH */

	/* It's also OK to issue stalls during callbacks when a non-empty
	 * DATA stage buffer has been read (or even written).
	 */
	case MUSB_EP0_STAGE_STATUSIN:	/* control-OUT status */
	case MUSB_EP0_STAGE_STATUSOUT:	/* control-IN status */

		csr |= MUSB_CSR0_P_SENDSTALL;
		musb_writew(regs, MUSB_CSR0, csr);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		musb->ackpend = 0;
		break;
	default:
		dev_dbg(musb->controller, "ep0 can't halt in state %d\n",
			musb->ep0_state);
		status = -EINVAL;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

const struct usb_ep_ops musb_g_ep0_ops = {
	.enable		= musb_g_ep0_enable,
	.disable	= musb_g_ep0_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_g_ep0_queue,
	.dequeue	= musb_g_ep0_dequeue,
	.set_halt	= musb_g_ep0_halt,
};