// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG peripheral driver ep0 handling
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/interrupt.h>

#include "musb_core.h"

/* ep0 is always musb->endpoints[0].ep_in */
#define next_ep0_request(musb)	next_in_request(&(musb)->endpoints[0])
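/*
 * Note: the gadget exposes a single ep0 backed by endpoints[0].ep_in, so
 * control requests for both directions live on that one queue and
 * next_ep0_request() returns its head (or NULL when it is empty).
 */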

/*
 * locking note: we use only the controller lock, for simpler correctness.
 * It's always held with IRQs blocked.
 *
 * It protects the ep0 request queue as well as ep0_state, not just the
 * controller and indexed registers. And that lock stays held unless it
 * needs to be dropped to allow reentering this driver ... like upcalls to
 * the gadget driver, or adjusting endpoint halt status.
 */

static char *decode_ep0stage(u8 stage)
{
	switch (stage) {
	case MUSB_EP0_STAGE_IDLE:	return "idle";
	case MUSB_EP0_STAGE_SETUP:	return "setup";
	case MUSB_EP0_STAGE_TX:		return "in";
	case MUSB_EP0_STAGE_RX:		return "out";
	case MUSB_EP0_STAGE_ACKWAIT:	return "wait";
	case MUSB_EP0_STAGE_STATUSIN:	return "in/status";
	case MUSB_EP0_STAGE_STATUSOUT:	return "out/status";
	default:			return "?";
	}
}

/* handle a standard GET_STATUS request
 * Context: caller holds controller lock
 */
static int service_tx_status_request(
	struct musb *musb,
	const struct usb_ctrlrequest *ctrlrequest)
{
	void __iomem	*mbase = musb->mregs;
	int handled = 1;
	u8 result[2], epnum = 0;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	result[1] = 0;

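	/*
	 * GET_STATUS answers with a two-byte, little-endian bitmap.  Only
	 * the low byte carries flags for the recipients handled here, so
	 * the high byte (result[1]) stays zero.
	 */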
	switch (recip) {
	case USB_RECIP_DEVICE:
		result[0] = musb->g.is_selfpowered << USB_DEVICE_SELF_POWERED;
		result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
		if (musb->g.is_otg) {
			result[0] |= musb->g.b_hnp_enable
				<< USB_DEVICE_B_HNP_ENABLE;
			result[0] |= musb->g.a_alt_hnp_support
				<< USB_DEVICE_A_ALT_HNP_SUPPORT;
			result[0] |= musb->g.a_hnp_support
				<< USB_DEVICE_A_HNP_SUPPORT;
		}
		break;

	case USB_RECIP_INTERFACE:
		result[0] = 0;
		break;

	case USB_RECIP_ENDPOINT: {
		int		is_in;
		struct musb_ep	*ep;
		u16		tmp;
		void __iomem	*regs;

		epnum = (u8) ctrlrequest->wIndex;
		if (!epnum) {
			result[0] = 0;
			break;
		}

		is_in = epnum & USB_DIR_IN;
		epnum &= 0x0f;
		if (epnum >= MUSB_C_NUM_EPS) {
			handled = -EINVAL;
			break;
		}

		if (is_in)
			ep = &musb->endpoints[epnum].ep_in;
		else
			ep = &musb->endpoints[epnum].ep_out;
		regs = musb->endpoints[epnum].regs;

		if (!ep->desc) {
			handled = -EINVAL;
			break;
		}

		musb_ep_select(mbase, epnum);
		if (is_in)
			tmp = musb_readw(regs, MUSB_TXCSR)
						& MUSB_TXCSR_P_SENDSTALL;
		else
			tmp = musb_readw(regs, MUSB_RXCSR)
						& MUSB_RXCSR_P_SENDSTALL;
		musb_ep_select(mbase, 0);

		result[0] = tmp ? 1 : 0;
		} break;

	default:
		/* class, vendor, etc ... delegate */
		handled = 0;
		break;
	}

	/* fill up the fifo; caller updates csr0 */
	if (handled > 0) {
		u16	len = le16_to_cpu(ctrlrequest->wLength);

		if (len > 2)
			len = 2;
		musb_write_fifo(&musb->endpoints[0], len, result);
	}

	return handled;
}

/*
 * Handle a control-IN request: the ep0 buffer holds the current request,
 * which is expected to be a standard control request. Assumes the fifo is
 * at least 2 bytes long.
 *
 * Returns 0 if the request was not handled,
 * < 0 on error,
 * > 0 when the request has been processed.
 *
 * Context: caller holds controller lock
 */
static int
service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
{
	int handled = 0;			/* not handled */

	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
			== USB_TYPE_STANDARD) {
		switch (ctrlrequest->bRequest) {
		case USB_REQ_GET_STATUS:
			handled = service_tx_status_request(musb,
					ctrlrequest);
			break;

		/* case USB_REQ_SYNC_FRAME: */

		default:
			break;
		}
	}
	return handled;
}

/*
 * Context: caller holds controller lock
 */
static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
{
	musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
}

/*
 * Tries to start B-device HNP negotiation if enabled via sysfs
 */
static inline void musb_try_b_hnp_enable(struct musb *musb)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl;

	musb_dbg(musb, "HNP: Setting HR");
	devctl = musb_readb(mbase, MUSB_DEVCTL);
	musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
}

/*
 * Handle all control requests with no DATA stage, including standard
 * requests such as:
 * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
 *	always delegated to the gadget driver
 * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
 *	always handled here, except for class/vendor/... features
 *
 * Context: caller holds controller lock
 */
static int
service_zero_data_request(struct musb *musb,
		struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int handled = -EINVAL;
	void __iomem *mbase = musb->mregs;
	const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;

	/* the gadget driver handles everything except what we MUST handle */
	if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
			== USB_TYPE_STANDARD) {
		switch (ctrlrequest->bRequest) {
		case USB_REQ_SET_ADDRESS:
			/* change it after the status stage */
			musb->set_address = true;
			musb->address = (u8) (ctrlrequest->wValue & 0x7f);
			handled = 1;
			break;

		case USB_REQ_CLEAR_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				if (ctrlrequest->wValue
						!= USB_DEVICE_REMOTE_WAKEUP)
					break;
				musb->may_wakeup = 0;
				handled = 1;
				break;
			case USB_RECIP_INTERFACE:
				break;
			case USB_RECIP_ENDPOINT: {
				const u8		epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep		*musb_ep;
				struct musb_hw_ep	*ep;
				struct musb_request	*request;
				void __iomem		*regs;
				int			is_in;
				u16			csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				handled = 1;
				/* Ignore request if endpoint is wedged */
				if (musb_ep->wedged)
					break;

				musb_ep_select(mbase, epnum);
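				/*
				 * The *_P_WZC_BITS flags are "write zero to
				 * clear" status bits; keeping them set during
				 * this read-modify-write leaves them alone
				 * while the halt and data toggle are cleared.
				 */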
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					csr |= MUSB_TXCSR_CLRDATATOG |
					       MUSB_TXCSR_P_WZC_BITS;
					csr &= ~(MUSB_TXCSR_P_SENDSTALL |
						 MUSB_TXCSR_P_SENTSTALL |
						 MUSB_TXCSR_TXPKTRDY);
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_CLRDATATOG |
					       MUSB_RXCSR_P_WZC_BITS;
					csr &= ~(MUSB_RXCSR_P_SENDSTALL |
						 MUSB_RXCSR_P_SENTSTALL);
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* Maybe start the first request in the queue */
				request = next_request(musb_ep);
				if (!musb_ep->busy && request) {
					musb_dbg(musb, "restarting the request");
					musb_ep_restart(musb, request);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				} break;
			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;

		case USB_REQ_SET_FEATURE:
			switch (recip) {
			case USB_RECIP_DEVICE:
				handled = 1;
				switch (ctrlrequest->wValue) {
				case USB_DEVICE_REMOTE_WAKEUP:
					musb->may_wakeup = 1;
					break;
				case USB_DEVICE_TEST_MODE:
					if (musb->g.speed != USB_SPEED_HIGH)
						goto stall;
					if (ctrlrequest->wIndex & 0xff)
						goto stall;

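					/*
					 * The test selector lives in the high
					 * byte of wIndex (USB 2.0, 9.4.9);
					 * 0xc0..0xc3 below are MUSB-specific
					 * extensions beyond the standard
					 * TEST_J/K/SE0_NAK/PACKET modes.
					 */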
					switch (ctrlrequest->wIndex >> 8) {
					case USB_TEST_J:
						pr_debug("USB_TEST_J\n");
						musb->test_mode_nr =
							MUSB_TEST_J;
						break;
					case USB_TEST_K:
						pr_debug("USB_TEST_K\n");
						musb->test_mode_nr =
							MUSB_TEST_K;
						break;
					case USB_TEST_SE0_NAK:
						pr_debug("USB_TEST_SE0_NAK\n");
						musb->test_mode_nr =
							MUSB_TEST_SE0_NAK;
						break;
					case USB_TEST_PACKET:
						pr_debug("USB_TEST_PACKET\n");
						musb->test_mode_nr =
							MUSB_TEST_PACKET;
						break;

					case 0xc0:
						/* TEST_FORCE_HS */
						pr_debug("TEST_FORCE_HS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HS;
						break;
					case 0xc1:
						/* TEST_FORCE_FS */
						pr_debug("TEST_FORCE_FS\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_FS;
						break;
					case 0xc2:
						/* TEST_FIFO_ACCESS */
						pr_debug("TEST_FIFO_ACCESS\n");
						musb->test_mode_nr =
							MUSB_TEST_FIFO_ACCESS;
						break;
					case 0xc3:
						/* TEST_FORCE_HOST */
						pr_debug("TEST_FORCE_HOST\n");
						musb->test_mode_nr =
							MUSB_TEST_FORCE_HOST;
						break;
					default:
						goto stall;
					}

					/* enter test mode after irq */
					if (handled > 0)
						musb->test_mode = true;
					break;
				case USB_DEVICE_B_HNP_ENABLE:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.b_hnp_enable = 1;
					musb_try_b_hnp_enable(musb);
					break;
				case USB_DEVICE_A_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_hnp_support = 1;
					break;
				case USB_DEVICE_A_ALT_HNP_SUPPORT:
					if (!musb->g.is_otg)
						goto stall;
					musb->g.a_alt_hnp_support = 1;
					break;
				case USB_DEVICE_DEBUG_MODE:
					handled = 0;
					break;
stall:
				default:
					handled = -EINVAL;
					break;
				}
				break;

			case USB_RECIP_INTERFACE:
				break;

			case USB_RECIP_ENDPOINT: {
				const u8		epnum =
					ctrlrequest->wIndex & 0x0f;
				struct musb_ep		*musb_ep;
				struct musb_hw_ep	*ep;
				void __iomem		*regs;
				int			is_in;
				u16			csr;

				if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
				    ctrlrequest->wValue != USB_ENDPOINT_HALT)
					break;

				ep = musb->endpoints + epnum;
				regs = ep->regs;
				is_in = ctrlrequest->wIndex & USB_DIR_IN;
				if (is_in)
					musb_ep = &ep->ep_in;
				else
					musb_ep = &ep->ep_out;
				if (!musb_ep->desc)
					break;

				musb_ep_select(mbase, epnum);
				if (is_in) {
					csr = musb_readw(regs, MUSB_TXCSR);
					if (csr & MUSB_TXCSR_FIFONOTEMPTY)
						csr |= MUSB_TXCSR_FLUSHFIFO;
					csr |= MUSB_TXCSR_P_SENDSTALL
						| MUSB_TXCSR_CLRDATATOG
						| MUSB_TXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_TXCSR, csr);
				} else {
					csr = musb_readw(regs, MUSB_RXCSR);
					csr |= MUSB_RXCSR_P_SENDSTALL
						| MUSB_RXCSR_FLUSHFIFO
						| MUSB_RXCSR_CLRDATATOG
						| MUSB_RXCSR_P_WZC_BITS;
					musb_writew(regs, MUSB_RXCSR, csr);
				}

				/* select ep0 again */
				musb_ep_select(mbase, 0);
				handled = 1;
				} break;

			default:
				/* class, vendor, etc ... delegate */
				handled = 0;
				break;
			}
			break;
		default:
			/* delegate SET_CONFIGURATION, etc */
			handled = 0;
		}
	} else
		handled = 0;
	return handled;
}

/* we have an ep0out data packet
 * Context: caller holds controller lock
 */
static void ep0_rxstate(struct musb *musb)
{
	void __iomem		*regs = musb->control_ep->regs;
	struct musb_request	*request;
	struct usb_request	*req;
	u16			count, csr;

	request = next_ep0_request(musb);
	req = &request->request;

	/* read packet and ack; or stall because of gadget driver bug:
	 * should have provided the rx buffer before setup() returned.
	 */
	if (req) {
		void *buf = req->buf + req->actual;
		unsigned len = req->length - req->actual;

		/* read the buffer */
		count = musb_readb(regs, MUSB_COUNT0);
		if (count > len) {
			req->status = -EOVERFLOW;
			count = len;
		}
		if (count > 0) {
			musb_read_fifo(&musb->endpoints[0], count, buf);
			req->actual += count;
		}
		csr = MUSB_CSR0_P_SVDRXPKTRDY;
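		/*
		 * ep0's maxpacket is 64 bytes, so a short packet (or hitting
		 * the requested length) means the OUT data stage is done and
		 * we can move on to the IN status stage.
		 */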
		if (count < 64 || req->actual == req->length) {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			csr |= MUSB_CSR0_P_DATAEND;
		} else
			req = NULL;
	} else
		csr = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;


	/* Completion handler may choose to stall, e.g. because the
	 * message just received holds invalid data.
	 */
	if (req) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, req);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}

/*
 * Transmit data to the host (control-IN). This code may be called both
 * from the IRQ handler and from kernel-thread context.
 *
 * Context: caller holds controller lock
 */
static void ep0_txstate(struct musb *musb)
{
	void __iomem		*regs = musb->control_ep->regs;
	struct musb_request	*req = next_ep0_request(musb);
	struct usb_request	*request;
	u16			csr = MUSB_CSR0_TXPKTRDY;
	u8			*fifo_src;
	u8			fifo_count;

	if (!req) {
		/* WARN_ON(1); */
		musb_dbg(musb, "odd; csr0 %04x", musb_readw(regs, MUSB_CSR0));
		return;
	}

	request = &req->request;

	/* load the data */
	fifo_src = (u8 *) request->buf + request->actual;
	fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
		request->length - request->actual);
	musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
	request->actual += fifo_count;

	/* update the flags */
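	/*
	 * A short packet, or reaching the requested length without the
	 * gadget asking for a trailing zero-length packet (request->zero),
	 * ends the IN data stage; otherwise stay in TX and keep the request
	 * queued for the next packet.
	 */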
	if (fifo_count < MUSB_MAX_END0_PACKET
			|| (request->actual == request->length
				&& !request->zero)) {
		musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
		csr |= MUSB_CSR0_P_DATAEND;
	} else
		request = NULL;

	/* report completions as soon as the fifo's loaded; there's no
	 * win in waiting till this last packet gets acked.  (other than
	 * very precise fault reporting, needed by USB TMC; possible with
	 * this hardware, but not usable from portable gadget drivers.)
	 */
	if (request) {
		musb->ackpend = csr;
		musb_g_ep0_giveback(musb, request);
		if (!musb->ackpend)
			return;
		musb->ackpend = 0;
	}

	/* send it out, triggering a "txpktrdy cleared" irq */
	musb_ep_select(musb->mregs, 0);
	musb_writew(regs, MUSB_CSR0, csr);
}

/*
 * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
 * Fields are left in USB byte-order.
 *
 * Context: caller holds controller lock.
 */
static void
musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
{
	struct musb_request	*r;
	void __iomem		*regs = musb->control_ep->regs;

	musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);

	/* NOTE: earlier 2.6 versions changed setup packets to host
	 * order, but now USB packets always stay in USB byte order.
	 */
	musb_dbg(musb, "SETUP req%02x.%02x v%04x i%04x l%d",
		req->bRequestType,
		req->bRequest,
		le16_to_cpu(req->wValue),
		le16_to_cpu(req->wIndex),
		le16_to_cpu(req->wLength));

	/* clean up any leftover transfers */
	r = next_ep0_request(musb);
	if (r)
		musb_g_ep0_giveback(musb, &r->request);

	/* For zero-data requests we want to delay the STATUS stage to
	 * avoid SETUPEND errors.  If we read data (OUT), delay accepting
	 * packets until there's a buffer to store them in.
	 *
	 * If we write data, the controller acts happier if we enable
	 * the TX FIFO right away, and give the controller a moment
	 * to switch modes...
	 */
	musb->set_address = false;
	musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
	if (req->wLength == 0) {
		if (req->bRequestType & USB_DIR_IN)
			musb->ackpend |= MUSB_CSR0_TXPKTRDY;
		musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
	} else if (req->bRequestType & USB_DIR_IN) {
		musb->ep0_state = MUSB_EP0_STAGE_TX;
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
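		/*
		 * Busy-wait for the controller to clear RXPKTRDY after the
		 * SVDRXPKTRDY ack, so the TX FIFO can be loaded for the IN
		 * data stage once the controller has switched direction.
		 */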
		while ((musb_readw(regs, MUSB_CSR0)
				& MUSB_CSR0_RXPKTRDY) != 0)
			cpu_relax();
		musb->ackpend = 0;
	} else
		musb->ep0_state = MUSB_EP0_STAGE_RX;
}

static int
forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
__releases(musb->lock)
__acquires(musb->lock)
{
	int retval;
	if (!musb->gadget_driver)
		return -EOPNOTSUPP;
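	/*
	 * Drop the controller lock around the gadget driver's setup()
	 * callback, since it may legitimately re-enter this driver (e.g.
	 * to queue an ep0 response or adjust endpoint halt status).
	 */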
	spin_unlock(&musb->lock);
	retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
	spin_lock(&musb->lock);
	return retval;
}

/*
 * Handle peripheral ep0 interrupt
 *
 * Context: irq handler; we won't re-enter the driver that way.
 */
irqreturn_t musb_g_ep0_irq(struct musb *musb)
{
	u16		csr;
	u16		len;
	void __iomem	*mbase = musb->mregs;
	void __iomem	*regs = musb->endpoints[0].regs;
	irqreturn_t	retval = IRQ_NONE;

	musb_ep_select(mbase, 0);	/* select ep0 */
	csr = musb_readw(regs, MUSB_CSR0);
	len = musb_readb(regs, MUSB_COUNT0);

	musb_dbg(musb, "csr %04x, count %d, ep0stage %s",
			csr, len, decode_ep0stage(musb->ep0_state));

	if (csr & MUSB_CSR0_P_DATAEND) {
		/*
		 * If DATAEND is set we should not call the callback,
		 * hence the status stage is not complete.
		 */
		return IRQ_HANDLED;
	}

	/* we sent a stall ... need to acknowledge it now */
	if (csr & MUSB_CSR0_P_SENTSTALL) {
		musb_writew(regs, MUSB_CSR0,
				csr & ~MUSB_CSR0_P_SENTSTALL);
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		csr = musb_readw(regs, MUSB_CSR0);
	}

	/* request ended "early" */
	if (csr & MUSB_CSR0_P_SETUPEND) {
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
		retval = IRQ_HANDLED;
		/* Transition into the early status phase */
		switch (musb->ep0_state) {
		case MUSB_EP0_STAGE_TX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
			break;
		case MUSB_EP0_STAGE_RX:
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			break;
		default:
			ERR("SetupEnd came in a wrong ep0stage %s\n",
			    decode_ep0stage(musb->ep0_state));
		}
		csr = musb_readw(regs, MUSB_CSR0);
		/* NOTE: request may need completion */
	}

	/* docs from Mentor only describe tx, rx, and idle/setup states.
	 * we need to handle nuances around status stages, and also the
	 * case where status and setup stages come back-to-back ...
	 */
	switch (musb->ep0_state) {

	case MUSB_EP0_STAGE_TX:
		/* irq on clearing txpktrdy */
		if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
			ep0_txstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_RX:
		/* irq on set rxpktrdy */
		if (csr & MUSB_CSR0_RXPKTRDY) {
			ep0_rxstate(musb);
			retval = IRQ_HANDLED;
		}
		break;

	case MUSB_EP0_STAGE_STATUSIN:
		/* end of sequence #2 (OUT/RX state) or #3 (no data) */

		/* update the address (if needed) only at the end of the
		 * status phase, per the usb spec, which also guarantees
		 * we get 10 msec to receive this irq... until this
		 * is done we won't see the next packet.
		 */
		if (musb->set_address) {
			musb->set_address = false;
			musb_writeb(mbase, MUSB_FADDR, musb->address);
		}

		/* enter test mode if needed (exit by reset) */
		else if (musb->test_mode) {
			musb_dbg(musb, "entering TESTMODE");

			if (MUSB_TEST_PACKET == musb->test_mode_nr)
				musb_load_testpacket(musb);

			musb_writeb(mbase, MUSB_TESTMODE,
					musb->test_mode_nr);
		}
		fallthrough;

	case MUSB_EP0_STAGE_STATUSOUT:
		/* end of sequence #1: write to host (TX state) */
		{
			struct musb_request	*req;

			req = next_ep0_request(musb);
			if (req)
				musb_g_ep0_giveback(musb, &req->request);
		}

		/*
		 * Since several interrupts can get coalesced, check
		 * whether we've already received a SETUP packet...
		 */
		if (csr & MUSB_CSR0_RXPKTRDY)
			goto setup;

		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;

	case MUSB_EP0_STAGE_IDLE:
		/*
		 * This state is typically (but not always) indiscernible
		 * from the status states, since the corresponding interrupts
		 * tend to happen within too short a period of time (with only
		 * a zero-length packet in between) and so get coalesced...
		 */
		retval = IRQ_HANDLED;
		musb->ep0_state = MUSB_EP0_STAGE_SETUP;
		fallthrough;

	case MUSB_EP0_STAGE_SETUP:
setup:
		if (csr & MUSB_CSR0_RXPKTRDY) {
			struct usb_ctrlrequest	setup;
			int			handled = 0;

			if (len != 8) {
				ERR("SETUP packet len %d != 8 ?\n", len);
				break;
			}
			musb_read_setup(musb, &setup);
			retval = IRQ_HANDLED;

			/* sometimes the RESET won't be reported */
			if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
				u8 power;

				printk(KERN_NOTICE "%s: peripheral reset "
						"irq lost!\n",
						musb_driver_name);
				power = musb_readb(mbase, MUSB_POWER);
				musb->g.speed = (power & MUSB_POWER_HSMODE)
					? USB_SPEED_HIGH : USB_SPEED_FULL;

			}

			switch (musb->ep0_state) {

			/* sequence #3 (no data stage), includes requests
			 * we can't forward (notably SET_ADDRESS and the
			 * device/endpoint feature set/clear operations)
			 * plus SET_CONFIGURATION and others we must
			 */
			case MUSB_EP0_STAGE_ACKWAIT:
				handled = service_zero_data_request(
						musb, &setup);

				/*
				 * We're expecting no data in any case, so
				 * always set the DATAEND bit -- doing this
				 * here helps avoid SetupEnd interrupt coming
				 * in the idle stage when we're stalling...
				 */
				musb->ackpend |= MUSB_CSR0_P_DATAEND;

				/* status stage might be immediate */
				if (handled > 0)
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSIN;
				break;

			/* sequence #1 (IN to host), includes GET_STATUS
			 * requests that we can't forward, GET_DESCRIPTOR
			 * and others that we must
			 */
			case MUSB_EP0_STAGE_TX:
				handled = service_in_request(musb, &setup);
				if (handled > 0) {
					musb->ackpend = MUSB_CSR0_TXPKTRDY
						| MUSB_CSR0_P_DATAEND;
					musb->ep0_state =
						MUSB_EP0_STAGE_STATUSOUT;
				}
				break;

			/* sequence #2 (OUT from host), always forward */
			default:		/* MUSB_EP0_STAGE_RX */
				break;
			}

			musb_dbg(musb, "handled %d, csr %04x, ep0stage %s",
				handled, csr,
				decode_ep0stage(musb->ep0_state));

			/* unless we need to delegate this to the gadget
			 * driver, we know how to wrap this up: csr0 has
			 * not yet been written.
			 */
			if (handled < 0)
				goto stall;
			else if (handled > 0)
				goto finish;

			handled = forward_to_driver(musb, &setup);
			if (handled < 0) {
				musb_ep_select(mbase, 0);
stall:
				musb_dbg(musb, "stall (%d)", handled);
				musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
				musb->ep0_state = MUSB_EP0_STAGE_IDLE;
finish:
				musb_writew(regs, MUSB_CSR0,
						musb->ackpend);
				musb->ackpend = 0;
			}
		}
		break;

	case MUSB_EP0_STAGE_ACKWAIT:
		/* This should not happen, but it does with tusb6010 running
		 * g_file_storage at high speed. Do nothing.
		 */
		retval = IRQ_HANDLED;
		break;

	default:
		/* "can't happen" */
		WARN_ON(1);
		musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		break;
	}

	return retval;
}


static int
musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
{
	/* always enabled */
	return -EINVAL;
}

static int musb_g_ep0_disable(struct usb_ep *e)
{
	/* always enabled */
	return -EINVAL;
}

static int
musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
{
	struct musb_ep		*ep;
	struct musb_request	*req;
	struct musb		*musb;
	int			status;
	unsigned long		lockflags;
	void __iomem		*regs;

	if (!e || !r)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	regs = musb->control_ep->regs;

	req = to_musb_request(r);
	req->musb = musb;
	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->tx = ep->is_in;

	spin_lock_irqsave(&musb->lock, lockflags);

	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

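	/* ep0 takes at most one request at a time, and only while a data
	 * stage or the zero-length ACKWAIT stage is actually pending;
	 * anything else has nothing to answer and is rejected.
	 */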
	switch (musb->ep0_state) {
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* zero-length data */
		status = 0;
		break;
	default:
		musb_dbg(musb, "ep0 request queued in state %d",
				musb->ep0_state);
		status = -EINVAL;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&req->list, &ep->req_list);

	musb_dbg(musb, "queue to %s (%s), length=%d",
			ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
			req->request.length);

	musb_ep_select(musb->mregs, 0);

	/* sequence #1, IN ... start writing the data */
	if (musb->ep0_state == MUSB_EP0_STAGE_TX)
		ep0_txstate(musb);

	/* sequence #3, no-data ... issue IN status */
	else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
		if (req->request.length)
			status = -EINVAL;
		else {
			musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
			musb_writew(regs, MUSB_CSR0,
					musb->ackpend | MUSB_CSR0_P_DATAEND);
			musb->ackpend = 0;
			musb_g_ep0_giveback(ep->musb, r);
		}

	/* else for sequence #2 (OUT), the caller provides a buffer
	 * before the next packet arrives; deferred responses
	 * (after SETUP is acked) are racy.
	 */
	} else if (musb->ackpend) {
		musb_writew(regs, MUSB_CSR0, musb->ackpend);
		musb->ackpend = 0;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}

static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
{
	/* we just won't support this */
	return -EINVAL;
}

static int musb_g_ep0_halt(struct usb_ep *e, int value)
{
	struct musb_ep		*ep;
	struct musb		*musb;
	void __iomem		*base, *regs;
	unsigned long		flags;
	int			status;
	u16			csr;

	if (!e || !value)
		return -EINVAL;

	ep = to_musb_ep(e);
	musb = ep->musb;
	base = musb->mregs;
	regs = musb->control_ep->regs;
	status = 0;

	spin_lock_irqsave(&musb->lock, flags);

	if (!list_empty(&ep->req_list)) {
		status = -EBUSY;
		goto cleanup;
	}

	musb_ep_select(base, 0);
	csr = musb->ackpend;

	switch (musb->ep0_state) {

	/* Stalls are usually issued after parsing SETUP packet, either
	 * directly in irq context from setup() or else later.
	 */
	case MUSB_EP0_STAGE_TX:		/* control-IN data */
	case MUSB_EP0_STAGE_ACKWAIT:	/* STALL for zero-length data */
	case MUSB_EP0_STAGE_RX:		/* control-OUT data */
		csr = musb_readw(regs, MUSB_CSR0);
		fallthrough;

	/* It's also OK to issue stalls during callbacks when a non-empty
	 * DATA stage buffer has been read (or even written).
	 */
	case MUSB_EP0_STAGE_STATUSIN:	/* control-OUT status */
	case MUSB_EP0_STAGE_STATUSOUT:	/* control-IN status */

		csr |= MUSB_CSR0_P_SENDSTALL;
		musb_writew(regs, MUSB_CSR0, csr);
		musb->ep0_state = MUSB_EP0_STAGE_IDLE;
		musb->ackpend = 0;
		break;
	default:
		musb_dbg(musb, "ep0 can't halt in state %d", musb->ep0_state);
		status = -EINVAL;
	}

cleanup:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

const struct usb_ep_ops musb_g_ep0_ops = {
	.enable		= musb_g_ep0_enable,
	.disable	= musb_g_ep0_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_g_ep0_queue,
	.dequeue	= musb_g_ep0_dequeue,
	.set_halt	= musb_g_ep0_halt,
};
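
/*
 * These ops are what the gadget core ends up calling through the usual
 * usb_ep_*() wrappers once a function driver binds.  As an illustrative
 * sketch (not code from this driver): a gadget driver's ->setup() handler
 * that wants to answer a control-IN request typically copies its response
 * into a pre-allocated ep0 request and queues it, e.g.
 *
 *	req->length = min(w_length, sizeof(response));
 *	memcpy(req->buf, response, req->length);
 *	req->zero = req->length < w_length;
 *	status = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
 *
 * which lands in musb_g_ep0_queue() above while ep0 is in the TX stage.
 * Names such as "response" and "w_length" here are only placeholders.
 */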