/* xref: /OK3568_Linux_fs/kernel/drivers/usb/dwc3/ep0.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0
/*
 * ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>

#include "core.h"
#include "debug.h"
#include "gadget.h"
#include "io.h"

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req);

static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
		dma_addr_t buf_dma, u32 len, u32 type, bool chain)
{
	struct dwc3_trb			*trb;
	struct dwc3			*dwc;

	dwc = dep->dwc;
	trb = &dwc->ep0_trb[dep->trb_enqueue];

	if (chain)
		dep->trb_enqueue++;

	trb->bpl = lower_32_bits(buf_dma);
	trb->bph = upper_32_bits(buf_dma);
	trb->size = len;
	trb->ctrl = type;

	trb->ctrl |= (DWC3_TRB_CTRL_HWO
			| DWC3_TRB_CTRL_ISP_IMI);

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;
	else
		trb->ctrl |= (DWC3_TRB_CTRL_IOC
				| DWC3_TRB_CTRL_LST);

	trace_dwc3_prepare_trb(dep, trb);
}

static int dwc3_ep0_start_trans(struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3			*dwc;
	int				ret;

	if (dep->flags & DWC3_EP_TRANSFER_STARTED)
		return 0;

	dwc = dep->dwc;

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0)
		return ret;

	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}

static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->epnum		= dep->number;

	list_add_tail(&req->list, &dep->pending_list);

	/*
	 * The gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as the gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned int direction;

		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);

		return 0;
	}

	/*
	 * In case the gadget driver asked us to delay the STATUS phase,
	 * handle it here.
	 */
	if (dwc->delayed_status) {
		unsigned int direction;

		direction = !dwc->ep0_expect_in;
		dwc->delayed_status = false;
		usb_gadget_set_state(dwc->gadget, USB_STATE_CONFIGURED);

		if (dwc->ep0state == EP0_STATUS_PHASE)
			__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);

		return 0;
	}

	/*
	 * Unfortunately we have uncovered a limitation with respect to the
	 * Data Phase.
	 *
	 * Section 9.4 says we can wait for the XferNotReady(DATA) event to
	 * come before issuing the Start Transfer command, but if we do, we
	 * will miss situations where the host starts another SETUP phase
	 * instead of the DATA phase.  Such cases happen at least on TD.7.6 of
	 * the Link Layer Compliance Suite.
	 *
	 * The problem surfaces due to the fact that in case of back-to-back
	 * SETUP packets there will be no XferNotReady(DATA) generated and we
	 * will be stuck waiting for XferNotReady(DATA) forever.
	 *
	 * By looking at tables 9-13 and 9-14 of the Databook, we can see that
	 * it tells us to start the Data Phase right away. It also mentions
	 * that if we receive a SETUP phase instead of the DATA phase, the
	 * core will issue XferComplete for the DATA phase, before actually
	 * initiating it on the wire, with the TRB's status set to
	 * "SETUP_PENDING". Such a status can only be used to print some
	 * debugging logs, as the core expects us to go through to the STATUS
	 * phase and start a CONTROL_STATUS TRB, just so it completes right
	 * away, without transferring anything and, only then, we can go back
	 * to the SETUP phase.
	 *
	 * Because of this scenario, SNPS decided to change the programming
	 * model of control transfers and support on-demand transfers only for
	 * the STATUS phase. To fix the issue we have now, we will always wait
	 * for the gadget driver to queue the DATA phase's struct usb_request,
	 * then start it right away.
	 *
	 * If we're actually in a 2-stage transfer, we will wait for
	 * XferNotReady(STATUS).
	 */
	if (dwc->three_stage_setup) {
		unsigned int direction;

		direction = dwc->ep0_expect_in;
		dwc->ep0state = EP0_DATA_PHASE;

		__dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);

		dep->flags &= ~DWC3_EP0_DIR_IN;
	}

	return 0;
}

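/*
 * Editor's note: a minimal, illustrative sketch (not part of this driver)
 * of the gadget-side counterpart to the delayed_status branch above. A
 * composite function driver that cannot finish a control request inside
 * its setup() callback returns USB_GADGET_DELAYED_STATUS and later calls
 * usb_composite_setup_continue(), which re-enters the UDC through
 * usb_ep_queue() and lands in dwc3_gadget_ep0_queue() below. The function
 * names inside the #if 0 block are hypothetical.
 */
#if 0
static int example_f_setup(struct usb_function *f,
			   const struct usb_ctrlrequest *ctrl)
{
	/* decode the request, kick off asynchronous work ... */
	return USB_GADGET_DELAYED_STATUS;	/* defer the status stage */
}

static void example_f_work_done(struct usb_composite_dev *cdev)
{
	/* work finished: let composite queue the zero-length status */
	usb_composite_setup_continue(cdev);
}
#endif
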
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
				dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->pending_list)) {
		ret = -EBUSY;
		goto out;
	}

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep		*dep;

	/* reinitialize physical ep1 */
	dep = dwc->eps[1];
	dep->flags = DWC3_EP_ENABLED;

	/* stall is always issued on EP0 */
	dep = dwc->eps[0];
	__dwc3_gadget_ep_set_halt(dep, 1, false);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->pending_list)) {
		struct dwc3_request	*req;

		req = next_request(&dep->pending_list);
		dwc3_gadget_giveback(dep, req, -ECONNRESET);
	}

	dwc->eps[0]->trb_enqueue = 0;
	dwc->eps[1]->trb_enqueue = 0;
	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	dwc3_ep0_stall_and_restart(dwc);

	return 0;
}

int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
{
	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;
	unsigned long			flags;
	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep0_set_halt(ep, value);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	struct dwc3_ep			*dep;
	int				ret;
	int                             i;

	complete(&dwc->ep0_in_setup);

	dep = dwc->eps[0];
	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP, false);
	ret = dwc3_ep0_start_trans(dep);
	WARN_ON(ret < 0);
	for (i = 2; i < DWC3_ENDPOINTS_NUM; i++) {
		struct dwc3_ep *dwc3_ep;

		dwc3_ep = dwc->eps[i];
		if (!dwc3_ep)
			continue;

		if (!(dwc3_ep->flags & DWC3_EP_DELAY_STOP))
			continue;

		dwc3_ep->flags &= ~DWC3_EP_DELAY_STOP;
		if (dwc->connected)
			dwc3_stop_active_transfer(dwc3_ep, true, true);
		else
			dwc3_remove_requests(dwc, dwc3_ep, -ESHUTDOWN);
	}
}

static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	struct dwc3_ep		*dep;
	u32			windex = le16_to_cpu(wIndex_le);
	u32			ep, epnum;
	u8			num_in_eps, num_out_eps, min_eps;

	num_in_eps = DWC3_NUM_IN_EPS(&dwc->hwparams);
	num_out_eps = dwc->num_eps - num_in_eps;
	min_eps = min_t(u8, num_in_eps, num_out_eps);
	ep = windex & USB_ENDPOINT_NUMBER_MASK;

	if (ep + 1 > min_eps && num_in_eps != num_out_eps) {
		epnum = ep + min_eps;

	} else {
		epnum = ep << 1;
		if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
			epnum |= 1;
	}

	dep = dwc->eps[epnum];
	if (dep == NULL)
		return NULL;

	if (dep->flags & DWC3_EP_ENABLED)
		return dep;

	return NULL;
}

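/*
 * Editor's note: worked example of the wIndex mapping above, assuming a
 * symmetric layout (num_in_eps == num_out_eps). Physical endpoints are
 * interleaved as OUT0, IN0, OUT1, IN1, ..., so wIndex 0x81 (EP1 IN) gives
 * ep = 1 and epnum = (1 << 1) | 1 = 3, i.e. dwc->eps[3], while wIndex
 * 0x01 (EP1 OUT) maps to epnum = 2. The asymmetric branch simply places
 * the surplus endpoints after the first min_eps IN/OUT pairs.
 */
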
static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}
/*
 * ch 9.4.5
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep		*dep;
	u32			recip;
	u32			value;
	u32			reg;
	u16			usb_status = 0;
	__le16			*response_pkt;

	/* We don't support PTM_STATUS */
	value = le16_to_cpu(ctrl->wValue);
	if (value != 0)
		return -EINVAL;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * LTM will be set once we know how to set this in HW.
		 */
		usb_status |= dwc->gadget->is_selfpowered;

		if ((dwc->speed == DWC3_DSTS_SUPERSPEED) ||
		    (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (reg & DWC3_DCTL_INITU1ENA)
				usb_status |= 1 << USB_DEV_STAT_U1_ENABLED;
			if (reg & DWC3_DCTL_INITU2ENA)
				usb_status |= 1 << USB_DEV_STAT_U2_ENABLED;
		}

		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable	D0
		 * Function Remote Wakeup	D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

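/*
 * Editor's note: example of the two-byte device GET_STATUS bitmap built
 * above. Bit 0 reports Self Powered, bit 2 (USB_DEV_STAT_U1_ENABLED) and
 * bit 3 (USB_DEV_STAT_U2_ENABLED) report U1/U2 initiation on SuperSpeed
 * links; remote wakeup (bit 1) is not reported here. A self-powered
 * SuperSpeed device with both U1 and U2 enabled therefore answers the
 * data stage with cpu_to_le16(0x000d).
 */
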
static int dwc3_ep0_handle_u1(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 reg;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;
	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
			(dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return -EINVAL;
	if (set && dwc->dis_u1_entry_quirk)
		return -EINVAL;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		reg |= DWC3_DCTL_INITU1ENA;
	else
		reg &= ~DWC3_DCTL_INITU1ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

static int dwc3_ep0_handle_u2(struct dwc3 *dwc, enum usb_device_state state,
		int set)
{
	u32 reg;

	if (state != USB_STATE_CONFIGURED)
		return -EINVAL;
	if ((dwc->speed != DWC3_DSTS_SUPERSPEED) &&
			(dwc->speed != DWC3_DSTS_SUPERSPEED_PLUS))
		return -EINVAL;
	if (set && dwc->dis_u2_entry_quirk)
		return -EINVAL;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (set)
		reg |= DWC3_DCTL_INITU2ENA;
	else
		reg &= ~DWC3_DCTL_INITU2ENA;
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}

static int dwc3_ep0_handle_test(struct dwc3 *dwc, enum usb_device_state state,
		u32 wIndex, int set)
{
	if ((wIndex & 0xff) != 0)
		return -EINVAL;
	if (!set)
		return -EINVAL;

	switch (wIndex >> 8) {
	case USB_TEST_J:
	case USB_TEST_K:
	case USB_TEST_SE0_NAK:
	case USB_TEST_PACKET:
	case USB_TEST_FORCE_ENABLE:
		dwc->test_mode_nr = wIndex >> 8;
		dwc->test_mode = true;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

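/*
 * Editor's note: example for the handler above. SetFeature(TEST_MODE)
 * carries the selector in the high byte of wIndex, so a host requesting
 * Test_Packet sends wIndex = 0x0400 (USB_TEST_PACKET << 8) with the low
 * byte zero. The selector is only latched here; it is programmed into the
 * hardware from dwc3_ep0_complete_status() once the status stage of this
 * request has completed.
 */
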
static int dwc3_ep0_handle_device(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	enum usb_device_state	state;
	u32			wValue;
	u32			wIndex;
	int			ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	state = dwc->gadget->state;

	switch (wValue) {
	case USB_DEVICE_REMOTE_WAKEUP:
		break;
	/*
	 * 9.4.1 says only for SS, in AddressState only for
	 * default control pipe
	 */
	case USB_DEVICE_U1_ENABLE:
		ret = dwc3_ep0_handle_u1(dwc, state, set);
		break;
	case USB_DEVICE_U2_ENABLE:
		ret = dwc3_ep0_handle_u2(dwc, state, set);
		break;
	case USB_DEVICE_LTM_ENABLE:
		ret = -EINVAL;
		break;
	case USB_DEVICE_TEST_MODE:
		ret = dwc3_ep0_handle_test(dwc, state, wIndex, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_handle_intf(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32			wValue;
	int			ret = 0;

	wValue = le16_to_cpu(ctrl->wValue);

	switch (wValue) {
	case USB_INTRF_FUNC_SUSPEND:
		/*
		 * REVISIT: Ideally we would enable some low power mode here,
		 * however it's unclear what we should be doing here.
		 *
		 * For now, we're not doing anything, just making sure we return
		 * 0 so USB Command Verifier tests pass without any errors.
		 */
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep		*dep;
	u32			wValue;
	int			ret;

	wValue = le16_to_cpu(ctrl->wValue);

	switch (wValue) {
	case USB_ENDPOINT_HALT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
			break;

		ret = __dwc3_gadget_ep_set_halt(dep, set, true);
		if (ret)
			return -EINVAL;

		/* ClearFeature(Halt) may need delayed status */
		if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
			return USB_GADGET_DELAYED_STATUS;

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	u32			recip;
	int			ret;

	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:
		ret = dwc3_ep0_handle_device(dwc, ctrl, set);
		break;
	case USB_RECIP_INTERFACE:
		ret = dwc3_ep0_handle_intf(dwc, ctrl, set);
		break;
	case USB_RECIP_ENDPOINT:
		ret = dwc3_ep0_handle_endpoint(dwc, ctrl, set);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget->state;
	u32 addr;
	u32 reg;

	addr = le16_to_cpu(ctrl->wValue);
	if (addr > 127) {
		dev_err(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	if (state == USB_STATE_CONFIGURED) {
		dev_err(dwc->dev, "can't SetAddress() from Configured State\n");
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	if (addr)
		usb_gadget_set_state(dwc->gadget, USB_STATE_ADDRESS);
	else
		usb_gadget_set_state(dwc->gadget, USB_STATE_DEFAULT);

	return 0;
}

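/*
 * Editor's note: worked example of the handler above. SetAddress with
 * wValue = 5 received in the Default state programs DCFG.DEVADDR = 5 and
 * moves the gadget to USB_STATE_ADDRESS; SetAddress(0) drops it back to
 * USB_STATE_DEFAULT. An address above 127, or a SetAddress issued while
 * Configured, is rejected and ends in dwc3_ep0_stall_and_restart() via
 * the error path of dwc3_ep0_inspect_setup().
 */
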
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret = -EINVAL;

	if (dwc->async_callbacks) {
		spin_unlock(&dwc->lock);
		ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
		spin_lock(&dwc->lock);
	}
	return ret;
}

static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = dwc->gadget->state;
	u32 cfg;
	int ret;
	u32 reg;

	cfg = le16_to_cpu(ctrl->wValue);

	switch (state) {
	case USB_STATE_DEFAULT:
		return -EINVAL;

	case USB_STATE_ADDRESS:
		dwc3_gadget_clear_tx_fifos(dwc);

		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg was accepted and is non-zero */
		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {

			/*
			 * only change state if set_config has already
			 * been processed. If the gadget driver returns
			 * USB_GADGET_DELAYED_STATUS, we will wait
			 * to change the state on the next usb_ep_queue()
			 */
			if (ret == 0)
				usb_gadget_set_state(dwc->gadget,
						USB_STATE_CONFIGURED);

			/*
			 * Enable transition to U1/U2 state when
			 * nothing is pending from the application.
			 */
			reg = dwc3_readl(dwc->regs, DWC3_DCTL);
			if (!dwc->dis_u1_entry_quirk)
				reg |= DWC3_DCTL_ACCEPTU1ENA;
			if (!dwc->dis_u2_entry_quirk)
				reg |= DWC3_DCTL_ACCEPTU2ENA;
			dwc3_writel(dwc->regs, DWC3_DCTL, reg);
		}
		break;

	case USB_STATE_CONFIGURED:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg && !ret)
			usb_gadget_set_state(dwc->gadget,
					USB_STATE_ADDRESS);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
{
	struct dwc3_ep	*dep = to_dwc3_ep(ep);
	struct dwc3	*dwc = dep->dwc;

	u32		param = 0;
	u32		reg;

	struct timing {
		u8	u1sel;
		u8	u1pel;
		__le16	u2sel;
		__le16	u2pel;
	} __packed timing;

	int		ret;

	memcpy(&timing, req->buf, sizeof(timing));

	dwc->u1sel = timing.u1sel;
	dwc->u1pel = timing.u1pel;
	dwc->u2sel = le16_to_cpu(timing.u2sel);
	dwc->u2pel = le16_to_cpu(timing.u2pel);

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	if (reg & DWC3_DCTL_INITU2ENA)
		param = dwc->u2pel;
	if (reg & DWC3_DCTL_INITU1ENA)
		param = dwc->u1pel;

	/*
	 * According to the Synopsys Databook, if the parameter is
	 * greater than 125, a value of zero should be
	 * programmed in the register.
	 */
	if (param > 125)
		param = 0;

	/* now that we have the time, issue DGCMD Set Sel */
	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_PERIODIC_PAR, param);
	WARN_ON(ret < 0);
}

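/*
 * Editor's note: the 6-byte Set SEL payload decoded above is laid out as
 * { u1sel, u1pel, u2sel (le16), u2pel (le16) }, all exit latencies in
 * microseconds. Example: a host sending 0a 14 1e 00 28 00 yields
 * u1sel = 10, u1pel = 20, u2sel = 30 and u2pel = 40; with U1 initiation
 * enabled the "Set Periodic Parameters" device generic command is issued
 * with param = u1pel = 20, and any value above 125 us is clamped to 0 as
 * required by the Databook.
 */
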
static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep	*dep;
	enum usb_device_state state = dwc->gadget->state;
	u16		wLength;

	if (state == USB_STATE_DEFAULT)
		return -EINVAL;

	wLength = le16_to_cpu(ctrl->wLength);

	if (wLength != 6) {
		dev_err(dwc->dev, "Set SEL should be 6 bytes, got %d\n",
				wLength);
		return -EINVAL;
	}

	/*
	 * To handle Set SEL we need to receive 6 bytes from the host. So
	 * let's queue a usb_request for 6 bytes.
	 *
	 * Remember, though, this controller can't handle non-wMaxPacketSize
	 * aligned transfers on the OUT direction, so we queue a request for
	 * wMaxPacketSize instead.
	 */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = dep->endpoint.maxpacket;
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_set_sel_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}

static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u16		wLength;
	u16		wValue;
	u16		wIndex;

	wValue = le16_to_cpu(ctrl->wValue);
	wLength = le16_to_cpu(ctrl->wLength);
	wIndex = le16_to_cpu(ctrl->wIndex);

	if (wIndex || wLength)
		return -EINVAL;

	dwc->gadget->isoch_delay = wValue;

	return 0;
}

static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	case USB_REQ_SET_SEL:
		ret = dwc3_ep0_set_sel(dwc, ctrl);
		break;
	case USB_REQ_SET_ISOCH_DELAY:
		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
		break;
	default:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}

static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
	int ret = -EINVAL;
	u32 len;
	struct dwc3_vendor	*vdwc = container_of(dwc, struct dwc3_vendor, dwc);

	if (!dwc->gadget_driver || !vdwc->softconnect || !dwc->connected)
		goto out;

	trace_dwc3_ctrl_req(ctrl);

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

out:
	if (ret < 0)
		dwc3_ep0_stall_and_restart(dwc);
}

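/*
 * Editor's note: example of the wLength-based classification above. A
 * GET_DESCRIPTOR(DEVICE) setup packet with wLength = 18 selects a
 * three-stage control transfer with ep0_expect_in = true and the next
 * expected event set to DWC3_EP0_NRDY_DATA, while SET_CONFIGURATION
 * (wLength = 0) is a two-stage transfer that goes straight to
 * DWC3_EP0_NRDY_STATUS with no data stage at all.
 */
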
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request	*r;
	struct usb_request	*ur;
	struct dwc3_trb		*trb;
	struct dwc3_ep		*ep0;
	u32			transferred = 0;
	u32			status;
	u32			length;
	u8			epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	trb = dwc->ep0_trb;
	trace_dwc3_complete_trb(ep0, trb);

	r = next_request(&ep0->pending_list);
	if (!r)
		return;

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING) {
		dwc->setup_packet_pending = true;
		if (r)
			dwc3_gadget_giveback(ep0, r, -ECONNRESET);

		return;
	}

	ur = &r->request;

	length = trb->size & DWC3_TRB_SIZE_MASK;
	transferred = ur->length - length;
	ur->actual += transferred;

	if ((IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
	     ur->length && ur->zero) || dwc->ep0_bounced) {
		trb++;
		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
		trace_dwc3_complete_trb(ep0, trb);

		if (r->direction)
			dwc->eps[1]->trb_enqueue = 0;
		else
			dwc->eps[0]->trb_enqueue = 0;

		dwc->ep0_bounced = false;
	}

	if ((epnum & 1) && ur->actual < ur->length)
		dwc3_ep0_stall_and_restart(dwc);
	else
		dwc3_gadget_giveback(ep0, r, 0);
}

static void dwc3_ep0_complete_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request	*r;
	struct dwc3_ep		*dep;
	struct dwc3_trb		*trb;
	u32			status;

	dep = dwc->eps[0];
	trb = dwc->ep0_trb;

	trace_dwc3_complete_trb(dep, trb);

	if (!list_empty(&dep->pending_list)) {
		r = next_request(&dep->pending_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	if (dwc->test_mode) {
		int ret;

		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_err(dwc->dev, "invalid test #%d\n",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}
	}

	status = DWC3_TRB_SIZE_TRBSTS(trb->size);
	if (status == DWC3_TRBSTS_SETUP_PENDING)
		dwc->setup_packet_pending = true;

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}

static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
			const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	dep->resource_index = 0;
	dwc->setup_packet_pending = false;

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dwc3_ep0_complete_status(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}

static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
		struct dwc3_ep *dep, struct dwc3_request *req)
{
	unsigned int		trb_length = 0;
	int			ret;

	req->direction = !!dep->number;

	if (req->request.length == 0) {
		if (!req->direction)
			trb_length = dep->endpoint.maxpacket;

		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length,
				DWC3_TRBCTL_CONTROL_DATA, false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
			&& (dep->number == 0)) {
		u32	maxpacket;
		u32	rem;

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		maxpacket = dep->endpoint.maxpacket;
		rem = req->request.length % maxpacket;
		dwc->ep0_bounced = true;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 maxpacket - rem,
					 DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
		   req->request.length && req->request.zero) {

		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		/* prepare normal TRB */
		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
					 req->request.length,
					 DWC3_TRBCTL_CONTROL_DATA,
					 true);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];

		if (!req->direction)
			trb_length = dep->endpoint.maxpacket;

		/* Now prepare one extra TRB to align transfer size */
		dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
					 trb_length, DWC3_TRBCTL_CONTROL_DATA,
					 false);
		ret = dwc3_ep0_start_trans(dep);
	} else {
		ret = usb_gadget_map_request_by_dev(dwc->sysdev,
				&req->request, dep->number);
		if (ret)
			return;

		dwc3_ep0_prepare_one_trb(dep, req->request.dma,
				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
				false);

		req->trb = &dwc->ep0_trb[dep->trb_enqueue];

		ret = dwc3_ep0_start_trans(dep);
	}

	WARN_ON(ret < 0);
}

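/*
 * Editor's note: worked example for the unaligned OUT branch above,
 * assuming a wMaxPacketSize of 64 on ep0. A control-OUT data stage of 70
 * bytes is not maxpacket aligned, so a chained TRB carries the 70 real
 * bytes and a second TRB of maxpacket - rem = 64 - (70 % 64) = 58 bytes
 * points at the bounce buffer, making the overall transfer size a
 * multiple of wMaxPacketSize as the controller requires;
 * dwc3_ep0_complete_data() later clears the extra TRB's HWO bit and
 * resets ep0_bounced.
 */
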
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			type;

	type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
		: DWC3_TRBCTL_CONTROL_STATUS2;

	dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, type, false);
	return dwc3_ep0_start_trans(dep);
}

static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	WARN_ON(dwc3_ep0_start_control_status(dep));
}

static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep		*dep = dwc->eps[event->endpoint_number];

	__dwc3_ep0_do_control_status(dwc, dep);
}

void dwc3_ep0_send_delayed_status(struct dwc3 *dwc)
{
	unsigned int direction = !dwc->ep0_expect_in;
	struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, dwc);

	dwc->delayed_status = false;
	vdwc->clear_stall_protocol = 0;

	if (dwc->ep0state != EP0_STATUS_PHASE)
		return;

	__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
}

void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;
	int			ret;

	/*
	 * For status/DATA OUT stage, TRB will be queued on ep0 out
	 * endpoint for which resource index is zero. Hence allow
	 * queuing ENDXFER command for ep0 out endpoint.
	 */
	if (!dep->resource_index && dep->number)
		return;

	cmd = DWC3_DEPCMD_ENDTRANSFER;
	cmd |= DWC3_DEPCMD_CMDIOC;
	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
	memset(&params, 0, sizeof(params));
	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
	WARN_ON_ONCE(ret);
	dep->resource_index = 0;
}

static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_vendor	*vdwc = container_of(dwc, struct dwc3_vendor, dwc);

	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_DATA:
		if (!vdwc->softconnect || !dwc->connected)
			return;
		/*
		 * We already have a DATA transfer in the controller's cache,
		 * so if we receive an XferNotReady(DATA) we will ignore it,
		 * unless it's for the wrong direction.
		 *
		 * In that case, we must issue the END_TRANSFER command to the
		 * Data Phase we already have started and issue SetStall on
		 * the control endpoint.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			struct dwc3_ep	*dep = dwc->eps[dwc->ep0_expect_in];

			dev_err(dwc->dev, "unexpected direction for Data Phase\n");
			dwc3_ep0_end_control_data(dwc, dep);
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS)
			return;

		if (dwc->setup_packet_pending) {
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->delayed_status) {
			struct dwc3_ep *dep = dwc->eps[0];

			WARN_ON_ONCE(event->endpoint_number != 1);
			/*
			 * We should handle the delayed STATUS phase here if
			 * the request for handling the delayed STATUS has
			 * been queued into the list.
			 */
			if (!list_empty(&dep->pending_list)) {
				dwc->delayed_status = false;
				usb_gadget_set_state(dwc->gadget,
						     USB_STATE_CONFIGURED);
				dwc3_ep0_do_control_status(dwc, event);
			}

			return;
		}

		dwc3_ep0_do_control_status(dwc, event);
	}
}

void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep	*dep = dwc->eps[event->endpoint_number];
	u8		cmd;

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
		break;
	case DWC3_DEPEVT_EPCMDCMPLT:
		cmd = DEPEVT_PARAMETER_CMD(event->parameters);

		if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
			dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
			dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
		}
		break;
	}
}