xref: /OK3568_Linux_fs/u-boot/drivers/usb/dwc3/gadget.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Taken from Linux Kernel v3.19-rc1 (drivers/usb/dwc3/gadget.c) and ported
 * to uboot.
 *
 * commit 8e74475b0e : usb: dwc3: gadget: use udc-core's reset notifier
 *
 * SPDX-License-Identifier:     GPL-2.0
 */

#include <common.h>
#include <malloc.h>
#include <asm/dma-mapping.h>
#include <linux/bug.h>
#include <linux/list.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "core.h"
#include "gadget.h"
#include "io.h"

#include "linux-compat.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if a wrong Test Selector
 * is passed.
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}
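
/*
 * Illustrative note (editor addition, not part of the original source): the
 * test selectors above are the standard USB 2.0 ones (TEST_J, TEST_K,
 * TEST_SE0_NAK, TEST_PACKET, TEST_FORCE_EN from <linux/usb/ch9.h>), so a host
 * SET_FEATURE(TEST_MODE) request selecting Test_Packet would typically reach
 * this function as dwc3_gadget_set_test_mode(dwc, TEST_PACKET) and program
 * TEST_PACKET << 1 into the DCTL TstCtl field that was cleared just above.
 */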

/**
 * dwc3_gadget_get_link_state - Gets current state of USB Link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32		reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int		retries = 10000;
	u32		reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best-effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the amount of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int		last_fifo_depth = 0;
	int		fifo_size;
	int		mdwidth;
	int		num;

	if (!dwc->needs_fifo_resize)
		return 0;

	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < dwc->num_in_eps; num++) {
		/* bit0 indicates direction; 1 means IN ep */
		struct dwc3_ep	*dep = dwc->eps[(num << 1) | 1];
		int		mult = 1;
		int		tmp;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}
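
/*
 * Worked example (editor addition, illustrative only): on a core with a
 * 64-bit master bus, DWC3_MDWIDTH() yields 64, so mdwidth becomes 8 bytes.
 * For an enabled SuperSpeed bulk IN endpoint with maxpacket = 1024 the loop
 * above computes tmp = 3 * (1024 + 8) + 8 = 3104 and
 * fifo_size = DIV_ROUND_UP(3104, 8) = 388 MDWIDTH-words, which is then OR'ed
 * with the running FIFO start address (last_fifo_depth << 16) and written to
 * that endpoint's GTXFIFOSIZ register.
 */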

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3			*dwc = dep->dwc;

	if (req->queued) {
		dep->busy_slot++;
		/*
		 * Skip LINK TRB. We can't use req->trb and check for
		 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
		 * just completed (not the LINK TRB).
		 */
		if (((dep->busy_slot & DWC3_TRB_MASK) ==
			DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
			dep->busy_slot++;
		req->queued = false;
	}

	list_del(&req->list);
	req->trb = NULL;
	if (req->request.length)
		dwc3_flush_cache((uintptr_t)req->request.dma, req->request.length);

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32		timeout = 500;
	u32		reg;

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	u32			timeout = 500;
	u32			reg;

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32		offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(sizeof(struct dwc3_trb) *
					   DWC3_TRB_NUM,
					   (unsigned long *)&dep->trb_pool_dma);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	dma_free_coherent(dep->trb_pool);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32			cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	if (restore) {
		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
		params.param2 |= dep->saved_state;
	}

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}
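
/*
 * Illustrative example (editor addition, not part of the original source):
 * with the 1:1 mapping described above, a bulk IN endpoint with USB address
 * 0x81 uses dep->number = (1 << 1) | 1 = 3, so this function issues
 * SETEPCONFIG with DWC3_DEPCFG_EP_NUMBER(3) and, because the endpoint is an
 * IN endpoint, DWC3_DEPCFG_FIFO_NUMBER(3 >> 1), i.e. TX FIFO 1.
 */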

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;
	int			ret;

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
			restore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb	*trb_st_hw;
		struct dwc3_trb	*trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request		*req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number, true);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3		*dwc = dep->dwc;
	u32			reg;

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep			*dep;
	unsigned long			flags = 0;
	int				ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);

	if (dep->flags & DWC3_EP_ENABLED) {
		WARN(true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep			*dep;
	unsigned long			flags = 0;
	int				ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		WARN(true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req;
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->epnum	= dep->number;
	req->dep	= dep;

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);

	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3_trb		*trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;
	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	/*
	 * According to chapter 8.2.3.3 of the DWC3 Databook,
	 * for OUT endpoints, the total size of a Buffer Descriptor must be a
	 * multiple of MaxPacketSize. So amend the TRB size to apply this rule.
	 */
	if (usb_endpoint_dir_out(dep->endpoint.desc)) {
		length = dep->endpoint.maxpacket *
			((length - 1) / dep->endpoint.maxpacket + 1);
	}
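
	/*
	 * Illustrative example (editor addition, not from the original
	 * source): with maxpacket = 512, a 13-byte OUT request gets a TRB
	 * buffer size of 512 here, and a 513-byte one gets 1024, so the
	 * controller always sees a multiple of MaxPacketSize for OUT
	 * transfers.
	 */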

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;

		/* always enable Interrupt on Missed ISOC */
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	/* always enable Continue on Short Packet */
	trb->ctrl |= DWC3_TRB_CTRL_CSP;

	if (!req->request.no_interrupt && !chain)
		trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;

	if (last)
		trb->ctrl |= DWC3_TRB_CTRL_LST;

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	dwc3_flush_cache((uintptr_t)dma, length);
	dwc3_flush_cache((uintptr_t)trb, sizeof(*trb));
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request	*req, *n;
	u32			trbs_left;
	u32			max;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy_slot and free_slot are equal then the ring is either full
	 * or empty. If we are starting to process requests then we are
	 * empty. Otherwise we are full and don't do anything.
	 */
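	/*
	 * Worked example (editor addition, illustrative only): the slots are
	 * ring indices masked by DWC3_TRB_MASK, so assuming, say, an 8-TRB
	 * ring, busy_slot = 5 and free_slot = 5 give trbs_left = 0, which is
	 * treated as "empty" below only when 'starting' is true.
	 */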
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned	length;
		dma_addr_t	dma;

		dma = req->request.dma;
		length = req->request.length;

		dwc3_prepare_one_trb(dep, req, dma, length,
				     true, false, 0);

		break;
	}
}

static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		int start_new)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_request		*req;
	struct dwc3			*dwc = dep->dwc;
	int				ret;
	u32				cmd;

	if (start_new && (dep->flags & DWC3_EP_BUSY)) {
		dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
		return -EBUSY;
	}
	dep->flags &= ~DWC3_EP_PENDING_REQUEST;

	/*
	 * If we are getting here after a short-out-packet we don't enqueue any
	 * new requests as we try to set the IOC bit only on the last request.
	 */
	if (start_new) {
		if (list_empty(&dep->req_queued))
			dwc3_prepare_trbs(dep, start_new);

		/* req points to the first request which will be sent */
		req = next_request(&dep->req_queued);
	} else {
		dwc3_prepare_trbs(dep, start_new);

		/*
		 * req points to the first request where HWO changed from 0 to 1
		 */
		req = next_request(&dep->req_queued);
	}
	if (!req) {
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return 0;
	}

	memset(&params, 0, sizeof(params));

	if (start_new) {
		params.param0 = upper_32_bits(req->trb_dma);
		params.param1 = lower_32_bits(req->trb_dma);
		cmd = DWC3_DEPCMD_STARTTRANSFER;
	} else {
		cmd = DWC3_DEPCMD_UPDATETRANSFER;
	}

	cmd |= DWC3_DEPCMD_PARAM(cmd_param);
	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");

		/*
		 * FIXME we need to iterate over the list of requests
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
		list_del(&req->list);
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;

	if (start_new) {
		dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
				dep->number);
		WARN_ON_ONCE(!dep->resource_index);
	}

	return 0;
}

static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, u32 cur_uf)
{
	u32 uf;

	if (list_empty(&dep->request_list)) {
		dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
			dep->name);
		dep->flags |= DWC3_EP_PENDING_REQUEST;
		return;
	}

	/* 4 micro frames in the future */
	uf = cur_uf + dep->interval * 4;

	__dwc3_gadget_kick_transfer(dep, uf, 1);
}
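
/*
 * Illustrative example (editor addition, not part of the original source):
 * for an isochronous endpoint with bInterval = 4, __dwc3_gadget_ep_enable()
 * set dep->interval = 1 << (4 - 1) = 8 microframes, so the transfer above is
 * scheduled at uf = cur_uf + 8 * 4 = cur_uf + 32, i.e. four intervals after
 * the microframe reported in the XferNotReady event.
 */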

static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
{
	u32 cur_uf, mask;

	mask = ~(dep->interval - 1);
	cur_uf = event->parameters & mask;

	__dwc3_gadget_start_isoc(dwc, dep, cur_uf);
}

static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
{
	struct dwc3		*dwc = dep->dwc;
	int			ret;

	req->request.actual	= 0;
	req->request.status	= -EINPROGRESS;
	req->direction		= dep->direction;
	req->epnum		= dep->number;

	/*
	 * DWC3 hangs on OUT requests smaller than maxpacket size,
	 * so HACK the request length
	 */
	if (dep->direction == 0 &&
	    req->request.length < dep->endpoint.maxpacket)
		req->request.length = dep->endpoint.maxpacket;
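
	/*
	 * Added note (editor addition, illustrative only): because of the
	 * padding above, e.g. a 1-byte OUT request on a 512-byte bulk
	 * endpoint is queued as a 512-byte transfer, so the gadget driver's
	 * buffer has to be able to hold at least wMaxPacketSize bytes.
	 */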

	/*
	 * We only add to our list of requests now and
	 * start consuming the list once we get XferNotReady
	 * IRQ.
	 *
	 * That way, we avoid doing anything that we don't need
	 * to do now and defer it until the point we receive a
	 * particular token from the Host side.
	 *
	 * This will also avoid Host cancelling URBs due to too
	 * many NAKs.
	 */
	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
			dep->direction);
	if (ret)
		return ret;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * There are a few special cases:
	 *
	 * 1. XferNotReady with empty list of requests. We need to kick the
	 *    transfer here in that situation, otherwise we will be NAKing
	 *    forever. If we get XferNotReady before gadget driver has a
	 *    chance to queue a request, we will ACK the IRQ but won't be
	 *    able to receive the data until the next request is queued.
	 *    The following code is handling exactly that.
	 *
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		/*
		 * If xfernotready is already elapsed and it is a case
		 * of isoc transfer, then issue END TRANSFER, so that
		 * you can receive xfernotready again and can have
		 * notion of current microframe.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			if (list_empty(&dep->req_queued)) {
				dwc3_stop_active_transfer(dwc, dep->number, true);
				dep->flags = DWC3_EP_ENABLED;
			}
			return 0;
		}

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 2. XferInProgress on Isoc EP with an active transfer. We need to
	 *    kick the transfer here after queuing a request, otherwise the
	 *    core may not see the modified TRB(s).
	 */
	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
			(dep->flags & DWC3_EP_BUSY) &&
			!(dep->flags & DWC3_EP_MISSED_ISOC)) {
		WARN_ON_ONCE(!dep->resource_index);
		ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
				false);
		if (ret && ret != -EBUSY)
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		return ret;
	}

	/*
	 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
	 * right away, otherwise host will not know we have streams to be
	 * handled.
	 */
	if (dep->stream_capable) {
		int	ret;

		ret = __dwc3_gadget_kick_transfer(dep, 0, true);
		if (ret && ret != -EBUSY) {
			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
					dep->name);
		}
	}

	return 0;
}

static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
	gfp_t gfp_flags)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_ep			*dep = to_dwc3_ep(ep);

	unsigned long			flags = 0;

	int				ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->endpoint.desc) {
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, ep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	if (req->dep != dep) {
		WARN(true, "request %p belongs to '%s'\n",
				request, req->dep->name);
		ret = -EINVAL;
		goto out;
	}

	dev_vdbg(dwc->dev, "queuing request %p to %s length %d\n",
			request, ep->name, request->length);

	ret = __dwc3_gadget_ep_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request		*req = to_dwc3_request(request);
	struct dwc3_request		*r = NULL;

	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;

	unsigned long			flags = 0;
	int				ret = 0;

	spin_lock_irqsave(&dwc->lock, flags);

	list_for_each_entry(r, &dep->request_list, list) {
		if (r == req)
			break;
	}

	if (r != req) {
		list_for_each_entry(r, &dep->req_queued, list) {
			if (r == req)
				break;
		}
		if (r == req) {
			/* wait until it is processed */
			dwc3_stop_active_transfer(dwc, dep->number, true);
			goto out1;
		}
		dev_err(dwc->dev, "request %p was not queued to %s\n",
				request, ep->name);
		ret = -EINVAL;
		goto out0;
	}

out1:
	/* giveback the request */
	dwc3_gadget_giveback(dep, req, -ECONNRESET);

out0:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
{
	struct dwc3_gadget_ep_cmd_params	params;
	struct dwc3				*dwc = dep->dwc;
	int					ret;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
		return -EINVAL;
	}

	memset(&params, 0x00, sizeof(params));

	if (value) {
		if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
				(!list_empty(&dep->req_queued) ||
				 !list_empty(&dep->request_list)))) {
			dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
					dep->name);
			return -EAGAIN;
		}

		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to set STALL on %s\n",
					dep->name);
		else
			dep->flags |= DWC3_EP_STALL;
	} else {
		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_CLEARSTALL, &params);
		if (ret)
			dev_err(dwc->dev, "failed to clear STALL on %s\n",
					dep->name);
		else
			dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
	}

	return ret;
}

1196*4882a593Smuzhiyun static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1197*4882a593Smuzhiyun {
1198*4882a593Smuzhiyun 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	unsigned long			flags = 0;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	int				ret;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	spin_lock_irqsave(&dwc->lock, flags);
1205*4882a593Smuzhiyun 	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1206*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	return ret;
1209*4882a593Smuzhiyun }
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1212*4882a593Smuzhiyun {
1213*4882a593Smuzhiyun 	struct dwc3_ep			*dep = to_dwc3_ep(ep);
	struct dwc3			*dwc = dep->dwc;
1214*4882a593Smuzhiyun 	unsigned long			flags = 0;
1215*4882a593Smuzhiyun 	int				ret;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	spin_lock_irqsave(&dwc->lock, flags);
1218*4882a593Smuzhiyun 	dep->flags |= DWC3_EP_WEDGE;
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	if (dep->number == 0 || dep->number == 1)
1221*4882a593Smuzhiyun 		ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1222*4882a593Smuzhiyun 	else
1223*4882a593Smuzhiyun 		ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1224*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	return ret;
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun /* -------------------------------------------------------------------------- */
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1232*4882a593Smuzhiyun 	.bLength	= USB_DT_ENDPOINT_SIZE,
1233*4882a593Smuzhiyun 	.bDescriptorType = USB_DT_ENDPOINT,
1234*4882a593Smuzhiyun 	.bmAttributes	= USB_ENDPOINT_XFER_CONTROL,
1235*4882a593Smuzhiyun };
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1238*4882a593Smuzhiyun 	.enable		= dwc3_gadget_ep0_enable,
1239*4882a593Smuzhiyun 	.disable	= dwc3_gadget_ep0_disable,
1240*4882a593Smuzhiyun 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1241*4882a593Smuzhiyun 	.free_request	= dwc3_gadget_ep_free_request,
1242*4882a593Smuzhiyun 	.queue		= dwc3_gadget_ep0_queue,
1243*4882a593Smuzhiyun 	.dequeue	= dwc3_gadget_ep_dequeue,
1244*4882a593Smuzhiyun 	.set_halt	= dwc3_gadget_ep0_set_halt,
1245*4882a593Smuzhiyun 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1246*4882a593Smuzhiyun };
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1249*4882a593Smuzhiyun 	.enable		= dwc3_gadget_ep_enable,
1250*4882a593Smuzhiyun 	.disable	= dwc3_gadget_ep_disable,
1251*4882a593Smuzhiyun 	.alloc_request	= dwc3_gadget_ep_alloc_request,
1252*4882a593Smuzhiyun 	.free_request	= dwc3_gadget_ep_free_request,
1253*4882a593Smuzhiyun 	.queue		= dwc3_gadget_ep_queue,
1254*4882a593Smuzhiyun 	.dequeue	= dwc3_gadget_ep_dequeue,
1255*4882a593Smuzhiyun 	.set_halt	= dwc3_gadget_ep_set_halt,
1256*4882a593Smuzhiyun 	.set_wedge	= dwc3_gadget_ep_set_wedge,
1257*4882a593Smuzhiyun };
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun /* -------------------------------------------------------------------------- */
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun static int dwc3_gadget_get_frame(struct usb_gadget *g)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun 	struct dwc3		*dwc = gadget_to_dwc(g);
1264*4882a593Smuzhiyun 	u32			reg;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1267*4882a593Smuzhiyun 	return DWC3_DSTS_SOFFN(reg);
1268*4882a593Smuzhiyun }
1269*4882a593Smuzhiyun 
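/**
 * dwc3_gadget_wakeup - Issues a Remote Wakeup request to the host
 * @g: pointer to the usb_gadget
 *
 * Remote wakeup is not allowed at SuperSpeed and is only valid while the
 * link sits in U3 or in the Early Suspend state. The link is moved to
 * Recovery and then polled until it settles back in U0 (ON); -EINVAL is
 * returned if it never does.
 */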
1270*4882a593Smuzhiyun static int dwc3_gadget_wakeup(struct usb_gadget *g)
1271*4882a593Smuzhiyun {
1272*4882a593Smuzhiyun 	struct dwc3		*dwc = gadget_to_dwc(g);
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	unsigned long		timeout;
1275*4882a593Smuzhiyun 	unsigned long		flags = 0;
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	u32			reg;
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	int			ret = 0;
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	u8			link_state;
1282*4882a593Smuzhiyun 	u8			speed;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	spin_lock_irqsave(&dwc->lock, flags);
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	/*
1287*4882a593Smuzhiyun 	 * According to the Databook, a Remote Wakeup request should
1288*4882a593Smuzhiyun 	 * be issued only when the device is in the Early Suspend state.
1289*4882a593Smuzhiyun 	 *
1290*4882a593Smuzhiyun 	 * We can check that via USB Link State bits in DSTS register.
1291*4882a593Smuzhiyun 	 */
1292*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	speed = reg & DWC3_DSTS_CONNECTSPD;
1295*4882a593Smuzhiyun 	if (speed == DWC3_DSTS_SUPERSPEED) {
1296*4882a593Smuzhiyun 		dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1297*4882a593Smuzhiyun 		ret = -EINVAL;
1298*4882a593Smuzhiyun 		goto out;
1299*4882a593Smuzhiyun 	}
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	link_state = DWC3_DSTS_USBLNKST(reg);
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	switch (link_state) {
1304*4882a593Smuzhiyun 	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
1305*4882a593Smuzhiyun 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
1306*4882a593Smuzhiyun 		break;
1307*4882a593Smuzhiyun 	default:
1308*4882a593Smuzhiyun 		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1309*4882a593Smuzhiyun 				link_state);
1310*4882a593Smuzhiyun 		ret = -EINVAL;
1311*4882a593Smuzhiyun 		goto out;
1312*4882a593Smuzhiyun 	}
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1315*4882a593Smuzhiyun 	if (ret < 0) {
1316*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to put link in Recovery\n");
1317*4882a593Smuzhiyun 		goto out;
1318*4882a593Smuzhiyun 	}
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	/* Recent versions do this automatically */
1321*4882a593Smuzhiyun 	if (dwc->revision < DWC3_REVISION_194A) {
1322*4882a593Smuzhiyun 		/* write zeroes to Link Change Request */
1323*4882a593Smuzhiyun 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1324*4882a593Smuzhiyun 		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1325*4882a593Smuzhiyun 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1326*4882a593Smuzhiyun 	}
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	/* poll until Link State changes to ON */
1329*4882a593Smuzhiyun 	timeout = 1000;
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	while (timeout--) {
1332*4882a593Smuzhiyun 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 		/* in HS, means ON */
1335*4882a593Smuzhiyun 		if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1336*4882a593Smuzhiyun 			break;
1337*4882a593Smuzhiyun 	}
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1340*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to send remote wakeup\n");
1341*4882a593Smuzhiyun 		ret = -EINVAL;
1342*4882a593Smuzhiyun 	}
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun out:
1345*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	return ret;
1348*4882a593Smuzhiyun }
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1351*4882a593Smuzhiyun 		int is_selfpowered)
1352*4882a593Smuzhiyun {
1353*4882a593Smuzhiyun 	struct dwc3		*dwc = gadget_to_dwc(g);
1354*4882a593Smuzhiyun 	unsigned long		flags = 0;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	spin_lock_irqsave(&dwc->lock, flags);
1357*4882a593Smuzhiyun 	dwc->is_selfpowered = !!is_selfpowered;
1358*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	return 0;
1361*4882a593Smuzhiyun }
1362*4882a593Smuzhiyun 
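/**
 * dwc3_gadget_run_stop - Sets or clears the Run/Stop bit in DCTL
 * @dwc: pointer to our context structure
 * @is_on: non-zero to start the controller, zero to stop it
 * @suspend: true when stopping for hibernation, so Keep-Connect is kept set
 *
 * After writing DCTL, DSTS.DEVCTRLHLT is polled until the core reports the
 * requested state; -ETIMEDOUT is returned if it never does.
 */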
1363*4882a593Smuzhiyun static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1364*4882a593Smuzhiyun {
1365*4882a593Smuzhiyun 	u32			reg;
1366*4882a593Smuzhiyun 	u32			timeout = 500;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1369*4882a593Smuzhiyun 	if (is_on) {
1370*4882a593Smuzhiyun 		if (dwc->revision <= DWC3_REVISION_187A) {
1371*4882a593Smuzhiyun 			reg &= ~DWC3_DCTL_TRGTULST_MASK;
1372*4882a593Smuzhiyun 			reg |= DWC3_DCTL_TRGTULST_RX_DET;
1373*4882a593Smuzhiyun 		}
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 		if (dwc->revision >= DWC3_REVISION_194A)
1376*4882a593Smuzhiyun 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1377*4882a593Smuzhiyun 		reg |= DWC3_DCTL_RUN_STOP;
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 		if (dwc->has_hibernation)
1380*4882a593Smuzhiyun 			reg |= DWC3_DCTL_KEEP_CONNECT;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 		dwc->pullups_connected = true;
1383*4882a593Smuzhiyun 	} else {
1384*4882a593Smuzhiyun 		reg &= ~DWC3_DCTL_RUN_STOP;
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 		if (dwc->has_hibernation && !suspend)
1387*4882a593Smuzhiyun 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 		dwc->pullups_connected = false;
1390*4882a593Smuzhiyun 	}
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	do {
1395*4882a593Smuzhiyun 		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1396*4882a593Smuzhiyun 		if (is_on) {
1397*4882a593Smuzhiyun 			if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1398*4882a593Smuzhiyun 				break;
1399*4882a593Smuzhiyun 		} else {
1400*4882a593Smuzhiyun 			if (reg & DWC3_DSTS_DEVCTRLHLT)
1401*4882a593Smuzhiyun 				break;
1402*4882a593Smuzhiyun 		}
1403*4882a593Smuzhiyun 		timeout--;
1404*4882a593Smuzhiyun 		if (!timeout)
1405*4882a593Smuzhiyun 			return -ETIMEDOUT;
1406*4882a593Smuzhiyun 		udelay(1);
1407*4882a593Smuzhiyun 	} while (1);
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1410*4882a593Smuzhiyun 			dwc->gadget_driver
1411*4882a593Smuzhiyun 			? dwc->gadget_driver->function : "no-function",
1412*4882a593Smuzhiyun 			is_on ? "connect" : "disconnect");
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	return 0;
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun 	struct dwc3		*dwc = gadget_to_dwc(g);
1420*4882a593Smuzhiyun 	unsigned long		flags = 0;
1421*4882a593Smuzhiyun 	int			ret;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	is_on = !!is_on;
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	spin_lock_irqsave(&dwc->lock, flags);
1426*4882a593Smuzhiyun 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
1427*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	return ret;
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1433*4882a593Smuzhiyun {
1434*4882a593Smuzhiyun 	u32			reg;
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	/* Enable all but Start and End of Frame IRQs */
1437*4882a593Smuzhiyun 	reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1438*4882a593Smuzhiyun 			DWC3_DEVTEN_EVNTOVERFLOWEN |
1439*4882a593Smuzhiyun 			DWC3_DEVTEN_CMDCMPLTEN |
1440*4882a593Smuzhiyun 			DWC3_DEVTEN_ERRTICERREN |
1441*4882a593Smuzhiyun 			DWC3_DEVTEN_WKUPEVTEN |
1442*4882a593Smuzhiyun 			DWC3_DEVTEN_ULSTCNGEN |
1443*4882a593Smuzhiyun 			DWC3_DEVTEN_CONNECTDONEEN |
1444*4882a593Smuzhiyun 			DWC3_DEVTEN_USBRSTEN |
1445*4882a593Smuzhiyun 			DWC3_DEVTEN_DISCONNEVTEN);
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1451*4882a593Smuzhiyun {
1452*4882a593Smuzhiyun 	/* mask all interrupts */
1453*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1454*4882a593Smuzhiyun }
1455*4882a593Smuzhiyun 
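/**
 * dwc3_gadget_start - Binds a gadget driver and brings up the device side
 * @g: pointer to the usb_gadget
 * @driver: the gadget driver being bound
 *
 * Returns -EBUSY if a driver is already bound. Programs the maximum speed
 * in DCFG (forcing SuperSpeed on cores older than 2.20a, see the workaround
 * below), enables both directions of physical endpoint 0, starts the SETUP
 * phase and unmasks the device interrupts.
 */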
1456*4882a593Smuzhiyun static int dwc3_gadget_start(struct usb_gadget *g,
1457*4882a593Smuzhiyun 		struct usb_gadget_driver *driver)
1458*4882a593Smuzhiyun {
1459*4882a593Smuzhiyun 	struct dwc3		*dwc = gadget_to_dwc(g);
1460*4882a593Smuzhiyun 	struct dwc3_ep		*dep;
1461*4882a593Smuzhiyun 	unsigned long		flags = 0;
1462*4882a593Smuzhiyun 	int			ret = 0;
1463*4882a593Smuzhiyun 	u32			reg;
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	spin_lock_irqsave(&dwc->lock, flags);
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	if (dwc->gadget_driver) {
1468*4882a593Smuzhiyun 		dev_err(dwc->dev, "%s is already bound to %s\n",
1469*4882a593Smuzhiyun 				dwc->gadget.name,
1470*4882a593Smuzhiyun 				dwc->gadget_driver->function);
1471*4882a593Smuzhiyun 		ret = -EBUSY;
1472*4882a593Smuzhiyun 		goto err1;
1473*4882a593Smuzhiyun 	}
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	dwc->gadget_driver	= driver;
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1478*4882a593Smuzhiyun 	reg &= ~(DWC3_DCFG_SPEED_MASK);
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	/**
1481*4882a593Smuzhiyun 	 * WORKAROUND: DWC3 revision < 2.20a have an issue
1482*4882a593Smuzhiyun 	 * which would cause metastability state on Run/Stop
1483*4882a593Smuzhiyun 	 * bit if we try to force the IP to USB2-only mode.
1484*4882a593Smuzhiyun 	 *
1485*4882a593Smuzhiyun 	 * Because of that, we cannot configure the IP to any
1486*4882a593Smuzhiyun 	 * speed other than the SuperSpeed
1487*4882a593Smuzhiyun 	 *
1488*4882a593Smuzhiyun 	 * Refers to:
1489*4882a593Smuzhiyun 	 *
1490*4882a593Smuzhiyun 	 * STAR#9000525659: Clock Domain Crossing on DCTL in
1491*4882a593Smuzhiyun 	 * USB 2.0 Mode
1492*4882a593Smuzhiyun 	 */
1493*4882a593Smuzhiyun 	if (dwc->revision < DWC3_REVISION_220A) {
1494*4882a593Smuzhiyun 		reg |= DWC3_DCFG_SUPERSPEED;
1495*4882a593Smuzhiyun 	} else {
1496*4882a593Smuzhiyun 		switch (dwc->maximum_speed) {
1497*4882a593Smuzhiyun 		case USB_SPEED_LOW:
1498*4882a593Smuzhiyun 			reg |= DWC3_DSTS_LOWSPEED;
1499*4882a593Smuzhiyun 			break;
1500*4882a593Smuzhiyun 		case USB_SPEED_FULL:
1501*4882a593Smuzhiyun 			reg |= DWC3_DSTS_FULLSPEED1;
1502*4882a593Smuzhiyun 			break;
1503*4882a593Smuzhiyun 		case USB_SPEED_HIGH:
1504*4882a593Smuzhiyun 			reg |= DWC3_DSTS_HIGHSPEED;
1505*4882a593Smuzhiyun 			break;
1506*4882a593Smuzhiyun 		case USB_SPEED_SUPER:	/* FALLTHROUGH */
1507*4882a593Smuzhiyun 		case USB_SPEED_UNKNOWN:	/* FALLTHROUGH */
1508*4882a593Smuzhiyun 		default:
1509*4882a593Smuzhiyun 			reg |= DWC3_DSTS_SUPERSPEED;
1510*4882a593Smuzhiyun 		}
1511*4882a593Smuzhiyun 	}
1512*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	dwc->start_config_issued = false;
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	/* Start with SuperSpeed Default */
1517*4882a593Smuzhiyun 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	dep = dwc->eps[0];
1520*4882a593Smuzhiyun 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1521*4882a593Smuzhiyun 			false);
1522*4882a593Smuzhiyun 	if (ret) {
1523*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1524*4882a593Smuzhiyun 		goto err2;
1525*4882a593Smuzhiyun 	}
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	dep = dwc->eps[1];
1528*4882a593Smuzhiyun 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1529*4882a593Smuzhiyun 			false);
1530*4882a593Smuzhiyun 	if (ret) {
1531*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1532*4882a593Smuzhiyun 		goto err3;
1533*4882a593Smuzhiyun 	}
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	/* begin to receive SETUP packets */
1536*4882a593Smuzhiyun 	dwc->ep0state = EP0_SETUP_PHASE;
1537*4882a593Smuzhiyun 	dwc3_ep0_out_start(dwc);
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	dwc3_gadget_enable_irq(dwc);
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	return 0;
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun err3:
1546*4882a593Smuzhiyun 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun err2:
1549*4882a593Smuzhiyun 	dwc->gadget_driver = NULL;
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun err1:
1552*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 	return ret;
1555*4882a593Smuzhiyun }
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun static int dwc3_gadget_stop(struct usb_gadget *g)
1558*4882a593Smuzhiyun {
1559*4882a593Smuzhiyun 	struct dwc3		*dwc = gadget_to_dwc(g);
1560*4882a593Smuzhiyun 	unsigned long		flags = 0;
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	spin_lock_irqsave(&dwc->lock, flags);
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	dwc3_gadget_disable_irq(dwc);
1565*4882a593Smuzhiyun 	__dwc3_gadget_ep_disable(dwc->eps[0]);
1566*4882a593Smuzhiyun 	__dwc3_gadget_ep_disable(dwc->eps[1]);
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	dwc->gadget_driver	= NULL;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 	return 0;
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun static const struct usb_gadget_ops dwc3_gadget_ops = {
1576*4882a593Smuzhiyun 	.get_frame		= dwc3_gadget_get_frame,
1577*4882a593Smuzhiyun 	.wakeup			= dwc3_gadget_wakeup,
1578*4882a593Smuzhiyun 	.set_selfpowered	= dwc3_gadget_set_selfpowered,
1579*4882a593Smuzhiyun 	.pullup			= dwc3_gadget_pullup,
1580*4882a593Smuzhiyun 	.udc_start		= dwc3_gadget_start,
1581*4882a593Smuzhiyun 	.udc_stop		= dwc3_gadget_stop,
1582*4882a593Smuzhiyun };
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun /* -------------------------------------------------------------------------- */
1585*4882a593Smuzhiyun 
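/**
 * dwc3_gadget_init_hw_endpoints - Allocates one direction of endpoints
 * @dwc: pointer to our context structure
 * @num: number of endpoints to create in this direction
 * @direction: 1 for IN endpoints, 0 for OUT endpoints
 *
 * Physical endpoints 0 and 1 form the control endpoint and get the ep0
 * operations; all other endpoints get a TRB pool and are added to the
 * gadget's ep_list.
 */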
1586*4882a593Smuzhiyun static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1587*4882a593Smuzhiyun 		u8 num, u32 direction)
1588*4882a593Smuzhiyun {
1589*4882a593Smuzhiyun 	struct dwc3_ep			*dep;
1590*4882a593Smuzhiyun 	u8				i;
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	for (i = 0; i < num; i++) {
1593*4882a593Smuzhiyun 		u8 epnum = (i << 1) | (!!direction);
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 		dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1596*4882a593Smuzhiyun 		if (!dep)
1597*4882a593Smuzhiyun 			return -ENOMEM;
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 		dep->dwc = dwc;
1600*4882a593Smuzhiyun 		dep->number = epnum;
1601*4882a593Smuzhiyun 		dep->direction = !!direction;
1602*4882a593Smuzhiyun 		dwc->eps[epnum] = dep;
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 		snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1605*4882a593Smuzhiyun 				(epnum & 1) ? "in" : "out");
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 		dep->endpoint.name = dep->name;
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 		if (epnum == 0 || epnum == 1) {
1612*4882a593Smuzhiyun 			usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1613*4882a593Smuzhiyun 			dep->endpoint.maxburst = 1;
1614*4882a593Smuzhiyun 			dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1615*4882a593Smuzhiyun 			if (!epnum)
1616*4882a593Smuzhiyun 				dwc->gadget.ep0 = &dep->endpoint;
1617*4882a593Smuzhiyun 		} else {
1618*4882a593Smuzhiyun 			int		ret;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 			if (dwc->maximum_speed < USB_SPEED_SUPER)
1621*4882a593Smuzhiyun 				usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1622*4882a593Smuzhiyun 			else
1623*4882a593Smuzhiyun 				usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1624*4882a593Smuzhiyun 			dep->endpoint.max_streams = 15;
1625*4882a593Smuzhiyun 			dep->endpoint.ops = &dwc3_gadget_ep_ops;
1626*4882a593Smuzhiyun 			list_add_tail(&dep->endpoint.ep_list,
1627*4882a593Smuzhiyun 					&dwc->gadget.ep_list);
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 			ret = dwc3_alloc_trb_pool(dep);
1630*4882a593Smuzhiyun 			if (ret)
1631*4882a593Smuzhiyun 				return ret;
1632*4882a593Smuzhiyun 		}
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 		INIT_LIST_HEAD(&dep->request_list);
1635*4882a593Smuzhiyun 		INIT_LIST_HEAD(&dep->req_queued);
1636*4882a593Smuzhiyun 	}
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	return 0;
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1642*4882a593Smuzhiyun {
1643*4882a593Smuzhiyun 	int				ret;
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	INIT_LIST_HEAD(&dwc->gadget.ep_list);
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1648*4882a593Smuzhiyun 	if (ret < 0) {
1649*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1650*4882a593Smuzhiyun 		return ret;
1651*4882a593Smuzhiyun 	}
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 	ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1654*4882a593Smuzhiyun 	if (ret < 0) {
1655*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1656*4882a593Smuzhiyun 		return ret;
1657*4882a593Smuzhiyun 	}
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	return 0;
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1663*4882a593Smuzhiyun {
1664*4882a593Smuzhiyun 	struct dwc3_ep			*dep;
1665*4882a593Smuzhiyun 	u8				epnum;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1668*4882a593Smuzhiyun 		dep = dwc->eps[epnum];
1669*4882a593Smuzhiyun 		if (!dep)
1670*4882a593Smuzhiyun 			continue;
1671*4882a593Smuzhiyun 		/*
1672*4882a593Smuzhiyun 		 * Physical endpoints 0 and 1 are special; they form the
1673*4882a593Smuzhiyun 		 * bi-directional USB endpoint 0.
1674*4882a593Smuzhiyun 		 *
1675*4882a593Smuzhiyun 		 * For those two physical endpoints, we don't allocate a TRB
1676*4882a593Smuzhiyun 		 * pool nor do we add them to the endpoints list. Due to that, we
1677*4882a593Smuzhiyun 		 * shouldn't do these two operations; otherwise we would end up
1678*4882a593Smuzhiyun 		 * with all sorts of bugs when removing dwc3.ko.
1679*4882a593Smuzhiyun 		 */
1680*4882a593Smuzhiyun 		if (epnum != 0 && epnum != 1) {
1681*4882a593Smuzhiyun 			dwc3_free_trb_pool(dep);
1682*4882a593Smuzhiyun 			list_del(&dep->endpoint.ep_list);
1683*4882a593Smuzhiyun 		}
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 		kfree(dep);
1686*4882a593Smuzhiyun 	}
1687*4882a593Smuzhiyun }
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun /* -------------------------------------------------------------------------- */
1690*4882a593Smuzhiyun 
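/**
 * __dwc3_cleanup_done_trbs - Accounts for a completed TRB
 * @dwc: pointer to our context structure
 * @dep: the endpoint the TRB belongs to
 * @req: the request being completed
 * @trb: the TRB to clean up
 * @event: the endpoint event that triggered the cleanup
 * @status: completion status propagated to the request
 *
 * Updates req->request.actual from the remaining byte count in the TRB.
 * Returns 1 when the request is finished (short packet, last TRB or IOC),
 * 0 otherwise. A missed isochronous interval is recorded in dep->flags.
 */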
1691*4882a593Smuzhiyun static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1692*4882a593Smuzhiyun 		struct dwc3_request *req, struct dwc3_trb *trb,
1693*4882a593Smuzhiyun 		const struct dwc3_event_depevt *event, int status)
1694*4882a593Smuzhiyun {
1695*4882a593Smuzhiyun 	unsigned int		count;
1696*4882a593Smuzhiyun 	unsigned int		s_pkt = 0;
1697*4882a593Smuzhiyun 	unsigned int		trb_status;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1700*4882a593Smuzhiyun 		/*
1701*4882a593Smuzhiyun 		 * We continue despite the error. There is not much we
1702*4882a593Smuzhiyun 		 * can do. If we don't clean it up we loop forever. If
1703*4882a593Smuzhiyun 		 * we skip the TRB then it gets overwritten after a
1704*4882a593Smuzhiyun 		 * while since we use them in a ring buffer. A BUG()
1705*4882a593Smuzhiyun 		 * would help. Let's hope that if this occurs, someone
1706*4882a593Smuzhiyun 		 * fixes the root cause instead of looking away :)
1707*4882a593Smuzhiyun 		 */
1708*4882a593Smuzhiyun 		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1709*4882a593Smuzhiyun 				dep->name, trb);
1710*4882a593Smuzhiyun 	count = trb->size & DWC3_TRB_SIZE_MASK;
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	if (dep->direction) {
1713*4882a593Smuzhiyun 		if (count) {
1714*4882a593Smuzhiyun 			trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1715*4882a593Smuzhiyun 			if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1716*4882a593Smuzhiyun 				dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1717*4882a593Smuzhiyun 						dep->name);
1718*4882a593Smuzhiyun 				/*
1719*4882a593Smuzhiyun 				 * If missed isoc occurred and there is
1720*4882a593Smuzhiyun 				 * no request queued then issue END
1721*4882a593Smuzhiyun 				 * TRANSFER, so that core generates
1722*4882a593Smuzhiyun 				 * next xfernotready and we will issue
1723*4882a593Smuzhiyun 				 * a fresh START TRANSFER.
1724*4882a593Smuzhiyun 				 * If there are still queued requests
1725*4882a593Smuzhiyun 				 * then wait, do not issue either END
1726*4882a593Smuzhiyun 				 * or UPDATE TRANSFER, just attach next
1727*4882a593Smuzhiyun 				 * request in request_list during
1728*4882a593Smuzhiyun 				 * giveback. If any future queued request
1729*4882a593Smuzhiyun 				 * is successfully transferred then we
1730*4882a593Smuzhiyun 				 * will issue UPDATE TRANSFER for all
1731*4882a593Smuzhiyun 				 * requests in the request_list.
1732*4882a593Smuzhiyun 				 */
1733*4882a593Smuzhiyun 				dep->flags |= DWC3_EP_MISSED_ISOC;
1734*4882a593Smuzhiyun 			} else {
1735*4882a593Smuzhiyun 				dev_err(dwc->dev, "incomplete IN transfer %s\n",
1736*4882a593Smuzhiyun 						dep->name);
1737*4882a593Smuzhiyun 				status = -ECONNRESET;
1738*4882a593Smuzhiyun 			}
1739*4882a593Smuzhiyun 		} else {
1740*4882a593Smuzhiyun 			dep->flags &= ~DWC3_EP_MISSED_ISOC;
1741*4882a593Smuzhiyun 		}
1742*4882a593Smuzhiyun 	} else {
1743*4882a593Smuzhiyun 		if (count && (event->status & DEPEVT_STATUS_SHORT))
1744*4882a593Smuzhiyun 			s_pkt = 1;
1745*4882a593Smuzhiyun 	}
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	/*
1748*4882a593Smuzhiyun 	 * We assume here we will always receive the entire data block
1749*4882a593Smuzhiyun 	 * which we should receive. Meaning, if we program RX to
1750*4882a593Smuzhiyun 	 * receive 4K but we receive only 2K, we assume that's all we
1751*4882a593Smuzhiyun 	 * should receive and we simply bounce the request back to the
1752*4882a593Smuzhiyun 	 * gadget driver for further processing.
1753*4882a593Smuzhiyun 	 */
1754*4882a593Smuzhiyun 	req->request.actual += req->request.length - count;
1755*4882a593Smuzhiyun 	if (s_pkt)
1756*4882a593Smuzhiyun 		return 1;
1757*4882a593Smuzhiyun 	if ((event->status & DEPEVT_STATUS_LST) &&
1758*4882a593Smuzhiyun 			(trb->ctrl & (DWC3_TRB_CTRL_LST |
1759*4882a593Smuzhiyun 				DWC3_TRB_CTRL_HWO)))
1760*4882a593Smuzhiyun 		return 1;
1761*4882a593Smuzhiyun 	if ((event->status & DEPEVT_STATUS_IOC) &&
1762*4882a593Smuzhiyun 			(trb->ctrl & DWC3_TRB_CTRL_IOC))
1763*4882a593Smuzhiyun 		return 1;
1764*4882a593Smuzhiyun 	return 0;
1765*4882a593Smuzhiyun }
1766*4882a593Smuzhiyun 
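/**
 * dwc3_cleanup_done_reqs - Gives back the request that just completed
 * @dwc: pointer to our context structure
 * @dep: the endpoint that generated the event
 * @event: the endpoint event being handled
 * @status: completion status propagated to the request
 *
 * Locates the TRB belonging to the request at the head of req_queued,
 * flushes it from cache, cleans it up and gives the request back. For
 * isochronous endpoints that run dry it either flags a pending request
 * or ends the active transfer.
 */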
1767*4882a593Smuzhiyun static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1768*4882a593Smuzhiyun 		const struct dwc3_event_depevt *event, int status)
1769*4882a593Smuzhiyun {
1770*4882a593Smuzhiyun 	struct dwc3_request	*req;
1771*4882a593Smuzhiyun 	struct dwc3_trb		*trb;
1772*4882a593Smuzhiyun 	unsigned int		slot;
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 	req = next_request(&dep->req_queued);
1775*4882a593Smuzhiyun 	if (!req) {
1776*4882a593Smuzhiyun 		WARN_ON_ONCE(1);
1777*4882a593Smuzhiyun 		return 1;
1778*4882a593Smuzhiyun 	}
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 	slot = req->start_slot;
1781*4882a593Smuzhiyun 	if ((slot == DWC3_TRB_NUM - 1) &&
1782*4882a593Smuzhiyun 	    usb_endpoint_xfer_isoc(dep->endpoint.desc))
1783*4882a593Smuzhiyun 		slot++;
1784*4882a593Smuzhiyun 	slot %= DWC3_TRB_NUM;
1785*4882a593Smuzhiyun 	trb = &dep->trb_pool[slot];
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	dwc3_flush_cache((uintptr_t)trb, sizeof(*trb));
1788*4882a593Smuzhiyun 	__dwc3_cleanup_done_trbs(dwc, dep, req, trb, event, status);
1789*4882a593Smuzhiyun 	dwc3_gadget_giveback(dep, req, status);
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1792*4882a593Smuzhiyun 			list_empty(&dep->req_queued)) {
1793*4882a593Smuzhiyun 		if (list_empty(&dep->request_list)) {
1794*4882a593Smuzhiyun 			/*
1795*4882a593Smuzhiyun 			 * If there is no entry in request list then do
1796*4882a593Smuzhiyun 			 * not issue END TRANSFER now. Just set PENDING
1797*4882a593Smuzhiyun 			 * flag, so that END TRANSFER is issued when an
1798*4882a593Smuzhiyun 			 * entry is added into request list.
1799*4882a593Smuzhiyun 			 */
1800*4882a593Smuzhiyun 			dep->flags = DWC3_EP_PENDING_REQUEST;
1801*4882a593Smuzhiyun 		} else {
1802*4882a593Smuzhiyun 			dwc3_stop_active_transfer(dwc, dep->number, true);
1803*4882a593Smuzhiyun 			dep->flags = DWC3_EP_ENABLED;
1804*4882a593Smuzhiyun 		}
1805*4882a593Smuzhiyun 		return 1;
1806*4882a593Smuzhiyun 	}
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	return 1;
1809*4882a593Smuzhiyun }
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
1812*4882a593Smuzhiyun 		struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1813*4882a593Smuzhiyun {
1814*4882a593Smuzhiyun 	unsigned		status = 0;
1815*4882a593Smuzhiyun 	int			clean_busy;
1816*4882a593Smuzhiyun 	u32			is_xfer_complete;
1817*4882a593Smuzhiyun 	int			ret;
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 	is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	if (event->status & DEPEVT_STATUS_BUSERR)
1822*4882a593Smuzhiyun 		status = -ECONNRESET;
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
1825*4882a593Smuzhiyun 	if (clean_busy && (is_xfer_complete ||
1826*4882a593Smuzhiyun 			   usb_endpoint_xfer_isoc(dep->endpoint.desc)))
1827*4882a593Smuzhiyun 		dep->flags &= ~DWC3_EP_BUSY;
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	/*
1830*4882a593Smuzhiyun 	 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1831*4882a593Smuzhiyun 	 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1832*4882a593Smuzhiyun 	 */
1833*4882a593Smuzhiyun 	if (dwc->revision < DWC3_REVISION_183A) {
1834*4882a593Smuzhiyun 		u32		reg;
1835*4882a593Smuzhiyun 		int		i;
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun 		for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
1838*4882a593Smuzhiyun 			dep = dwc->eps[i];
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 			if (!(dep->flags & DWC3_EP_ENABLED))
1841*4882a593Smuzhiyun 				continue;
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 			if (!list_empty(&dep->req_queued))
1844*4882a593Smuzhiyun 				return;
1845*4882a593Smuzhiyun 		}
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1848*4882a593Smuzhiyun 		reg |= dwc->u1u2;
1849*4882a593Smuzhiyun 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 		dwc->u1u2 = 0;
1852*4882a593Smuzhiyun 	}
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1855*4882a593Smuzhiyun 		ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete);
1856*4882a593Smuzhiyun 		if (!ret || ret == -EBUSY)
1857*4882a593Smuzhiyun 			return;
1858*4882a593Smuzhiyun 	}
1859*4882a593Smuzhiyun }
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1862*4882a593Smuzhiyun 		const struct dwc3_event_depevt *event)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun 	struct dwc3_ep		*dep;
1865*4882a593Smuzhiyun 	u8			epnum = event->endpoint_number;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	dep = dwc->eps[epnum];
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 	if (!(dep->flags & DWC3_EP_ENABLED))
1870*4882a593Smuzhiyun 		return;
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	if (epnum == 0 || epnum == 1) {
1873*4882a593Smuzhiyun 		dwc3_ep0_interrupt(dwc, event);
1874*4882a593Smuzhiyun 		return;
1875*4882a593Smuzhiyun 	}
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	switch (event->endpoint_event) {
1878*4882a593Smuzhiyun 	case DWC3_DEPEVT_XFERCOMPLETE:
1879*4882a593Smuzhiyun 		dep->resource_index = 0;
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1882*4882a593Smuzhiyun 			dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1883*4882a593Smuzhiyun 					dep->name);
1884*4882a593Smuzhiyun 			return;
1885*4882a593Smuzhiyun 		}
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 		dwc3_endpoint_transfer_complete(dwc, dep, event);
1888*4882a593Smuzhiyun 		break;
1889*4882a593Smuzhiyun 	case DWC3_DEPEVT_XFERINPROGRESS:
1890*4882a593Smuzhiyun 		dwc3_endpoint_transfer_complete(dwc, dep, event);
1891*4882a593Smuzhiyun 		break;
1892*4882a593Smuzhiyun 	case DWC3_DEPEVT_XFERNOTREADY:
1893*4882a593Smuzhiyun 		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1894*4882a593Smuzhiyun 			dwc3_gadget_start_isoc(dwc, dep, event);
1895*4882a593Smuzhiyun 		} else {
1896*4882a593Smuzhiyun 			int ret;
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 			dev_vdbg(dwc->dev, "%s: reason %s\n",
1899*4882a593Smuzhiyun 					dep->name, event->status &
1900*4882a593Smuzhiyun 					DEPEVT_STATUS_TRANSFER_ACTIVE
1901*4882a593Smuzhiyun 					? "Transfer Active"
1902*4882a593Smuzhiyun 					: "Transfer Not Active");
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 			ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1905*4882a593Smuzhiyun 			if (!ret || ret == -EBUSY)
1906*4882a593Smuzhiyun 				return;
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 			dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1909*4882a593Smuzhiyun 					dep->name);
1910*4882a593Smuzhiyun 		}
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 		break;
1913*4882a593Smuzhiyun 	case DWC3_DEPEVT_STREAMEVT:
1914*4882a593Smuzhiyun 		if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
1915*4882a593Smuzhiyun 			dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1916*4882a593Smuzhiyun 					dep->name);
1917*4882a593Smuzhiyun 			return;
1918*4882a593Smuzhiyun 		}
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 		switch (event->status) {
1921*4882a593Smuzhiyun 		case DEPEVT_STREAMEVT_FOUND:
1922*4882a593Smuzhiyun 			dev_vdbg(dwc->dev, "Stream %d found and started\n",
1923*4882a593Smuzhiyun 					event->parameters);
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 			break;
1926*4882a593Smuzhiyun 		case DEPEVT_STREAMEVT_NOTFOUND:
1927*4882a593Smuzhiyun 			/* FALLTHROUGH */
1928*4882a593Smuzhiyun 		default:
1929*4882a593Smuzhiyun 			dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
1930*4882a593Smuzhiyun 		}
1931*4882a593Smuzhiyun 		break;
1932*4882a593Smuzhiyun 	case DWC3_DEPEVT_RXTXFIFOEVT:
1933*4882a593Smuzhiyun 		dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
1934*4882a593Smuzhiyun 		break;
1935*4882a593Smuzhiyun 	case DWC3_DEPEVT_EPCMDCMPLT:
1936*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
1937*4882a593Smuzhiyun 		break;
1938*4882a593Smuzhiyun 	}
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun static void dwc3_disconnect_gadget(struct dwc3 *dwc)
1942*4882a593Smuzhiyun {
1943*4882a593Smuzhiyun 	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
1944*4882a593Smuzhiyun 		spin_unlock(&dwc->lock);
1945*4882a593Smuzhiyun 		dwc->gadget_driver->disconnect(&dwc->gadget);
1946*4882a593Smuzhiyun 		spin_lock(&dwc->lock);
1947*4882a593Smuzhiyun 	}
1948*4882a593Smuzhiyun }
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun static void dwc3_suspend_gadget(struct dwc3 *dwc)
1951*4882a593Smuzhiyun {
1952*4882a593Smuzhiyun 	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
1953*4882a593Smuzhiyun 		spin_unlock(&dwc->lock);
1954*4882a593Smuzhiyun 		dwc->gadget_driver->suspend(&dwc->gadget);
1955*4882a593Smuzhiyun 		spin_lock(&dwc->lock);
1956*4882a593Smuzhiyun 	}
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun static void dwc3_resume_gadget(struct dwc3 *dwc)
1960*4882a593Smuzhiyun {
1961*4882a593Smuzhiyun 	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
1962*4882a593Smuzhiyun 		spin_unlock(&dwc->lock);
1963*4882a593Smuzhiyun 		dwc->gadget_driver->resume(&dwc->gadget);
1964*4882a593Smuzhiyun 	}
1965*4882a593Smuzhiyun }
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun static void dwc3_reset_gadget(struct dwc3 *dwc)
1968*4882a593Smuzhiyun {
1969*4882a593Smuzhiyun 	if (!dwc->gadget_driver)
1970*4882a593Smuzhiyun 		return;
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun 	if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
1973*4882a593Smuzhiyun 		spin_unlock(&dwc->lock);
1974*4882a593Smuzhiyun 		usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
1975*4882a593Smuzhiyun 		spin_lock(&dwc->lock);
1976*4882a593Smuzhiyun 	}
1977*4882a593Smuzhiyun }
1978*4882a593Smuzhiyun 
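/**
 * dwc3_stop_active_transfer - Ends the transfer currently active on an endpoint
 * @dwc: pointer to our context structure
 * @epnum: physical endpoint number
 * @force: set ForceRM in the EndTransfer command when true
 *
 * Does nothing if no transfer resource is assigned. Otherwise issues
 * EndTransfer with CMDIOC set, clears the endpoint's resource index and
 * BUSY flag and waits 100us, as explained in the NOTICE below.
 */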
1979*4882a593Smuzhiyun static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
1980*4882a593Smuzhiyun {
1981*4882a593Smuzhiyun 	struct dwc3_ep *dep;
1982*4882a593Smuzhiyun 	struct dwc3_gadget_ep_cmd_params params;
1983*4882a593Smuzhiyun 	u32 cmd;
1984*4882a593Smuzhiyun 	int ret;
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	dep = dwc->eps[epnum];
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	if (!dep->resource_index)
1989*4882a593Smuzhiyun 		return;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	/*
1992*4882a593Smuzhiyun 	 * NOTICE: We are violating what the Databook says about the
1993*4882a593Smuzhiyun 	 * EndTransfer command. Ideally we would _always_ wait for the
1994*4882a593Smuzhiyun 	 * EndTransfer Command Completion IRQ, but that's causing too
1995*4882a593Smuzhiyun 	 * much trouble synchronizing between us and gadget driver.
1996*4882a593Smuzhiyun 	 *
1997*4882a593Smuzhiyun 	 * We have discussed this with the IP Provider and it was
1998*4882a593Smuzhiyun 	 * suggested to giveback all requests here, but give HW some
1999*4882a593Smuzhiyun 	 * extra time to synchronize with the interconnect. We're using
2000*4882a593Smuzhiyun 	 * an arbitrary 100us delay for that.
2001*4882a593Smuzhiyun 	 *
2002*4882a593Smuzhiyun 	 * Note also that a similar handling was tested by Synopsys
2003*4882a593Smuzhiyun 	 * (thanks a lot Paul) and nothing bad has come out of it.
2004*4882a593Smuzhiyun 	 * In short, what we're doing is:
2005*4882a593Smuzhiyun 	 *
2006*4882a593Smuzhiyun 	 * - Issue EndTransfer WITH CMDIOC bit set
2007*4882a593Smuzhiyun 	 * - Wait 100us
2008*4882a593Smuzhiyun 	 */
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun 	cmd = DWC3_DEPCMD_ENDTRANSFER;
2011*4882a593Smuzhiyun 	cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2012*4882a593Smuzhiyun 	cmd |= DWC3_DEPCMD_CMDIOC;
2013*4882a593Smuzhiyun 	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2014*4882a593Smuzhiyun 	memset(&params, 0, sizeof(params));
2015*4882a593Smuzhiyun 	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2016*4882a593Smuzhiyun 	WARN_ON_ONCE(ret);
2017*4882a593Smuzhiyun 	dep->resource_index = 0;
2018*4882a593Smuzhiyun 	dep->flags &= ~DWC3_EP_BUSY;
2019*4882a593Smuzhiyun 	udelay(100);
2020*4882a593Smuzhiyun }
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2023*4882a593Smuzhiyun {
2024*4882a593Smuzhiyun 	u32 epnum;
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 	for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2027*4882a593Smuzhiyun 		struct dwc3_ep *dep;
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 		dep = dwc->eps[epnum];
2030*4882a593Smuzhiyun 		if (!dep)
2031*4882a593Smuzhiyun 			continue;
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 		if (!(dep->flags & DWC3_EP_ENABLED))
2034*4882a593Smuzhiyun 			continue;
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 		dwc3_remove_requests(dwc, dep);
2037*4882a593Smuzhiyun 	}
2038*4882a593Smuzhiyun }
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2041*4882a593Smuzhiyun {
2042*4882a593Smuzhiyun 	u32 epnum;
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2045*4882a593Smuzhiyun 		struct dwc3_ep *dep;
2046*4882a593Smuzhiyun 		struct dwc3_gadget_ep_cmd_params params;
2047*4882a593Smuzhiyun 		int ret;
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 		dep = dwc->eps[epnum];
2050*4882a593Smuzhiyun 		if (!dep)
2051*4882a593Smuzhiyun 			continue;
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 		if (!(dep->flags & DWC3_EP_STALL))
2054*4882a593Smuzhiyun 			continue;
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 		dep->flags &= ~DWC3_EP_STALL;
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 		memset(&params, 0, sizeof(params));
2059*4882a593Smuzhiyun 		ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2060*4882a593Smuzhiyun 				DWC3_DEPCMD_CLEARSTALL, &params);
2061*4882a593Smuzhiyun 		WARN_ON_ONCE(ret);
2062*4882a593Smuzhiyun 	}
2063*4882a593Smuzhiyun }
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2066*4882a593Smuzhiyun {
2067*4882a593Smuzhiyun 	int			reg;
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2070*4882a593Smuzhiyun 	reg &= ~DWC3_DCTL_INITU1ENA;
2071*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun 	reg &= ~DWC3_DCTL_INITU2ENA;
2074*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun 	dwc3_disconnect_gadget(dwc);
2077*4882a593Smuzhiyun 	dwc->start_config_issued = false;
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
2080*4882a593Smuzhiyun 	dwc->setup_packet_pending = false;
2081*4882a593Smuzhiyun 	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2082*4882a593Smuzhiyun }
2083*4882a593Smuzhiyun 
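/**
 * dwc3_gadget_reset_interrupt - Handles the USB Reset event
 * @dwc: pointer to our context structure
 *
 * Simulates the possibly missing Disconnect event on cores older than
 * 1.88a (see the workaround below), notifies the gadget driver of the
 * reset, leaves test mode, stops all active transfers, clears every
 * STALL and resets the device address to zero.
 */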
2084*4882a593Smuzhiyun static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2085*4882a593Smuzhiyun {
2086*4882a593Smuzhiyun 	u32			reg;
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun 	/*
2089*4882a593Smuzhiyun 	 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2090*4882a593Smuzhiyun 	 * would cause a missing Disconnect Event if there's a
2091*4882a593Smuzhiyun 	 * pending Setup Packet in the FIFO.
2092*4882a593Smuzhiyun 	 *
2093*4882a593Smuzhiyun 	 * There's no suggested workaround on the official Bug
2094*4882a593Smuzhiyun 	 * report, which states that "unless the driver/application
2095*4882a593Smuzhiyun 	 * is doing any special handling of a disconnect event,
2096*4882a593Smuzhiyun 	 * there is no functional issue".
2097*4882a593Smuzhiyun 	 *
2098*4882a593Smuzhiyun 	 * Unfortunately, it turns out that we _do_ some special
2099*4882a593Smuzhiyun 	 * handling of a disconnect event, namely complete all
2100*4882a593Smuzhiyun 	 * pending transfers, notify gadget driver of the
2101*4882a593Smuzhiyun 	 * disconnection, and so on.
2102*4882a593Smuzhiyun 	 *
2103*4882a593Smuzhiyun 	 * Our suggested workaround is to follow the Disconnect
2104*4882a593Smuzhiyun 	 * Event steps here, instead, based on a setup_packet_pending
2105*4882a593Smuzhiyun 	 * flag. Such flag gets set whenever we have a XferNotReady
2106*4882a593Smuzhiyun 	 * flag. This flag gets set whenever we have an XferNotReady
2107*4882a593Smuzhiyun 	 * same endpoint.
2108*4882a593Smuzhiyun 	 *
2109*4882a593Smuzhiyun 	 * Refers to:
2110*4882a593Smuzhiyun 	 *
2111*4882a593Smuzhiyun 	 * STAR#9000466709: RTL: Device : Disconnect event not
2112*4882a593Smuzhiyun 	 * generated if setup packet pending in FIFO
2113*4882a593Smuzhiyun 	 */
2114*4882a593Smuzhiyun 	if (dwc->revision < DWC3_REVISION_188A) {
2115*4882a593Smuzhiyun 		if (dwc->setup_packet_pending)
2116*4882a593Smuzhiyun 			dwc3_gadget_disconnect_interrupt(dwc);
2117*4882a593Smuzhiyun 	}
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 	dwc3_reset_gadget(dwc);
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2122*4882a593Smuzhiyun 	reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2123*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2124*4882a593Smuzhiyun 	dwc->test_mode = false;
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	dwc3_stop_active_transfers(dwc);
2127*4882a593Smuzhiyun 	dwc3_clear_stall_all_ep(dwc);
2128*4882a593Smuzhiyun 	dwc->start_config_issued = false;
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	/* Reset device address to zero */
2131*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2132*4882a593Smuzhiyun 	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2133*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2134*4882a593Smuzhiyun }
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2137*4882a593Smuzhiyun {
2138*4882a593Smuzhiyun 	u32 reg;
2139*4882a593Smuzhiyun 	u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun 	/*
2142*4882a593Smuzhiyun 	 * We change the clock only at SuperSpeed. The reason is not clear;
2143*4882a593Smuzhiyun 	 * maybe it becomes part of the power saving plan.
2144*4882a593Smuzhiyun 	 */
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	if (speed != DWC3_DSTS_SUPERSPEED)
2147*4882a593Smuzhiyun 		return;
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	/*
2150*4882a593Smuzhiyun 	 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2151*4882a593Smuzhiyun 	 * each time on Connect Done.
2152*4882a593Smuzhiyun 	 */
2153*4882a593Smuzhiyun 	if (!usb30_clock)
2154*4882a593Smuzhiyun 		return;
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2157*4882a593Smuzhiyun 	reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2158*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2159*4882a593Smuzhiyun }
2160*4882a593Smuzhiyun 
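/**
 * dwc3_gadget_conndone_interrupt - Handles the Connection Done event
 * @dwc: pointer to our context structure
 *
 * Reads the negotiated speed from DSTS, updates ep0's wMaxPacketSize and
 * the gadget speed accordingly, enables the USB2 LPM capability on cores
 * newer than 1.94a when not at SuperSpeed, and re-enables both directions
 * of physical endpoint 0.
 */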
2161*4882a593Smuzhiyun static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2162*4882a593Smuzhiyun {
2163*4882a593Smuzhiyun 	struct dwc3_ep		*dep;
2164*4882a593Smuzhiyun 	int			ret;
2165*4882a593Smuzhiyun 	u32			reg;
2166*4882a593Smuzhiyun 	u8			speed;
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2169*4882a593Smuzhiyun 	speed = reg & DWC3_DSTS_CONNECTSPD;
2170*4882a593Smuzhiyun 	dwc->speed = speed;
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun 	dwc3_update_ram_clk_sel(dwc, speed);
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 	switch (speed) {
2175*4882a593Smuzhiyun 	case DWC3_DCFG_SUPERSPEED:
2176*4882a593Smuzhiyun 		/*
2177*4882a593Smuzhiyun 		 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2178*4882a593Smuzhiyun 		 * would cause a missing USB3 Reset event.
2179*4882a593Smuzhiyun 		 *
2180*4882a593Smuzhiyun 		 * In such situations, we should force a USB3 Reset
2181*4882a593Smuzhiyun 		 * event by calling our dwc3_gadget_reset_interrupt()
2182*4882a593Smuzhiyun 		 * routine.
2183*4882a593Smuzhiyun 		 *
2184*4882a593Smuzhiyun 		 * Refers to:
2185*4882a593Smuzhiyun 		 *
2186*4882a593Smuzhiyun 		 * STAR#9000483510: RTL: SS : USB3 reset event may
2187*4882a593Smuzhiyun 		 * not be generated always when the link enters poll
2188*4882a593Smuzhiyun 		 */
2189*4882a593Smuzhiyun 		if (dwc->revision < DWC3_REVISION_190A)
2190*4882a593Smuzhiyun 			dwc3_gadget_reset_interrupt(dwc);
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2193*4882a593Smuzhiyun 		dwc->gadget.ep0->maxpacket = 512;
2194*4882a593Smuzhiyun 		dwc->gadget.speed = USB_SPEED_SUPER;
2195*4882a593Smuzhiyun 		break;
2196*4882a593Smuzhiyun 	case DWC3_DCFG_HIGHSPEED:
2197*4882a593Smuzhiyun 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2198*4882a593Smuzhiyun 		dwc->gadget.ep0->maxpacket = 64;
2199*4882a593Smuzhiyun 		dwc->gadget.speed = USB_SPEED_HIGH;
2200*4882a593Smuzhiyun 		break;
2201*4882a593Smuzhiyun 	case DWC3_DCFG_FULLSPEED2:
2202*4882a593Smuzhiyun 	case DWC3_DCFG_FULLSPEED1:
2203*4882a593Smuzhiyun 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2204*4882a593Smuzhiyun 		dwc->gadget.ep0->maxpacket = 64;
2205*4882a593Smuzhiyun 		dwc->gadget.speed = USB_SPEED_FULL;
2206*4882a593Smuzhiyun 		break;
2207*4882a593Smuzhiyun 	case DWC3_DCFG_LOWSPEED:
2208*4882a593Smuzhiyun 		dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2209*4882a593Smuzhiyun 		dwc->gadget.ep0->maxpacket = 8;
2210*4882a593Smuzhiyun 		dwc->gadget.speed = USB_SPEED_LOW;
2211*4882a593Smuzhiyun 		break;
2212*4882a593Smuzhiyun 	}
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	/* Enable USB2 LPM Capability */
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	if ((dwc->revision > DWC3_REVISION_194A)
2217*4882a593Smuzhiyun 			&& (speed != DWC3_DCFG_SUPERSPEED)) {
2218*4882a593Smuzhiyun 		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2219*4882a593Smuzhiyun 		reg |= DWC3_DCFG_LPM_CAP;
2220*4882a593Smuzhiyun 		dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2223*4882a593Smuzhiyun 		reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 		reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 		/*
2228*4882a593Smuzhiyun 		 * When dwc3 revisions >= 2.40a, the LPM Erratum is enabled and
2229*4882a593Smuzhiyun 		 * DCFG.LPMCap is set, the core responds to an LPM token with an
2230*4882a593Smuzhiyun 		 * ACK when the BESL value in the token is less than or equal to
2231*4882a593Smuzhiyun 		 * the LPM NYET threshold.
2232*4882a593Smuzhiyun 		 */
2233*4882a593Smuzhiyun 		if (dwc->revision < DWC3_REVISION_240A && dwc->has_lpm_erratum)
2234*4882a593Smuzhiyun 			WARN(true, "LPM Erratum not available on dwc3 revisions < 2.40a\n");
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 		if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2237*4882a593Smuzhiyun 			reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2240*4882a593Smuzhiyun 	} else {
2241*4882a593Smuzhiyun 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2242*4882a593Smuzhiyun 		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2243*4882a593Smuzhiyun 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2244*4882a593Smuzhiyun 	}
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	dep = dwc->eps[0];
2247*4882a593Smuzhiyun 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2248*4882a593Smuzhiyun 			false);
2249*4882a593Smuzhiyun 	if (ret) {
2250*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2251*4882a593Smuzhiyun 		return;
2252*4882a593Smuzhiyun 	}
2253*4882a593Smuzhiyun 
2254*4882a593Smuzhiyun 	dep = dwc->eps[1];
2255*4882a593Smuzhiyun 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2256*4882a593Smuzhiyun 			false);
2257*4882a593Smuzhiyun 	if (ret) {
2258*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2259*4882a593Smuzhiyun 		return;
2260*4882a593Smuzhiyun 	}
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	/*
2263*4882a593Smuzhiyun 	 * Configure PHY via GUSB3PIPECTLn if required.
2264*4882a593Smuzhiyun 	 *
2265*4882a593Smuzhiyun 	 * Update GTXFIFOSIZn
2266*4882a593Smuzhiyun 	 *
2267*4882a593Smuzhiyun 	 * In both cases reset values should be sufficient.
2268*4882a593Smuzhiyun 	 */
2269*4882a593Smuzhiyun }
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2272*4882a593Smuzhiyun {
2273*4882a593Smuzhiyun 	/*
2274*4882a593Smuzhiyun 	 * TODO take core out of low power mode when that's
2275*4882a593Smuzhiyun 	 * implemented.
2276*4882a593Smuzhiyun 	 */
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	dwc->gadget_driver->resume(&dwc->gadget);
2279*4882a593Smuzhiyun }
2280*4882a593Smuzhiyun 
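/**
 * dwc3_gadget_linksts_change_interrupt - Handles USB Link State changes
 * @dwc: pointer to our context structure
 * @evtinfo: event information carrying the new link state
 *
 * Ignores the spurious U3 -> RESUME event of cores older than 2.50a,
 * applies the first half of the U1/U2 -> U0 throughput workaround on
 * cores older than 1.83a, suspends or resumes the gadget driver as
 * appropriate and records the new link state.
 */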
2281*4882a593Smuzhiyun static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2282*4882a593Smuzhiyun 		unsigned int evtinfo)
2283*4882a593Smuzhiyun {
2284*4882a593Smuzhiyun 	enum dwc3_link_state	next = evtinfo & DWC3_LINK_STATE_MASK;
2285*4882a593Smuzhiyun 	unsigned int		pwropt;
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	/*
2288*4882a593Smuzhiyun 	 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2289*4882a593Smuzhiyun 	 * Hibernation mode enabled which would show up when device detects
2290*4882a593Smuzhiyun 	 * host-initiated U3 exit.
2291*4882a593Smuzhiyun 	 *
2292*4882a593Smuzhiyun 	 * In that case, device will generate a Link State Change Interrupt
2293*4882a593Smuzhiyun 	 * from U3 to RESUME which is only necessary if Hibernation is
2294*4882a593Smuzhiyun 	 * configured in.
2295*4882a593Smuzhiyun 	 *
2296*4882a593Smuzhiyun 	 * There are no functional changes due to such a spurious event and we
2297*4882a593Smuzhiyun 	 * just need to ignore it.
2298*4882a593Smuzhiyun 	 *
2299*4882a593Smuzhiyun 	 * Refers to:
2300*4882a593Smuzhiyun 	 *
2301*4882a593Smuzhiyun 	 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2302*4882a593Smuzhiyun 	 * operational mode
2303*4882a593Smuzhiyun 	 */
2304*4882a593Smuzhiyun 	pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2305*4882a593Smuzhiyun 	if ((dwc->revision < DWC3_REVISION_250A) &&
2306*4882a593Smuzhiyun 			(pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2307*4882a593Smuzhiyun 		if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2308*4882a593Smuzhiyun 				(next == DWC3_LINK_STATE_RESUME)) {
2309*4882a593Smuzhiyun 			dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2310*4882a593Smuzhiyun 			return;
2311*4882a593Smuzhiyun 		}
2312*4882a593Smuzhiyun 	}
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	/*
2315*4882a593Smuzhiyun 	 * WORKAROUND: DWC3 Revisions < 1.83a have an issue where, depending
2316*4882a593Smuzhiyun 	 * on the link partner, the USB session might do multiple entries and
2317*4882a593Smuzhiyun 	 * exits of low power states before a transfer takes place.
2318*4882a593Smuzhiyun 	 *
2319*4882a593Smuzhiyun 	 * Due to this problem, we might experience lower throughput. The
2320*4882a593Smuzhiyun 	 * suggested workaround is to disable DCTL[12:9] bits if we're
2321*4882a593Smuzhiyun 	 * transitioning from U1/U2 to U0 and enable those bits again
2322*4882a593Smuzhiyun 	 * after a transfer completes and there are no pending transfers
2323*4882a593Smuzhiyun 	 * on any of the enabled endpoints.
2324*4882a593Smuzhiyun 	 *
2325*4882a593Smuzhiyun 	 * This is the first half of that workaround.
2326*4882a593Smuzhiyun 	 *
2327*4882a593Smuzhiyun 	 * Refers to:
2328*4882a593Smuzhiyun 	 *
2329*4882a593Smuzhiyun 	 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2330*4882a593Smuzhiyun 	 * core send LGO_Ux entering U0
2331*4882a593Smuzhiyun 	 */
2332*4882a593Smuzhiyun 	if (dwc->revision < DWC3_REVISION_183A) {
2333*4882a593Smuzhiyun 		if (next == DWC3_LINK_STATE_U0) {
2334*4882a593Smuzhiyun 			u32	u1u2;
2335*4882a593Smuzhiyun 			u32	reg;
2336*4882a593Smuzhiyun 
2337*4882a593Smuzhiyun 			switch (dwc->link_state) {
2338*4882a593Smuzhiyun 			case DWC3_LINK_STATE_U1:
2339*4882a593Smuzhiyun 			case DWC3_LINK_STATE_U2:
2340*4882a593Smuzhiyun 				reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2341*4882a593Smuzhiyun 				u1u2 = reg & (DWC3_DCTL_INITU2ENA
2342*4882a593Smuzhiyun 						| DWC3_DCTL_ACCEPTU2ENA
2343*4882a593Smuzhiyun 						| DWC3_DCTL_INITU1ENA
2344*4882a593Smuzhiyun 						| DWC3_DCTL_ACCEPTU1ENA);
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 				if (!dwc->u1u2)
2347*4882a593Smuzhiyun 					dwc->u1u2 = reg & u1u2;
2348*4882a593Smuzhiyun 
2349*4882a593Smuzhiyun 				reg &= ~u1u2;
2350*4882a593Smuzhiyun 
2351*4882a593Smuzhiyun 				dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2352*4882a593Smuzhiyun 				break;
2353*4882a593Smuzhiyun 			default:
2354*4882a593Smuzhiyun 				/* do nothing */
2355*4882a593Smuzhiyun 				break;
2356*4882a593Smuzhiyun 			}
2357*4882a593Smuzhiyun 		}
2358*4882a593Smuzhiyun 	}
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun 	switch (next) {
2361*4882a593Smuzhiyun 	case DWC3_LINK_STATE_U1:
2362*4882a593Smuzhiyun 		if (dwc->speed == DWC3_DSTS_SUPERSPEED)
2363*4882a593Smuzhiyun 			dwc3_suspend_gadget(dwc);
2364*4882a593Smuzhiyun 		break;
2365*4882a593Smuzhiyun 	case DWC3_LINK_STATE_U2:
2366*4882a593Smuzhiyun 	case DWC3_LINK_STATE_U3:
2367*4882a593Smuzhiyun 		dwc3_suspend_gadget(dwc);
2368*4882a593Smuzhiyun 		break;
2369*4882a593Smuzhiyun 	case DWC3_LINK_STATE_RESUME:
2370*4882a593Smuzhiyun 		dwc3_resume_gadget(dwc);
2371*4882a593Smuzhiyun 		break;
2372*4882a593Smuzhiyun 	default:
2373*4882a593Smuzhiyun 		/* do nothing */
2374*4882a593Smuzhiyun 		break;
2375*4882a593Smuzhiyun 	}
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	dwc->link_state = next;
2378*4882a593Smuzhiyun }
2379*4882a593Smuzhiyun 
dwc3_gadget_hibernation_interrupt(struct dwc3 * dwc,unsigned int evtinfo)2380*4882a593Smuzhiyun static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2381*4882a593Smuzhiyun 		unsigned int evtinfo)
2382*4882a593Smuzhiyun {
2383*4882a593Smuzhiyun 	unsigned int is_ss = evtinfo & (1UL << 4);
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 	/*
2386*4882a593Smuzhiyun 	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2387*4882a593Smuzhiyun 	 * has a known issue which can cause USB CV TD.9.23 to fail
2388*4882a593Smuzhiyun 	 * randomly.
2389*4882a593Smuzhiyun 	 *
2390*4882a593Smuzhiyun 	 * Because of this issue, core could generate bogus hibernation
2391*4882a593Smuzhiyun 	 * events which SW needs to ignore.
2392*4882a593Smuzhiyun 	 *
2393*4882a593Smuzhiyun 	 * Refers to:
2394*4882a593Smuzhiyun 	 *
2395*4882a593Smuzhiyun 	 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2396*4882a593Smuzhiyun 	 * Device Fallback from SuperSpeed
2397*4882a593Smuzhiyun 	 */
2398*4882a593Smuzhiyun 	if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2399*4882a593Smuzhiyun 		return;
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 	/* enter hibernation here */
2402*4882a593Smuzhiyun }
2403*4882a593Smuzhiyun 
dwc3_gadget_interrupt(struct dwc3 * dwc,const struct dwc3_event_devt * event)2404*4882a593Smuzhiyun static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2405*4882a593Smuzhiyun 		const struct dwc3_event_devt *event)
2406*4882a593Smuzhiyun {
2407*4882a593Smuzhiyun 	switch (event->type) {
2408*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_DISCONNECT:
2409*4882a593Smuzhiyun 		dwc3_gadget_disconnect_interrupt(dwc);
2410*4882a593Smuzhiyun 		break;
2411*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_RESET:
2412*4882a593Smuzhiyun 		dwc3_gadget_reset_interrupt(dwc);
2413*4882a593Smuzhiyun 		break;
2414*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_CONNECT_DONE:
2415*4882a593Smuzhiyun 		dwc3_gadget_conndone_interrupt(dwc);
2416*4882a593Smuzhiyun 		break;
2417*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_WAKEUP:
2418*4882a593Smuzhiyun 		dwc3_gadget_wakeup_interrupt(dwc);
2419*4882a593Smuzhiyun 		break;
2420*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_HIBER_REQ:
2421*4882a593Smuzhiyun 		if (!dwc->has_hibernation) {
2422*4882a593Smuzhiyun 			WARN(1, "unexpected hibernation event\n");
2423*4882a593Smuzhiyun 			break;
2424*4882a593Smuzhiyun 		}
2425*4882a593Smuzhiyun 		dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2426*4882a593Smuzhiyun 		break;
2427*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2428*4882a593Smuzhiyun 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2429*4882a593Smuzhiyun 		break;
2430*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_EOPF:
2431*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2432*4882a593Smuzhiyun 		break;
2433*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_SOF:
2434*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2435*4882a593Smuzhiyun 		break;
2436*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2437*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "Erratic Error\n");
2438*4882a593Smuzhiyun 		break;
2439*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_CMD_CMPL:
2440*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "Command Complete\n");
2441*4882a593Smuzhiyun 		break;
2442*4882a593Smuzhiyun 	case DWC3_DEVICE_EVENT_OVERFLOW:
2443*4882a593Smuzhiyun 		dev_vdbg(dwc->dev, "Overflow\n");
2444*4882a593Smuzhiyun 		break;
2445*4882a593Smuzhiyun 	default:
2446*4882a593Smuzhiyun 		dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2447*4882a593Smuzhiyun 	}
2448*4882a593Smuzhiyun }
2449*4882a593Smuzhiyun 
dwc3_process_event_entry(struct dwc3 * dwc,const union dwc3_event * event)2450*4882a593Smuzhiyun static void dwc3_process_event_entry(struct dwc3 *dwc,
2451*4882a593Smuzhiyun 		const union dwc3_event *event)
2452*4882a593Smuzhiyun {
2453*4882a593Smuzhiyun 	/* Endpoint IRQ, handle it and return early */
2454*4882a593Smuzhiyun 	if (event->type.is_devspec == 0) {
2455*4882a593Smuzhiyun 		/* depevt */
2456*4882a593Smuzhiyun 		return dwc3_endpoint_interrupt(dwc, &event->depevt);
2457*4882a593Smuzhiyun 	}
2458*4882a593Smuzhiyun 
2459*4882a593Smuzhiyun 	switch (event->type.type) {
2460*4882a593Smuzhiyun 	case DWC3_EVENT_TYPE_DEV:
2461*4882a593Smuzhiyun 		dwc3_gadget_interrupt(dwc, &event->devt);
2462*4882a593Smuzhiyun 		break;
2463*4882a593Smuzhiyun 	/* REVISIT what to do with Carkit and I2C events ? */
2464*4882a593Smuzhiyun 	default:
2465*4882a593Smuzhiyun 		dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2466*4882a593Smuzhiyun 	}
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun 
dwc3_process_event_buf(struct dwc3 * dwc,u32 buf)2469*4882a593Smuzhiyun static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2470*4882a593Smuzhiyun {
2471*4882a593Smuzhiyun 	struct dwc3_event_buffer *evt;
2472*4882a593Smuzhiyun 	irqreturn_t ret = IRQ_NONE;
2473*4882a593Smuzhiyun 	int left;
2474*4882a593Smuzhiyun 	u32 reg;
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	evt = dwc->ev_buffs[buf];
2477*4882a593Smuzhiyun 	left = evt->count;
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 	if (!(evt->flags & DWC3_EVENT_PENDING))
2480*4882a593Smuzhiyun 		return IRQ_NONE;
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 	while (left > 0) {
2483*4882a593Smuzhiyun 		union dwc3_event event;
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun 		event.raw = *(u32 *) (evt->buf + evt->lpos);
2486*4882a593Smuzhiyun 
2487*4882a593Smuzhiyun 		dwc3_process_event_entry(dwc, &event);
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun 		/*
2490*4882a593Smuzhiyun 		 * FIXME: we wrap around correctly to the next entry as
2491*4882a593Smuzhiyun 		 * almost all entries are 4 bytes in size. There is one
2492*4882a593Smuzhiyun 		 * entry type which is 12 bytes: a regular entry followed
2493*4882a593Smuzhiyun 		 * by 8 bytes of data. It is not yet clear how such an
2494*4882a593Smuzhiyun 		 * entry is laid out when it lands right at the end of
2495*4882a593Smuzhiyun 		 * the event buffer, so deal with that once we actually
2496*4882a593Smuzhiyun 		 * need to handle it.
2497*4882a593Smuzhiyun 		 */
2498*4882a593Smuzhiyun 		evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2499*4882a593Smuzhiyun 		left -= 4;
2500*4882a593Smuzhiyun 
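		/*
		 * Acknowledge the 4 bytes just consumed: writing to
		 * GEVNTCOUNT tells the controller how much of the event
		 * buffer it may reuse.
		 */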
2501*4882a593Smuzhiyun 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2502*4882a593Smuzhiyun 	}
2503*4882a593Smuzhiyun 
2504*4882a593Smuzhiyun 	evt->count = 0;
2505*4882a593Smuzhiyun 	evt->flags &= ~DWC3_EVENT_PENDING;
2506*4882a593Smuzhiyun 	ret = IRQ_HANDLED;
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	/* Unmask interrupt */
2509*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2510*4882a593Smuzhiyun 	reg &= ~DWC3_GEVNTSIZ_INTMASK;
2511*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun 	return ret;
2514*4882a593Smuzhiyun }
2515*4882a593Smuzhiyun 
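/*
 * U-Boot has no threaded interrupt support: both dwc3_interrupt() (the
 * "hard" handler further below) and this "thread" handler are simply called
 * back to back from dwc3_gadget_uboot_handle_interrupt() at the end of this
 * file.
 */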
dwc3_thread_interrupt(int irq,void * _dwc)2516*4882a593Smuzhiyun static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2517*4882a593Smuzhiyun {
2518*4882a593Smuzhiyun 	struct dwc3 *dwc = _dwc;
2519*4882a593Smuzhiyun 	unsigned long flags = 0;
2520*4882a593Smuzhiyun 	irqreturn_t ret = IRQ_NONE;
2521*4882a593Smuzhiyun 	int i;
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 	spin_lock_irqsave(&dwc->lock, flags);
2524*4882a593Smuzhiyun 
2525*4882a593Smuzhiyun 	for (i = 0; i < dwc->num_event_buffers; i++)
2526*4882a593Smuzhiyun 		ret |= dwc3_process_event_buf(dwc, i);
2527*4882a593Smuzhiyun 
2528*4882a593Smuzhiyun 	spin_unlock_irqrestore(&dwc->lock, flags);
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	return ret;
2531*4882a593Smuzhiyun }
2532*4882a593Smuzhiyun 
dwc3_check_event_buf(struct dwc3 * dwc,u32 buf)2533*4882a593Smuzhiyun static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2534*4882a593Smuzhiyun {
2535*4882a593Smuzhiyun 	struct dwc3_event_buffer *evt;
2536*4882a593Smuzhiyun 	u32 count;
2537*4882a593Smuzhiyun 	u32 reg;
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 	evt = dwc->ev_buffs[buf];
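	/*
	 * The controller DMAs events into evt->buf, so invalidate the CPU
	 * cache over the buffer before the entries are read out.
	 */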
2540*4882a593Smuzhiyun 	dwc3_invalidate_cache((uintptr_t)evt->buf, evt->length);
2541*4882a593Smuzhiyun 
2542*4882a593Smuzhiyun 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2543*4882a593Smuzhiyun 	count &= DWC3_GEVNTCOUNT_MASK;
2544*4882a593Smuzhiyun 	if (!count)
2545*4882a593Smuzhiyun 		return IRQ_NONE;
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun 	evt->count = count;
2548*4882a593Smuzhiyun 	evt->flags |= DWC3_EVENT_PENDING;
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 	/* Mask interrupt */
2551*4882a593Smuzhiyun 	reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2552*4882a593Smuzhiyun 	reg |= DWC3_GEVNTSIZ_INTMASK;
2553*4882a593Smuzhiyun 	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2554*4882a593Smuzhiyun 
2555*4882a593Smuzhiyun 	return IRQ_WAKE_THREAD;
2556*4882a593Smuzhiyun }
2557*4882a593Smuzhiyun 
dwc3_interrupt(int irq,void * _dwc)2558*4882a593Smuzhiyun static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2559*4882a593Smuzhiyun {
2560*4882a593Smuzhiyun 	struct dwc3			*dwc = _dwc;
2561*4882a593Smuzhiyun 	int				i;
2562*4882a593Smuzhiyun 	irqreturn_t			ret = IRQ_NONE;
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	spin_lock(&dwc->lock);
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 	for (i = 0; i < dwc->num_event_buffers; i++) {
2567*4882a593Smuzhiyun 		irqreturn_t status;
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 		status = dwc3_check_event_buf(dwc, i);
2570*4882a593Smuzhiyun 		if (status == IRQ_WAKE_THREAD)
2571*4882a593Smuzhiyun 			ret = status;
2572*4882a593Smuzhiyun 	}
2573*4882a593Smuzhiyun 
2574*4882a593Smuzhiyun 	spin_unlock(&dwc->lock);
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun 	return ret;
2577*4882a593Smuzhiyun }
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun /**
2580*4882a593Smuzhiyun  * dwc3_gadget_init - Initializes gadget related registers
2581*4882a593Smuzhiyun  * @dwc: pointer to our controller context structure
2582*4882a593Smuzhiyun  *
2583*4882a593Smuzhiyun  * Returns 0 on success otherwise negative errno.
2584*4882a593Smuzhiyun  */
dwc3_gadget_init(struct dwc3 * dwc)2585*4882a593Smuzhiyun int dwc3_gadget_init(struct dwc3 *dwc)
2586*4882a593Smuzhiyun {
2587*4882a593Smuzhiyun 	int					ret;
2588*4882a593Smuzhiyun 
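	/*
	 * The default control request, the ep0 TRBs and the ep0 bounce
	 * buffer are all accessed by the controller via DMA, so allocate
	 * them from coherent memory.
	 */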
2589*4882a593Smuzhiyun 	dwc->ctrl_req = dma_alloc_coherent(sizeof(*dwc->ctrl_req),
2590*4882a593Smuzhiyun 					(unsigned long *)&dwc->ctrl_req_addr);
2591*4882a593Smuzhiyun 	if (!dwc->ctrl_req) {
2592*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to allocate ctrl request\n");
2593*4882a593Smuzhiyun 		ret = -ENOMEM;
2594*4882a593Smuzhiyun 		goto err0;
2595*4882a593Smuzhiyun 	}
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun 	dwc->ep0_trb = dma_alloc_coherent(sizeof(*dwc->ep0_trb) * 2,
2598*4882a593Smuzhiyun 					  (unsigned long *)&dwc->ep0_trb_addr);
2599*4882a593Smuzhiyun 	if (!dwc->ep0_trb) {
2600*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2601*4882a593Smuzhiyun 		ret = -ENOMEM;
2602*4882a593Smuzhiyun 		goto err1;
2603*4882a593Smuzhiyun 	}
2604*4882a593Smuzhiyun 
2605*4882a593Smuzhiyun 	dwc->setup_buf = memalign(CONFIG_SYS_CACHELINE_SIZE,
2606*4882a593Smuzhiyun 				  DWC3_EP0_BOUNCE_SIZE);
2607*4882a593Smuzhiyun 	if (!dwc->setup_buf) {
2608*4882a593Smuzhiyun 		ret = -ENOMEM;
2609*4882a593Smuzhiyun 		goto err2;
2610*4882a593Smuzhiyun 	}
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 	dwc->ep0_bounce = dma_alloc_coherent(DWC3_EP0_BOUNCE_SIZE,
2613*4882a593Smuzhiyun 					(unsigned long *)&dwc->ep0_bounce_addr);
2614*4882a593Smuzhiyun 	if (!dwc->ep0_bounce) {
2615*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2616*4882a593Smuzhiyun 		ret = -ENOMEM;
2617*4882a593Smuzhiyun 		goto err3;
2618*4882a593Smuzhiyun 	}
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun 	dwc->gadget.ops			= &dwc3_gadget_ops;
2621*4882a593Smuzhiyun 	dwc->gadget.max_speed		= dwc->maximum_speed;
2622*4882a593Smuzhiyun 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
2623*4882a593Smuzhiyun 	dwc->gadget.name		= "dwc3-gadget";
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 	/*
2626*4882a593Smuzhiyun 	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2627*4882a593Smuzhiyun 	 * on ep out.
2628*4882a593Smuzhiyun 	 */
2629*4882a593Smuzhiyun 	dwc->gadget.quirk_ep_out_aligned_size = true;
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 	/*
2632*4882a593Smuzhiyun 	 * REVISIT: Here we should clear all pending IRQs to be
2633*4882a593Smuzhiyun 	 * sure we're starting from a well known location.
2634*4882a593Smuzhiyun 	 */
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	ret = dwc3_gadget_init_endpoints(dwc);
2637*4882a593Smuzhiyun 	if (ret)
2638*4882a593Smuzhiyun 		goto err4;
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	ret = usb_add_gadget_udc((struct device *)dwc->dev, &dwc->gadget);
2641*4882a593Smuzhiyun 	if (ret) {
2642*4882a593Smuzhiyun 		dev_err(dwc->dev, "failed to register udc\n");
2643*4882a593Smuzhiyun 		goto err4;
2644*4882a593Smuzhiyun 	}
2645*4882a593Smuzhiyun 
2646*4882a593Smuzhiyun 	return 0;
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun err4:
2649*4882a593Smuzhiyun 	dwc3_gadget_free_endpoints(dwc);
2650*4882a593Smuzhiyun 	dma_free_coherent(dwc->ep0_bounce);
2651*4882a593Smuzhiyun 
2652*4882a593Smuzhiyun err3:
2653*4882a593Smuzhiyun 	kfree(dwc->setup_buf);
2654*4882a593Smuzhiyun 
2655*4882a593Smuzhiyun err2:
2656*4882a593Smuzhiyun 	dma_free_coherent(dwc->ep0_trb);
2657*4882a593Smuzhiyun 
2658*4882a593Smuzhiyun err1:
2659*4882a593Smuzhiyun 	dma_free_coherent(dwc->ctrl_req);
2660*4882a593Smuzhiyun 
2661*4882a593Smuzhiyun err0:
2662*4882a593Smuzhiyun 	return ret;
2663*4882a593Smuzhiyun }
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun /* -------------------------------------------------------------------------- */
2666*4882a593Smuzhiyun 
dwc3_gadget_exit(struct dwc3 * dwc)2667*4882a593Smuzhiyun void dwc3_gadget_exit(struct dwc3 *dwc)
2668*4882a593Smuzhiyun {
2669*4882a593Smuzhiyun 	usb_del_gadget_udc(&dwc->gadget);
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	dwc3_gadget_free_endpoints(dwc);
2672*4882a593Smuzhiyun 
2673*4882a593Smuzhiyun 	dma_free_coherent(dwc->ep0_bounce);
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun 	kfree(dwc->setup_buf);
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun 	dma_free_coherent(dwc->ep0_trb);
2678*4882a593Smuzhiyun 
2679*4882a593Smuzhiyun 	dma_free_coherent(dwc->ctrl_req);
2680*4882a593Smuzhiyun }
2681*4882a593Smuzhiyun 
2682*4882a593Smuzhiyun /**
2683*4882a593Smuzhiyun  * dwc3_gadget_uboot_handle_interrupt - handle dwc3 gadget interrupt
2684*4882a593Smuzhiyun  * @dwc: pointer to our controller context structure
2685*4882a593Smuzhiyun  *
2686*4882a593Smuzhiyun  * Handles ep0 and gadget interrupts
2687*4882a593Smuzhiyun  *
2688*4882a593Smuzhiyun  * Should be called from dwc3 core.
2689*4882a593Smuzhiyun  */
dwc3_gadget_uboot_handle_interrupt(struct dwc3 * dwc)2690*4882a593Smuzhiyun void dwc3_gadget_uboot_handle_interrupt(struct dwc3 *dwc)
2691*4882a593Smuzhiyun {
2692*4882a593Smuzhiyun 	int ret = dwc3_interrupt(0, dwc);
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	if (ret == IRQ_WAKE_THREAD) {
2695*4882a593Smuzhiyun 		int i;
2696*4882a593Smuzhiyun 		struct dwc3_event_buffer *evt;
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 		dwc3_thread_interrupt(0, dwc);
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun 		/* Clean + Invalidate the buffers after touching them */
2701*4882a593Smuzhiyun 		for (i = 0; i < dwc->num_event_buffers; i++) {
2702*4882a593Smuzhiyun 			evt = dwc->ev_buffs[i];
2703*4882a593Smuzhiyun 			dwc3_flush_cache((uintptr_t)evt->buf, evt->length);
2704*4882a593Smuzhiyun 		}
2705*4882a593Smuzhiyun 	}
2706*4882a593Smuzhiyun }
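/*
 * Illustrative call path (a sketch; the outer hooks live in the dwc3 core
 * and board glue, not in this file): U-Boot polls for controller events
 * rather than taking a real interrupt, so a board's
 * usb_gadget_handle_interrupts() hook typically funnels into the handler
 * above via dwc3_uboot_handle_interrupt(), e.g.:
 *
 *	int usb_gadget_handle_interrupts(int index)
 *	{
 *		dwc3_uboot_handle_interrupt(index);
 *		return 0;
 *	}
 */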
2707