xref: /OK3568_Linux_fs/kernel/drivers/usb/musb/musb_host.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or isochronous.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */

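/*
 * A minimal sketch (illustration only, not part of the driver) of the
 * endpoint policy described above; first_idle_periodic_ep() is a
 * hypothetical helper, and the real claiming logic lives in the
 * scheduling code of this driver.
 */
#if 0
static int pick_hw_ep_sketch(int xfer_type)
{
	switch (xfer_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		return 0;	/* control always uses ep0 */
	case USB_ENDPOINT_XFER_BULK:
		return 1;	/* dedicated bulk IN/OUT endpoint pair */
	default:
		/* INT/ISO: claim an idle endpoint and keep it claimed
		 * until the software queue drains; no multiplexing.
		 */
		return first_idle_periodic_ep();
	}
}
#endif
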
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);

		/*
		 * FIXME: sometimes the tx fifo flush fails; this has been
		 * observed during device disconnect on AM335x.
		 *
		 * To reproduce the issue, ensure tx urb(s) are queued when
		 * unplugging a usb device that is connected to the AM335x
		 * usb host port.
		 *
		 * Using a usb-ethernet device and running iperf (client on
		 * AM335x) has a very high chance of triggering it.
		 *
		 * It helps to turn on musb_dbg() in musb_cleanup_urb() with
		 * CPPI enabled to see the issue when aborting the tx channel.
		 */
		if (dev_WARN_ONCE(musb->controller, retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled(ep->musb))
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	trace_musb_urb_start(musb, urb);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		musb_dbg(musb, "check whether there's still time for periodic Tx");
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			musb_dbg(musb, "SOF for %d", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		musb_dbg(musb, "Start TX%d %s", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
			musb_h_tx_dma_start(hw_ep);
	}
}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	trace_musb_urb_gb(musb, urb);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;
	u16			toggle;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		toggle = musb->io.get_toggle(qh, !is_in);
		usb_settoggle(urb->dev, qh->epnum, !is_in, toggle ? 1 : 0);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			fallthrough;

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		musb_dbg(musb, "... next ep%d %cX urb %p",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{
	struct musb_hw_ep *ep = musb->endpoints + epnum;
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);
	}

	/* scrub all previous state, clearing toggle */
	csr = musb_readw(ep->regs, MUSB_RXCSR);
	if (csr & MUSB_RXCSR_RXPKTRDY)
		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
			musb_readw(ep->regs, MUSB_RXCOUNT));

	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(musb, epnum, qh->addr_reg);
		musb_write_rxhubaddr(musb, epnum, qh->h_addr_reg);
		musb_write_rxhubport(musb, epnum, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	musb_writew(ep->regs, MUSB_RXMAXP,
			qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}

static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset,
		u32 *length, u8 *mode)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;

	if (*length > channel->max_len)
		*length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (*length > pkt_size) {
		*mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to table below
		 * (see also the sketch after this function)
		 * bulk_split hb_mult	Autoset_Enable
		 *	0	1	Yes(Normal)
		 *	0	>1	No(High BW ISO)
		 *	1	1	Yes(HS bulk)
		 *	1	>1	Yes(FS bulk)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		*mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = *mode;
	musb_writew(epio, MUSB_TXCSR, csr);
}
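
/*
 * A minimal sketch (illustration only, not driver code) restating the
 * Autoset decision table above as a standalone predicate; the names
 * mirror the fields used in musb_tx_dma_set_mode_mentor().
 */
#if 0
static bool tx_autoset_allowed_sketch(bool bulk_split, unsigned int hb_mult)
{
	/* Only high-bandwidth ISO (hb_mult > 1 without bulk split) must
	 * run with AUTOSET off; every other row of the table enables it.
	 */
	return hb_mult == 1 || bulk_split;
}
#endif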

static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
					   struct musb_hw_ep *hw_ep,
					   struct musb_qh *qh,
					   struct urb *urb,
					   u32 offset,
					   u32 *length,
					   u8 *mode)
{
	struct dma_channel *channel = hw_ep->tx_channel;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
}

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	u16			pkt_size = qh->maxpacket;
	u8			mode;

	if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
		musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
					    &length, &mode);
	else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
		musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
					       &length, &mode);
	else
		return false;

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting
	 * the DMA transfer.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		void __iomem *epio = hw_ep->regs;
		u16 csr;

		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered)
				csr |= musb->io.set_toggle(qh, is_out, urb);

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(musb, epnum, qh->addr_reg);
			musb_write_txhubaddr(musb, epnum, qh->h_addr_reg);
			musb_write_txhubport(musb, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
						/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16 csr = 0;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, epnum);
			csr |= musb->io.set_toggle(qh, is_out, urb);

			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled(musb) || tusb_dma_omap(musb)) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;
	u16			toggle;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/*
		 * Need to stop the transaction by clearing REQPKT first,
		 * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0
		 * HIGH-SPEED DUAL-ROLE CONTROLLER Programmer's Guide,
		 * section 9.2.2.
		 */
919*4882a593Smuzhiyun 		rx_csr = musb_readw(epio, MUSB_RXCSR);
920*4882a593Smuzhiyun 		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
921*4882a593Smuzhiyun 		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
922*4882a593Smuzhiyun 		musb_writew(epio, MUSB_RXCSR, rx_csr);
923*4882a593Smuzhiyun 		rx_csr &= ~MUSB_RXCSR_DATAERROR;
924*4882a593Smuzhiyun 		musb_writew(epio, MUSB_RXCSR, rx_csr);
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 		cur_qh = first_qh(&musb->in_bulk);
927*4882a593Smuzhiyun 	} else {
928*4882a593Smuzhiyun 		dma = is_dma_capable() ? ep->tx_channel : NULL;
929*4882a593Smuzhiyun 
930*4882a593Smuzhiyun 		/* clear nak timeout bit */
931*4882a593Smuzhiyun 		tx_csr = musb_readw(epio, MUSB_TXCSR);
932*4882a593Smuzhiyun 		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
933*4882a593Smuzhiyun 		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
934*4882a593Smuzhiyun 		musb_writew(epio, MUSB_TXCSR, tx_csr);
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 		cur_qh = first_qh(&musb->out_bulk);
937*4882a593Smuzhiyun 	}
938*4882a593Smuzhiyun 	if (cur_qh) {
939*4882a593Smuzhiyun 		urb = next_urb(cur_qh);
940*4882a593Smuzhiyun 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
941*4882a593Smuzhiyun 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
942*4882a593Smuzhiyun 			musb->dma_controller->channel_abort(dma);
943*4882a593Smuzhiyun 			urb->actual_length += dma->actual_len;
944*4882a593Smuzhiyun 			dma->actual_len = 0L;
945*4882a593Smuzhiyun 		}
946*4882a593Smuzhiyun 		toggle = musb->io.get_toggle(cur_qh, !is_in);
947*4882a593Smuzhiyun 		usb_settoggle(urb->dev, cur_qh->epnum, !is_in, toggle ? 1 : 0);
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun 		if (is_in) {
950*4882a593Smuzhiyun 			/* move cur_qh to end of queue */
951*4882a593Smuzhiyun 			list_move_tail(&cur_qh->ring, &musb->in_bulk);
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 			/* get the next qh from musb->in_bulk */
954*4882a593Smuzhiyun 			next_qh = first_qh(&musb->in_bulk);
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun 			/* set rx_reinit and schedule the next qh */
957*4882a593Smuzhiyun 			ep->rx_reinit = 1;
958*4882a593Smuzhiyun 		} else {
959*4882a593Smuzhiyun 			/* move cur_qh to end of queue */
960*4882a593Smuzhiyun 			list_move_tail(&cur_qh->ring, &musb->out_bulk);
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 			/* get the next qh from musb->out_bulk */
963*4882a593Smuzhiyun 			next_qh = first_qh(&musb->out_bulk);
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun 			/* set tx_reinit and schedule the next qh */
966*4882a593Smuzhiyun 			ep->tx_reinit = 1;
967*4882a593Smuzhiyun 		}
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 		if (next_qh)
970*4882a593Smuzhiyun 			musb_start_urb(musb, is_in, next_qh);
971*4882a593Smuzhiyun 	}
972*4882a593Smuzhiyun }
973*4882a593Smuzhiyun 
974*4882a593Smuzhiyun /*
975*4882a593Smuzhiyun  * Service the default endpoint (ep0) as host.
976*4882a593Smuzhiyun  * Return true until it's time to start the status stage.
977*4882a593Smuzhiyun  */
musb_h_ep0_continue(struct musb * musb,u16 len,struct urb * urb)978*4882a593Smuzhiyun static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
979*4882a593Smuzhiyun {
980*4882a593Smuzhiyun 	bool			 more = false;
981*4882a593Smuzhiyun 	u8			*fifo_dest = NULL;
982*4882a593Smuzhiyun 	u16			fifo_count = 0;
983*4882a593Smuzhiyun 	struct musb_hw_ep	*hw_ep = musb->control_ep;
984*4882a593Smuzhiyun 	struct musb_qh		*qh = hw_ep->in_qh;
985*4882a593Smuzhiyun 	struct usb_ctrlrequest	*request;
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun 	switch (musb->ep0_stage) {
988*4882a593Smuzhiyun 	case MUSB_EP0_IN:
989*4882a593Smuzhiyun 		fifo_dest = urb->transfer_buffer + urb->actual_length;
990*4882a593Smuzhiyun 		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
991*4882a593Smuzhiyun 				   urb->actual_length);
992*4882a593Smuzhiyun 		if (fifo_count < len)
993*4882a593Smuzhiyun 			urb->status = -EOVERFLOW;
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun 		musb_read_fifo(hw_ep, fifo_count, fifo_dest);
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 		urb->actual_length += fifo_count;
998*4882a593Smuzhiyun 		if (len < qh->maxpacket) {
999*4882a593Smuzhiyun 			/* always terminate on short read; it's
1000*4882a593Smuzhiyun 			 * rarely reported as an error.
1001*4882a593Smuzhiyun 			 */
1002*4882a593Smuzhiyun 		} else if (urb->actual_length <
1003*4882a593Smuzhiyun 				urb->transfer_buffer_length)
1004*4882a593Smuzhiyun 			more = true;
1005*4882a593Smuzhiyun 		break;
1006*4882a593Smuzhiyun 	case MUSB_EP0_START:
1007*4882a593Smuzhiyun 		request = (struct usb_ctrlrequest *) urb->setup_packet;
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun 		if (!request->wLength) {
1010*4882a593Smuzhiyun 			musb_dbg(musb, "start no-DATA");
1011*4882a593Smuzhiyun 			break;
1012*4882a593Smuzhiyun 		} else if (request->bRequestType & USB_DIR_IN) {
1013*4882a593Smuzhiyun 			musb_dbg(musb, "start IN-DATA");
1014*4882a593Smuzhiyun 			musb->ep0_stage = MUSB_EP0_IN;
1015*4882a593Smuzhiyun 			more = true;
1016*4882a593Smuzhiyun 			break;
1017*4882a593Smuzhiyun 		} else {
1018*4882a593Smuzhiyun 			musb_dbg(musb, "start OUT-DATA");
1019*4882a593Smuzhiyun 			musb->ep0_stage = MUSB_EP0_OUT;
1020*4882a593Smuzhiyun 			more = true;
1021*4882a593Smuzhiyun 		}
1022*4882a593Smuzhiyun 		fallthrough;
1023*4882a593Smuzhiyun 	case MUSB_EP0_OUT:
1024*4882a593Smuzhiyun 		fifo_count = min_t(size_t, qh->maxpacket,
1025*4882a593Smuzhiyun 				   urb->transfer_buffer_length -
1026*4882a593Smuzhiyun 				   urb->actual_length);
1027*4882a593Smuzhiyun 		if (fifo_count) {
1028*4882a593Smuzhiyun 			fifo_dest = (u8 *) (urb->transfer_buffer
1029*4882a593Smuzhiyun 					+ urb->actual_length);
1030*4882a593Smuzhiyun 			musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
1031*4882a593Smuzhiyun 					fifo_count,
1032*4882a593Smuzhiyun 					(fifo_count == 1) ? "" : "s",
1033*4882a593Smuzhiyun 					fifo_dest);
1034*4882a593Smuzhiyun 			musb_write_fifo(hw_ep, fifo_count, fifo_dest);
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 			urb->actual_length += fifo_count;
1037*4882a593Smuzhiyun 			more = true;
1038*4882a593Smuzhiyun 		}
1039*4882a593Smuzhiyun 		break;
1040*4882a593Smuzhiyun 	default:
1041*4882a593Smuzhiyun 		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
1042*4882a593Smuzhiyun 		break;
1043*4882a593Smuzhiyun 	}
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 	return more;
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun /*
1049*4882a593Smuzhiyun  * Handle default endpoint interrupt as host. Only called in IRQ time
1050*4882a593Smuzhiyun  * from musb_interrupt().
1051*4882a593Smuzhiyun  *
1052*4882a593Smuzhiyun  * called with controller irqlocked
1053*4882a593Smuzhiyun  */
musb_h_ep0_irq(struct musb * musb)1054*4882a593Smuzhiyun irqreturn_t musb_h_ep0_irq(struct musb *musb)
1055*4882a593Smuzhiyun {
1056*4882a593Smuzhiyun 	struct urb		*urb;
1057*4882a593Smuzhiyun 	u16			csr, len;
1058*4882a593Smuzhiyun 	int			status = 0;
1059*4882a593Smuzhiyun 	void __iomem		*mbase = musb->mregs;
1060*4882a593Smuzhiyun 	struct musb_hw_ep	*hw_ep = musb->control_ep;
1061*4882a593Smuzhiyun 	void __iomem		*epio = hw_ep->regs;
1062*4882a593Smuzhiyun 	struct musb_qh		*qh = hw_ep->in_qh;
1063*4882a593Smuzhiyun 	bool			complete = false;
1064*4882a593Smuzhiyun 	irqreturn_t		retval = IRQ_NONE;
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun 	/* ep0 only has one queue, "in" */
1067*4882a593Smuzhiyun 	urb = next_urb(qh);
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	musb_ep_select(mbase, 0);
1070*4882a593Smuzhiyun 	csr = musb_readw(epio, MUSB_CSR0);
1071*4882a593Smuzhiyun 	len = (csr & MUSB_CSR0_RXPKTRDY)
1072*4882a593Smuzhiyun 			? musb_readb(epio, MUSB_COUNT0)
1073*4882a593Smuzhiyun 			: 0;
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
1076*4882a593Smuzhiyun 		csr, qh, len, urb, musb->ep0_stage);
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	/* if we just did status stage, we are done */
1079*4882a593Smuzhiyun 	if (MUSB_EP0_STATUS == musb->ep0_stage) {
1080*4882a593Smuzhiyun 		retval = IRQ_HANDLED;
1081*4882a593Smuzhiyun 		complete = true;
1082*4882a593Smuzhiyun 	}
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	/* prepare status */
1085*4882a593Smuzhiyun 	if (csr & MUSB_CSR0_H_RXSTALL) {
1086*4882a593Smuzhiyun 		musb_dbg(musb, "STALLING ENDPOINT");
1087*4882a593Smuzhiyun 		status = -EPIPE;
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	} else if (csr & MUSB_CSR0_H_ERROR) {
1090*4882a593Smuzhiyun 		musb_dbg(musb, "no response, csr0 %04x", csr);
1091*4882a593Smuzhiyun 		status = -EPROTO;
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1094*4882a593Smuzhiyun 		musb_dbg(musb, "control NAK timeout");
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 		/* NOTE:  this code path would be a good place to PAUSE a
1097*4882a593Smuzhiyun 		 * control transfer, if another one is queued, so that
1098*4882a593Smuzhiyun 		 * ep0 is more likely to stay busy.  That's already done
1099*4882a593Smuzhiyun 		 * for bulk RX transfers.
1100*4882a593Smuzhiyun 		 *
1101*4882a593Smuzhiyun 		 * if (qh->ring.next != &musb->control), then
1102*4882a593Smuzhiyun 		 * we have a candidate... NAKing is *NOT* an error
1103*4882a593Smuzhiyun 		 */
1104*4882a593Smuzhiyun 		musb_writew(epio, MUSB_CSR0, 0);
1105*4882a593Smuzhiyun 		retval = IRQ_HANDLED;
1106*4882a593Smuzhiyun 	}
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	if (status) {
1109*4882a593Smuzhiyun 		musb_dbg(musb, "aborting");
1110*4882a593Smuzhiyun 		retval = IRQ_HANDLED;
1111*4882a593Smuzhiyun 		if (urb)
1112*4882a593Smuzhiyun 			urb->status = status;
1113*4882a593Smuzhiyun 		complete = true;
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 		/* use the proper sequence to abort the transfer */
1116*4882a593Smuzhiyun 		if (csr & MUSB_CSR0_H_REQPKT) {
1117*4882a593Smuzhiyun 			csr &= ~MUSB_CSR0_H_REQPKT;
1118*4882a593Smuzhiyun 			musb_writew(epio, MUSB_CSR0, csr);
1119*4882a593Smuzhiyun 			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1120*4882a593Smuzhiyun 			musb_writew(epio, MUSB_CSR0, csr);
1121*4882a593Smuzhiyun 		} else {
1122*4882a593Smuzhiyun 			musb_h_ep0_flush_fifo(hw_ep);
1123*4882a593Smuzhiyun 		}
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 		musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 		/* clear it */
1128*4882a593Smuzhiyun 		musb_writew(epio, MUSB_CSR0, 0);
1129*4882a593Smuzhiyun 	}
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	if (unlikely(!urb)) {
1132*4882a593Smuzhiyun 		/* stop endpoint since we have no place for its data, this
1133*4882a593Smuzhiyun 		 * SHOULD NEVER HAPPEN! */
1134*4882a593Smuzhiyun 		ERR("no URB for end 0\n");
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 		musb_h_ep0_flush_fifo(hw_ep);
1137*4882a593Smuzhiyun 		goto done;
1138*4882a593Smuzhiyun 	}
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	if (!complete) {
1141*4882a593Smuzhiyun 		/* call common logic and prepare response */
1142*4882a593Smuzhiyun 		if (musb_h_ep0_continue(musb, len, urb)) {
1143*4882a593Smuzhiyun 			/* more packets required */
1144*4882a593Smuzhiyun 			csr = (MUSB_EP0_IN == musb->ep0_stage)
1145*4882a593Smuzhiyun 				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1146*4882a593Smuzhiyun 		} else {
1147*4882a593Smuzhiyun 			/* data transfer complete; perform status phase */
1148*4882a593Smuzhiyun 			if (usb_pipeout(urb->pipe)
1149*4882a593Smuzhiyun 					|| !urb->transfer_buffer_length)
1150*4882a593Smuzhiyun 				csr = MUSB_CSR0_H_STATUSPKT
1151*4882a593Smuzhiyun 					| MUSB_CSR0_H_REQPKT;
1152*4882a593Smuzhiyun 			else
1153*4882a593Smuzhiyun 				csr = MUSB_CSR0_H_STATUSPKT
1154*4882a593Smuzhiyun 					| MUSB_CSR0_TXPKTRDY;
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 			/* disable ping token in status phase */
1157*4882a593Smuzhiyun 			csr |= MUSB_CSR0_H_DIS_PING;
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 			/* flag status stage */
1160*4882a593Smuzhiyun 			musb->ep0_stage = MUSB_EP0_STATUS;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 			musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 		}
1165*4882a593Smuzhiyun 		musb_writew(epio, MUSB_CSR0, csr);
1166*4882a593Smuzhiyun 		retval = IRQ_HANDLED;
1167*4882a593Smuzhiyun 	} else
1168*4882a593Smuzhiyun 		musb->ep0_stage = MUSB_EP0_IDLE;
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	/* call completion handler if done */
1171*4882a593Smuzhiyun 	if (complete)
1172*4882a593Smuzhiyun 		musb_advance_schedule(musb, urb, hw_ep, 1);
1173*4882a593Smuzhiyun done:
1174*4882a593Smuzhiyun 	return retval;
1175*4882a593Smuzhiyun }
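
/*
 * Editor's illustrative sketch (not built): the status-phase direction
 * rule applied above, as a standalone helper.  The status stage always
 * runs opposite to the data stage: OUT (or no-data) control transfers
 * finish with an IN status packet, IN transfers with an OUT status
 * packet.  The helper name is hypothetical; note the caller above also
 * ORs in MUSB_CSR0_H_DIS_PING before writing CSR0.
 */
#if 0
static u16 musb_h_ep0_status_csr(struct urb *urb)
{
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		return MUSB_CSR0_H_STATUSPKT | MUSB_CSR0_H_REQPKT;

	return MUSB_CSR0_H_STATUSPKT | MUSB_CSR0_TXPKTRDY;
}
#endif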
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun #ifdef CONFIG_USB_INVENTRA_DMA
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun /* Host side TX (OUT) using Mentor DMA works as follows:
1181*4882a593Smuzhiyun 	submit_urb ->
1182*4882a593Smuzhiyun 		- if queue was empty, Program Endpoint
1183*4882a593Smuzhiyun 		- ... which starts DMA to fifo in mode 1 or 0
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	DMA Isr (transfer complete) -> TxAvail()
1186*4882a593Smuzhiyun 		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
1187*4882a593Smuzhiyun 					only in musb_cleanup_urb)
1188*4882a593Smuzhiyun 		- TxPktRdy has to be set in mode 0 or for
1189*4882a593Smuzhiyun 			short packets in mode 1.
1190*4882a593Smuzhiyun */
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun #endif
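
/*
 * Editor's illustrative sketch (not built), summarizing the note above:
 * mode 1 is only worth using for multi-packet transfers, and software
 * must still set TXPKTRDY itself in mode 0 and for a trailing short
 * packet in mode 1.  The helper name is hypothetical.
 */
#if 0
static int musb_h_tx_dma_mode(size_t length, u16 maxpacket)
{
	/* whole multi-packet transfer: mode 1; single packet: mode 0 */
	return length > maxpacket ? 1 : 0;
}
#endif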
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun /* Service a Tx-Available or dma completion irq for the endpoint */
1195*4882a593Smuzhiyun void musb_host_tx(struct musb *musb, u8 epnum)
1196*4882a593Smuzhiyun {
1197*4882a593Smuzhiyun 	int			pipe;
1198*4882a593Smuzhiyun 	bool			done = false;
1199*4882a593Smuzhiyun 	u16			tx_csr;
1200*4882a593Smuzhiyun 	size_t			length = 0;
1201*4882a593Smuzhiyun 	size_t			offset = 0;
1202*4882a593Smuzhiyun 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1203*4882a593Smuzhiyun 	void __iomem		*epio = hw_ep->regs;
1204*4882a593Smuzhiyun 	struct musb_qh		*qh = hw_ep->out_qh;
1205*4882a593Smuzhiyun 	struct urb		*urb = next_urb(qh);
1206*4882a593Smuzhiyun 	u32			status = 0;
1207*4882a593Smuzhiyun 	void __iomem		*mbase = musb->mregs;
1208*4882a593Smuzhiyun 	struct dma_channel	*dma;
1209*4882a593Smuzhiyun 	bool			transfer_pending = false;
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	musb_ep_select(mbase, epnum);
1212*4882a593Smuzhiyun 	tx_csr = musb_readw(epio, MUSB_TXCSR);
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	/* with CPPI, DMA sometimes triggers "extra" irqs */
1215*4882a593Smuzhiyun 	if (!urb) {
1216*4882a593Smuzhiyun 		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
1217*4882a593Smuzhiyun 		return;
1218*4882a593Smuzhiyun 	}
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	pipe = urb->pipe;
1221*4882a593Smuzhiyun 	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1222*4882a593Smuzhiyun 	trace_musb_urb_tx(musb, urb);
1223*4882a593Smuzhiyun 	musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
1224*4882a593Smuzhiyun 			dma ? ", dma" : "");
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	/* check for errors */
1227*4882a593Smuzhiyun 	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1228*4882a593Smuzhiyun 		/* dma was disabled, fifo flushed */
1229*4882a593Smuzhiyun 		musb_dbg(musb, "TX end %d stall", epnum);
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 		/* stall; record URB status */
1232*4882a593Smuzhiyun 		status = -EPIPE;
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1235*4882a593Smuzhiyun 		/* (NON-ISO) dma was disabled, fifo flushed */
1236*4882a593Smuzhiyun 		musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 		status = -ETIMEDOUT;
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1241*4882a593Smuzhiyun 		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
1242*4882a593Smuzhiyun 				&& !list_is_singular(&musb->out_bulk)) {
1243*4882a593Smuzhiyun 			musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
1244*4882a593Smuzhiyun 			musb_bulk_nak_timeout(musb, hw_ep, 0);
1245*4882a593Smuzhiyun 		} else {
1246*4882a593Smuzhiyun 			musb_dbg(musb, "TX ep%d device not responding", epnum);
1247*4882a593Smuzhiyun 			/* NOTE:  this code path would be a good place to PAUSE a
1248*4882a593Smuzhiyun 			 * transfer, if there's some other (nonperiodic) tx urb
1249*4882a593Smuzhiyun 			 * that could use this fifo.  (dma complicates it...)
1250*4882a593Smuzhiyun 			 * That's already done for bulk RX transfers.
1251*4882a593Smuzhiyun 			 *
1252*4882a593Smuzhiyun 			 * if (bulk && qh->ring.next != &musb->out_bulk), then
1253*4882a593Smuzhiyun 			 * we have a candidate... NAKing is *NOT* an error
1254*4882a593Smuzhiyun 			 */
1255*4882a593Smuzhiyun 			musb_ep_select(mbase, epnum);
1256*4882a593Smuzhiyun 			musb_writew(epio, MUSB_TXCSR,
1257*4882a593Smuzhiyun 					MUSB_TXCSR_H_WZC_BITS
1258*4882a593Smuzhiyun 					| MUSB_TXCSR_TXPKTRDY);
1259*4882a593Smuzhiyun 		}
1260*4882a593Smuzhiyun 		return;
1261*4882a593Smuzhiyun 	}
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun done:
1264*4882a593Smuzhiyun 	if (status) {
1265*4882a593Smuzhiyun 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1266*4882a593Smuzhiyun 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1267*4882a593Smuzhiyun 			musb->dma_controller->channel_abort(dma);
1268*4882a593Smuzhiyun 		}
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 		/* do the proper sequence to abort the transfer in the
1271*4882a593Smuzhiyun 		 * usb core; the dma engine should already be stopped.
1272*4882a593Smuzhiyun 		 */
1273*4882a593Smuzhiyun 		musb_h_tx_flush_fifo(hw_ep);
1274*4882a593Smuzhiyun 		tx_csr &= ~(MUSB_TXCSR_AUTOSET
1275*4882a593Smuzhiyun 				| MUSB_TXCSR_DMAENAB
1276*4882a593Smuzhiyun 				| MUSB_TXCSR_H_ERROR
1277*4882a593Smuzhiyun 				| MUSB_TXCSR_H_RXSTALL
1278*4882a593Smuzhiyun 				| MUSB_TXCSR_H_NAKTIMEOUT
1279*4882a593Smuzhiyun 				);
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 		musb_ep_select(mbase, epnum);
1282*4882a593Smuzhiyun 		musb_writew(epio, MUSB_TXCSR, tx_csr);
1283*4882a593Smuzhiyun 		/* REVISIT may need to clear FLUSHFIFO ... */
1284*4882a593Smuzhiyun 		musb_writew(epio, MUSB_TXCSR, tx_csr);
1285*4882a593Smuzhiyun 		musb_writeb(epio, MUSB_TXINTERVAL, 0);
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 		done = true;
1288*4882a593Smuzhiyun 	}
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	/* second cppi case */
1291*4882a593Smuzhiyun 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1292*4882a593Smuzhiyun 		musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
1293*4882a593Smuzhiyun 		return;
1294*4882a593Smuzhiyun 	}
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	if (is_dma_capable() && dma && !status) {
1297*4882a593Smuzhiyun 		/*
1298*4882a593Smuzhiyun 		 * DMA has completed.  But if we're using DMA mode 1 (multi
1299*4882a593Smuzhiyun 		 * packet DMA), we need a terminal TXPKTRDY interrupt before
1300*4882a593Smuzhiyun 		 * we can consider this transfer completed, lest we trash
1301*4882a593Smuzhiyun 		 * its last packet when writing the next URB's data.  So we
1302*4882a593Smuzhiyun 		 * switch back to mode 0 to get that interrupt; we'll come
1303*4882a593Smuzhiyun 		 * back here once it happens.
1304*4882a593Smuzhiyun 		 */
1305*4882a593Smuzhiyun 		if (tx_csr & MUSB_TXCSR_DMAMODE) {
1306*4882a593Smuzhiyun 			/*
1307*4882a593Smuzhiyun 			 * We shouldn't clear DMAMODE with DMAENAB set; so
1308*4882a593Smuzhiyun 			 * clear them in a safe order.  That should be OK
1309*4882a593Smuzhiyun 			 * once TXPKTRDY has been set (and I've never seen
1310*4882a593Smuzhiyun 			 * it being 0 at this moment -- DMA interrupt latency
1311*4882a593Smuzhiyun 			 * is significant) but if it hasn't been then we have
1312*4882a593Smuzhiyun 			 * no choice but to stop being polite and ignore the
1313*4882a593Smuzhiyun 			 * programmer's guide... :-)
1314*4882a593Smuzhiyun 			 *
1315*4882a593Smuzhiyun 			 * Note that we must write TXCSR with TXPKTRDY cleared
1316*4882a593Smuzhiyun 			 * in order not to re-trigger the packet send (this bit
1317*4882a593Smuzhiyun 			 * can't be cleared by CPU), and there's another caveat:
1318*4882a593Smuzhiyun 			 * TXPKTRDY may be set shortly and then cleared in the
1319*4882a593Smuzhiyun 			 * double-buffered FIFO mode, so we do an extra TXCSR
1320*4882a593Smuzhiyun 			 * read for debouncing...
1321*4882a593Smuzhiyun 			 */
1322*4882a593Smuzhiyun 			tx_csr &= musb_readw(epio, MUSB_TXCSR);
1323*4882a593Smuzhiyun 			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
1324*4882a593Smuzhiyun 				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
1325*4882a593Smuzhiyun 					    MUSB_TXCSR_TXPKTRDY);
1326*4882a593Smuzhiyun 				musb_writew(epio, MUSB_TXCSR,
1327*4882a593Smuzhiyun 					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
1328*4882a593Smuzhiyun 			}
1329*4882a593Smuzhiyun 			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
1330*4882a593Smuzhiyun 				    MUSB_TXCSR_TXPKTRDY);
1331*4882a593Smuzhiyun 			musb_writew(epio, MUSB_TXCSR,
1332*4882a593Smuzhiyun 				    tx_csr | MUSB_TXCSR_H_WZC_BITS);
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 			/*
1335*4882a593Smuzhiyun 			 * There is no guarantee that we'll get an interrupt
1336*4882a593Smuzhiyun 			 * after clearing DMAMODE as we might have done this
1337*4882a593Smuzhiyun 			 * too late (after TXPKTRDY was cleared by controller).
1338*4882a593Smuzhiyun 			 * Re-read TXCSR as we have spoiled its previous value.
1339*4882a593Smuzhiyun 			 */
1340*4882a593Smuzhiyun 			tx_csr = musb_readw(epio, MUSB_TXCSR);
1341*4882a593Smuzhiyun 		}
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 		/*
1344*4882a593Smuzhiyun 		 * We may get here from a DMA completion or TXPKTRDY interrupt.
1345*4882a593Smuzhiyun 		 * In any case, we must check the FIFO status here and bail out
1346*4882a593Smuzhiyun 		 * only if the FIFO still has data -- that should prevent the
1347*4882a593Smuzhiyun 		 * "missed" TXPKTRDY interrupts and deal with double-buffered
1348*4882a593Smuzhiyun 		 * FIFO mode too...
1349*4882a593Smuzhiyun 		 */
1350*4882a593Smuzhiyun 		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
1351*4882a593Smuzhiyun 			musb_dbg(musb,
1352*4882a593Smuzhiyun 				"DMA complete but FIFO not empty, CSR %04x",
1353*4882a593Smuzhiyun 				tx_csr);
1354*4882a593Smuzhiyun 			return;
1355*4882a593Smuzhiyun 		}
1356*4882a593Smuzhiyun 	}
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (!status || dma || usb_pipeisoc(pipe)) {
1359*4882a593Smuzhiyun 		if (dma)
1360*4882a593Smuzhiyun 			length = dma->actual_len;
1361*4882a593Smuzhiyun 		else
1362*4882a593Smuzhiyun 			length = qh->segsize;
1363*4882a593Smuzhiyun 		qh->offset += length;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 		if (usb_pipeisoc(pipe)) {
1366*4882a593Smuzhiyun 			struct usb_iso_packet_descriptor	*d;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 			d = urb->iso_frame_desc + qh->iso_idx;
1369*4882a593Smuzhiyun 			d->actual_length = length;
1370*4882a593Smuzhiyun 			d->status = status;
1371*4882a593Smuzhiyun 			if (++qh->iso_idx >= urb->number_of_packets) {
1372*4882a593Smuzhiyun 				done = true;
1373*4882a593Smuzhiyun 			} else {
1374*4882a593Smuzhiyun 				d++;
1375*4882a593Smuzhiyun 				offset = d->offset;
1376*4882a593Smuzhiyun 				length = d->length;
1377*4882a593Smuzhiyun 			}
1378*4882a593Smuzhiyun 		} else if (dma && urb->transfer_buffer_length == qh->offset) {
1379*4882a593Smuzhiyun 			done = true;
1380*4882a593Smuzhiyun 		} else {
1381*4882a593Smuzhiyun 			/* see if we need to send more data, or ZLP */
1382*4882a593Smuzhiyun 			if (qh->segsize < qh->maxpacket)
1383*4882a593Smuzhiyun 				done = true;
1384*4882a593Smuzhiyun 			else if (qh->offset == urb->transfer_buffer_length
1385*4882a593Smuzhiyun 					&& !(urb->transfer_flags
1386*4882a593Smuzhiyun 						& URB_ZERO_PACKET))
1387*4882a593Smuzhiyun 				done = true;
1388*4882a593Smuzhiyun 			if (!done) {
1389*4882a593Smuzhiyun 				offset = qh->offset;
1390*4882a593Smuzhiyun 				length = urb->transfer_buffer_length - offset;
1391*4882a593Smuzhiyun 				transfer_pending = true;
1392*4882a593Smuzhiyun 			}
1393*4882a593Smuzhiyun 		}
1394*4882a593Smuzhiyun 	}
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	/* urb->status != -EINPROGRESS means request has been faulted,
1397*4882a593Smuzhiyun 	 * so we must abort this transfer after cleanup
1398*4882a593Smuzhiyun 	 */
1399*4882a593Smuzhiyun 	if (urb->status != -EINPROGRESS) {
1400*4882a593Smuzhiyun 		done = true;
1401*4882a593Smuzhiyun 		if (status == 0)
1402*4882a593Smuzhiyun 			status = urb->status;
1403*4882a593Smuzhiyun 	}
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	if (done) {
1406*4882a593Smuzhiyun 		/* set status */
1407*4882a593Smuzhiyun 		urb->status = status;
1408*4882a593Smuzhiyun 		urb->actual_length = qh->offset;
1409*4882a593Smuzhiyun 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1410*4882a593Smuzhiyun 		return;
1411*4882a593Smuzhiyun 	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1412*4882a593Smuzhiyun 		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1413*4882a593Smuzhiyun 				offset, length)) {
1414*4882a593Smuzhiyun 			if (is_cppi_enabled(musb) || tusb_dma_omap(musb))
1415*4882a593Smuzhiyun 				musb_h_tx_dma_start(hw_ep);
1416*4882a593Smuzhiyun 			return;
1417*4882a593Smuzhiyun 		}
1418*4882a593Smuzhiyun 	} else	if (tx_csr & MUSB_TXCSR_DMAENAB) {
1419*4882a593Smuzhiyun 		musb_dbg(musb, "not complete, but DMA enabled?");
1420*4882a593Smuzhiyun 		return;
1421*4882a593Smuzhiyun 	}
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	/*
1424*4882a593Smuzhiyun 	 * PIO: start next packet in this URB.
1425*4882a593Smuzhiyun 	 *
1426*4882a593Smuzhiyun 	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
1427*4882a593Smuzhiyun 	 * (and presumably, FIFO is not half-full) we should write *two*
1428*4882a593Smuzhiyun 	 * packets before updating TXCSR; other docs disagree...
1429*4882a593Smuzhiyun 	 */
1430*4882a593Smuzhiyun 	if (length > qh->maxpacket)
1431*4882a593Smuzhiyun 		length = qh->maxpacket;
1432*4882a593Smuzhiyun 	/* Unmap the buffer so that CPU can use it */
1433*4882a593Smuzhiyun 	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	/*
1436*4882a593Smuzhiyun 	 * We need to map sg if the transfer_buffer is
1437*4882a593Smuzhiyun 	 * NULL.
1438*4882a593Smuzhiyun 	 */
1439*4882a593Smuzhiyun 	if (!urb->transfer_buffer) {
1440*4882a593Smuzhiyun 		/* sg_miter_start is already done in musb_ep_program */
1441*4882a593Smuzhiyun 		if (!sg_miter_next(&qh->sg_miter)) {
1442*4882a593Smuzhiyun 			dev_err(musb->controller, "error: sg list empty\n");
1443*4882a593Smuzhiyun 			sg_miter_stop(&qh->sg_miter);
1444*4882a593Smuzhiyun 			status = -EINVAL;
1445*4882a593Smuzhiyun 			goto done;
1446*4882a593Smuzhiyun 		}
1447*4882a593Smuzhiyun 		length = min_t(u32, length, qh->sg_miter.length);
1448*4882a593Smuzhiyun 		musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
1449*4882a593Smuzhiyun 		qh->sg_miter.consumed = length;
1450*4882a593Smuzhiyun 		sg_miter_stop(&qh->sg_miter);
1451*4882a593Smuzhiyun 	} else {
1452*4882a593Smuzhiyun 		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1453*4882a593Smuzhiyun 	}
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	qh->segsize = length;
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	musb_ep_select(mbase, epnum);
1458*4882a593Smuzhiyun 	musb_writew(epio, MUSB_TXCSR,
1459*4882a593Smuzhiyun 			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1460*4882a593Smuzhiyun }
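
/*
 * Editor's illustrative sketch (not built): the OUT-side "transfer
 * complete?" test used in the PIO path above, extracted as a predicate.
 * A short packet always terminates the transfer; a full buffer does
 * too, unless the URB asked for a trailing zero-length packet.  The
 * helper name is hypothetical.
 */
#if 0
static bool musb_h_tx_urb_done(struct musb_qh *qh, struct urb *urb)
{
	if (qh->segsize < qh->maxpacket)
		return true;	/* short packet sent */

	return qh->offset == urb->transfer_buffer_length &&
	       !(urb->transfer_flags & URB_ZERO_PACKET);
}
#endif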
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun #ifdef CONFIG_USB_TI_CPPI41_DMA
1463*4882a593Smuzhiyun /* Seems to set up the ISO transfer for cppi41 without advancing len; see commit c57c41d */
1464*4882a593Smuzhiyun static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1465*4882a593Smuzhiyun 				  struct musb_hw_ep *hw_ep,
1466*4882a593Smuzhiyun 				  struct musb_qh *qh,
1467*4882a593Smuzhiyun 				  struct urb *urb,
1468*4882a593Smuzhiyun 				  size_t len)
1469*4882a593Smuzhiyun {
1470*4882a593Smuzhiyun 	struct dma_channel *channel = hw_ep->rx_channel;
1471*4882a593Smuzhiyun 	void __iomem *epio = hw_ep->regs;
1472*4882a593Smuzhiyun 	dma_addr_t *buf;
1473*4882a593Smuzhiyun 	u32 length;
1474*4882a593Smuzhiyun 	u16 val;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
1477*4882a593Smuzhiyun 		(u32)urb->transfer_dma;
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	length = urb->iso_frame_desc[qh->iso_idx].length;
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	val = musb_readw(epio, MUSB_RXCSR);
1482*4882a593Smuzhiyun 	val |= MUSB_RXCSR_DMAENAB;
1483*4882a593Smuzhiyun 	musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	return dma->channel_program(channel, qh->maxpacket, 0,
1486*4882a593Smuzhiyun 				   (u32)buf, length);
1487*4882a593Smuzhiyun }
1488*4882a593Smuzhiyun #else
1489*4882a593Smuzhiyun static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
1490*4882a593Smuzhiyun 					 struct musb_hw_ep *hw_ep,
1491*4882a593Smuzhiyun 					 struct musb_qh *qh,
1492*4882a593Smuzhiyun 					 struct urb *urb,
1493*4882a593Smuzhiyun 					 size_t len)
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun 	return false;
1496*4882a593Smuzhiyun }
1497*4882a593Smuzhiyun #endif
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
1500*4882a593Smuzhiyun 	defined(CONFIG_USB_TI_CPPI41_DMA)
1501*4882a593Smuzhiyun /* Host side RX (IN) using Mentor DMA works as follows:
1502*4882a593Smuzhiyun 	submit_urb ->
1503*4882a593Smuzhiyun 		- if queue was empty, ProgramEndpoint
1504*4882a593Smuzhiyun 		- first IN token is sent out (by setting ReqPkt)
1505*4882a593Smuzhiyun 	LinuxIsr -> RxReady()
1506*4882a593Smuzhiyun 	/\	=> first packet is received
1507*4882a593Smuzhiyun 	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
1508*4882a593Smuzhiyun 	|		-> DMA Isr (transfer complete) -> RxReady()
1509*4882a593Smuzhiyun 	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1510*4882a593Smuzhiyun 	|		    - if urb not complete, send next IN token (ReqPkt)
1511*4882a593Smuzhiyun 	|			   |		else complete urb.
1512*4882a593Smuzhiyun 	|			   |
1513*4882a593Smuzhiyun 	---------------------------
1514*4882a593Smuzhiyun  *
1515*4882a593Smuzhiyun  * Nuances of mode 1:
1516*4882a593Smuzhiyun  *	For short packets, no ack (+RxPktRdy) is sent automatically
1517*4882a593Smuzhiyun  *	(even if AutoClear is ON)
1518*4882a593Smuzhiyun  *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
1519*4882a593Smuzhiyun  *	automatically => major problem, as collecting the next packet becomes
1520*4882a593Smuzhiyun  *	difficult. Hence mode 1 is not used.
1521*4882a593Smuzhiyun  *
1522*4882a593Smuzhiyun  * REVISIT
1523*4882a593Smuzhiyun  *	All we care about at this driver level is that
1524*4882a593Smuzhiyun  *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1525*4882a593Smuzhiyun  *       (b) termination conditions are: short RX, or buffer full;
1526*4882a593Smuzhiyun  *       (c) fault modes include
1527*4882a593Smuzhiyun  *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1528*4882a593Smuzhiyun  *             (and that endpoint's dma queue stops immediately)
1529*4882a593Smuzhiyun  *           - overflow (full, PLUS more bytes in the terminal packet)
1530*4882a593Smuzhiyun  *
1531*4882a593Smuzhiyun  *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1532*4882a593Smuzhiyun  *	thus be a great candidate for using mode 1 ... for all but the
1533*4882a593Smuzhiyun  *	last packet of one URB's transfer.
1534*4882a593Smuzhiyun  */
1535*4882a593Smuzhiyun static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1536*4882a593Smuzhiyun 				       struct musb_hw_ep *hw_ep,
1537*4882a593Smuzhiyun 				       struct musb_qh *qh,
1538*4882a593Smuzhiyun 				       struct urb *urb,
1539*4882a593Smuzhiyun 				       size_t len)
1540*4882a593Smuzhiyun {
1541*4882a593Smuzhiyun 	struct dma_channel *channel = hw_ep->rx_channel;
1542*4882a593Smuzhiyun 	void __iomem *epio = hw_ep->regs;
1543*4882a593Smuzhiyun 	u16 val;
1544*4882a593Smuzhiyun 	int pipe;
1545*4882a593Smuzhiyun 	bool done;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	pipe = urb->pipe;
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	if (usb_pipeisoc(pipe)) {
1550*4882a593Smuzhiyun 		struct usb_iso_packet_descriptor *d;
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 		d = urb->iso_frame_desc + qh->iso_idx;
1553*4882a593Smuzhiyun 		d->actual_length = len;
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 		/* even if there was an error, we did the dma
1556*4882a593Smuzhiyun 		 * for iso_frame_desc->length
1557*4882a593Smuzhiyun 		 */
1558*4882a593Smuzhiyun 		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1559*4882a593Smuzhiyun 			d->status = 0;
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 		if (++qh->iso_idx >= urb->number_of_packets) {
1562*4882a593Smuzhiyun 			done = true;
1563*4882a593Smuzhiyun 		} else {
1564*4882a593Smuzhiyun 			/* REVISIT: Why ignore return value here? */
1565*4882a593Smuzhiyun 			if (musb_dma_cppi41(hw_ep->musb))
1566*4882a593Smuzhiyun 				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
1567*4882a593Smuzhiyun 							      urb, len);
1568*4882a593Smuzhiyun 			done = false;
1569*4882a593Smuzhiyun 		}
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	} else {
1572*4882a593Smuzhiyun 		/* done if urb buffer is full or short packet is recd */
1573*4882a593Smuzhiyun 		done = (urb->actual_length + len >=
1574*4882a593Smuzhiyun 			urb->transfer_buffer_length
1575*4882a593Smuzhiyun 			|| channel->actual_len < qh->maxpacket
1576*4882a593Smuzhiyun 			|| channel->rx_packet_done);
1577*4882a593Smuzhiyun 	}
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	/* send IN token for next packet, without AUTOREQ */
1580*4882a593Smuzhiyun 	if (!done) {
1581*4882a593Smuzhiyun 		val = musb_readw(epio, MUSB_RXCSR);
1582*4882a593Smuzhiyun 		val |= MUSB_RXCSR_H_REQPKT;
1583*4882a593Smuzhiyun 		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1584*4882a593Smuzhiyun 	}
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	return done;
1587*4882a593Smuzhiyun }
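
/*
 * Editor's illustrative sketch (not built): one mode-0 RX step from the
 * diagram above -- ack the packet (~RxPktRdy), stop DMA (~DmaEnab), and
 * request the next IN token (ReqPkt) if the URB isn't finished.  The
 * helper name is hypothetical; the real driver interleaves this with
 * error and completion handling in musb_host_rx().
 */
#if 0
static void musb_h_rx_ack_and_req(struct musb_hw_ep *hw_ep, bool more)
{
	void __iomem *epio = hw_ep->regs;
	u16 val = musb_readw(epio, MUSB_RXCSR);

	/* ack receive and turn off DMA */
	val &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_DMAENAB);
	musb_writew(epio, MUSB_RXCSR, val);

	/* if the urb isn't complete, send the next IN token */
	if (more)
		musb_writew(epio, MUSB_RXCSR,
			    MUSB_RXCSR_H_WZC_BITS | val | MUSB_RXCSR_H_REQPKT);
}
#endif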
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun /* Disadvantage of using mode 1:
1590*4882a593Smuzhiyun  *	It's basically usable only for mass storage class; essentially all
1591*4882a593Smuzhiyun  *	other protocols also terminate transfers on short packets.
1592*4882a593Smuzhiyun  *
1593*4882a593Smuzhiyun  * Details:
1594*4882a593Smuzhiyun  *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
1595*4882a593Smuzhiyun  *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
1596*4882a593Smuzhiyun  *	to use the extra IN token to grab the last packet using mode 0, then
1597*4882a593Smuzhiyun  *	the problem is that you cannot be sure when the device will send the
1598*4882a593Smuzhiyun  *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
1599*4882a593Smuzhiyun  *	such that it gets lost when RxCSR is re-set at the end of the mode 1
1600*4882a593Smuzhiyun  *	transfer, while sometimes it is recd just a little late so that if you
1601*4882a593Smuzhiyun  *	try to configure for mode 0 soon after the mode 1 transfer is
1602*4882a593Smuzhiyun  *	completed, you will find rxcount 0. Okay, so you might think why not
1603*4882a593Smuzhiyun  *	wait for an interrupt when the pkt is recd. Well, you won't get any!
1604*4882a593Smuzhiyun  */
1605*4882a593Smuzhiyun static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1606*4882a593Smuzhiyun 					  struct musb_hw_ep *hw_ep,
1607*4882a593Smuzhiyun 					  struct musb_qh *qh,
1608*4882a593Smuzhiyun 					  struct urb *urb,
1609*4882a593Smuzhiyun 					  size_t len,
1610*4882a593Smuzhiyun 					  u8 iso_err)
1611*4882a593Smuzhiyun {
1612*4882a593Smuzhiyun 	struct musb *musb = hw_ep->musb;
1613*4882a593Smuzhiyun 	void __iomem *epio = hw_ep->regs;
1614*4882a593Smuzhiyun 	struct dma_channel *channel = hw_ep->rx_channel;
1615*4882a593Smuzhiyun 	u16 rx_count, val;
1616*4882a593Smuzhiyun 	int length, pipe, done;
1617*4882a593Smuzhiyun 	dma_addr_t buf;
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 	rx_count = musb_readw(epio, MUSB_RXCOUNT);
1620*4882a593Smuzhiyun 	pipe = urb->pipe;
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 	if (usb_pipeisoc(pipe)) {
1623*4882a593Smuzhiyun 		int d_status = 0;
1624*4882a593Smuzhiyun 		struct usb_iso_packet_descriptor *d;
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 		d = urb->iso_frame_desc + qh->iso_idx;
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 		if (iso_err) {
1629*4882a593Smuzhiyun 			d_status = -EILSEQ;
1630*4882a593Smuzhiyun 			urb->error_count++;
1631*4882a593Smuzhiyun 		}
1632*4882a593Smuzhiyun 		if (rx_count > d->length) {
1633*4882a593Smuzhiyun 			if (d_status == 0) {
1634*4882a593Smuzhiyun 				d_status = -EOVERFLOW;
1635*4882a593Smuzhiyun 				urb->error_count++;
1636*4882a593Smuzhiyun 			}
1637*4882a593Smuzhiyun 			musb_dbg(musb, "** OVERFLOW %d into %d",
1638*4882a593Smuzhiyun 				rx_count, d->length);
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 			length = d->length;
1641*4882a593Smuzhiyun 		} else
1642*4882a593Smuzhiyun 			length = rx_count;
1643*4882a593Smuzhiyun 		d->status = d_status;
1644*4882a593Smuzhiyun 		buf = urb->transfer_dma + d->offset;
1645*4882a593Smuzhiyun 	} else {
1646*4882a593Smuzhiyun 		length = rx_count;
1647*4882a593Smuzhiyun 		buf = urb->transfer_dma + urb->actual_length;
1648*4882a593Smuzhiyun 	}
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	channel->desired_mode = 0;
1651*4882a593Smuzhiyun #ifdef USE_MODE1
1652*4882a593Smuzhiyun 	/* because of the issue below, mode 1 will
1653*4882a593Smuzhiyun 	 * only rarely behave with correct semantics.
1654*4882a593Smuzhiyun 	 */
1655*4882a593Smuzhiyun 	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
1656*4882a593Smuzhiyun 	    && (urb->transfer_buffer_length - urb->actual_length)
1657*4882a593Smuzhiyun 	    > qh->maxpacket)
1658*4882a593Smuzhiyun 		channel->desired_mode = 1;
1659*4882a593Smuzhiyun 	if (rx_count < hw_ep->max_packet_sz_rx) {
1660*4882a593Smuzhiyun 		length = rx_count;
1661*4882a593Smuzhiyun 		channel->desired_mode = 0;
1662*4882a593Smuzhiyun 	} else {
1663*4882a593Smuzhiyun 		length = urb->transfer_buffer_length;
1664*4882a593Smuzhiyun 	}
1665*4882a593Smuzhiyun #endif
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	/* See comments above on disadvantages of using mode 1 */
1668*4882a593Smuzhiyun 	val = musb_readw(epio, MUSB_RXCSR);
1669*4882a593Smuzhiyun 	val &= ~MUSB_RXCSR_H_REQPKT;
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun 	if (channel->desired_mode == 0)
1672*4882a593Smuzhiyun 		val &= ~MUSB_RXCSR_H_AUTOREQ;
1673*4882a593Smuzhiyun 	else
1674*4882a593Smuzhiyun 		val |= MUSB_RXCSR_H_AUTOREQ;
1675*4882a593Smuzhiyun 	val |= MUSB_RXCSR_DMAENAB;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 	/* autoclear shouldn't be set in high bandwidth */
1678*4882a593Smuzhiyun 	if (qh->hb_mult == 1)
1679*4882a593Smuzhiyun 		val |= MUSB_RXCSR_AUTOCLEAR;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 	/* REVISIT if when actual_length != 0,
1684*4882a593Smuzhiyun 	 * transfer_buffer_length needs to be
1685*4882a593Smuzhiyun 	 * adjusted first...
1686*4882a593Smuzhiyun 	 */
1687*4882a593Smuzhiyun 	done = dma->channel_program(channel, qh->maxpacket,
1688*4882a593Smuzhiyun 				   channel->desired_mode,
1689*4882a593Smuzhiyun 				   buf, length);
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	if (!done) {
1692*4882a593Smuzhiyun 		dma->channel_release(channel);
1693*4882a593Smuzhiyun 		hw_ep->rx_channel = NULL;
1694*4882a593Smuzhiyun 		channel = NULL;
1695*4882a593Smuzhiyun 		val = musb_readw(epio, MUSB_RXCSR);
1696*4882a593Smuzhiyun 		val &= ~(MUSB_RXCSR_DMAENAB
1697*4882a593Smuzhiyun 			 | MUSB_RXCSR_H_AUTOREQ
1698*4882a593Smuzhiyun 			 | MUSB_RXCSR_AUTOCLEAR);
1699*4882a593Smuzhiyun 		musb_writew(epio, MUSB_RXCSR, val);
1700*4882a593Smuzhiyun 	}
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	return done;
1703*4882a593Smuzhiyun }
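
/*
 * Editor's illustrative sketch (not built): the USE_MODE1 eligibility
 * test above as a predicate.  Per the "disadvantage" comment, mode 1
 * only pays off when short packets are forbidden and more than one
 * max-size packet is still outstanding.  The name is hypothetical.
 */
#if 0
static bool musb_h_rx_want_mode1(struct musb_qh *qh, struct urb *urb)
{
	return (urb->transfer_flags & URB_SHORT_NOT_OK) &&
	       (urb->transfer_buffer_length - urb->actual_length) >
			qh->maxpacket;
}
#endif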
1704*4882a593Smuzhiyun #else
1705*4882a593Smuzhiyun static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
1706*4882a593Smuzhiyun 					      struct musb_hw_ep *hw_ep,
1707*4882a593Smuzhiyun 					      struct musb_qh *qh,
1708*4882a593Smuzhiyun 					      struct urb *urb,
1709*4882a593Smuzhiyun 					      size_t len)
1710*4882a593Smuzhiyun {
1711*4882a593Smuzhiyun 	return false;
1712*4882a593Smuzhiyun }
1713*4882a593Smuzhiyun 
1714*4882a593Smuzhiyun static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
1715*4882a593Smuzhiyun 						 struct musb_hw_ep *hw_ep,
1716*4882a593Smuzhiyun 						 struct musb_qh *qh,
1717*4882a593Smuzhiyun 						 struct urb *urb,
1718*4882a593Smuzhiyun 						 size_t len,
1719*4882a593Smuzhiyun 						 u8 iso_err)
1720*4882a593Smuzhiyun {
1721*4882a593Smuzhiyun 	return false;
1722*4882a593Smuzhiyun }
1723*4882a593Smuzhiyun #endif
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun /*
1726*4882a593Smuzhiyun  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1727*4882a593Smuzhiyun  * and high-bandwidth IN transfer cases.
1728*4882a593Smuzhiyun  */
1729*4882a593Smuzhiyun void musb_host_rx(struct musb *musb, u8 epnum)
1730*4882a593Smuzhiyun {
1731*4882a593Smuzhiyun 	struct urb		*urb;
1732*4882a593Smuzhiyun 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1733*4882a593Smuzhiyun 	struct dma_controller	*c = musb->dma_controller;
1734*4882a593Smuzhiyun 	void __iomem		*epio = hw_ep->regs;
1735*4882a593Smuzhiyun 	struct musb_qh		*qh = hw_ep->in_qh;
1736*4882a593Smuzhiyun 	size_t			xfer_len;
1737*4882a593Smuzhiyun 	void __iomem		*mbase = musb->mregs;
1738*4882a593Smuzhiyun 	u16			rx_csr, val;
1739*4882a593Smuzhiyun 	bool			iso_err = false;
1740*4882a593Smuzhiyun 	bool			done = false;
1741*4882a593Smuzhiyun 	u32			status;
1742*4882a593Smuzhiyun 	struct dma_channel	*dma;
1743*4882a593Smuzhiyun 	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	musb_ep_select(mbase, epnum);
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	urb = next_urb(qh);
1748*4882a593Smuzhiyun 	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1749*4882a593Smuzhiyun 	status = 0;
1750*4882a593Smuzhiyun 	xfer_len = 0;
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 	rx_csr = musb_readw(epio, MUSB_RXCSR);
1753*4882a593Smuzhiyun 	val = rx_csr;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	if (unlikely(!urb)) {
1756*4882a593Smuzhiyun 		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1757*4882a593Smuzhiyun 		 * usbtest #11 (unlinks) triggers it regularly, sometimes
1758*4882a593Smuzhiyun 		 * with fifo full.  (Only with DMA??)
1759*4882a593Smuzhiyun 		 */
1760*4882a593Smuzhiyun 		musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
1761*4882a593Smuzhiyun 			epnum, val, musb_readw(epio, MUSB_RXCOUNT));
1762*4882a593Smuzhiyun 		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1763*4882a593Smuzhiyun 		return;
1764*4882a593Smuzhiyun 	}
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	trace_musb_urb_rx(musb, urb);
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	/* check for errors, concurrent stall & unlink is not really
1769*4882a593Smuzhiyun 	 * handled yet! */
1770*4882a593Smuzhiyun 	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1771*4882a593Smuzhiyun 		musb_dbg(musb, "RX end %d STALL", epnum);
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 		/* stall; record URB status */
1774*4882a593Smuzhiyun 		status = -EPIPE;
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1777*4882a593Smuzhiyun 		dev_err(musb->controller, "ep%d RX three-strikes error", epnum);
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 		/*
1780*4882a593Smuzhiyun 		 * The three-strikes error could only happen when the USB
1781*4882a593Smuzhiyun 		 * device is not accessible, for example detached or powered
1782*4882a593Smuzhiyun 		 * off. So return the fatal error -ESHUTDOWN so hopefully the
1783*4882a593Smuzhiyun 		 * USB device drivers won't immediately resubmit the same URB.
1784*4882a593Smuzhiyun 		 */
1785*4882a593Smuzhiyun 		status = -ESHUTDOWN;
1786*4882a593Smuzhiyun 		musb_writeb(epio, MUSB_RXINTERVAL, 0);
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun 		rx_csr &= ~MUSB_RXCSR_H_ERROR;
1789*4882a593Smuzhiyun 		musb_writew(epio, MUSB_RXCSR, rx_csr);
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1794*4882a593Smuzhiyun 			musb_dbg(musb, "RX end %d NAK timeout", epnum);
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 			/* NOTE: NAKing is *NOT* an error, so we want to
1797*4882a593Smuzhiyun 			 * continue.  Except ... if there's a request for
1798*4882a593Smuzhiyun 			 * another QH, use that instead of starving it.
1799*4882a593Smuzhiyun 			 *
1800*4882a593Smuzhiyun 			 * Devices like Ethernet and serial adapters keep
1801*4882a593Smuzhiyun 			 * reads posted at all times, which will starve
1802*4882a593Smuzhiyun 			 * other devices without this logic.
1803*4882a593Smuzhiyun 			 */
1804*4882a593Smuzhiyun 			if (usb_pipebulk(urb->pipe)
1805*4882a593Smuzhiyun 					&& qh->mux == 1
1806*4882a593Smuzhiyun 					&& !list_is_singular(&musb->in_bulk)) {
1807*4882a593Smuzhiyun 				musb_bulk_nak_timeout(musb, hw_ep, 1);
1808*4882a593Smuzhiyun 				return;
1809*4882a593Smuzhiyun 			}
1810*4882a593Smuzhiyun 			musb_ep_select(mbase, epnum);
1811*4882a593Smuzhiyun 			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1812*4882a593Smuzhiyun 			rx_csr &= ~MUSB_RXCSR_DATAERROR;
1813*4882a593Smuzhiyun 			musb_writew(epio, MUSB_RXCSR, rx_csr);
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 			goto finish;
1816*4882a593Smuzhiyun 		} else {
1817*4882a593Smuzhiyun 			musb_dbg(musb, "RX end %d ISO data error", epnum);
1818*4882a593Smuzhiyun 			/* packet error reported later */
1819*4882a593Smuzhiyun 			iso_err = true;
1820*4882a593Smuzhiyun 		}
1821*4882a593Smuzhiyun 	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1822*4882a593Smuzhiyun 		musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
1823*4882a593Smuzhiyun 				epnum);
1824*4882a593Smuzhiyun 		status = -EPROTO;
1825*4882a593Smuzhiyun 	}
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun 	/* faults abort the transfer */
1828*4882a593Smuzhiyun 	if (status) {
1829*4882a593Smuzhiyun 		/* clean up dma and collect transfer count */
1830*4882a593Smuzhiyun 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1831*4882a593Smuzhiyun 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1832*4882a593Smuzhiyun 			musb->dma_controller->channel_abort(dma);
1833*4882a593Smuzhiyun 			xfer_len = dma->actual_len;
1834*4882a593Smuzhiyun 		}
1835*4882a593Smuzhiyun 		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1836*4882a593Smuzhiyun 		musb_writeb(epio, MUSB_RXINTERVAL, 0);
1837*4882a593Smuzhiyun 		done = true;
1838*4882a593Smuzhiyun 		goto finish;
1839*4882a593Smuzhiyun 	}
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1842*4882a593Smuzhiyun 		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1843*4882a593Smuzhiyun 		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1844*4882a593Smuzhiyun 		goto finish;
1845*4882a593Smuzhiyun 	}
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 	/* thorough shutdown for now ... given more precise fault handling
1848*4882a593Smuzhiyun 	 * and better queueing support, we might keep a DMA pipeline going
1849*4882a593Smuzhiyun 	 * while processing this irq for earlier completions.
1850*4882a593Smuzhiyun 	 */
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
1853*4882a593Smuzhiyun 	if (!musb_dma_inventra(musb) && !musb_dma_ux500(musb) &&
1854*4882a593Smuzhiyun 	    (rx_csr & MUSB_RXCSR_H_REQPKT)) {
1855*4882a593Smuzhiyun 		/* REVISIT this happened for a while on some short reads...
1856*4882a593Smuzhiyun 		 * the cleanup still needs investigation... looks bad...
1857*4882a593Smuzhiyun 		 * and also duplicates dma cleanup code above ... plus,
1858*4882a593Smuzhiyun 		 * shouldn't this be the "half full" double buffer case?
1859*4882a593Smuzhiyun 		 */
1860*4882a593Smuzhiyun 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1861*4882a593Smuzhiyun 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1862*4882a593Smuzhiyun 			musb->dma_controller->channel_abort(dma);
1863*4882a593Smuzhiyun 			xfer_len = dma->actual_len;
1864*4882a593Smuzhiyun 			done = true;
1865*4882a593Smuzhiyun 		}
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 		musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
1868*4882a593Smuzhiyun 				xfer_len, dma ? ", dma" : "");
1869*4882a593Smuzhiyun 		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 		musb_ep_select(mbase, epnum);
1872*4882a593Smuzhiyun 		musb_writew(epio, MUSB_RXCSR,
1873*4882a593Smuzhiyun 				MUSB_RXCSR_H_WZC_BITS | rx_csr);
1874*4882a593Smuzhiyun 	}
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1877*4882a593Smuzhiyun 		xfer_len = dma->actual_len;
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun 		val &= ~(MUSB_RXCSR_DMAENAB
1880*4882a593Smuzhiyun 			| MUSB_RXCSR_H_AUTOREQ
1881*4882a593Smuzhiyun 			| MUSB_RXCSR_AUTOCLEAR
1882*4882a593Smuzhiyun 			| MUSB_RXCSR_RXPKTRDY);
1883*4882a593Smuzhiyun 		musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun 		if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1886*4882a593Smuzhiyun 		    musb_dma_cppi41(musb)) {
1887*4882a593Smuzhiyun 			    done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
1888*4882a593Smuzhiyun 			    musb_dbg(hw_ep->musb,
1889*4882a593Smuzhiyun 				    "ep %d dma %s, rxcsr %04x, rxcount %d",
1890*4882a593Smuzhiyun 				    epnum, done ? "off" : "reset",
1891*4882a593Smuzhiyun 				    musb_readw(epio, MUSB_RXCSR),
1892*4882a593Smuzhiyun 				    musb_readw(epio, MUSB_RXCOUNT));
1893*4882a593Smuzhiyun 		} else {
1894*4882a593Smuzhiyun 			done = true;
1895*4882a593Smuzhiyun 		}
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun 	} else if (urb->status == -EINPROGRESS) {
1898*4882a593Smuzhiyun 		/* if no errors, be sure a packet is ready for unloading */
1899*4882a593Smuzhiyun 		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1900*4882a593Smuzhiyun 			status = -EPROTO;
1901*4882a593Smuzhiyun 			ERR("Rx interrupt with no errors or packet!\n");
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 			/* FIXME this is another "SHOULD NEVER HAPPEN" */
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun /* SCRUB (RX) */
1906*4882a593Smuzhiyun 			/* do the proper sequence to abort the transfer */
1907*4882a593Smuzhiyun 			musb_ep_select(mbase, epnum);
1908*4882a593Smuzhiyun 			val &= ~MUSB_RXCSR_H_REQPKT;
1909*4882a593Smuzhiyun 			musb_writew(epio, MUSB_RXCSR, val);
1910*4882a593Smuzhiyun 			goto finish;
1911*4882a593Smuzhiyun 		}
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 		/* we are expecting IN packets */
1914*4882a593Smuzhiyun 		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
1915*4882a593Smuzhiyun 		    musb_dma_cppi41(musb)) && dma) {
1916*4882a593Smuzhiyun 			musb_dbg(hw_ep->musb,
1917*4882a593Smuzhiyun 				"RX%d count %d, buffer 0x%llx len %d/%d",
1918*4882a593Smuzhiyun 				epnum, musb_readw(epio, MUSB_RXCOUNT),
1919*4882a593Smuzhiyun 				(unsigned long long) urb->transfer_dma
1920*4882a593Smuzhiyun 				+ urb->actual_length,
1921*4882a593Smuzhiyun 				qh->offset,
1922*4882a593Smuzhiyun 				urb->transfer_buffer_length);
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 			if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
1925*4882a593Smuzhiyun 							   xfer_len, iso_err))
1926*4882a593Smuzhiyun 				goto finish;
1927*4882a593Smuzhiyun 			else
1928*4882a593Smuzhiyun 				dev_err(musb->controller, "error: rx_dma failed\n");
1929*4882a593Smuzhiyun 		}
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 		if (!dma) {
1932*4882a593Smuzhiyun 			unsigned int received_len;
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 			/* Unmap the buffer so that CPU can use it */
1935*4882a593Smuzhiyun 			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 			/*
1938*4882a593Smuzhiyun 			 * We need to map sg if the transfer_buffer is
1939*4882a593Smuzhiyun 			 * NULL.
1940*4882a593Smuzhiyun 			 */
1941*4882a593Smuzhiyun 			if (!urb->transfer_buffer) {
1942*4882a593Smuzhiyun 				qh->use_sg = true;
1943*4882a593Smuzhiyun 				sg_miter_start(&qh->sg_miter, urb->sg, 1,
1944*4882a593Smuzhiyun 						sg_flags);
1945*4882a593Smuzhiyun 			}
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 			if (qh->use_sg) {
1948*4882a593Smuzhiyun 				if (!sg_miter_next(&qh->sg_miter)) {
1949*4882a593Smuzhiyun 					dev_err(musb->controller, "error: sg list empty\n");
1950*4882a593Smuzhiyun 					sg_miter_stop(&qh->sg_miter);
1951*4882a593Smuzhiyun 					status = -EINVAL;
1952*4882a593Smuzhiyun 					done = true;
1953*4882a593Smuzhiyun 					goto finish;
1954*4882a593Smuzhiyun 				}
1955*4882a593Smuzhiyun 				urb->transfer_buffer = qh->sg_miter.addr;
1956*4882a593Smuzhiyun 				received_len = urb->actual_length;
1957*4882a593Smuzhiyun 				qh->offset = 0x0;
1958*4882a593Smuzhiyun 				done = musb_host_packet_rx(musb, urb, epnum,
1959*4882a593Smuzhiyun 						iso_err);
1960*4882a593Smuzhiyun 				/* Calculate the number of bytes received */
1961*4882a593Smuzhiyun 				received_len = urb->actual_length -
1962*4882a593Smuzhiyun 					received_len;
1963*4882a593Smuzhiyun 				qh->sg_miter.consumed = received_len;
1964*4882a593Smuzhiyun 				sg_miter_stop(&qh->sg_miter);
1965*4882a593Smuzhiyun 			} else {
1966*4882a593Smuzhiyun 				done = musb_host_packet_rx(musb, urb,
1967*4882a593Smuzhiyun 						epnum, iso_err);
1968*4882a593Smuzhiyun 			}
1969*4882a593Smuzhiyun 			musb_dbg(musb, "read %spacket", done ? "last " : "");
1970*4882a593Smuzhiyun 		}
1971*4882a593Smuzhiyun 	}
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun finish:
1974*4882a593Smuzhiyun 	urb->actual_length += xfer_len;
1975*4882a593Smuzhiyun 	qh->offset += xfer_len;
1976*4882a593Smuzhiyun 	if (done) {
1977*4882a593Smuzhiyun 		if (qh->use_sg) {
1978*4882a593Smuzhiyun 			qh->use_sg = false;
1979*4882a593Smuzhiyun 			urb->transfer_buffer = NULL;
1980*4882a593Smuzhiyun 		}
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 		if (urb->status == -EINPROGRESS)
1983*4882a593Smuzhiyun 			urb->status = status;
1984*4882a593Smuzhiyun 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1985*4882a593Smuzhiyun 	}
1986*4882a593Smuzhiyun }
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1989*4882a593Smuzhiyun  * the software schedule associates multiple such nodes with a given
1990*4882a593Smuzhiyun  * host side hardware endpoint + direction; scheduling may activate
1991*4882a593Smuzhiyun  * that hardware endpoint.
1992*4882a593Smuzhiyun  */
1993*4882a593Smuzhiyun static int musb_schedule(
1994*4882a593Smuzhiyun 	struct musb		*musb,
1995*4882a593Smuzhiyun 	struct musb_qh		*qh,
1996*4882a593Smuzhiyun 	int			is_in)
1997*4882a593Smuzhiyun {
1998*4882a593Smuzhiyun 	int			idle = 0;
1999*4882a593Smuzhiyun 	int			best_diff;
2000*4882a593Smuzhiyun 	int			best_end, epnum;
2001*4882a593Smuzhiyun 	struct musb_hw_ep	*hw_ep = NULL;
2002*4882a593Smuzhiyun 	struct list_head	*head = NULL;
2003*4882a593Smuzhiyun 	u8			toggle;
2004*4882a593Smuzhiyun 	u8			txtype;
2005*4882a593Smuzhiyun 	struct urb		*urb = next_urb(qh);
2006*4882a593Smuzhiyun 
2007*4882a593Smuzhiyun 	/* use fixed hardware for control and bulk */
2008*4882a593Smuzhiyun 	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
2009*4882a593Smuzhiyun 		head = &musb->control;
2010*4882a593Smuzhiyun 		hw_ep = musb->control_ep;
2011*4882a593Smuzhiyun 		goto success;
2012*4882a593Smuzhiyun 	}
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 	/* else, periodic transfers get muxed to other endpoints */
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun 	/*
2017*4882a593Smuzhiyun 	 * We know this qh hasn't been scheduled, so all we need to do
2018*4882a593Smuzhiyun 	 * is choose which hardware endpoint to put it on ...
2019*4882a593Smuzhiyun 	 *
2020*4882a593Smuzhiyun 	 * REVISIT what we really want here is a regular schedule tree
2021*4882a593Smuzhiyun 	 * like e.g. OHCI uses.
2022*4882a593Smuzhiyun 	 */
2023*4882a593Smuzhiyun 	best_diff = 4096;
2024*4882a593Smuzhiyun 	best_end = -1;
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 	for (epnum = 1, hw_ep = musb->endpoints + 1;
2027*4882a593Smuzhiyun 			epnum < musb->nr_endpoints;
2028*4882a593Smuzhiyun 			epnum++, hw_ep++) {
2029*4882a593Smuzhiyun 		int	diff;
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun 		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
2032*4882a593Smuzhiyun 			continue;
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 		if (hw_ep == musb->bulk_ep)
2035*4882a593Smuzhiyun 			continue;
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun 		if (is_in)
2038*4882a593Smuzhiyun 			diff = hw_ep->max_packet_sz_rx;
2039*4882a593Smuzhiyun 		else
2040*4882a593Smuzhiyun 			diff = hw_ep->max_packet_sz_tx;
2041*4882a593Smuzhiyun 		diff -= (qh->maxpacket * qh->hb_mult);
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 		if (diff >= 0 && best_diff > diff) {
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun 			/*
2046*4882a593Smuzhiyun 			 * Mentor controller has a bug in that if we schedule
2047*4882a593Smuzhiyun 			 * a BULK Tx transfer on an endpoint that had earlier
2048*4882a593Smuzhiyun 			 * handled ISOC then the BULK transfer has to start on
2049*4882a593Smuzhiyun 			 * a zero toggle.  If the BULK transfer starts on a 1
2050*4882a593Smuzhiyun 			 * toggle then this transfer will fail as the mentor
2051*4882a593Smuzhiyun 			 * controller starts the Bulk transfer on a 0 toggle
2052*4882a593Smuzhiyun 			 * irrespective of the programming of the toggle bits
2053*4882a593Smuzhiyun 			 * in the TXCSR register.  Check for this condition
2054*4882a593Smuzhiyun 			 * while allocating the EP for a Tx Bulk transfer.  If
2055*4882a593Smuzhiyun 			 * so skip this EP.
2056*4882a593Smuzhiyun 			 */
2057*4882a593Smuzhiyun 			hw_ep = musb->endpoints + epnum;
2058*4882a593Smuzhiyun 			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
2059*4882a593Smuzhiyun 			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
2060*4882a593Smuzhiyun 					>> 4) & 0x3;
2061*4882a593Smuzhiyun 			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
2062*4882a593Smuzhiyun 				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
2063*4882a593Smuzhiyun 				continue;
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 			best_diff = diff;
2066*4882a593Smuzhiyun 			best_end = epnum;
2067*4882a593Smuzhiyun 		}
2068*4882a593Smuzhiyun 	}
2069*4882a593Smuzhiyun 	/* use bulk reserved ep1 if no other ep is free */
2070*4882a593Smuzhiyun 	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
2071*4882a593Smuzhiyun 		hw_ep = musb->bulk_ep;
2072*4882a593Smuzhiyun 		if (is_in)
2073*4882a593Smuzhiyun 			head = &musb->in_bulk;
2074*4882a593Smuzhiyun 		else
2075*4882a593Smuzhiyun 			head = &musb->out_bulk;
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun 		/* Enable the bulk RX/TX NAK timeout scheme when bulk requests are
2078*4882a593Smuzhiyun 		 * multiplexed.  This scheme does not work in the high speed to
2079*4882a593Smuzhiyun 		 * full speed scenario, since no NAK interrupts arrive from a
2080*4882a593Smuzhiyun 		 * full speed device connected through a high speed hub.
2081*4882a593Smuzhiyun 		 * The NAK timeout interval is 8 (128 uframes, 16 ms) for HS
2082*4882a593Smuzhiyun 		 * and 4 (8 frames, 8 ms) for FS devices.
2083*4882a593Smuzhiyun 		 */
2084*4882a593Smuzhiyun 		if (qh->dev)
2085*4882a593Smuzhiyun 			qh->intv_reg =
2086*4882a593Smuzhiyun 				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
2087*4882a593Smuzhiyun 		goto success;
2088*4882a593Smuzhiyun 	} else if (best_end < 0) {
2089*4882a593Smuzhiyun 		dev_err(musb->controller,
2090*4882a593Smuzhiyun 				"%s hwep alloc failed for %dx%d\n",
2091*4882a593Smuzhiyun 				musb_ep_xfertype_string(qh->type),
2092*4882a593Smuzhiyun 				qh->hb_mult, qh->maxpacket);
2093*4882a593Smuzhiyun 		return -ENOSPC;
2094*4882a593Smuzhiyun 	}
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	idle = 1;
2097*4882a593Smuzhiyun 	qh->mux = 0;
2098*4882a593Smuzhiyun 	hw_ep = musb->endpoints + best_end;
2099*4882a593Smuzhiyun 	musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
2100*4882a593Smuzhiyun success:
2101*4882a593Smuzhiyun 	if (head) {
2102*4882a593Smuzhiyun 		idle = list_empty(head);
2103*4882a593Smuzhiyun 		list_add_tail(&qh->ring, head);
2104*4882a593Smuzhiyun 		qh->mux = 1;
2105*4882a593Smuzhiyun 	}
2106*4882a593Smuzhiyun 	qh->hw_ep = hw_ep;
2107*4882a593Smuzhiyun 	qh->hep->hcpriv = qh;
2108*4882a593Smuzhiyun 	if (idle)
2109*4882a593Smuzhiyun 		musb_start_urb(musb, is_in, qh);
2110*4882a593Smuzhiyun 	return 0;
2111*4882a593Smuzhiyun }
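
/*
 * Editor's illustrative sketch (not built): the best-fit test from the
 * endpoint scan above.  A hardware endpoint qualifies when its FIFO can
 * hold the (possibly high-bandwidth) packet, and the tightest fit so
 * far wins.  The helper name is hypothetical.
 */
#if 0
static bool musb_hwep_fits_better(int fifo_size, struct musb_qh *qh,
				  int best_diff)
{
	int diff = fifo_size - qh->maxpacket * qh->hb_mult;

	return diff >= 0 && diff < best_diff;
}
#endif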
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun static int musb_urb_enqueue(
2114*4882a593Smuzhiyun 	struct usb_hcd			*hcd,
2115*4882a593Smuzhiyun 	struct urb			*urb,
2116*4882a593Smuzhiyun 	gfp_t				mem_flags)
2117*4882a593Smuzhiyun {
2118*4882a593Smuzhiyun 	unsigned long			flags;
2119*4882a593Smuzhiyun 	struct musb			*musb = hcd_to_musb(hcd);
2120*4882a593Smuzhiyun 	struct usb_host_endpoint	*hep = urb->ep;
2121*4882a593Smuzhiyun 	struct musb_qh			*qh;
2122*4882a593Smuzhiyun 	struct usb_endpoint_descriptor	*epd = &hep->desc;
2123*4882a593Smuzhiyun 	int				ret;
2124*4882a593Smuzhiyun 	unsigned			type_reg;
2125*4882a593Smuzhiyun 	unsigned			interval;
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun 	/* host role must be active */
2128*4882a593Smuzhiyun 	if (!is_host_active(musb) || !musb->is_active)
2129*4882a593Smuzhiyun 		return -ENODEV;
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	trace_musb_urb_enq(musb, urb);
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	spin_lock_irqsave(&musb->lock, flags);
2134*4882a593Smuzhiyun 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
2135*4882a593Smuzhiyun 	qh = ret ? NULL : hep->hcpriv;
2136*4882a593Smuzhiyun 	if (qh)
2137*4882a593Smuzhiyun 		urb->hcpriv = qh;
2138*4882a593Smuzhiyun 	spin_unlock_irqrestore(&musb->lock, flags);
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun 	/* DMA mapping was already done, if needed, and this urb is on
2141*4882a593Smuzhiyun 	 * hep->urb_list now ... so we're done, unless hep wasn't yet
2142*4882a593Smuzhiyun 	 * scheduled onto a live qh.
2143*4882a593Smuzhiyun 	 *
2144*4882a593Smuzhiyun 	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
2145*4882a593Smuzhiyun 	 * disabled, testing for empty qh->ring and avoiding qh setup costs
2146*4882a593Smuzhiyun 	 * except for the first urb queued after a config change.
2147*4882a593Smuzhiyun 	 */
2148*4882a593Smuzhiyun 	if (qh || ret)
2149*4882a593Smuzhiyun 		return ret;
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	/* Allocate and initialize qh, minimizing the work done each time
2152*4882a593Smuzhiyun 	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
2153*4882a593Smuzhiyun 	 *
2154*4882a593Smuzhiyun 	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
2155*4882a593Smuzhiyun 	 * for bugs in other kernel code to break this driver...
2156*4882a593Smuzhiyun 	 */
2157*4882a593Smuzhiyun 	qh = kzalloc(sizeof *qh, mem_flags);
2158*4882a593Smuzhiyun 	if (!qh) {
2159*4882a593Smuzhiyun 		spin_lock_irqsave(&musb->lock, flags);
2160*4882a593Smuzhiyun 		usb_hcd_unlink_urb_from_ep(hcd, urb);
2161*4882a593Smuzhiyun 		spin_unlock_irqrestore(&musb->lock, flags);
2162*4882a593Smuzhiyun 		return -ENOMEM;
2163*4882a593Smuzhiyun 	}
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 	qh->hep = hep;
2166*4882a593Smuzhiyun 	qh->dev = urb->dev;
2167*4882a593Smuzhiyun 	INIT_LIST_HEAD(&qh->ring);
2168*4882a593Smuzhiyun 	qh->is_ready = 1;
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	qh->maxpacket = usb_endpoint_maxp(epd);
2171*4882a593Smuzhiyun 	qh->type = usb_endpoint_type(epd);
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
2174*4882a593Smuzhiyun 	 * Some musb cores don't support high bandwidth ISO transfers; and
2175*4882a593Smuzhiyun 	 * we don't (yet!) support high bandwidth interrupt transfers.
2176*4882a593Smuzhiyun 	 */
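	/* Worked example (editor's note): wMaxPacketSize 0x1400 has bits
	 * 12:11 = 0b10, so usb_endpoint_maxp_mult() returns 3 -- three
	 * 1024-byte (0x400) transactions per microframe.
	 */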
2177*4882a593Smuzhiyun 	qh->hb_mult = usb_endpoint_maxp_mult(epd);
2178*4882a593Smuzhiyun 	if (qh->hb_mult > 1) {
2179*4882a593Smuzhiyun 		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 		if (ok)
2182*4882a593Smuzhiyun 			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
2183*4882a593Smuzhiyun 				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
2184*4882a593Smuzhiyun 		if (!ok) {
2185*4882a593Smuzhiyun 			dev_err(musb->controller,
2186*4882a593Smuzhiyun 				"high bandwidth %s (%dx%d) not supported\n",
2187*4882a593Smuzhiyun 				musb_ep_xfertype_string(qh->type),
2188*4882a593Smuzhiyun 				qh->hb_mult, qh->maxpacket & 0x7ff);
2189*4882a593Smuzhiyun 			ret = -EMSGSIZE;
2190*4882a593Smuzhiyun 			goto done;
2191*4882a593Smuzhiyun 		}
2192*4882a593Smuzhiyun 		qh->maxpacket &= 0x7ff;
2193*4882a593Smuzhiyun 	}
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	qh->epnum = usb_endpoint_num(epd);
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
2198*4882a593Smuzhiyun 	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun 	/* precompute rxtype/txtype/type0 register */
2201*4882a593Smuzhiyun 	type_reg = (qh->type << 4) | qh->epnum;
2202*4882a593Smuzhiyun 	switch (urb->dev->speed) {
2203*4882a593Smuzhiyun 	case USB_SPEED_LOW:
2204*4882a593Smuzhiyun 		type_reg |= 0xc0;
2205*4882a593Smuzhiyun 		break;
2206*4882a593Smuzhiyun 	case USB_SPEED_FULL:
2207*4882a593Smuzhiyun 		type_reg |= 0x80;
2208*4882a593Smuzhiyun 		break;
2209*4882a593Smuzhiyun 	default:
2210*4882a593Smuzhiyun 		type_reg |= 0x40;
2211*4882a593Smuzhiyun 	}
2212*4882a593Smuzhiyun 	qh->type_reg = type_reg;
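
	/*
	 * Example: a bulk OUT urb to endpoint 2 of a full-speed device gets
	 * type_reg = (USB_ENDPOINT_XFER_BULK << 4) | 2 | 0x80 = 0xa2:
	 * speed in bits 7:6, protocol in bits 5:4, and the target endpoint
	 * number in the low bits, matching the TXTYPE/RXTYPE layout.
	 */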
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	/* Precompute RXINTERVAL/TXINTERVAL register */
2215*4882a593Smuzhiyun 	switch (qh->type) {
2216*4882a593Smuzhiyun 	case USB_ENDPOINT_XFER_INT:
2217*4882a593Smuzhiyun 		/*
2218*4882a593Smuzhiyun 		 * Full/low speeds use the linear encoding,
2219*4882a593Smuzhiyun 		 * high speed uses the logarithmic encoding.
2220*4882a593Smuzhiyun 		 */
2221*4882a593Smuzhiyun 		if (urb->dev->speed <= USB_SPEED_FULL) {
2222*4882a593Smuzhiyun 			interval = max_t(u8, epd->bInterval, 1);
2223*4882a593Smuzhiyun 			break;
2224*4882a593Smuzhiyun 		}
2225*4882a593Smuzhiyun 		fallthrough;
2226*4882a593Smuzhiyun 	case USB_ENDPOINT_XFER_ISOC:
2227*4882a593Smuzhiyun 		/* ISO always uses logarithmic encoding */
2228*4882a593Smuzhiyun 		interval = min_t(u8, epd->bInterval, 16);
2229*4882a593Smuzhiyun 		break;
2230*4882a593Smuzhiyun 	default:
2231*4882a593Smuzhiyun 		/* REVISIT we actually want to use NAK limits, hinting to the
2232*4882a593Smuzhiyun 		 * transfer scheduling logic to try some other qh, e.g. try
2233*4882a593Smuzhiyun 		 * for 2 msec first:
2234*4882a593Smuzhiyun 		 *
2235*4882a593Smuzhiyun 		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
2236*4882a593Smuzhiyun 		 *
2237*4882a593Smuzhiyun 		 * The downside of disabling this is that transfer scheduling
2238*4882a593Smuzhiyun 		 * gets VERY unfair for nonperiodic transfers; a misbehaving
2239*4882a593Smuzhiyun 		 * peripheral could make that hurt.  That's perfectly normal
2240*4882a593Smuzhiyun 		 * for reads from network or serial adapters ... so we have
2241*4882a593Smuzhiyun 		 * partial NAKlimit support for bulk RX.
2242*4882a593Smuzhiyun 		 *
2243*4882a593Smuzhiyun 		 * The upside of disabling it is simpler transfer scheduling.
2244*4882a593Smuzhiyun 		 */
2245*4882a593Smuzhiyun 		interval = 0;
2246*4882a593Smuzhiyun 	}
2247*4882a593Smuzhiyun 	qh->intv_reg = interval;
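
	/*
	 * Example: a high-speed interrupt endpoint with bInterval = 4 takes
	 * the logarithmic path, giving one poll every 2^(4-1) = 8
	 * microframes (1 ms), while a full-speed endpoint with the same
	 * bInterval uses the linear encoding and is polled every 4 frames.
	 */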
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	/* precompute addressing for external hub/tt ports */
2250*4882a593Smuzhiyun 	if (musb->is_multipoint) {
2251*4882a593Smuzhiyun 		struct usb_device	*parent = urb->dev->parent;
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 		if (parent != hcd->self.root_hub) {
2254*4882a593Smuzhiyun 			qh->h_addr_reg = (u8) parent->devnum;
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 			/* set up tt info if needed */
2257*4882a593Smuzhiyun 			if (urb->dev->tt) {
2258*4882a593Smuzhiyun 				qh->h_port_reg = (u8) urb->dev->ttport;
2259*4882a593Smuzhiyun 				if (urb->dev->tt->hub)
2260*4882a593Smuzhiyun 					qh->h_addr_reg =
2261*4882a593Smuzhiyun 						(u8) urb->dev->tt->hub->devnum;
2262*4882a593Smuzhiyun 				if (urb->dev->tt->multi)
2263*4882a593Smuzhiyun 					qh->h_addr_reg |= 0x80;
2264*4882a593Smuzhiyun 			}
2265*4882a593Smuzhiyun 		}
2266*4882a593Smuzhiyun 	}
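
	/*
	 * Example: a full-speed device on port 2 of a high-speed hub at
	 * address 3 ends up with h_addr_reg = 3 and h_port_reg = 2, which
	 * lets the controller route split transactions through that hub's
	 * transaction translator; the 0x80 flag marks a multi-TT hub.
	 */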
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 	/* Invariant: hep->hcpriv is NULL or the qh that's already scheduled.
2269*4882a593Smuzhiyun 	 * Until we get real DMA queues (with an entry for each urb/buffer),
2270*4882a593Smuzhiyun 	 * we only have work to do in the former case.
2271*4882a593Smuzhiyun 	 */
2272*4882a593Smuzhiyun 	spin_lock_irqsave(&musb->lock, flags);
2273*4882a593Smuzhiyun 	if (hep->hcpriv || !next_urb(qh)) {
2274*4882a593Smuzhiyun 		/* some concurrent activity submitted another urb to hep...
2275*4882a593Smuzhiyun 		 * odd, rare, error prone, but legal.
2276*4882a593Smuzhiyun 		 */
2277*4882a593Smuzhiyun 		kfree(qh);
2278*4882a593Smuzhiyun 		qh = NULL;
2279*4882a593Smuzhiyun 		ret = 0;
2280*4882a593Smuzhiyun 	} else
2281*4882a593Smuzhiyun 		ret = musb_schedule(musb, qh,
2282*4882a593Smuzhiyun 				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	if (ret == 0) {
2285*4882a593Smuzhiyun 		urb->hcpriv = qh;
2286*4882a593Smuzhiyun 		/* FIXME set urb->start_frame for iso/intr, it's tested in
2287*4882a593Smuzhiyun 		 * musb_start_urb(), but otherwise only konicawc cares ...
2288*4882a593Smuzhiyun 		 */
2289*4882a593Smuzhiyun 	}
2290*4882a593Smuzhiyun 	spin_unlock_irqrestore(&musb->lock, flags);
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun done:
2293*4882a593Smuzhiyun 	if (ret != 0) {
2294*4882a593Smuzhiyun 		spin_lock_irqsave(&musb->lock, flags);
2295*4882a593Smuzhiyun 		usb_hcd_unlink_urb_from_ep(hcd, urb);
2296*4882a593Smuzhiyun 		spin_unlock_irqrestore(&musb->lock, flags);
2297*4882a593Smuzhiyun 		kfree(qh);
2298*4882a593Smuzhiyun 	}
2299*4882a593Smuzhiyun 	return ret;
2300*4882a593Smuzhiyun }
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun /*
2304*4882a593Smuzhiyun  * Abort a transfer that's at the head of a hardware queue.
2305*4882a593Smuzhiyun  * Called with the controller locked and irqs blocked.
2306*4882a593Smuzhiyun  * The hardware queue then advances to the next transfer, unless prevented.
2307*4882a593Smuzhiyun  */
2308*4882a593Smuzhiyun static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
2309*4882a593Smuzhiyun {
2310*4882a593Smuzhiyun 	struct musb_hw_ep	*ep = qh->hw_ep;
2311*4882a593Smuzhiyun 	struct musb		*musb = ep->musb;
2312*4882a593Smuzhiyun 	void __iomem		*epio = ep->regs;
2313*4882a593Smuzhiyun 	unsigned		hw_end = ep->epnum;
2314*4882a593Smuzhiyun 	void __iomem		*regs = ep->musb->mregs;
2315*4882a593Smuzhiyun 	int			is_in = usb_pipein(urb->pipe);
2316*4882a593Smuzhiyun 	int			status = 0;
2317*4882a593Smuzhiyun 	u16			csr;
2318*4882a593Smuzhiyun 	struct dma_channel	*dma = NULL;
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 	musb_ep_select(regs, hw_end);
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun 	if (is_dma_capable()) {
2323*4882a593Smuzhiyun 		dma = is_in ? ep->rx_channel : ep->tx_channel;
2324*4882a593Smuzhiyun 		if (dma) {
2325*4882a593Smuzhiyun 			status = ep->musb->dma_controller->channel_abort(dma);
2326*4882a593Smuzhiyun 			musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
2327*4882a593Smuzhiyun 				is_in ? 'R' : 'T', ep->epnum,
2328*4882a593Smuzhiyun 				urb, status);
2329*4882a593Smuzhiyun 			urb->actual_length += dma->actual_len;
2330*4882a593Smuzhiyun 		}
2331*4882a593Smuzhiyun 	}
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	/* turn off DMA requests, discard state, stop polling ... */
2334*4882a593Smuzhiyun 	if (ep->epnum && is_in) {
2335*4882a593Smuzhiyun 		/* giveback saves bulk toggle */
2336*4882a593Smuzhiyun 		csr = musb_h_flush_rxfifo(ep, 0);
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 		/* clear the endpoint's irq status here to avoid bogus irqs */
2339*4882a593Smuzhiyun 		if (is_dma_capable() && dma)
2340*4882a593Smuzhiyun 			musb_platform_clear_ep_rxintr(musb, ep->epnum);
2341*4882a593Smuzhiyun 	} else if (ep->epnum) {
2342*4882a593Smuzhiyun 		musb_h_tx_flush_fifo(ep);
2343*4882a593Smuzhiyun 		csr = musb_readw(epio, MUSB_TXCSR);
2344*4882a593Smuzhiyun 		csr &= ~(MUSB_TXCSR_AUTOSET
2345*4882a593Smuzhiyun 			| MUSB_TXCSR_DMAENAB
2346*4882a593Smuzhiyun 			| MUSB_TXCSR_H_RXSTALL
2347*4882a593Smuzhiyun 			| MUSB_TXCSR_H_NAKTIMEOUT
2348*4882a593Smuzhiyun 			| MUSB_TXCSR_H_ERROR
2349*4882a593Smuzhiyun 			| MUSB_TXCSR_TXPKTRDY);
2350*4882a593Smuzhiyun 		musb_writew(epio, MUSB_TXCSR, csr);
2351*4882a593Smuzhiyun 		/* REVISIT may need to clear FLUSHFIFO ... */
2352*4882a593Smuzhiyun 		musb_writew(epio, MUSB_TXCSR, csr);
2353*4882a593Smuzhiyun 		/* flush cpu writebuffer */
2354*4882a593Smuzhiyun 		csr = musb_readw(epio, MUSB_TXCSR);
2355*4882a593Smuzhiyun 	} else {
2356*4882a593Smuzhiyun 		musb_h_ep0_flush_fifo(ep);
2357*4882a593Smuzhiyun 	}
2358*4882a593Smuzhiyun 	if (status == 0)
2359*4882a593Smuzhiyun 		musb_advance_schedule(ep->musb, urb, ep, is_in);
2360*4882a593Smuzhiyun 	return status;
2361*4882a593Smuzhiyun }
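
/*
 * To recap the three flush paths above: non-ep0 IN endpoints flush the
 * RX FIFO (the bulk data toggle is saved at giveback time), non-ep0 OUT
 * endpoints flush the TX FIFO and clear the error/handshake bits in
 * TXCSR, and ep0 flushes its shared FIFO.  The schedule advances here
 * only if the DMA abort (if any) succeeded.
 */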
2362*4882a593Smuzhiyun 
2363*4882a593Smuzhiyun static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2364*4882a593Smuzhiyun {
2365*4882a593Smuzhiyun 	struct musb		*musb = hcd_to_musb(hcd);
2366*4882a593Smuzhiyun 	struct musb_qh		*qh;
2367*4882a593Smuzhiyun 	unsigned long		flags;
2368*4882a593Smuzhiyun 	int			is_in  = usb_pipein(urb->pipe);
2369*4882a593Smuzhiyun 	int			ret;
2370*4882a593Smuzhiyun 
2371*4882a593Smuzhiyun 	trace_musb_urb_deq(musb, urb);
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun 	spin_lock_irqsave(&musb->lock, flags);
2374*4882a593Smuzhiyun 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2375*4882a593Smuzhiyun 	if (ret)
2376*4882a593Smuzhiyun 		goto done;
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 	qh = urb->hcpriv;
2379*4882a593Smuzhiyun 	if (!qh)
2380*4882a593Smuzhiyun 		goto done;
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 	/*
2383*4882a593Smuzhiyun 	 * Any URB not actively programmed into endpoint hardware can be
2384*4882a593Smuzhiyun 	 * immediately given back; that's any URB not at the head of an
2385*4882a593Smuzhiyun 	 * endpoint queue, unless someday we get real DMA queues.  And even
2386*4882a593Smuzhiyun 	 * if it's at the head, it might not be known to the hardware...
2387*4882a593Smuzhiyun 	 *
2388*4882a593Smuzhiyun 	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
2389*4882a593Smuzhiyun 	 * has already been updated.  This is a synchronous abort; it'd be
2390*4882a593Smuzhiyun 	 * OK to hold off until after some IRQ, though.
2391*4882a593Smuzhiyun 	 *
2392*4882a593Smuzhiyun 	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
2393*4882a593Smuzhiyun 	 */
2394*4882a593Smuzhiyun 	if (!qh->is_ready
2395*4882a593Smuzhiyun 			|| urb->urb_list.prev != &qh->hep->urb_list
2396*4882a593Smuzhiyun 			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
2397*4882a593Smuzhiyun 		int	ready = qh->is_ready;
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun 		qh->is_ready = 0;
2400*4882a593Smuzhiyun 		musb_giveback(musb, urb, 0);
2401*4882a593Smuzhiyun 		qh->is_ready = ready;
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 		/* If nothing else (usually musb_giveback) is using it
2404*4882a593Smuzhiyun 		 * and its URB list has emptied, recycle this qh.
2405*4882a593Smuzhiyun 		 */
2406*4882a593Smuzhiyun 		if (ready && list_empty(&qh->hep->urb_list)) {
2407*4882a593Smuzhiyun 			qh->hep->hcpriv = NULL;
2408*4882a593Smuzhiyun 			list_del(&qh->ring);
2409*4882a593Smuzhiyun 			kfree(qh);
2410*4882a593Smuzhiyun 		}
2411*4882a593Smuzhiyun 	} else
2412*4882a593Smuzhiyun 		ret = musb_cleanup_urb(urb, qh);
2413*4882a593Smuzhiyun done:
2414*4882a593Smuzhiyun 	spin_unlock_irqrestore(&musb->lock, flags);
2415*4882a593Smuzhiyun 	return ret;
2416*4882a593Smuzhiyun }
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun /* disable an endpoint */
2419*4882a593Smuzhiyun static void
2420*4882a593Smuzhiyun musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2421*4882a593Smuzhiyun {
2422*4882a593Smuzhiyun 	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
2423*4882a593Smuzhiyun 	unsigned long		flags;
2424*4882a593Smuzhiyun 	struct musb		*musb = hcd_to_musb(hcd);
2425*4882a593Smuzhiyun 	struct musb_qh		*qh;
2426*4882a593Smuzhiyun 	struct urb		*urb;
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun 	spin_lock_irqsave(&musb->lock, flags);
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 	qh = hep->hcpriv;
2431*4882a593Smuzhiyun 	if (qh == NULL)
2432*4882a593Smuzhiyun 		goto exit;
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun 	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
2435*4882a593Smuzhiyun 
2436*4882a593Smuzhiyun 	/* Kick the first URB off the hardware, if needed */
2437*4882a593Smuzhiyun 	qh->is_ready = 0;
2438*4882a593Smuzhiyun 	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
2439*4882a593Smuzhiyun 		urb = next_urb(qh);
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 		/* make software (then hardware) stop ASAP */
2442*4882a593Smuzhiyun 		if (!urb->unlinked)
2443*4882a593Smuzhiyun 			urb->status = -ESHUTDOWN;
2444*4882a593Smuzhiyun 
2445*4882a593Smuzhiyun 		/* cleanup */
2446*4882a593Smuzhiyun 		musb_cleanup_urb(urb, qh);
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 		/* Then nuke all the others ... and advance the
2449*4882a593Smuzhiyun 		 * queue on hw_ep (e.g. bulk ring) when we're done.
2450*4882a593Smuzhiyun 		 */
2451*4882a593Smuzhiyun 		while (!list_empty(&hep->urb_list)) {
2452*4882a593Smuzhiyun 			urb = next_urb(qh);
2453*4882a593Smuzhiyun 			urb->status = -ESHUTDOWN;
2454*4882a593Smuzhiyun 			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2455*4882a593Smuzhiyun 		}
2456*4882a593Smuzhiyun 	} else {
2457*4882a593Smuzhiyun 		/* Just empty the queue; the hardware is busy with
2458*4882a593Smuzhiyun 		 * other transfers, and since !qh->is_ready nothing
2459*4882a593Smuzhiyun 		 * will activate any of these as it advances.
2460*4882a593Smuzhiyun 		 */
2461*4882a593Smuzhiyun 		while (!list_empty(&hep->urb_list))
2462*4882a593Smuzhiyun 			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun 		hep->hcpriv = NULL;
2465*4882a593Smuzhiyun 		list_del(&qh->ring);
2466*4882a593Smuzhiyun 		kfree(qh);
2467*4882a593Smuzhiyun 	}
2468*4882a593Smuzhiyun exit:
2469*4882a593Smuzhiyun 	spin_unlock_irqrestore(&musb->lock, flags);
2470*4882a593Smuzhiyun }
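
/*
 * Note the asymmetry above: when the qh owns the hardware endpoint, each
 * URB is retired through musb_advance_schedule() so the hw_ep (and any
 * bulk ring) moves on; otherwise a plain musb_giveback() per URB suffices
 * and the idle qh can be freed immediately.
 */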
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun static int musb_h_get_frame_number(struct usb_hcd *hcd)
2473*4882a593Smuzhiyun {
2474*4882a593Smuzhiyun 	struct musb	*musb = hcd_to_musb(hcd);
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	return musb_readw(musb->mregs, MUSB_FRAME);
2477*4882a593Smuzhiyun }
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun static int musb_h_start(struct usb_hcd *hcd)
2480*4882a593Smuzhiyun {
2481*4882a593Smuzhiyun 	struct musb	*musb = hcd_to_musb(hcd);
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 	/* NOTE: musb_start() is called when the hub driver turns
2484*4882a593Smuzhiyun 	 * on port power, or when (OTG) peripheral starts.
2485*4882a593Smuzhiyun 	 */
2486*4882a593Smuzhiyun 	hcd->state = HC_STATE_RUNNING;
2487*4882a593Smuzhiyun 	musb->port1_status = 0;
2488*4882a593Smuzhiyun 	return 0;
2489*4882a593Smuzhiyun }
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun static void musb_h_stop(struct usb_hcd *hcd)
2492*4882a593Smuzhiyun {
2493*4882a593Smuzhiyun 	musb_stop(hcd_to_musb(hcd));
2494*4882a593Smuzhiyun 	hcd->state = HC_STATE_HALT;
2495*4882a593Smuzhiyun }
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun static int musb_bus_suspend(struct usb_hcd *hcd)
2498*4882a593Smuzhiyun {
2499*4882a593Smuzhiyun 	struct musb	*musb = hcd_to_musb(hcd);
2500*4882a593Smuzhiyun 	u8		devctl;
2501*4882a593Smuzhiyun 	int		ret;
2502*4882a593Smuzhiyun 
2503*4882a593Smuzhiyun 	ret = musb_port_suspend(musb, true);
2504*4882a593Smuzhiyun 	if (ret)
2505*4882a593Smuzhiyun 		return ret;
2506*4882a593Smuzhiyun 
2507*4882a593Smuzhiyun 	if (!is_host_active(musb))
2508*4882a593Smuzhiyun 		return 0;
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 	switch (musb->xceiv->otg->state) {
2511*4882a593Smuzhiyun 	case OTG_STATE_A_SUSPEND:
2512*4882a593Smuzhiyun 		return 0;
2513*4882a593Smuzhiyun 	case OTG_STATE_A_WAIT_VRISE:
2514*4882a593Smuzhiyun 		/* ID could be grounded even if there's no device
2515*4882a593Smuzhiyun 		 * on the other end of the cable.  NOTE that the
2516*4882a593Smuzhiyun 		 * A_WAIT_VRISE timers are messy with MUSB...
2517*4882a593Smuzhiyun 		 */
2518*4882a593Smuzhiyun 		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2519*4882a593Smuzhiyun 		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2520*4882a593Smuzhiyun 			musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
2521*4882a593Smuzhiyun 		break;
2522*4882a593Smuzhiyun 	default:
2523*4882a593Smuzhiyun 		break;
2524*4882a593Smuzhiyun 	}
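
	/*
	 * Per the DEVCTL layout, MUSB_DEVCTL_VBUS is a two-bit field; both
	 * bits set (as tested above) means VBUS is above the "VBus valid"
	 * threshold, so it is safe to move on to waiting for a connect.
	 */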
2525*4882a593Smuzhiyun 
2526*4882a593Smuzhiyun 	if (musb->is_active) {
2527*4882a593Smuzhiyun 		WARNING("trying to suspend as %s while active\n",
2528*4882a593Smuzhiyun 				usb_otg_state_string(musb->xceiv->otg->state));
2529*4882a593Smuzhiyun 		return -EBUSY;
2530*4882a593Smuzhiyun 	} else
2531*4882a593Smuzhiyun 		return 0;
2532*4882a593Smuzhiyun }
2533*4882a593Smuzhiyun 
2534*4882a593Smuzhiyun static int musb_bus_resume(struct usb_hcd *hcd)
2535*4882a593Smuzhiyun {
2536*4882a593Smuzhiyun 	struct musb *musb = hcd_to_musb(hcd);
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	if (musb->config &&
2539*4882a593Smuzhiyun 	    musb->config->host_port_deassert_reset_at_resume)
2540*4882a593Smuzhiyun 		musb_port_reset(musb, false);
2541*4882a593Smuzhiyun 
2542*4882a593Smuzhiyun 	return 0;
2543*4882a593Smuzhiyun }
2544*4882a593Smuzhiyun 
2545*4882a593Smuzhiyun #ifndef CONFIG_MUSB_PIO_ONLY
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun #define MUSB_USB_DMA_ALIGN 4
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun struct musb_temp_buffer {
2550*4882a593Smuzhiyun 	void *kmalloc_ptr;
2551*4882a593Smuzhiyun 	void *old_xfer_buffer;
2552*4882a593Smuzhiyun 	u8 data[];
2553*4882a593Smuzhiyun };
2554*4882a593Smuzhiyun 
2555*4882a593Smuzhiyun static void musb_free_temp_buffer(struct urb *urb)
2556*4882a593Smuzhiyun {
2557*4882a593Smuzhiyun 	enum dma_data_direction dir;
2558*4882a593Smuzhiyun 	struct musb_temp_buffer *temp;
2559*4882a593Smuzhiyun 	size_t length;
2560*4882a593Smuzhiyun 
2561*4882a593Smuzhiyun 	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2562*4882a593Smuzhiyun 		return;
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
2567*4882a593Smuzhiyun 			    data);
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 	if (dir == DMA_FROM_DEVICE) {
2570*4882a593Smuzhiyun 		if (usb_pipeisoc(urb->pipe))
2571*4882a593Smuzhiyun 			length = urb->transfer_buffer_length;
2572*4882a593Smuzhiyun 		else
2573*4882a593Smuzhiyun 			length = urb->actual_length;
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun 		memcpy(temp->old_xfer_buffer, temp->data, length);
2576*4882a593Smuzhiyun 	}
2577*4882a593Smuzhiyun 	urb->transfer_buffer = temp->old_xfer_buffer;
2578*4882a593Smuzhiyun 	kfree(temp->kmalloc_ptr);
2579*4882a593Smuzhiyun 
2580*4882a593Smuzhiyun 	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2581*4882a593Smuzhiyun }
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
2584*4882a593Smuzhiyun {
2585*4882a593Smuzhiyun 	enum dma_data_direction dir;
2586*4882a593Smuzhiyun 	struct musb_temp_buffer *temp;
2587*4882a593Smuzhiyun 	void *kmalloc_ptr;
2588*4882a593Smuzhiyun 	size_t kmalloc_size;
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun 	if (urb->num_sgs || urb->sg ||
2591*4882a593Smuzhiyun 	    urb->transfer_buffer_length == 0 ||
2592*4882a593Smuzhiyun 	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
2593*4882a593Smuzhiyun 		return 0;
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun 	/* Allocate a buffer with enough padding for alignment */
2598*4882a593Smuzhiyun 	kmalloc_size = urb->transfer_buffer_length +
2599*4882a593Smuzhiyun 		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2602*4882a593Smuzhiyun 	if (!kmalloc_ptr)
2603*4882a593Smuzhiyun 		return -ENOMEM;
2604*4882a593Smuzhiyun 
2605*4882a593Smuzhiyun 	/* Position our struct musb_temp_buffer such that data is aligned */
2606*4882a593Smuzhiyun 	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
2607*4882a593Smuzhiyun 
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 	temp->kmalloc_ptr = kmalloc_ptr;
2610*4882a593Smuzhiyun 	temp->old_xfer_buffer = urb->transfer_buffer;
2611*4882a593Smuzhiyun 	if (dir == DMA_TO_DEVICE)
2612*4882a593Smuzhiyun 		memcpy(temp->data, urb->transfer_buffer,
2613*4882a593Smuzhiyun 		       urb->transfer_buffer_length);
2614*4882a593Smuzhiyun 	urb->transfer_buffer = temp->data;
2615*4882a593Smuzhiyun 
2616*4882a593Smuzhiyun 	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 	return 0;
2619*4882a593Smuzhiyun }
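
/*
 * A worked example of the bounce-buffer layout: for a 64-byte transfer
 * from an unaligned buffer, kmalloc_size covers the payload plus the
 * struct header plus up to 3 padding bytes; PTR_ALIGN() then rounds the
 * struct up to the next 4-byte boundary, and since the header holds just
 * two pointers, temp->data lands 4-byte aligned too.  OUT payloads are
 * copied in here; IN payloads are copied back in musb_free_temp_buffer().
 */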
2620*4882a593Smuzhiyun 
2621*4882a593Smuzhiyun static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
2622*4882a593Smuzhiyun 				      gfp_t mem_flags)
2623*4882a593Smuzhiyun {
2624*4882a593Smuzhiyun 	struct musb	*musb = hcd_to_musb(hcd);
2625*4882a593Smuzhiyun 	int ret;
2626*4882a593Smuzhiyun 
2627*4882a593Smuzhiyun 	/*
2628*4882a593Smuzhiyun 	 * The DMA engine in RTL1.8 and above cannot handle
2629*4882a593Smuzhiyun 	 * DMA addresses that are not aligned to a 4-byte boundary.
2630*4882a593Smuzhiyun 	 * For such engines we implement the (un)map_urb_for_dma hooks.
2631*4882a593Smuzhiyun 	 * Do not use these hooks for RTL < 1.8.
2632*4882a593Smuzhiyun 	 */
2633*4882a593Smuzhiyun 	if (musb->hwvers < MUSB_HWVERS_1800)
2634*4882a593Smuzhiyun 		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	ret = musb_alloc_temp_buffer(urb, mem_flags);
2637*4882a593Smuzhiyun 	if (ret)
2638*4882a593Smuzhiyun 		return ret;
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
2641*4882a593Smuzhiyun 	if (ret)
2642*4882a593Smuzhiyun 		musb_free_temp_buffer(urb);
2643*4882a593Smuzhiyun 
2644*4882a593Smuzhiyun 	return ret;
2645*4882a593Smuzhiyun }
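
/*
 * Ordering matters in the hook above: the aligned bounce buffer replaces
 * urb->transfer_buffer before usb_hcd_map_urb_for_dma() runs, so the
 * generic code maps the aligned copy; musb_unmap_urb_for_dma() below
 * undoes the two steps in reverse order.
 */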
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
2648*4882a593Smuzhiyun {
2649*4882a593Smuzhiyun 	struct musb	*musb = hcd_to_musb(hcd);
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 	usb_hcd_unmap_urb_for_dma(hcd, urb);
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 	/* Do not use this hook for RTL<1.8 (see description above) */
2654*4882a593Smuzhiyun 	if (musb->hwvers < MUSB_HWVERS_1800)
2655*4882a593Smuzhiyun 		return;
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun 	musb_free_temp_buffer(urb);
2658*4882a593Smuzhiyun }
2659*4882a593Smuzhiyun #endif /* !CONFIG_MUSB_PIO_ONLY */
2660*4882a593Smuzhiyun 
2661*4882a593Smuzhiyun static const struct hc_driver musb_hc_driver = {
2662*4882a593Smuzhiyun 	.description		= "musb-hcd",
2663*4882a593Smuzhiyun 	.product_desc		= "MUSB HDRC host driver",
2664*4882a593Smuzhiyun 	.hcd_priv_size		= sizeof(struct musb *),
2665*4882a593Smuzhiyun 	.flags			= HCD_USB2 | HCD_DMA | HCD_MEMORY,
2666*4882a593Smuzhiyun 
2667*4882a593Smuzhiyun 	/* not using irq handler or reset hooks from usbcore, since
2668*4882a593Smuzhiyun 	 * those must be shared with peripheral code for OTG configs
2669*4882a593Smuzhiyun 	 */
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	.start			= musb_h_start,
2672*4882a593Smuzhiyun 	.stop			= musb_h_stop,
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 	.get_frame_number	= musb_h_get_frame_number,
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun 	.urb_enqueue		= musb_urb_enqueue,
2677*4882a593Smuzhiyun 	.urb_dequeue		= musb_urb_dequeue,
2678*4882a593Smuzhiyun 	.endpoint_disable	= musb_h_disable,
2679*4882a593Smuzhiyun 
2680*4882a593Smuzhiyun #ifndef CONFIG_MUSB_PIO_ONLY
2681*4882a593Smuzhiyun 	.map_urb_for_dma	= musb_map_urb_for_dma,
2682*4882a593Smuzhiyun 	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
2683*4882a593Smuzhiyun #endif
2684*4882a593Smuzhiyun 
2685*4882a593Smuzhiyun 	.hub_status_data	= musb_hub_status_data,
2686*4882a593Smuzhiyun 	.hub_control		= musb_hub_control,
2687*4882a593Smuzhiyun 	.bus_suspend		= musb_bus_suspend,
2688*4882a593Smuzhiyun 	.bus_resume		= musb_bus_resume,
2689*4882a593Smuzhiyun 	/* .start_port_reset	= NULL, */
2690*4882a593Smuzhiyun 	/* .hub_irq_enable	= NULL, */
2691*4882a593Smuzhiyun };
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun int musb_host_alloc(struct musb *musb)
2694*4882a593Smuzhiyun {
2695*4882a593Smuzhiyun 	struct device	*dev = musb->controller;
2696*4882a593Smuzhiyun 
2697*4882a593Smuzhiyun 	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
2698*4882a593Smuzhiyun 	musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
2699*4882a593Smuzhiyun 	if (!musb->hcd)
2700*4882a593Smuzhiyun 		return -EINVAL;
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun 	*musb->hcd->hcd_priv = (unsigned long) musb;
2703*4882a593Smuzhiyun 	musb->hcd->self.uses_pio_for_control = 1;
2704*4882a593Smuzhiyun 	musb->hcd->uses_new_polling = 1;
2705*4882a593Smuzhiyun 	musb->hcd->has_tt = 1;
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun 	return 0;
2708*4882a593Smuzhiyun }
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun void musb_host_cleanup(struct musb *musb)
2711*4882a593Smuzhiyun {
2712*4882a593Smuzhiyun 	if (musb->port_mode == MUSB_PERIPHERAL)
2713*4882a593Smuzhiyun 		return;
2714*4882a593Smuzhiyun 	usb_remove_hcd(musb->hcd);
2715*4882a593Smuzhiyun }
2716*4882a593Smuzhiyun 
2717*4882a593Smuzhiyun void musb_host_free(struct musb *musb)
2718*4882a593Smuzhiyun {
2719*4882a593Smuzhiyun 	usb_put_hcd(musb->hcd);
2720*4882a593Smuzhiyun }
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun int musb_host_setup(struct musb *musb, int power_budget)
2723*4882a593Smuzhiyun {
2724*4882a593Smuzhiyun 	int ret;
2725*4882a593Smuzhiyun 	struct usb_hcd *hcd = musb->hcd;
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	if (musb->port_mode == MUSB_HOST) {
2728*4882a593Smuzhiyun 		MUSB_HST_MODE(musb);
2729*4882a593Smuzhiyun 		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
2730*4882a593Smuzhiyun 	}
2731*4882a593Smuzhiyun 	otg_set_host(musb->xceiv->otg, &hcd->self);
2732*4882a593Smuzhiyun 	/* don't support otg protocols */
2733*4882a593Smuzhiyun 	hcd->self.otg_port = 0;
2734*4882a593Smuzhiyun 	musb->xceiv->otg->host = &hcd->self;
2735*4882a593Smuzhiyun 	hcd->power_budget = 2 * (power_budget ? : 250);
2736*4882a593Smuzhiyun 	hcd->skip_phy_initialization = 1;
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun 	ret = usb_add_hcd(hcd, 0, 0);
2739*4882a593Smuzhiyun 	if (ret < 0)
2740*4882a593Smuzhiyun 		return ret;
2741*4882a593Smuzhiyun 
2742*4882a593Smuzhiyun 	device_wakeup_enable(hcd->self.controller);
2743*4882a593Smuzhiyun 	return 0;
2744*4882a593Smuzhiyun }
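
/*
 * Rough lifecycle, as driven by musb_core: musb_host_alloc() creates the
 * hcd, musb_host_setup() registers it via usb_add_hcd(), and teardown is
 * musb_host_cleanup() (usb_remove_hcd) followed by musb_host_free()
 * (usb_put_hcd).  The doubling of power_budget above matches the musb
 * platform-data convention of giving port power in 2 mA units, so the
 * default of 250 yields a 500 mA budget.
 */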
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun void musb_host_resume_root_hub(struct musb *musb)
2747*4882a593Smuzhiyun {
2748*4882a593Smuzhiyun 	usb_hcd_resume_root_hub(musb->hcd);
2749*4882a593Smuzhiyun }
2750*4882a593Smuzhiyun 
2751*4882a593Smuzhiyun void musb_host_poke_root_hub(struct musb *musb)
2752*4882a593Smuzhiyun {
2753*4882a593Smuzhiyun 	MUSB_HST_MODE(musb);
2754*4882a593Smuzhiyun 	if (musb->hcd->status_urb)
2755*4882a593Smuzhiyun 		usb_hcd_poll_rh_status(musb->hcd);
2756*4882a593Smuzhiyun 	else
2757*4882a593Smuzhiyun 		usb_hcd_resume_root_hub(musb->hcd);
2758*4882a593Smuzhiyun }
2759