1eb81955bSIlya Yanok /*
2eb81955bSIlya Yanok * MUSB OTG driver host support
3eb81955bSIlya Yanok *
4eb81955bSIlya Yanok * Copyright 2005 Mentor Graphics Corporation
5eb81955bSIlya Yanok * Copyright (C) 2005-2006 by Texas Instruments
6eb81955bSIlya Yanok * Copyright (C) 2006-2007 Nokia Corporation
7eb81955bSIlya Yanok * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
8eb81955bSIlya Yanok *
9*5b8031ccSTom Rini * SPDX-License-Identifier: GPL-2.0
10eb81955bSIlya Yanok */
11eb81955bSIlya Yanok
12eb81955bSIlya Yanok #ifndef __UBOOT__
13eb81955bSIlya Yanok #include <linux/module.h>
14eb81955bSIlya Yanok #include <linux/kernel.h>
15eb81955bSIlya Yanok #include <linux/delay.h>
16eb81955bSIlya Yanok #include <linux/sched.h>
17eb81955bSIlya Yanok #include <linux/slab.h>
18eb81955bSIlya Yanok #include <linux/errno.h>
19eb81955bSIlya Yanok #include <linux/init.h>
20eb81955bSIlya Yanok #include <linux/list.h>
21eb81955bSIlya Yanok #include <linux/dma-mapping.h>
22eb81955bSIlya Yanok #else
23eb81955bSIlya Yanok #include <common.h>
24eb81955bSIlya Yanok #include <usb.h>
25eb81955bSIlya Yanok #include "linux-compat.h"
26eb81955bSIlya Yanok #include "usb-compat.h"
27eb81955bSIlya Yanok #endif
28eb81955bSIlya Yanok
29eb81955bSIlya Yanok #include "musb_core.h"
30eb81955bSIlya Yanok #include "musb_host.h"
31eb81955bSIlya Yanok
32eb81955bSIlya Yanok
33eb81955bSIlya Yanok /* MUSB HOST status 22-mar-2006
34eb81955bSIlya Yanok *
35eb81955bSIlya Yanok * - There's still lots of partial code duplication for fault paths, so
36eb81955bSIlya Yanok * they aren't handled as consistently as they need to be.
37eb81955bSIlya Yanok *
38eb81955bSIlya Yanok * - PIO mostly behaved when last tested.
39eb81955bSIlya Yanok * + including ep0, with all usbtest cases 9, 10
40eb81955bSIlya Yanok * + usbtest 14 (ep0out) doesn't seem to run at all
41eb81955bSIlya Yanok * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
42eb81955bSIlya Yanok * configurations, but otherwise double buffering passes basic tests.
43eb81955bSIlya Yanok * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
44eb81955bSIlya Yanok *
45eb81955bSIlya Yanok * - DMA (CPPI) ... partially behaves, not currently recommended
46eb81955bSIlya Yanok * + about 1/15 the speed of typical EHCI implementations (PCI)
47eb81955bSIlya Yanok * + RX, all too often reqpkt seems to misbehave after tx
48eb81955bSIlya Yanok * + TX, no known issues (other than evident silicon issue)
49eb81955bSIlya Yanok *
50eb81955bSIlya Yanok * - DMA (Mentor/OMAP) ...has at least toggle update problems
51eb81955bSIlya Yanok *
52eb81955bSIlya Yanok * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
53eb81955bSIlya Yanok * starvation ... nothing yet for TX, interrupt, or bulk.
54eb81955bSIlya Yanok *
55eb81955bSIlya Yanok * - Not tested with HNP, but some SRP paths seem to behave.
56eb81955bSIlya Yanok *
57eb81955bSIlya Yanok * NOTE 24-August-2006:
58eb81955bSIlya Yanok *
59eb81955bSIlya Yanok * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
60eb81955bSIlya Yanok * extra endpoint for periodic use enabling hub + keybd + mouse. That
61eb81955bSIlya Yanok * mostly works, except that with "usbnet" it's easy to trigger cases
62eb81955bSIlya Yanok * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
63eb81955bSIlya Yanok * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
64eb81955bSIlya Yanok * although ARP RX wins. (That test was done with a full speed link.)
65eb81955bSIlya Yanok */
66eb81955bSIlya Yanok
67eb81955bSIlya Yanok
68eb81955bSIlya Yanok /*
69eb81955bSIlya Yanok * NOTE on endpoint usage:
70eb81955bSIlya Yanok *
71eb81955bSIlya Yanok * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
72eb81955bSIlya Yanok * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
73eb81955bSIlya Yanok * (Yes, bulk _could_ use more of the endpoints than that, and would even
74eb81955bSIlya Yanok * benefit from it.)
75eb81955bSIlya Yanok *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
77eb81955bSIlya Yanok * So far that scheduling is both dumb and optimistic: the endpoint will be
78eb81955bSIlya Yanok * "claimed" until its software queue is no longer refilled. No multiplexing
79eb81955bSIlya Yanok * of transfers between endpoints, or anything clever.
80eb81955bSIlya Yanok */
81eb81955bSIlya Yanok
82eb81955bSIlya Yanok
83eb81955bSIlya Yanok static void musb_ep_program(struct musb *musb, u8 epnum,
84eb81955bSIlya Yanok struct urb *urb, int is_out,
85eb81955bSIlya Yanok u8 *buf, u32 offset, u32 len);
86eb81955bSIlya Yanok
87eb81955bSIlya Yanok /*
88eb81955bSIlya Yanok * Clear TX fifo. Needed to avoid BABBLE errors.
89eb81955bSIlya Yanok */
musb_h_tx_flush_fifo(struct musb_hw_ep * ep)90eb81955bSIlya Yanok static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
91eb81955bSIlya Yanok {
92eb81955bSIlya Yanok struct musb *musb = ep->musb;
93eb81955bSIlya Yanok void __iomem *epio = ep->regs;
94eb81955bSIlya Yanok u16 csr;
95eb81955bSIlya Yanok u16 lastcsr = 0;
96eb81955bSIlya Yanok int retries = 1000;
97eb81955bSIlya Yanok
98eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_TXCSR);
99eb81955bSIlya Yanok while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
100eb81955bSIlya Yanok if (csr != lastcsr)
101eb81955bSIlya Yanok dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
102eb81955bSIlya Yanok lastcsr = csr;
103eb81955bSIlya Yanok csr |= MUSB_TXCSR_FLUSHFIFO;
104eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR, csr);
105eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_TXCSR);
106eb81955bSIlya Yanok if (WARN(retries-- < 1,
107eb81955bSIlya Yanok "Could not flush host TX%d fifo: csr: %04x\n",
108eb81955bSIlya Yanok ep->epnum, csr))
109eb81955bSIlya Yanok return;
110eb81955bSIlya Yanok mdelay(1);
111eb81955bSIlya Yanok }
112eb81955bSIlya Yanok }
113eb81955bSIlya Yanok
musb_h_ep0_flush_fifo(struct musb_hw_ep * ep)114eb81955bSIlya Yanok static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
115eb81955bSIlya Yanok {
116eb81955bSIlya Yanok void __iomem *epio = ep->regs;
117eb81955bSIlya Yanok u16 csr;
118eb81955bSIlya Yanok int retries = 5;
119eb81955bSIlya Yanok
120eb81955bSIlya Yanok /* scrub any data left in the fifo */
121eb81955bSIlya Yanok do {
122eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_TXCSR);
123eb81955bSIlya Yanok if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
124eb81955bSIlya Yanok break;
125eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
126eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_TXCSR);
127eb81955bSIlya Yanok udelay(10);
128eb81955bSIlya Yanok } while (--retries);
129eb81955bSIlya Yanok
130eb81955bSIlya Yanok WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
131eb81955bSIlya Yanok ep->epnum, csr);
132eb81955bSIlya Yanok
133eb81955bSIlya Yanok /* and reset for the next transfer */
134eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR, 0);
135eb81955bSIlya Yanok }
136eb81955bSIlya Yanok
137eb81955bSIlya Yanok /*
138eb81955bSIlya Yanok * Start transmit. Caller is responsible for locking shared resources.
139eb81955bSIlya Yanok * musb must be locked.
140eb81955bSIlya Yanok */
musb_h_tx_start(struct musb_hw_ep * ep)141eb81955bSIlya Yanok static inline void musb_h_tx_start(struct musb_hw_ep *ep)
142eb81955bSIlya Yanok {
143eb81955bSIlya Yanok u16 txcsr;
144eb81955bSIlya Yanok
145eb81955bSIlya Yanok /* NOTE: no locks here; caller should lock and select EP */
146eb81955bSIlya Yanok if (ep->epnum) {
147eb81955bSIlya Yanok txcsr = musb_readw(ep->regs, MUSB_TXCSR);
148eb81955bSIlya Yanok txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
149eb81955bSIlya Yanok musb_writew(ep->regs, MUSB_TXCSR, txcsr);
150eb81955bSIlya Yanok } else {
151eb81955bSIlya Yanok txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
152eb81955bSIlya Yanok musb_writew(ep->regs, MUSB_CSR0, txcsr);
153eb81955bSIlya Yanok }
154eb81955bSIlya Yanok
155eb81955bSIlya Yanok }
156eb81955bSIlya Yanok
musb_h_tx_dma_start(struct musb_hw_ep * ep)157eb81955bSIlya Yanok static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
158eb81955bSIlya Yanok {
159eb81955bSIlya Yanok u16 txcsr;
160eb81955bSIlya Yanok
161eb81955bSIlya Yanok /* NOTE: no locks here; caller should lock and select EP */
162eb81955bSIlya Yanok txcsr = musb_readw(ep->regs, MUSB_TXCSR);
163eb81955bSIlya Yanok txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
164eb81955bSIlya Yanok if (is_cppi_enabled())
165eb81955bSIlya Yanok txcsr |= MUSB_TXCSR_DMAMODE;
166eb81955bSIlya Yanok musb_writew(ep->regs, MUSB_TXCSR, txcsr);
167eb81955bSIlya Yanok }
168eb81955bSIlya Yanok
musb_ep_set_qh(struct musb_hw_ep * ep,int is_in,struct musb_qh * qh)169eb81955bSIlya Yanok static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
170eb81955bSIlya Yanok {
171eb81955bSIlya Yanok if (is_in != 0 || ep->is_shared_fifo)
172eb81955bSIlya Yanok ep->in_qh = qh;
173eb81955bSIlya Yanok if (is_in == 0 || ep->is_shared_fifo)
174eb81955bSIlya Yanok ep->out_qh = qh;
175eb81955bSIlya Yanok }
176eb81955bSIlya Yanok
musb_ep_get_qh(struct musb_hw_ep * ep,int is_in)177eb81955bSIlya Yanok static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
178eb81955bSIlya Yanok {
179eb81955bSIlya Yanok return is_in ? ep->in_qh : ep->out_qh;
180eb81955bSIlya Yanok }
181eb81955bSIlya Yanok
182eb81955bSIlya Yanok /*
183eb81955bSIlya Yanok * Start the URB at the front of an endpoint's queue
184eb81955bSIlya Yanok * end must be claimed from the caller.
185eb81955bSIlya Yanok *
186eb81955bSIlya Yanok * Context: controller locked, irqs blocked
187eb81955bSIlya Yanok */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase =  musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;	/* SETUP packets are always 8 bytes */
		break;
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
#endif
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
#ifndef __UBOOT__
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
#endif
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
#endif
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		/* NOTE(review): in the __UBOOT__ build the ASAP check below
		 * is compiled out, so 'frame' is read but never consulted.
		 */
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
#ifndef __UBOOT__
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
#endif
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
#ifndef __UBOOT__
		}
#endif
		break;
	default:
		/* control/bulk fall here, and periodic transfers that are
		 * "ready now" jump in via the goto above
		 */
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		/* PIO needs an explicit kick; Mentor DMA was already armed
		 * by musb_ep_program(), only CPPI/TUSB-OMAP need a start
		 */
		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}
292eb81955bSIlya Yanok
293eb81955bSIlya Yanok /* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	/* drop the controller lock while the URB completion handler runs,
	 * then retake it before returning to the caller (see the
	 * __releases/__acquires annotations above)
	 */
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}
312eb81955bSIlya Yanok
313eb81955bSIlya Yanok /* For bulk/interrupt endpoints only */
musb_save_toggle(struct musb_qh * qh,int is_in,struct urb * urb)314eb81955bSIlya Yanok static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
315eb81955bSIlya Yanok struct urb *urb)
316eb81955bSIlya Yanok {
317eb81955bSIlya Yanok void __iomem *epio = qh->hw_ep->regs;
318eb81955bSIlya Yanok u16 csr;
319eb81955bSIlya Yanok
320eb81955bSIlya Yanok /*
321eb81955bSIlya Yanok * FIXME: the current Mentor DMA code seems to have
322eb81955bSIlya Yanok * problems getting toggle correct.
323eb81955bSIlya Yanok */
324eb81955bSIlya Yanok
325eb81955bSIlya Yanok if (is_in)
326eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
327eb81955bSIlya Yanok else
328eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;
329eb81955bSIlya Yanok
330eb81955bSIlya Yanok usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
331eb81955bSIlya Yanok }
332eb81955bSIlya Yanok
333eb81955bSIlya Yanok /*
334eb81955bSIlya Yanok * Advance this hardware endpoint's queue, completing the specified URB and
335eb81955bSIlya Yanok * advancing to either the next URB queued to that qh, or else invalidating
336eb81955bSIlya Yanok * that qh and advancing to the next qh scheduled after the current one.
337eb81955bSIlya Yanok *
338eb81955bSIlya Yanok * Context: caller owns controller lock, IRQs are blocked
339eb81955bSIlya Yanok */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	/* -EINPROGRESS means the URB completed without error */
	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
#ifndef __UBOOT__
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
#endif
	}

	/* mark the qh not-ready across the giveback so a completion
	 * handler can't restart it underneath us; restore afterwards
	 */
	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		/* release any DMA channel and force a re-init of the
		 * hardware endpoint before its next use
		 */
		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				/* multiplexed ring: free this qh and pick
				 * up the next one queued behind it
				 */
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}
			/* FALLTHROUGH: non-multiplexed qh is simply freed */

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	/* start the next URB, if any (either on the same qh or on the
	 * successor picked from the ring above)
	 */
	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
426eb81955bSIlya Yanok
/*
 * Flush an RX FIFO without letting it refill: clear the request/auto
 * bits, set FLUSHFIFO (+RXPKTRDY), and write twice to cover double
 * buffering.  Returns the CSR read back after the writes.
 */
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	void __iomem *regs = hw_ep->regs;

	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;

	/* write 2x to allow double buffering */
	musb_writew(regs, MUSB_RXCSR, csr);
	musb_writew(regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(regs, MUSB_RXCSR);
}
445eb81955bSIlya Yanok
446eb81955bSIlya Yanok /*
447eb81955bSIlya Yanok * PIO RX for a packet (or part of it).
448eb81955bSIlya Yanok */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16 rx_count;
	u8 *buf;
	u16 csr;
	bool done = false;	/* true once this URB needs no more packets */
	u32 length;
	int do_flush = 0;	/* set on overflow: discard instead of reading */
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	int pipe = urb->pipe;
	void *buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
#ifndef __UBOOT__
	if (usb_pipeisoc(pipe)) {
		int status = 0;
		struct usb_iso_packet_descriptor *d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		/* per-frame descriptor for the current iso packet */
		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			/* more data in FIFO than this frame can hold */
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
#endif
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			/* FIFO holds more than the URB buffer can take */
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done: buffer full, short packet, or error */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		/* a short transfer is an error if the caller asked for
		 * URB_SHORT_NOT_OK semantics
		 */
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
#ifndef __UBOOT__
	}
#endif

	/* PIO copy of 'length' bytes out of the endpoint FIFO */
	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		/* request the next IN packet only if the URB isn't done */
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
545eb81955bSIlya Yanok
546eb81955bSIlya Yanok /* we don't always need to reinit a given side of an endpoint...
547eb81955bSIlya Yanok * when we do, use tx/rx reinit routine and then construct a new CSR
548eb81955bSIlya Yanok * to address data toggle, NYET, and DMA or PIO.
549eb81955bSIlya Yanok *
550eb81955bSIlya Yanok * it's possible that driver bugs (especially for DMA) or aborting a
551eb81955bSIlya Yanok * transfer might have left the endpoint busier than it should be.
552eb81955bSIlya Yanok * the busy/not-empty tests are basically paranoia.
553eb81955bSIlya Yanok */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			/* force the data toggle so the coming RX starts clean */
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	/* endpoint is now configured; skip reinit until descheduled */
	ep->rx_reinit = 0;
}
616eb81955bSIlya Yanok
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#ifdef	CONFIG_USB_INVENTRA_DMA
	/* Mentor (Inventra) DMA: pick mode 1 (multi-packet) or mode 0
	 * (single packet) and program TXCSR accordingly
	 */
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		if (qh->hb_mult == 1)
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	/* CPPI / TUSB-OMAP DMA; bail out (caller falls back to PIO)
	 * when neither is configured
	 */
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches to main memory before starting
	 * DMA transfer
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		/* programming failed: release the channel and strip the
		 * DMA bits from TXCSR so the caller can retry with PIO
		 */
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	/* DMA armed; TXPKTRDY/start handling is done elsewhere */
	return true;
}
678eb81955bSIlya Yanok
679eb81955bSIlya Yanok /*
680eb81955bSIlya Yanok * Program an HDRC endpoint as per the given URB
681eb81955bSIlya Yanok * Context: irqs blocked, controller lock held
682eb81955bSIlya Yanok */
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 *
 * @musb:   controller state
 * @epnum:  hardware endpoint number (0 == control endpoint)
 * @urb:    the URB to start
 * @is_out: nonzero for OUT/TX (and EP0), zero for IN/RX
 * @buf:    PIO source buffer for the TX path
 * @offset: byte offset into the URB's transfer buffer
 * @len:    number of bytes to transfer in this segment
 *
 * Sets up addressing, protocol/interval/maxpacket registers, tries to
 * hand the transfer to DMA when a channel is available, and otherwise
 * falls back to PIO.  For RX the transfer is kicked off by setting
 * REQPKT; for TX by loading the FIFO (PIO) or starting the DMA.
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			/* lazily allocate a channel for this direction */
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			/* restore the saved data toggle, or reset it */
			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			/* ... now DMAENAB is clear, DMAMODE may follow */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok)
				/* FIFO-sized maxp disables double buffering */
				musb_writew(epio, MUSB_TXMAXP,
					    hw_ep->max_packet_sz_tx);
			else if (can_bulk_split(musb, qh->type))
				/* bulk splitting: encode packets-per-FIFO */
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		/* how much to load into the FIFO if we end up doing PIO */
		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		/* DMA takes the whole length; PIO is then unnecessary */
		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/* these bits should never still be set here */
			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				/* no DMA: release channel, stay with PIO */
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		/* request the (first) IN packet */
		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
883eb81955bSIlya Yanok
884eb81955bSIlya Yanok
885eb81955bSIlya Yanok /*
886eb81955bSIlya Yanok * Service the default endpoint (ep0) as host.
887eb81955bSIlya Yanok * Return true until it's time to start the status stage.
888eb81955bSIlya Yanok */
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 *
 * @musb: controller state
 * @len:  number of bytes available in the ep0 FIFO (RX) for this irq
 * @urb:  the control URB in progress
 *
 * Advances the ep0 state machine one step: drains the FIFO on IN data,
 * decodes a fresh SETUP packet on MUSB_EP0_START (possibly falling
 * through to OUT data), or loads the FIFO for OUT data.  The caller
 * starts the status stage once this returns false.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		/* device sent more than the buffer can hold */
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			/* no data stage: go straight to status */
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
958eb81955bSIlya Yanok
959eb81955bSIlya Yanok /*
960eb81955bSIlya Yanok * Handle default endpoint interrupt as host. Only called in IRQ time
961eb81955bSIlya Yanok * from musb_interrupt().
962eb81955bSIlya Yanok *
963eb81955bSIlya Yanok * called with controller irqlocked
964eb81955bSIlya Yanok */
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 *
 * Checks CSR0 for stall / error / NAK-timeout, aborts the current
 * control transfer on failure, otherwise steps the ep0 state machine
 * via musb_h_ep0_continue() and kicks off the next packet or the
 * status stage.  Returns IRQ_HANDLED when the interrupt was ours.
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	/* COUNT0 is only meaningful when a packet has been received */
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			/* stop requesting packets first, then clear the
			 * NAK-timeout flag with a second write
			 */
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
1084eb81955bSIlya Yanok
1085eb81955bSIlya Yanok
1086eb81955bSIlya Yanok #ifdef CONFIG_USB_INVENTRA_DMA
1087eb81955bSIlya Yanok
1088eb81955bSIlya Yanok /* Host side TX (OUT) using Mentor DMA works as follows:
1089eb81955bSIlya Yanok submit_urb ->
1090eb81955bSIlya Yanok - if queue was empty, Program Endpoint
1091eb81955bSIlya Yanok - ... which starts DMA to fifo in mode 1 or 0
1092eb81955bSIlya Yanok
1093eb81955bSIlya Yanok DMA Isr (transfer complete) -> TxAvail()
1094eb81955bSIlya Yanok - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
1095eb81955bSIlya Yanok only in musb_cleanup_urb)
1096eb81955bSIlya Yanok - TxPktRdy has to be set in mode 0 or for
1097eb81955bSIlya Yanok short packets in mode 1.
1098eb81955bSIlya Yanok */
1099eb81955bSIlya Yanok
1100eb81955bSIlya Yanok #endif
1101eb81955bSIlya Yanok
1102eb81955bSIlya Yanok /* Service a Tx-Available or dma completion irq for the endpoint */
musb_host_tx(struct musb * musb,u8 epnum)1103eb81955bSIlya Yanok void musb_host_tx(struct musb *musb, u8 epnum)
1104eb81955bSIlya Yanok {
1105eb81955bSIlya Yanok int pipe;
1106eb81955bSIlya Yanok bool done = false;
1107eb81955bSIlya Yanok u16 tx_csr;
1108eb81955bSIlya Yanok size_t length = 0;
1109eb81955bSIlya Yanok size_t offset = 0;
1110eb81955bSIlya Yanok struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1111eb81955bSIlya Yanok void __iomem *epio = hw_ep->regs;
1112eb81955bSIlya Yanok struct musb_qh *qh = hw_ep->out_qh;
1113eb81955bSIlya Yanok struct urb *urb = next_urb(qh);
1114eb81955bSIlya Yanok u32 status = 0;
1115eb81955bSIlya Yanok void __iomem *mbase = musb->mregs;
1116eb81955bSIlya Yanok struct dma_channel *dma;
1117eb81955bSIlya Yanok bool transfer_pending = false;
1118eb81955bSIlya Yanok
1119eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1120eb81955bSIlya Yanok tx_csr = musb_readw(epio, MUSB_TXCSR);
1121eb81955bSIlya Yanok
1122eb81955bSIlya Yanok /* with CPPI, DMA sometimes triggers "extra" irqs */
1123eb81955bSIlya Yanok if (!urb) {
1124eb81955bSIlya Yanok dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1125eb81955bSIlya Yanok return;
1126eb81955bSIlya Yanok }
1127eb81955bSIlya Yanok
1128eb81955bSIlya Yanok pipe = urb->pipe;
1129eb81955bSIlya Yanok dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1130eb81955bSIlya Yanok dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
1131eb81955bSIlya Yanok dma ? ", dma" : "");
1132eb81955bSIlya Yanok
1133eb81955bSIlya Yanok /* check for errors */
1134eb81955bSIlya Yanok if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1135eb81955bSIlya Yanok /* dma was disabled, fifo flushed */
1136eb81955bSIlya Yanok dev_dbg(musb->controller, "TX end %d stall\n", epnum);
1137eb81955bSIlya Yanok
1138eb81955bSIlya Yanok /* stall; record URB status */
1139eb81955bSIlya Yanok status = -EPIPE;
1140eb81955bSIlya Yanok
1141eb81955bSIlya Yanok } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1142eb81955bSIlya Yanok /* (NON-ISO) dma was disabled, fifo flushed */
1143eb81955bSIlya Yanok dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
1144eb81955bSIlya Yanok
1145eb81955bSIlya Yanok status = -ETIMEDOUT;
1146eb81955bSIlya Yanok
1147eb81955bSIlya Yanok } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1148eb81955bSIlya Yanok dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
1149eb81955bSIlya Yanok
1150eb81955bSIlya Yanok /* NOTE: this code path would be a good place to PAUSE a
1151eb81955bSIlya Yanok * transfer, if there's some other (nonperiodic) tx urb
1152eb81955bSIlya Yanok * that could use this fifo. (dma complicates it...)
1153eb81955bSIlya Yanok * That's already done for bulk RX transfers.
1154eb81955bSIlya Yanok *
1155eb81955bSIlya Yanok * if (bulk && qh->ring.next != &musb->out_bulk), then
1156eb81955bSIlya Yanok * we have a candidate... NAKing is *NOT* an error
1157eb81955bSIlya Yanok */
1158eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1159eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR,
1160eb81955bSIlya Yanok MUSB_TXCSR_H_WZC_BITS
1161eb81955bSIlya Yanok | MUSB_TXCSR_TXPKTRDY);
1162eb81955bSIlya Yanok return;
1163eb81955bSIlya Yanok }
1164eb81955bSIlya Yanok
1165eb81955bSIlya Yanok if (status) {
1166eb81955bSIlya Yanok if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1167eb81955bSIlya Yanok dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1168eb81955bSIlya Yanok (void) musb->dma_controller->channel_abort(dma);
1169eb81955bSIlya Yanok }
1170eb81955bSIlya Yanok
1171eb81955bSIlya Yanok /* do the proper sequence to abort the transfer in the
1172eb81955bSIlya Yanok * usb core; the dma engine should already be stopped.
1173eb81955bSIlya Yanok */
1174eb81955bSIlya Yanok musb_h_tx_flush_fifo(hw_ep);
1175eb81955bSIlya Yanok tx_csr &= ~(MUSB_TXCSR_AUTOSET
1176eb81955bSIlya Yanok | MUSB_TXCSR_DMAENAB
1177eb81955bSIlya Yanok | MUSB_TXCSR_H_ERROR
1178eb81955bSIlya Yanok | MUSB_TXCSR_H_RXSTALL
1179eb81955bSIlya Yanok | MUSB_TXCSR_H_NAKTIMEOUT
1180eb81955bSIlya Yanok );
1181eb81955bSIlya Yanok
1182eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1183eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR, tx_csr);
1184eb81955bSIlya Yanok /* REVISIT may need to clear FLUSHFIFO ... */
1185eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR, tx_csr);
1186eb81955bSIlya Yanok musb_writeb(epio, MUSB_TXINTERVAL, 0);
1187eb81955bSIlya Yanok
1188eb81955bSIlya Yanok done = true;
1189eb81955bSIlya Yanok }
1190eb81955bSIlya Yanok
1191eb81955bSIlya Yanok /* second cppi case */
1192eb81955bSIlya Yanok if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1193eb81955bSIlya Yanok dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1194eb81955bSIlya Yanok return;
1195eb81955bSIlya Yanok }
1196eb81955bSIlya Yanok
1197eb81955bSIlya Yanok if (is_dma_capable() && dma && !status) {
1198eb81955bSIlya Yanok /*
1199eb81955bSIlya Yanok * DMA has completed. But if we're using DMA mode 1 (multi
1200eb81955bSIlya Yanok * packet DMA), we need a terminal TXPKTRDY interrupt before
1201eb81955bSIlya Yanok * we can consider this transfer completed, lest we trash
1202eb81955bSIlya Yanok * its last packet when writing the next URB's data. So we
1203eb81955bSIlya Yanok * switch back to mode 0 to get that interrupt; we'll come
1204eb81955bSIlya Yanok * back here once it happens.
1205eb81955bSIlya Yanok */
1206eb81955bSIlya Yanok if (tx_csr & MUSB_TXCSR_DMAMODE) {
1207eb81955bSIlya Yanok /*
1208eb81955bSIlya Yanok * We shouldn't clear DMAMODE with DMAENAB set; so
1209eb81955bSIlya Yanok * clear them in a safe order. That should be OK
1210eb81955bSIlya Yanok * once TXPKTRDY has been set (and I've never seen
1211eb81955bSIlya Yanok * it being 0 at this moment -- DMA interrupt latency
1212eb81955bSIlya Yanok * is significant) but if it hasn't been then we have
1213eb81955bSIlya Yanok * no choice but to stop being polite and ignore the
1214eb81955bSIlya Yanok * programmer's guide... :-)
1215eb81955bSIlya Yanok *
1216eb81955bSIlya Yanok * Note that we must write TXCSR with TXPKTRDY cleared
1217eb81955bSIlya Yanok * in order not to re-trigger the packet send (this bit
1218eb81955bSIlya Yanok * can't be cleared by CPU), and there's another caveat:
1219eb81955bSIlya Yanok * TXPKTRDY may be set shortly and then cleared in the
1220eb81955bSIlya Yanok * double-buffered FIFO mode, so we do an extra TXCSR
1221eb81955bSIlya Yanok * read for debouncing...
1222eb81955bSIlya Yanok */
1223eb81955bSIlya Yanok tx_csr &= musb_readw(epio, MUSB_TXCSR);
1224eb81955bSIlya Yanok if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
1225eb81955bSIlya Yanok tx_csr &= ~(MUSB_TXCSR_DMAENAB |
1226eb81955bSIlya Yanok MUSB_TXCSR_TXPKTRDY);
1227eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR,
1228eb81955bSIlya Yanok tx_csr | MUSB_TXCSR_H_WZC_BITS);
1229eb81955bSIlya Yanok }
1230eb81955bSIlya Yanok tx_csr &= ~(MUSB_TXCSR_DMAMODE |
1231eb81955bSIlya Yanok MUSB_TXCSR_TXPKTRDY);
1232eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR,
1233eb81955bSIlya Yanok tx_csr | MUSB_TXCSR_H_WZC_BITS);
1234eb81955bSIlya Yanok
1235eb81955bSIlya Yanok /*
1236eb81955bSIlya Yanok * There is no guarantee that we'll get an interrupt
1237eb81955bSIlya Yanok * after clearing DMAMODE as we might have done this
1238eb81955bSIlya Yanok * too late (after TXPKTRDY was cleared by controller).
1239eb81955bSIlya Yanok * Re-read TXCSR as we have spoiled its previous value.
1240eb81955bSIlya Yanok */
1241eb81955bSIlya Yanok tx_csr = musb_readw(epio, MUSB_TXCSR);
1242eb81955bSIlya Yanok }
1243eb81955bSIlya Yanok
1244eb81955bSIlya Yanok /*
1245eb81955bSIlya Yanok * We may get here from a DMA completion or TXPKTRDY interrupt.
1246eb81955bSIlya Yanok * In any case, we must check the FIFO status here and bail out
1247eb81955bSIlya Yanok * only if the FIFO still has data -- that should prevent the
1248eb81955bSIlya Yanok * "missed" TXPKTRDY interrupts and deal with double-buffered
1249eb81955bSIlya Yanok * FIFO mode too...
1250eb81955bSIlya Yanok */
1251eb81955bSIlya Yanok if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
1252eb81955bSIlya Yanok dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
1253eb81955bSIlya Yanok "CSR %04x\n", tx_csr);
1254eb81955bSIlya Yanok return;
1255eb81955bSIlya Yanok }
1256eb81955bSIlya Yanok }
1257eb81955bSIlya Yanok
1258eb81955bSIlya Yanok if (!status || dma || usb_pipeisoc(pipe)) {
1259eb81955bSIlya Yanok if (dma)
1260eb81955bSIlya Yanok length = dma->actual_len;
1261eb81955bSIlya Yanok else
1262eb81955bSIlya Yanok length = qh->segsize;
1263eb81955bSIlya Yanok qh->offset += length;
1264eb81955bSIlya Yanok
1265eb81955bSIlya Yanok if (usb_pipeisoc(pipe)) {
1266eb81955bSIlya Yanok #ifndef __UBOOT__
1267eb81955bSIlya Yanok struct usb_iso_packet_descriptor *d;
1268eb81955bSIlya Yanok
1269eb81955bSIlya Yanok d = urb->iso_frame_desc + qh->iso_idx;
1270eb81955bSIlya Yanok d->actual_length = length;
1271eb81955bSIlya Yanok d->status = status;
1272eb81955bSIlya Yanok if (++qh->iso_idx >= urb->number_of_packets) {
1273eb81955bSIlya Yanok done = true;
1274eb81955bSIlya Yanok } else {
1275eb81955bSIlya Yanok d++;
1276eb81955bSIlya Yanok offset = d->offset;
1277eb81955bSIlya Yanok length = d->length;
1278eb81955bSIlya Yanok }
1279eb81955bSIlya Yanok #endif
1280eb81955bSIlya Yanok } else if (dma && urb->transfer_buffer_length == qh->offset) {
1281eb81955bSIlya Yanok done = true;
1282eb81955bSIlya Yanok } else {
1283eb81955bSIlya Yanok /* see if we need to send more data, or ZLP */
1284eb81955bSIlya Yanok if (qh->segsize < qh->maxpacket)
1285eb81955bSIlya Yanok done = true;
1286eb81955bSIlya Yanok else if (qh->offset == urb->transfer_buffer_length
1287eb81955bSIlya Yanok && !(urb->transfer_flags
1288eb81955bSIlya Yanok & URB_ZERO_PACKET))
1289eb81955bSIlya Yanok done = true;
1290eb81955bSIlya Yanok if (!done) {
1291eb81955bSIlya Yanok offset = qh->offset;
1292eb81955bSIlya Yanok length = urb->transfer_buffer_length - offset;
1293eb81955bSIlya Yanok transfer_pending = true;
1294eb81955bSIlya Yanok }
1295eb81955bSIlya Yanok }
1296eb81955bSIlya Yanok }
1297eb81955bSIlya Yanok
1298eb81955bSIlya Yanok /* urb->status != -EINPROGRESS means request has been faulted,
1299eb81955bSIlya Yanok * so we must abort this transfer after cleanup
1300eb81955bSIlya Yanok */
1301eb81955bSIlya Yanok if (urb->status != -EINPROGRESS) {
1302eb81955bSIlya Yanok done = true;
1303eb81955bSIlya Yanok if (status == 0)
1304eb81955bSIlya Yanok status = urb->status;
1305eb81955bSIlya Yanok }
1306eb81955bSIlya Yanok
1307eb81955bSIlya Yanok if (done) {
1308eb81955bSIlya Yanok /* set status */
1309eb81955bSIlya Yanok urb->status = status;
1310eb81955bSIlya Yanok urb->actual_length = qh->offset;
1311eb81955bSIlya Yanok musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1312eb81955bSIlya Yanok return;
1313eb81955bSIlya Yanok } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
1314eb81955bSIlya Yanok if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1315eb81955bSIlya Yanok offset, length)) {
1316eb81955bSIlya Yanok if (is_cppi_enabled() || tusb_dma_omap())
1317eb81955bSIlya Yanok musb_h_tx_dma_start(hw_ep);
1318eb81955bSIlya Yanok return;
1319eb81955bSIlya Yanok }
1320eb81955bSIlya Yanok } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
1321eb81955bSIlya Yanok dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
1322eb81955bSIlya Yanok return;
1323eb81955bSIlya Yanok }
1324eb81955bSIlya Yanok
1325eb81955bSIlya Yanok /*
1326eb81955bSIlya Yanok * PIO: start next packet in this URB.
1327eb81955bSIlya Yanok *
1328eb81955bSIlya Yanok * REVISIT: some docs say that when hw_ep->tx_double_buffered,
1329eb81955bSIlya Yanok * (and presumably, FIFO is not half-full) we should write *two*
1330eb81955bSIlya Yanok * packets before updating TXCSR; other docs disagree...
1331eb81955bSIlya Yanok */
1332eb81955bSIlya Yanok if (length > qh->maxpacket)
1333eb81955bSIlya Yanok length = qh->maxpacket;
1334eb81955bSIlya Yanok /* Unmap the buffer so that CPU can use it */
1335eb81955bSIlya Yanok usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
1336eb81955bSIlya Yanok musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
1337eb81955bSIlya Yanok qh->segsize = length;
1338eb81955bSIlya Yanok
1339eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1340eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR,
1341eb81955bSIlya Yanok MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1342eb81955bSIlya Yanok }
1343eb81955bSIlya Yanok
1344eb81955bSIlya Yanok
1345eb81955bSIlya Yanok #ifdef CONFIG_USB_INVENTRA_DMA
1346eb81955bSIlya Yanok
1347eb81955bSIlya Yanok /* Host side RX (IN) using Mentor DMA works as follows:
1348eb81955bSIlya Yanok submit_urb ->
1349eb81955bSIlya Yanok - if queue was empty, ProgramEndpoint
1350eb81955bSIlya Yanok - first IN token is sent out (by setting ReqPkt)
1351eb81955bSIlya Yanok LinuxIsr -> RxReady()
1352eb81955bSIlya Yanok /\ => first packet is received
1353eb81955bSIlya Yanok | - Set in mode 0 (DmaEnab, ~ReqPkt)
1354eb81955bSIlya Yanok | -> DMA Isr (transfer complete) -> RxReady()
1355eb81955bSIlya Yanok | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1356eb81955bSIlya Yanok | - if urb not complete, send next IN token (ReqPkt)
1357eb81955bSIlya Yanok | | else complete urb.
1358eb81955bSIlya Yanok | |
1359eb81955bSIlya Yanok ---------------------------
1360eb81955bSIlya Yanok *
1361eb81955bSIlya Yanok * Nuances of mode 1:
1362eb81955bSIlya Yanok * For short packets, no ack (+RxPktRdy) is sent automatically
1363eb81955bSIlya Yanok * (even if AutoClear is ON)
1364eb81955bSIlya Yanok * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
1365eb81955bSIlya Yanok * automatically => major problem, as collecting the next packet becomes
1366eb81955bSIlya Yanok * difficult. Hence mode 1 is not used.
1367eb81955bSIlya Yanok *
1368eb81955bSIlya Yanok * REVISIT
1369eb81955bSIlya Yanok * All we care about at this driver level is that
1370eb81955bSIlya Yanok * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1371eb81955bSIlya Yanok * (b) termination conditions are: short RX, or buffer full;
1372eb81955bSIlya Yanok * (c) fault modes include
1373eb81955bSIlya Yanok * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1374eb81955bSIlya Yanok * (and that endpoint's dma queue stops immediately)
1375eb81955bSIlya Yanok * - overflow (full, PLUS more bytes in the terminal packet)
1376eb81955bSIlya Yanok *
1377eb81955bSIlya Yanok * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1378eb81955bSIlya Yanok * thus be a great candidate for using mode 1 ... for all but the
1379eb81955bSIlya Yanok * last packet of one URB's transfer.
1380eb81955bSIlya Yanok */
1381eb81955bSIlya Yanok
1382eb81955bSIlya Yanok #endif
1383eb81955bSIlya Yanok
1384eb81955bSIlya Yanok /* Schedule next QH from musb->in_bulk and move the current qh to
1385eb81955bSIlya Yanok * the end; avoids starvation for other endpoints.
1386eb81955bSIlya Yanok */
musb_bulk_rx_nak_timeout(struct musb * musb,struct musb_hw_ep * ep)1387eb81955bSIlya Yanok static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
1388eb81955bSIlya Yanok {
1389eb81955bSIlya Yanok struct dma_channel *dma;
1390eb81955bSIlya Yanok struct urb *urb;
1391eb81955bSIlya Yanok void __iomem *mbase = musb->mregs;
1392eb81955bSIlya Yanok void __iomem *epio = ep->regs;
1393eb81955bSIlya Yanok struct musb_qh *cur_qh, *next_qh;
1394eb81955bSIlya Yanok u16 rx_csr;
1395eb81955bSIlya Yanok
1396eb81955bSIlya Yanok musb_ep_select(mbase, ep->epnum);
1397eb81955bSIlya Yanok dma = is_dma_capable() ? ep->rx_channel : NULL;
1398eb81955bSIlya Yanok
1399eb81955bSIlya Yanok /* clear nak timeout bit */
1400eb81955bSIlya Yanok rx_csr = musb_readw(epio, MUSB_RXCSR);
1401eb81955bSIlya Yanok rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1402eb81955bSIlya Yanok rx_csr &= ~MUSB_RXCSR_DATAERROR;
1403eb81955bSIlya Yanok musb_writew(epio, MUSB_RXCSR, rx_csr);
1404eb81955bSIlya Yanok
1405eb81955bSIlya Yanok cur_qh = first_qh(&musb->in_bulk);
1406eb81955bSIlya Yanok if (cur_qh) {
1407eb81955bSIlya Yanok urb = next_urb(cur_qh);
1408eb81955bSIlya Yanok if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1409eb81955bSIlya Yanok dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1410eb81955bSIlya Yanok musb->dma_controller->channel_abort(dma);
1411eb81955bSIlya Yanok urb->actual_length += dma->actual_len;
1412eb81955bSIlya Yanok dma->actual_len = 0L;
1413eb81955bSIlya Yanok }
1414eb81955bSIlya Yanok musb_save_toggle(cur_qh, 1, urb);
1415eb81955bSIlya Yanok
1416eb81955bSIlya Yanok /* move cur_qh to end of queue */
1417eb81955bSIlya Yanok list_move_tail(&cur_qh->ring, &musb->in_bulk);
1418eb81955bSIlya Yanok
1419eb81955bSIlya Yanok /* get the next qh from musb->in_bulk */
1420eb81955bSIlya Yanok next_qh = first_qh(&musb->in_bulk);
1421eb81955bSIlya Yanok
1422eb81955bSIlya Yanok /* set rx_reinit and schedule the next qh */
1423eb81955bSIlya Yanok ep->rx_reinit = 1;
1424eb81955bSIlya Yanok musb_start_urb(musb, 1, next_qh);
1425eb81955bSIlya Yanok }
1426eb81955bSIlya Yanok }
1427eb81955bSIlya Yanok
1428eb81955bSIlya Yanok /*
1429eb81955bSIlya Yanok * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1430eb81955bSIlya Yanok * and high-bandwidth IN transfer cases.
1431eb81955bSIlya Yanok */
musb_host_rx(struct musb * musb,u8 epnum)1432eb81955bSIlya Yanok void musb_host_rx(struct musb *musb, u8 epnum)
1433eb81955bSIlya Yanok {
1434eb81955bSIlya Yanok struct urb *urb;
1435eb81955bSIlya Yanok struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1436eb81955bSIlya Yanok void __iomem *epio = hw_ep->regs;
1437eb81955bSIlya Yanok struct musb_qh *qh = hw_ep->in_qh;
1438eb81955bSIlya Yanok size_t xfer_len;
1439eb81955bSIlya Yanok void __iomem *mbase = musb->mregs;
1440eb81955bSIlya Yanok int pipe;
1441eb81955bSIlya Yanok u16 rx_csr, val;
1442eb81955bSIlya Yanok bool iso_err = false;
1443eb81955bSIlya Yanok bool done = false;
1444eb81955bSIlya Yanok u32 status;
1445eb81955bSIlya Yanok struct dma_channel *dma;
1446eb81955bSIlya Yanok
1447eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1448eb81955bSIlya Yanok
1449eb81955bSIlya Yanok urb = next_urb(qh);
1450eb81955bSIlya Yanok dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1451eb81955bSIlya Yanok status = 0;
1452eb81955bSIlya Yanok xfer_len = 0;
1453eb81955bSIlya Yanok
1454eb81955bSIlya Yanok rx_csr = musb_readw(epio, MUSB_RXCSR);
1455eb81955bSIlya Yanok val = rx_csr;
1456eb81955bSIlya Yanok
1457eb81955bSIlya Yanok if (unlikely(!urb)) {
1458eb81955bSIlya Yanok /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1459eb81955bSIlya Yanok * usbtest #11 (unlinks) triggers it regularly, sometimes
1460eb81955bSIlya Yanok * with fifo full. (Only with DMA??)
1461eb81955bSIlya Yanok */
1462eb81955bSIlya Yanok dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
1463eb81955bSIlya Yanok musb_readw(epio, MUSB_RXCOUNT));
1464eb81955bSIlya Yanok musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1465eb81955bSIlya Yanok return;
1466eb81955bSIlya Yanok }
1467eb81955bSIlya Yanok
1468eb81955bSIlya Yanok pipe = urb->pipe;
1469eb81955bSIlya Yanok
1470eb81955bSIlya Yanok dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
1471eb81955bSIlya Yanok epnum, rx_csr, urb->actual_length,
1472eb81955bSIlya Yanok dma ? dma->actual_len : 0);
1473eb81955bSIlya Yanok
1474eb81955bSIlya Yanok /* check for errors, concurrent stall & unlink is not really
1475eb81955bSIlya Yanok * handled yet! */
1476eb81955bSIlya Yanok if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1477eb81955bSIlya Yanok dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
1478eb81955bSIlya Yanok
1479eb81955bSIlya Yanok /* stall; record URB status */
1480eb81955bSIlya Yanok status = -EPIPE;
1481eb81955bSIlya Yanok
1482eb81955bSIlya Yanok } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1483eb81955bSIlya Yanok dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
1484eb81955bSIlya Yanok
1485eb81955bSIlya Yanok status = -EPROTO;
1486eb81955bSIlya Yanok musb_writeb(epio, MUSB_RXINTERVAL, 0);
1487eb81955bSIlya Yanok
1488eb81955bSIlya Yanok } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1489eb81955bSIlya Yanok
1490eb81955bSIlya Yanok if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1491eb81955bSIlya Yanok dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
1492eb81955bSIlya Yanok
1493eb81955bSIlya Yanok /* NOTE: NAKing is *NOT* an error, so we want to
1494eb81955bSIlya Yanok * continue. Except ... if there's a request for
1495eb81955bSIlya Yanok * another QH, use that instead of starving it.
1496eb81955bSIlya Yanok *
1497eb81955bSIlya Yanok * Devices like Ethernet and serial adapters keep
1498eb81955bSIlya Yanok * reads posted at all times, which will starve
1499eb81955bSIlya Yanok * other devices without this logic.
1500eb81955bSIlya Yanok */
1501eb81955bSIlya Yanok if (usb_pipebulk(urb->pipe)
1502eb81955bSIlya Yanok && qh->mux == 1
1503eb81955bSIlya Yanok && !list_is_singular(&musb->in_bulk)) {
1504eb81955bSIlya Yanok musb_bulk_rx_nak_timeout(musb, hw_ep);
1505eb81955bSIlya Yanok return;
1506eb81955bSIlya Yanok }
1507eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1508eb81955bSIlya Yanok rx_csr |= MUSB_RXCSR_H_WZC_BITS;
1509eb81955bSIlya Yanok rx_csr &= ~MUSB_RXCSR_DATAERROR;
1510eb81955bSIlya Yanok musb_writew(epio, MUSB_RXCSR, rx_csr);
1511eb81955bSIlya Yanok
1512eb81955bSIlya Yanok goto finish;
1513eb81955bSIlya Yanok } else {
1514eb81955bSIlya Yanok dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
1515eb81955bSIlya Yanok /* packet error reported later */
1516eb81955bSIlya Yanok iso_err = true;
1517eb81955bSIlya Yanok }
1518eb81955bSIlya Yanok } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
1519eb81955bSIlya Yanok dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
1520eb81955bSIlya Yanok epnum);
1521eb81955bSIlya Yanok status = -EPROTO;
1522eb81955bSIlya Yanok }
1523eb81955bSIlya Yanok
1524eb81955bSIlya Yanok /* faults abort the transfer */
1525eb81955bSIlya Yanok if (status) {
1526eb81955bSIlya Yanok /* clean up dma and collect transfer count */
1527eb81955bSIlya Yanok if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1528eb81955bSIlya Yanok dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1529eb81955bSIlya Yanok (void) musb->dma_controller->channel_abort(dma);
1530eb81955bSIlya Yanok xfer_len = dma->actual_len;
1531eb81955bSIlya Yanok }
1532eb81955bSIlya Yanok musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1533eb81955bSIlya Yanok musb_writeb(epio, MUSB_RXINTERVAL, 0);
1534eb81955bSIlya Yanok done = true;
1535eb81955bSIlya Yanok goto finish;
1536eb81955bSIlya Yanok }
1537eb81955bSIlya Yanok
1538eb81955bSIlya Yanok if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1539eb81955bSIlya Yanok /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1540eb81955bSIlya Yanok ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1541eb81955bSIlya Yanok goto finish;
1542eb81955bSIlya Yanok }
1543eb81955bSIlya Yanok
1544eb81955bSIlya Yanok /* thorough shutdown for now ... given more precise fault handling
1545eb81955bSIlya Yanok * and better queueing support, we might keep a DMA pipeline going
1546eb81955bSIlya Yanok * while processing this irq for earlier completions.
1547eb81955bSIlya Yanok */
1548eb81955bSIlya Yanok
1549eb81955bSIlya Yanok /* FIXME this is _way_ too much in-line logic for Mentor DMA */
1550eb81955bSIlya Yanok
1551eb81955bSIlya Yanok #ifndef CONFIG_USB_INVENTRA_DMA
1552eb81955bSIlya Yanok if (rx_csr & MUSB_RXCSR_H_REQPKT) {
1553eb81955bSIlya Yanok /* REVISIT this happened for a while on some short reads...
1554eb81955bSIlya Yanok * the cleanup still needs investigation... looks bad...
1555eb81955bSIlya Yanok * and also duplicates dma cleanup code above ... plus,
1556eb81955bSIlya Yanok * shouldn't this be the "half full" double buffer case?
1557eb81955bSIlya Yanok */
1558eb81955bSIlya Yanok if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1559eb81955bSIlya Yanok dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1560eb81955bSIlya Yanok (void) musb->dma_controller->channel_abort(dma);
1561eb81955bSIlya Yanok xfer_len = dma->actual_len;
1562eb81955bSIlya Yanok done = true;
1563eb81955bSIlya Yanok }
1564eb81955bSIlya Yanok
1565eb81955bSIlya Yanok dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
1566eb81955bSIlya Yanok xfer_len, dma ? ", dma" : "");
1567eb81955bSIlya Yanok rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1568eb81955bSIlya Yanok
1569eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1570eb81955bSIlya Yanok musb_writew(epio, MUSB_RXCSR,
1571eb81955bSIlya Yanok MUSB_RXCSR_H_WZC_BITS | rx_csr);
1572eb81955bSIlya Yanok }
1573eb81955bSIlya Yanok #endif
1574eb81955bSIlya Yanok if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1575eb81955bSIlya Yanok xfer_len = dma->actual_len;
1576eb81955bSIlya Yanok
1577eb81955bSIlya Yanok val &= ~(MUSB_RXCSR_DMAENAB
1578eb81955bSIlya Yanok | MUSB_RXCSR_H_AUTOREQ
1579eb81955bSIlya Yanok | MUSB_RXCSR_AUTOCLEAR
1580eb81955bSIlya Yanok | MUSB_RXCSR_RXPKTRDY);
1581eb81955bSIlya Yanok musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1582eb81955bSIlya Yanok
1583eb81955bSIlya Yanok #ifdef CONFIG_USB_INVENTRA_DMA
1584eb81955bSIlya Yanok if (usb_pipeisoc(pipe)) {
1585eb81955bSIlya Yanok struct usb_iso_packet_descriptor *d;
1586eb81955bSIlya Yanok
1587eb81955bSIlya Yanok d = urb->iso_frame_desc + qh->iso_idx;
1588eb81955bSIlya Yanok d->actual_length = xfer_len;
1589eb81955bSIlya Yanok
1590eb81955bSIlya Yanok /* even if there was an error, we did the dma
1591eb81955bSIlya Yanok * for iso_frame_desc->length
1592eb81955bSIlya Yanok */
1593eb81955bSIlya Yanok if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1594eb81955bSIlya Yanok d->status = 0;
1595eb81955bSIlya Yanok
1596eb81955bSIlya Yanok if (++qh->iso_idx >= urb->number_of_packets)
1597eb81955bSIlya Yanok done = true;
1598eb81955bSIlya Yanok else
1599eb81955bSIlya Yanok done = false;
1600eb81955bSIlya Yanok
1601eb81955bSIlya Yanok } else {
1602eb81955bSIlya Yanok /* done if urb buffer is full or short packet is recd */
1603eb81955bSIlya Yanok done = (urb->actual_length + xfer_len >=
1604eb81955bSIlya Yanok urb->transfer_buffer_length
1605eb81955bSIlya Yanok || dma->actual_len < qh->maxpacket);
1606eb81955bSIlya Yanok }
1607eb81955bSIlya Yanok
1608eb81955bSIlya Yanok /* send IN token for next packet, without AUTOREQ */
1609eb81955bSIlya Yanok if (!done) {
1610eb81955bSIlya Yanok val |= MUSB_RXCSR_H_REQPKT;
1611eb81955bSIlya Yanok musb_writew(epio, MUSB_RXCSR,
1612eb81955bSIlya Yanok MUSB_RXCSR_H_WZC_BITS | val);
1613eb81955bSIlya Yanok }
1614eb81955bSIlya Yanok
1615eb81955bSIlya Yanok dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
1616eb81955bSIlya Yanok done ? "off" : "reset",
1617eb81955bSIlya Yanok musb_readw(epio, MUSB_RXCSR),
1618eb81955bSIlya Yanok musb_readw(epio, MUSB_RXCOUNT));
1619eb81955bSIlya Yanok #else
1620eb81955bSIlya Yanok done = true;
1621eb81955bSIlya Yanok #endif
1622eb81955bSIlya Yanok } else if (urb->status == -EINPROGRESS) {
1623eb81955bSIlya Yanok /* if no errors, be sure a packet is ready for unloading */
1624eb81955bSIlya Yanok if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1625eb81955bSIlya Yanok status = -EPROTO;
1626eb81955bSIlya Yanok ERR("Rx interrupt with no errors or packet!\n");
1627eb81955bSIlya Yanok
1628eb81955bSIlya Yanok /* FIXME this is another "SHOULD NEVER HAPPEN" */
1629eb81955bSIlya Yanok
1630eb81955bSIlya Yanok /* SCRUB (RX) */
1631eb81955bSIlya Yanok /* do the proper sequence to abort the transfer */
1632eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1633eb81955bSIlya Yanok val &= ~MUSB_RXCSR_H_REQPKT;
1634eb81955bSIlya Yanok musb_writew(epio, MUSB_RXCSR, val);
1635eb81955bSIlya Yanok goto finish;
1636eb81955bSIlya Yanok }
1637eb81955bSIlya Yanok
1638eb81955bSIlya Yanok /* we are expecting IN packets */
1639eb81955bSIlya Yanok #ifdef CONFIG_USB_INVENTRA_DMA
1640eb81955bSIlya Yanok if (dma) {
1641eb81955bSIlya Yanok struct dma_controller *c;
1642eb81955bSIlya Yanok u16 rx_count;
1643eb81955bSIlya Yanok int ret, length;
1644eb81955bSIlya Yanok dma_addr_t buf;
1645eb81955bSIlya Yanok
1646eb81955bSIlya Yanok rx_count = musb_readw(epio, MUSB_RXCOUNT);
1647eb81955bSIlya Yanok
1648eb81955bSIlya Yanok dev_dbg(musb->controller, "RX%d count %d, buffer 0x%x len %d/%d\n",
1649eb81955bSIlya Yanok epnum, rx_count,
1650eb81955bSIlya Yanok urb->transfer_dma
1651eb81955bSIlya Yanok + urb->actual_length,
1652eb81955bSIlya Yanok qh->offset,
1653eb81955bSIlya Yanok urb->transfer_buffer_length);
1654eb81955bSIlya Yanok
1655eb81955bSIlya Yanok c = musb->dma_controller;
1656eb81955bSIlya Yanok
1657eb81955bSIlya Yanok if (usb_pipeisoc(pipe)) {
1658eb81955bSIlya Yanok int d_status = 0;
1659eb81955bSIlya Yanok struct usb_iso_packet_descriptor *d;
1660eb81955bSIlya Yanok
1661eb81955bSIlya Yanok d = urb->iso_frame_desc + qh->iso_idx;
1662eb81955bSIlya Yanok
1663eb81955bSIlya Yanok if (iso_err) {
1664eb81955bSIlya Yanok d_status = -EILSEQ;
1665eb81955bSIlya Yanok urb->error_count++;
1666eb81955bSIlya Yanok }
1667eb81955bSIlya Yanok if (rx_count > d->length) {
1668eb81955bSIlya Yanok if (d_status == 0) {
1669eb81955bSIlya Yanok d_status = -EOVERFLOW;
1670eb81955bSIlya Yanok urb->error_count++;
1671eb81955bSIlya Yanok }
1672eb81955bSIlya Yanok dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\
1673eb81955bSIlya Yanok rx_count, d->length);
1674eb81955bSIlya Yanok
1675eb81955bSIlya Yanok length = d->length;
1676eb81955bSIlya Yanok } else
1677eb81955bSIlya Yanok length = rx_count;
1678eb81955bSIlya Yanok d->status = d_status;
1679eb81955bSIlya Yanok buf = urb->transfer_dma + d->offset;
1680eb81955bSIlya Yanok } else {
1681eb81955bSIlya Yanok length = rx_count;
1682eb81955bSIlya Yanok buf = urb->transfer_dma +
1683eb81955bSIlya Yanok urb->actual_length;
1684eb81955bSIlya Yanok }
1685eb81955bSIlya Yanok
1686eb81955bSIlya Yanok dma->desired_mode = 0;
1687eb81955bSIlya Yanok #ifdef USE_MODE1
1688eb81955bSIlya Yanok /* because of the issue below, mode 1 will
1689eb81955bSIlya Yanok * only rarely behave with correct semantics.
1690eb81955bSIlya Yanok */
1691eb81955bSIlya Yanok if ((urb->transfer_flags &
1692eb81955bSIlya Yanok URB_SHORT_NOT_OK)
1693eb81955bSIlya Yanok && (urb->transfer_buffer_length -
1694eb81955bSIlya Yanok urb->actual_length)
1695eb81955bSIlya Yanok > qh->maxpacket)
1696eb81955bSIlya Yanok dma->desired_mode = 1;
1697eb81955bSIlya Yanok if (rx_count < hw_ep->max_packet_sz_rx) {
1698eb81955bSIlya Yanok length = rx_count;
1699eb81955bSIlya Yanok dma->desired_mode = 0;
1700eb81955bSIlya Yanok } else {
1701eb81955bSIlya Yanok length = urb->transfer_buffer_length;
1702eb81955bSIlya Yanok }
1703eb81955bSIlya Yanok #endif
1704eb81955bSIlya Yanok
1705eb81955bSIlya Yanok /* Disadvantage of using mode 1:
1706eb81955bSIlya Yanok * It's basically usable only for mass storage class; essentially all
1707eb81955bSIlya Yanok * other protocols also terminate transfers on short packets.
1708eb81955bSIlya Yanok *
1709eb81955bSIlya Yanok * Details:
1710eb81955bSIlya Yanok * An extra IN token is sent at the end of the transfer (due to AUTOREQ)
1711eb81955bSIlya Yanok * If you try to use mode 1 for (transfer_buffer_length - 512), and try
1712eb81955bSIlya Yanok * to use the extra IN token to grab the last packet using mode 0, then
1713eb81955bSIlya Yanok * the problem is that you cannot be sure when the device will send the
1714eb81955bSIlya Yanok * last packet and RxPktRdy set. Sometimes the packet is recd too soon
1715eb81955bSIlya Yanok * such that it gets lost when RxCSR is re-set at the end of the mode 1
1716eb81955bSIlya Yanok * transfer, while sometimes it is recd just a little late so that if you
1717eb81955bSIlya Yanok * try to configure for mode 0 soon after the mode 1 transfer is
1718eb81955bSIlya Yanok * completed, you will find rxcount 0. Okay, so you might think why not
1719eb81955bSIlya Yanok * wait for an interrupt when the pkt is recd. Well, you won't get any!
1720eb81955bSIlya Yanok */
1721eb81955bSIlya Yanok
1722eb81955bSIlya Yanok val = musb_readw(epio, MUSB_RXCSR);
1723eb81955bSIlya Yanok val &= ~MUSB_RXCSR_H_REQPKT;
1724eb81955bSIlya Yanok
1725eb81955bSIlya Yanok if (dma->desired_mode == 0)
1726eb81955bSIlya Yanok val &= ~MUSB_RXCSR_H_AUTOREQ;
1727eb81955bSIlya Yanok else
1728eb81955bSIlya Yanok val |= MUSB_RXCSR_H_AUTOREQ;
1729eb81955bSIlya Yanok val |= MUSB_RXCSR_DMAENAB;
1730eb81955bSIlya Yanok
1731eb81955bSIlya Yanok /* autoclear shouldn't be set in high bandwidth */
1732eb81955bSIlya Yanok if (qh->hb_mult == 1)
1733eb81955bSIlya Yanok val |= MUSB_RXCSR_AUTOCLEAR;
1734eb81955bSIlya Yanok
1735eb81955bSIlya Yanok musb_writew(epio, MUSB_RXCSR,
1736eb81955bSIlya Yanok MUSB_RXCSR_H_WZC_BITS | val);
1737eb81955bSIlya Yanok
1738eb81955bSIlya Yanok /* REVISIT if when actual_length != 0,
1739eb81955bSIlya Yanok * transfer_buffer_length needs to be
1740eb81955bSIlya Yanok * adjusted first...
1741eb81955bSIlya Yanok */
1742eb81955bSIlya Yanok ret = c->channel_program(
1743eb81955bSIlya Yanok dma, qh->maxpacket,
1744eb81955bSIlya Yanok dma->desired_mode, buf, length);
1745eb81955bSIlya Yanok
1746eb81955bSIlya Yanok if (!ret) {
1747eb81955bSIlya Yanok c->channel_release(dma);
1748eb81955bSIlya Yanok hw_ep->rx_channel = NULL;
1749eb81955bSIlya Yanok dma = NULL;
1750eb81955bSIlya Yanok val = musb_readw(epio, MUSB_RXCSR);
1751eb81955bSIlya Yanok val &= ~(MUSB_RXCSR_DMAENAB
1752eb81955bSIlya Yanok | MUSB_RXCSR_H_AUTOREQ
1753eb81955bSIlya Yanok | MUSB_RXCSR_AUTOCLEAR);
1754eb81955bSIlya Yanok musb_writew(epio, MUSB_RXCSR, val);
1755eb81955bSIlya Yanok }
1756eb81955bSIlya Yanok }
1757eb81955bSIlya Yanok #endif /* Mentor DMA */
1758eb81955bSIlya Yanok
1759eb81955bSIlya Yanok if (!dma) {
1760eb81955bSIlya Yanok /* Unmap the buffer so that CPU can use it */
1761eb81955bSIlya Yanok usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
1762eb81955bSIlya Yanok done = musb_host_packet_rx(musb, urb,
1763eb81955bSIlya Yanok epnum, iso_err);
1764eb81955bSIlya Yanok dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
1765eb81955bSIlya Yanok }
1766eb81955bSIlya Yanok }
1767eb81955bSIlya Yanok
1768eb81955bSIlya Yanok finish:
1769eb81955bSIlya Yanok urb->actual_length += xfer_len;
1770eb81955bSIlya Yanok qh->offset += xfer_len;
1771eb81955bSIlya Yanok if (done) {
1772eb81955bSIlya Yanok if (urb->status == -EINPROGRESS)
1773eb81955bSIlya Yanok urb->status = status;
1774eb81955bSIlya Yanok musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1775eb81955bSIlya Yanok }
1776eb81955bSIlya Yanok }
1777eb81955bSIlya Yanok
1778eb81955bSIlya Yanok /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1779eb81955bSIlya Yanok * the software schedule associates multiple such nodes with a given
1780eb81955bSIlya Yanok * host side hardware endpoint + direction; scheduling may activate
1781eb81955bSIlya Yanok * that hardware endpoint.
1782eb81955bSIlya Yanok */
musb_schedule(struct musb * musb,struct musb_qh * qh,int is_in)1783eb81955bSIlya Yanok static int musb_schedule(
1784eb81955bSIlya Yanok struct musb *musb,
1785eb81955bSIlya Yanok struct musb_qh *qh,
1786eb81955bSIlya Yanok int is_in)
1787eb81955bSIlya Yanok {
1788eb81955bSIlya Yanok int idle;
1789eb81955bSIlya Yanok int best_diff;
1790eb81955bSIlya Yanok int best_end, epnum;
1791eb81955bSIlya Yanok struct musb_hw_ep *hw_ep = NULL;
1792eb81955bSIlya Yanok struct list_head *head = NULL;
1793eb81955bSIlya Yanok u8 toggle;
1794eb81955bSIlya Yanok u8 txtype;
1795eb81955bSIlya Yanok struct urb *urb = next_urb(qh);
1796eb81955bSIlya Yanok
1797eb81955bSIlya Yanok /* use fixed hardware for control and bulk */
1798eb81955bSIlya Yanok if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1799eb81955bSIlya Yanok head = &musb->control;
1800eb81955bSIlya Yanok hw_ep = musb->control_ep;
1801eb81955bSIlya Yanok goto success;
1802eb81955bSIlya Yanok }
1803eb81955bSIlya Yanok
1804eb81955bSIlya Yanok /* else, periodic transfers get muxed to other endpoints */
1805eb81955bSIlya Yanok
1806eb81955bSIlya Yanok /*
1807eb81955bSIlya Yanok * We know this qh hasn't been scheduled, so all we need to do
1808eb81955bSIlya Yanok * is choose which hardware endpoint to put it on ...
1809eb81955bSIlya Yanok *
1810eb81955bSIlya Yanok * REVISIT what we really want here is a regular schedule tree
1811eb81955bSIlya Yanok * like e.g. OHCI uses.
1812eb81955bSIlya Yanok */
1813eb81955bSIlya Yanok best_diff = 4096;
1814eb81955bSIlya Yanok best_end = -1;
1815eb81955bSIlya Yanok
1816eb81955bSIlya Yanok for (epnum = 1, hw_ep = musb->endpoints + 1;
1817eb81955bSIlya Yanok epnum < musb->nr_endpoints;
1818eb81955bSIlya Yanok epnum++, hw_ep++) {
1819eb81955bSIlya Yanok int diff;
1820eb81955bSIlya Yanok
1821eb81955bSIlya Yanok if (musb_ep_get_qh(hw_ep, is_in) != NULL)
1822eb81955bSIlya Yanok continue;
1823eb81955bSIlya Yanok
1824eb81955bSIlya Yanok if (hw_ep == musb->bulk_ep)
1825eb81955bSIlya Yanok continue;
1826eb81955bSIlya Yanok
1827eb81955bSIlya Yanok if (is_in)
1828eb81955bSIlya Yanok diff = hw_ep->max_packet_sz_rx;
1829eb81955bSIlya Yanok else
1830eb81955bSIlya Yanok diff = hw_ep->max_packet_sz_tx;
1831eb81955bSIlya Yanok diff -= (qh->maxpacket * qh->hb_mult);
1832eb81955bSIlya Yanok
1833eb81955bSIlya Yanok if (diff >= 0 && best_diff > diff) {
1834eb81955bSIlya Yanok
1835eb81955bSIlya Yanok /*
1836eb81955bSIlya Yanok * Mentor controller has a bug in that if we schedule
1837eb81955bSIlya Yanok * a BULK Tx transfer on an endpoint that had earlier
1838eb81955bSIlya Yanok * handled ISOC then the BULK transfer has to start on
1839eb81955bSIlya Yanok * a zero toggle. If the BULK transfer starts on a 1
1840eb81955bSIlya Yanok * toggle then this transfer will fail as the mentor
1841eb81955bSIlya Yanok * controller starts the Bulk transfer on a 0 toggle
1842eb81955bSIlya Yanok * irrespective of the programming of the toggle bits
1843eb81955bSIlya Yanok * in the TXCSR register. Check for this condition
1844eb81955bSIlya Yanok * while allocating the EP for a Tx Bulk transfer. If
1845eb81955bSIlya Yanok * so skip this EP.
1846eb81955bSIlya Yanok */
1847eb81955bSIlya Yanok hw_ep = musb->endpoints + epnum;
1848eb81955bSIlya Yanok toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
1849eb81955bSIlya Yanok txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
1850eb81955bSIlya Yanok >> 4) & 0x3;
1851eb81955bSIlya Yanok if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
1852eb81955bSIlya Yanok toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
1853eb81955bSIlya Yanok continue;
1854eb81955bSIlya Yanok
1855eb81955bSIlya Yanok best_diff = diff;
1856eb81955bSIlya Yanok best_end = epnum;
1857eb81955bSIlya Yanok }
1858eb81955bSIlya Yanok }
1859eb81955bSIlya Yanok /* use bulk reserved ep1 if no other ep is free */
1860eb81955bSIlya Yanok if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
1861eb81955bSIlya Yanok hw_ep = musb->bulk_ep;
1862eb81955bSIlya Yanok if (is_in)
1863eb81955bSIlya Yanok head = &musb->in_bulk;
1864eb81955bSIlya Yanok else
1865eb81955bSIlya Yanok head = &musb->out_bulk;
1866eb81955bSIlya Yanok
1867eb81955bSIlya Yanok /* Enable bulk RX NAK timeout scheme when bulk requests are
1868eb81955bSIlya Yanok * multiplexed. This scheme doen't work in high speed to full
1869eb81955bSIlya Yanok * speed scenario as NAK interrupts are not coming from a
1870eb81955bSIlya Yanok * full speed device connected to a high speed device.
1871eb81955bSIlya Yanok * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
1872eb81955bSIlya Yanok * 4 (8 frame or 8ms) for FS device.
1873eb81955bSIlya Yanok */
1874eb81955bSIlya Yanok if (is_in && qh->dev)
1875eb81955bSIlya Yanok qh->intv_reg =
1876eb81955bSIlya Yanok (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
1877eb81955bSIlya Yanok goto success;
1878eb81955bSIlya Yanok } else if (best_end < 0) {
1879eb81955bSIlya Yanok return -ENOSPC;
1880eb81955bSIlya Yanok }
1881eb81955bSIlya Yanok
1882eb81955bSIlya Yanok idle = 1;
1883eb81955bSIlya Yanok qh->mux = 0;
1884eb81955bSIlya Yanok hw_ep = musb->endpoints + best_end;
1885eb81955bSIlya Yanok dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
1886eb81955bSIlya Yanok success:
1887eb81955bSIlya Yanok if (head) {
1888eb81955bSIlya Yanok idle = list_empty(head);
1889eb81955bSIlya Yanok list_add_tail(&qh->ring, head);
1890eb81955bSIlya Yanok qh->mux = 1;
1891eb81955bSIlya Yanok }
1892eb81955bSIlya Yanok qh->hw_ep = hw_ep;
1893eb81955bSIlya Yanok qh->hep->hcpriv = qh;
1894eb81955bSIlya Yanok if (idle)
1895eb81955bSIlya Yanok musb_start_urb(musb, is_in, qh);
1896eb81955bSIlya Yanok return 0;
1897eb81955bSIlya Yanok }
1898eb81955bSIlya Yanok
1899eb81955bSIlya Yanok #ifdef __UBOOT__
1900eb81955bSIlya Yanok /* check if transaction translator is needed for device */
tt_needed(struct musb * musb,struct usb_device * dev)1901eb81955bSIlya Yanok static int tt_needed(struct musb *musb, struct usb_device *dev)
1902eb81955bSIlya Yanok {
1903eb81955bSIlya Yanok if ((musb_readb(musb->mregs, MUSB_POWER) & MUSB_POWER_HSMODE) &&
1904eb81955bSIlya Yanok (dev->speed < USB_SPEED_HIGH))
1905eb81955bSIlya Yanok return 1;
1906eb81955bSIlya Yanok return 0;
1907eb81955bSIlya Yanok }
1908eb81955bSIlya Yanok #endif
1909eb81955bSIlya Yanok
/*
 * Submit an URB to the MUSB host controller.
 *
 * Links the URB onto its endpoint's list; for the first URB queued on an
 * endpoint it also allocates a software queue head (qh), precomputes the
 * TYPE/INTERVAL/address register values for the endpoint, and schedules
 * the qh onto a hardware endpoint via musb_schedule().
 *
 * Returns 0 on success, or a negative errno: -ENODEV if the host role is
 * inactive, -ENOMEM on allocation failure, -EMSGSIZE for unsupported
 * high-bandwidth transfers, or musb_schedule()'s error (e.g. -ENOSPC).
 */
#ifndef __UBOOT__
static int musb_urb_enqueue(
#else
int musb_urb_enqueue(
#endif
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	/* a non-NULL hcpriv means this endpoint already has a live qh */
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		/* undo the earlier link before reporting the failure */
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		/* high-bandwidth ISO needs explicit core support per direction */
		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		/* strip the multiplier bits, keep the base packet size */
		qh->maxpacket &= 0x7ff;
	}

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	/* speed field of the TYPE register: LS=0xc0, FS=0x80, else=0x40 */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the  linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
#ifndef __UBOOT__
		struct usb_device	*parent = urb->dev->parent;
#else
		struct usb_device	*parent = usb_dev_get_parent(urb->dev);
#endif

#ifndef __UBOOT__
		if (parent != hcd->self.root_hub) {
#else
		if (parent) {
#endif
			qh->h_addr_reg = (u8) parent->devnum;

#ifndef __UBOOT__
			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
#else
			/* U-Boot: resolve the USB 2.0 hub/port pair doing TT */
			if (tt_needed(musb, urb->dev)) {
				uint8_t portnr = 0;
				uint8_t hubaddr = 0;
				usb_find_usb2_hub_address_port(urb->dev,
							       &hubaddr,
							       &portnr);
				qh->h_addr_reg = hubaddr;
				qh->h_port_reg = portnr;
			}
#endif
		}
	}

	/* invariant:  hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	/* common failure path: unlink the urb and free the unused qh */
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
2116eb81955bSIlya Yanok
/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 *
 * Aborts any in-flight DMA first, then flushes the endpoint FIFO and
 * clears the relevant CSR bits, and finally (if the DMA abort reported
 * no error) advances the schedule, which gives the URB back.
 * Returns the DMA abort status, or 0 when no abort was needed/failed.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	/* index the shared register file onto this endpoint */
	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			/* account for whatever the channel moved so far */
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else  {
		/* endpoint 0 has its own flush helper */
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
2179eb81955bSIlya Yanok
/*
 * Cancel a previously-submitted URB.
 *
 * An URB that is not actively programmed into the hardware (not at the
 * head of its endpoint's queue, or its qh not currently owned by the
 * hardware endpoint) is given back immediately; otherwise the current
 * transfer is aborted synchronously via musb_cleanup_urb().
 *
 * Returns 0 on success or the error from usb_hcd_check_unlink_urb() /
 * musb_cleanup_urb().
 */
#ifndef __UBOOT__
static int musb_urb_dequeue(
#else
int musb_urb_dequeue(
#endif
	struct usb_hcd *hcd,
	struct urb *urb,
	int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		/* temporarily mark not-ready so giveback won't restart it */
		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
2244eb81955bSIlya Yanok
#ifndef __UBOOT__
/* disable an endpoint */
/*
 * Shut down all URBs queued on @hep: if its qh currently owns the
 * hardware endpoint, abort the in-flight transfer and advance the
 * schedule for the rest; otherwise just give every queued URB back
 * with -ESHUTDOWN and recycle the qh.
 */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		/* no URBs left: safe to free the qh here ourselves */
		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
2299eb81955bSIlya Yanok
2300eb81955bSIlya Yanok static int musb_h_get_frame_number(struct usb_hcd *hcd)
2301eb81955bSIlya Yanok {
2302eb81955bSIlya Yanok struct musb *musb = hcd_to_musb(hcd);
2303eb81955bSIlya Yanok
2304eb81955bSIlya Yanok return musb_readw(musb->mregs, MUSB_FRAME);
2305eb81955bSIlya Yanok }
2306eb81955bSIlya Yanok
2307eb81955bSIlya Yanok static int musb_h_start(struct usb_hcd *hcd)
2308eb81955bSIlya Yanok {
2309eb81955bSIlya Yanok struct musb *musb = hcd_to_musb(hcd);
2310eb81955bSIlya Yanok
2311eb81955bSIlya Yanok /* NOTE: musb_start() is called when the hub driver turns
2312eb81955bSIlya Yanok * on port power, or when (OTG) peripheral starts.
2313eb81955bSIlya Yanok */
2314eb81955bSIlya Yanok hcd->state = HC_STATE_RUNNING;
2315eb81955bSIlya Yanok musb->port1_status = 0;
2316eb81955bSIlya Yanok return 0;
2317eb81955bSIlya Yanok }
2318eb81955bSIlya Yanok
2319eb81955bSIlya Yanok static void musb_h_stop(struct usb_hcd *hcd)
2320eb81955bSIlya Yanok {
2321eb81955bSIlya Yanok musb_stop(hcd_to_musb(hcd));
2322eb81955bSIlya Yanok hcd->state = HC_STATE_HALT;
2323eb81955bSIlya Yanok }
2324eb81955bSIlya Yanok
2325eb81955bSIlya Yanok static int musb_bus_suspend(struct usb_hcd *hcd)
2326eb81955bSIlya Yanok {
2327eb81955bSIlya Yanok struct musb *musb = hcd_to_musb(hcd);
2328eb81955bSIlya Yanok u8 devctl;
2329eb81955bSIlya Yanok
2330eb81955bSIlya Yanok if (!is_host_active(musb))
2331eb81955bSIlya Yanok return 0;
2332eb81955bSIlya Yanok
2333eb81955bSIlya Yanok switch (musb->xceiv->state) {
2334eb81955bSIlya Yanok case OTG_STATE_A_SUSPEND:
2335eb81955bSIlya Yanok return 0;
2336eb81955bSIlya Yanok case OTG_STATE_A_WAIT_VRISE:
2337eb81955bSIlya Yanok /* ID could be grounded even if there's no device
2338eb81955bSIlya Yanok * on the other end of the cable. NOTE that the
2339eb81955bSIlya Yanok * A_WAIT_VRISE timers are messy with MUSB...
2340eb81955bSIlya Yanok */
2341eb81955bSIlya Yanok devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2342eb81955bSIlya Yanok if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2343eb81955bSIlya Yanok musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2344eb81955bSIlya Yanok break;
2345eb81955bSIlya Yanok default:
2346eb81955bSIlya Yanok break;
2347eb81955bSIlya Yanok }
2348eb81955bSIlya Yanok
2349eb81955bSIlya Yanok if (musb->is_active) {
2350eb81955bSIlya Yanok WARNING("trying to suspend as %s while active\n",
2351eb81955bSIlya Yanok otg_state_string(musb->xceiv->state));
2352eb81955bSIlya Yanok return -EBUSY;
2353eb81955bSIlya Yanok } else
2354eb81955bSIlya Yanok return 0;
2355eb81955bSIlya Yanok }
2356eb81955bSIlya Yanok
/* Bus-resume hook: nothing to do here — resuming the child port does the work. */
static int musb_bus_resume(struct usb_hcd *hcd)
{
	return 0;
}
2362eb81955bSIlya Yanok
/*
 * usbcore hc_driver operations table for the MUSB HDRC host.
 * Only built for the non-U-Boot (Linux) configuration.
 */
const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};
#endif
2390