1eb81955bSIlya Yanok /*
2eb81955bSIlya Yanok * MUSB OTG driver peripheral support
3eb81955bSIlya Yanok *
4eb81955bSIlya Yanok * Copyright 2005 Mentor Graphics Corporation
5eb81955bSIlya Yanok * Copyright (C) 2005-2006 by Texas Instruments
6eb81955bSIlya Yanok * Copyright (C) 2006-2007 Nokia Corporation
7eb81955bSIlya Yanok * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
8eb81955bSIlya Yanok *
9*5b8031ccSTom Rini * SPDX-License-Identifier: GPL-2.0
10eb81955bSIlya Yanok */
11eb81955bSIlya Yanok
12eb81955bSIlya Yanok #ifndef __UBOOT__
13eb81955bSIlya Yanok #include <linux/kernel.h>
14eb81955bSIlya Yanok #include <linux/list.h>
15eb81955bSIlya Yanok #include <linux/timer.h>
16eb81955bSIlya Yanok #include <linux/module.h>
17eb81955bSIlya Yanok #include <linux/smp.h>
18eb81955bSIlya Yanok #include <linux/spinlock.h>
19eb81955bSIlya Yanok #include <linux/delay.h>
20eb81955bSIlya Yanok #include <linux/dma-mapping.h>
21eb81955bSIlya Yanok #include <linux/slab.h>
22eb81955bSIlya Yanok #else
23eb81955bSIlya Yanok #include <common.h>
24eb81955bSIlya Yanok #include <linux/usb/ch9.h>
25eb81955bSIlya Yanok #include "linux-compat.h"
26eb81955bSIlya Yanok #endif
27eb81955bSIlya Yanok
28eb81955bSIlya Yanok #include "musb_core.h"
29eb81955bSIlya Yanok
30eb81955bSIlya Yanok
31eb81955bSIlya Yanok /* MUSB PERIPHERAL status 3-mar-2006:
32eb81955bSIlya Yanok *
33eb81955bSIlya Yanok * - EP0 seems solid. It passes both USBCV and usbtest control cases.
34eb81955bSIlya Yanok * Minor glitches:
35eb81955bSIlya Yanok *
36eb81955bSIlya Yanok * + remote wakeup to Linux hosts work, but saw USBCV failures;
37eb81955bSIlya Yanok * in one test run (operator error?)
38eb81955bSIlya Yanok * + endpoint halt tests -- in both usbtest and usbcv -- seem
39eb81955bSIlya Yanok * to break when dma is enabled ... is something wrongly
40eb81955bSIlya Yanok * clearing SENDSTALL?
41eb81955bSIlya Yanok *
42eb81955bSIlya Yanok * - Mass storage behaved ok when last tested. Network traffic patterns
43eb81955bSIlya Yanok * (with lots of short transfers etc) need retesting; they turn up the
44eb81955bSIlya Yanok * worst cases of the DMA, since short packets are typical but are not
45eb81955bSIlya Yanok * required.
46eb81955bSIlya Yanok *
47eb81955bSIlya Yanok * - TX/IN
48eb81955bSIlya Yanok * + both pio and dma behave in with network and g_zero tests
49eb81955bSIlya Yanok * + no cppi throughput issues other than no-hw-queueing
50eb81955bSIlya Yanok * + failed with FLAT_REG (DaVinci)
51eb81955bSIlya Yanok * + seems to behave with double buffering, PIO -and- CPPI
52eb81955bSIlya Yanok * + with gadgetfs + AIO, requests got lost?
53eb81955bSIlya Yanok *
54eb81955bSIlya Yanok * - RX/OUT
55eb81955bSIlya Yanok * + both pio and dma behave in with network and g_zero tests
56eb81955bSIlya Yanok * + dma is slow in typical case (short_not_ok is clear)
57eb81955bSIlya Yanok * + double buffering ok with PIO
58eb81955bSIlya Yanok * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
59eb81955bSIlya Yanok * + request lossage observed with gadgetfs
60eb81955bSIlya Yanok *
61eb81955bSIlya Yanok * - ISO not tested ... might work, but only weakly isochronous
62eb81955bSIlya Yanok *
63eb81955bSIlya Yanok * - Gadget driver disabling of softconnect during bind() is ignored; so
64eb81955bSIlya Yanok * drivers can't hold off host requests until userspace is ready.
65eb81955bSIlya Yanok * (Workaround: they can turn it off later.)
66eb81955bSIlya Yanok *
67eb81955bSIlya Yanok * - PORTABILITY (assumes PIO works):
68eb81955bSIlya Yanok * + DaVinci, basically works with cppi dma
69eb81955bSIlya Yanok * + OMAP 2430, ditto with mentor dma
70eb81955bSIlya Yanok * + TUSB 6010, platform-specific dma in the works
71eb81955bSIlya Yanok */
72eb81955bSIlya Yanok
73eb81955bSIlya Yanok /* ----------------------------------------------------------------------- */
74eb81955bSIlya Yanok
/*
 * True when this request's buffer currently carries a DMA mapping,
 * i.e. map_dma_buffer() mapped it (MUSB_MAPPED) or the gadget driver
 * supplied it pre-mapped (PRE_MAPPED).
 */
#define is_buffer_mapped(req) (is_dma_capable() && \
		(req->map_state != UN_MAPPED))
77eb81955bSIlya Yanok
7895de1e2fSPaul Kocialkowski #ifndef CONFIG_USB_MUSB_PIO_ONLY
79eb81955bSIlya Yanok /* Maps the buffer to dma */
80eb81955bSIlya Yanok
map_dma_buffer(struct musb_request * request,struct musb * musb,struct musb_ep * musb_ep)81eb81955bSIlya Yanok static inline void map_dma_buffer(struct musb_request *request,
82eb81955bSIlya Yanok struct musb *musb, struct musb_ep *musb_ep)
83eb81955bSIlya Yanok {
84eb81955bSIlya Yanok int compatible = true;
85eb81955bSIlya Yanok struct dma_controller *dma = musb->dma_controller;
86eb81955bSIlya Yanok
87eb81955bSIlya Yanok request->map_state = UN_MAPPED;
88eb81955bSIlya Yanok
89eb81955bSIlya Yanok if (!is_dma_capable() || !musb_ep->dma)
90eb81955bSIlya Yanok return;
91eb81955bSIlya Yanok
92eb81955bSIlya Yanok /* Check if DMA engine can handle this request.
93eb81955bSIlya Yanok * DMA code must reject the USB request explicitly.
94eb81955bSIlya Yanok * Default behaviour is to map the request.
95eb81955bSIlya Yanok */
96eb81955bSIlya Yanok if (dma->is_compatible)
97eb81955bSIlya Yanok compatible = dma->is_compatible(musb_ep->dma,
98eb81955bSIlya Yanok musb_ep->packet_sz, request->request.buf,
99eb81955bSIlya Yanok request->request.length);
100eb81955bSIlya Yanok if (!compatible)
101eb81955bSIlya Yanok return;
102eb81955bSIlya Yanok
103eb81955bSIlya Yanok if (request->request.dma == DMA_ADDR_INVALID) {
104eb81955bSIlya Yanok request->request.dma = dma_map_single(
105eb81955bSIlya Yanok musb->controller,
106eb81955bSIlya Yanok request->request.buf,
107eb81955bSIlya Yanok request->request.length,
108eb81955bSIlya Yanok request->tx
109eb81955bSIlya Yanok ? DMA_TO_DEVICE
110eb81955bSIlya Yanok : DMA_FROM_DEVICE);
111eb81955bSIlya Yanok request->map_state = MUSB_MAPPED;
112eb81955bSIlya Yanok } else {
113eb81955bSIlya Yanok dma_sync_single_for_device(musb->controller,
114eb81955bSIlya Yanok request->request.dma,
115eb81955bSIlya Yanok request->request.length,
116eb81955bSIlya Yanok request->tx
117eb81955bSIlya Yanok ? DMA_TO_DEVICE
118eb81955bSIlya Yanok : DMA_FROM_DEVICE);
119eb81955bSIlya Yanok request->map_state = PRE_MAPPED;
120eb81955bSIlya Yanok }
121eb81955bSIlya Yanok }
122eb81955bSIlya Yanok
123eb81955bSIlya Yanok /* Unmap the buffer from dma and maps it back to cpu */
unmap_dma_buffer(struct musb_request * request,struct musb * musb)124eb81955bSIlya Yanok static inline void unmap_dma_buffer(struct musb_request *request,
125eb81955bSIlya Yanok struct musb *musb)
126eb81955bSIlya Yanok {
127eb81955bSIlya Yanok if (!is_buffer_mapped(request))
128eb81955bSIlya Yanok return;
129eb81955bSIlya Yanok
130eb81955bSIlya Yanok if (request->request.dma == DMA_ADDR_INVALID) {
131eb81955bSIlya Yanok dev_vdbg(musb->controller,
132eb81955bSIlya Yanok "not unmapping a never mapped buffer\n");
133eb81955bSIlya Yanok return;
134eb81955bSIlya Yanok }
135eb81955bSIlya Yanok if (request->map_state == MUSB_MAPPED) {
136eb81955bSIlya Yanok dma_unmap_single(musb->controller,
137eb81955bSIlya Yanok request->request.dma,
138eb81955bSIlya Yanok request->request.length,
139eb81955bSIlya Yanok request->tx
140eb81955bSIlya Yanok ? DMA_TO_DEVICE
141eb81955bSIlya Yanok : DMA_FROM_DEVICE);
142eb81955bSIlya Yanok request->request.dma = DMA_ADDR_INVALID;
143eb81955bSIlya Yanok } else { /* PRE_MAPPED */
144eb81955bSIlya Yanok dma_sync_single_for_cpu(musb->controller,
145eb81955bSIlya Yanok request->request.dma,
146eb81955bSIlya Yanok request->request.length,
147eb81955bSIlya Yanok request->tx
148eb81955bSIlya Yanok ? DMA_TO_DEVICE
149eb81955bSIlya Yanok : DMA_FROM_DEVICE);
150eb81955bSIlya Yanok }
151eb81955bSIlya Yanok request->map_state = UN_MAPPED;
152eb81955bSIlya Yanok }
153eb81955bSIlya Yanok #else
/* PIO-only build (CONFIG_USB_MUSB_PIO_ONLY): DMA mapping is a no-op. */
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
}
158eb81955bSIlya Yanok
/* PIO-only build (CONFIG_USB_MUSB_PIO_ONLY): DMA unmapping is a no-op. */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
}
163eb81955bSIlya Yanok #endif
164eb81955bSIlya Yanok
165eb81955bSIlya Yanok /*
166eb81955bSIlya Yanok * Immediately complete a request.
167eb81955bSIlya Yanok *
168eb81955bSIlya Yanok * @param request the request to complete
169eb81955bSIlya Yanok * @param status the status to complete the request with
170eb81955bSIlya Yanok * Context: controller locked, IRQs blocked.
171eb81955bSIlya Yanok */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	/* Remove from the endpoint queue before completing. */
	list_del(&req->list);
	/*
	 * Preserve a status that was already recorded; only replace the
	 * initial -EINPROGRESS marker with the caller's status.
	 */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	/*
	 * Mark the endpoint busy and drop the controller lock across the
	 * gadget's ->complete() callback, which may re-enter the driver
	 * (e.g. to queue another request).
	 */
	ep->busy = 1;
	spin_unlock(&musb->lock);
	/* Hand the buffer back to the CPU before the gadget touches it. */
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	/* Restore whatever busy state the caller saw on entry. */
	ep->busy = busy;
}
206eb81955bSIlya Yanok
207eb81955bSIlya Yanok /* ----------------------------------------------------------------------- */
208eb81955bSIlya Yanok
209eb81955bSIlya Yanok /*
210eb81955bSIlya Yanok * Abort requests queued to an endpoint using the status. Synchronous.
211eb81955bSIlya Yanok * caller locked controller and blocked irqs, and selected this ep.
212eb81955bSIlya Yanok */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	/* Keep re-entrant activity off this endpoint while flushing. */
	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			/*
			 * NOTE(review): FLUSHFIFO is written twice here,
			 * presumably to cover a double-buffered FIFO --
			 * TODO confirm against the MUSB programming guide.
			 */
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		/* Abort any in-flight transfer and release the channel. */
		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	/* Complete every queued request with the caller-supplied status. */
	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}
254eb81955bSIlya Yanok
255eb81955bSIlya Yanok /* ----------------------------------------------------------------------- */
256eb81955bSIlya Yanok
257eb81955bSIlya Yanok /* Data transfers - pure PIO, pure DMA, or mixed mode */
258eb81955bSIlya Yanok
259eb81955bSIlya Yanok /*
260eb81955bSIlya Yanok * This assumes the separate CPPI engine is responding to DMA requests
261eb81955bSIlya Yanok * from the usb core ... sequenced a bit differently from mentor dma.
262eb81955bSIlya Yanok */
263eb81955bSIlya Yanok
max_ep_writesize(struct musb * musb,struct musb_ep * ep)264eb81955bSIlya Yanok static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
265eb81955bSIlya Yanok {
266eb81955bSIlya Yanok if (can_bulk_split(musb, ep->type))
267eb81955bSIlya Yanok return ep->hw_ep->max_packet_sz_tx;
268eb81955bSIlya Yanok else
269eb81955bSIlya Yanok return ep->packet_sz;
270eb81955bSIlya Yanok }
271eb81955bSIlya Yanok
272eb81955bSIlya Yanok
273eb81955bSIlya Yanok #ifdef CONFIG_USB_INVENTRA_DMA
274eb81955bSIlya Yanok
275eb81955bSIlya Yanok /* Peripheral tx (IN) using Mentor DMA works as follows:
276eb81955bSIlya Yanok Only mode 0 is used for transfers <= wPktSize,
277eb81955bSIlya Yanok mode 1 is used for larger transfers,
278eb81955bSIlya Yanok
279eb81955bSIlya Yanok One of the following happens:
280eb81955bSIlya Yanok - Host sends IN token which causes an endpoint interrupt
281eb81955bSIlya Yanok -> TxAvail
282eb81955bSIlya Yanok -> if DMA is currently busy, exit.
283eb81955bSIlya Yanok -> if queue is non-empty, txstate().
284eb81955bSIlya Yanok
285eb81955bSIlya Yanok - Request is queued by the gadget driver.
286eb81955bSIlya Yanok -> if queue was previously empty, txstate()
287eb81955bSIlya Yanok
288eb81955bSIlya Yanok txstate()
289eb81955bSIlya Yanok -> start
290eb81955bSIlya Yanok /\ -> setup DMA
291eb81955bSIlya Yanok | (data is transferred to the FIFO, then sent out when
292eb81955bSIlya Yanok | IN token(s) are recd from Host.
293eb81955bSIlya Yanok | -> DMA interrupt on completion
294eb81955bSIlya Yanok | calls TxAvail.
295eb81955bSIlya Yanok | -> stop DMA, ~DMAENAB,
296eb81955bSIlya Yanok | -> set TxPktRdy for last short pkt or zlp
297eb81955bSIlya Yanok | -> Complete Request
298eb81955bSIlya Yanok | -> Continue next request (call txstate)
299eb81955bSIlya Yanok |___________________________________|
300eb81955bSIlya Yanok
301eb81955bSIlya Yanok * Non-Mentor DMA engines can of course work differently, such as by
302eb81955bSIlya Yanok * upleveling from irq-per-packet to irq-per-buffer.
303eb81955bSIlya Yanok */
304eb81955bSIlya Yanok
305eb81955bSIlya Yanok #endif
306eb81955bSIlya Yanok
307eb81955bSIlya Yanok /*
308eb81955bSIlya Yanok * An endpoint is transmitting data. This can be called either from
309eb81955bSIlya Yanok * the IRQ routine or from ep.queue() to kickstart a request on an
310eb81955bSIlya Yanok * endpoint.
311eb81955bSIlya Yanok *
312eb81955bSIlya Yanok * Context: controller locked, IRQs blocked, endpoint selected
313eb81955bSIlya Yanok */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	/* PIO moves at most one packet (or bulk-split FIFO) per call. */
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		/* Previous packet not yet consumed by the host. */
		dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_USB_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		/* Only attempt DMA if the buffer actually got a mapping. */
		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
			/* Mode 0 for sub-packet transfers, mode 1 otherwise. */
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					if (!musb_ep->hb_mult)
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start. Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request_size);
		if (!use_dma) {
			/* Fall back to PIO; the DMA channel is useless here. */
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: prequest->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request_size);
#endif
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		/* PIO: copy one chunk into the FIFO and arm TXPKTRDY. */
		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}
481eb81955bSIlya Yanok
482eb81955bSIlya Yanok /*
483eb81955bSIlya Yanok * FIFO state update (e.g. data ready).
484eb81955bSIlya Yanok * Called from IRQ, with controller locked.
485eb81955bSIlya Yanok */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	/*
	 * NOTE(review): next_request() presumably returns NULL on an empty
	 * queue; "&req->request" then relies on 'request' sitting at offset
	 * zero of struct musb_request so that the "if (request)" test below
	 * still filters the NULL case -- confirm against musb_core.h.
	 */
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		/* Acknowledge the sent stall; nothing else to do yet. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			/* DMA just completed: turn it off and account bytes. */
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
			|| (is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1))))
#endif
		) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			/* Arm a zero-length packet to terminate the transfer. */
			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after sometime. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		/* Push the next chunk (or next request) toward the FIFO. */
		txstate(musb, req);
	}
}
598eb81955bSIlya Yanok
599eb81955bSIlya Yanok /* ------------------------------------------------------------ */
600eb81955bSIlya Yanok
601eb81955bSIlya Yanok #ifdef CONFIG_USB_INVENTRA_DMA
602eb81955bSIlya Yanok
603eb81955bSIlya Yanok /* Peripheral rx (OUT) using Mentor DMA works as follows:
604eb81955bSIlya Yanok - Only mode 0 is used.
605eb81955bSIlya Yanok
606eb81955bSIlya Yanok - Request is queued by the gadget class driver.
607eb81955bSIlya Yanok -> if queue was previously empty, rxstate()
608eb81955bSIlya Yanok
609eb81955bSIlya Yanok - Host sends OUT token which causes an endpoint interrupt
610eb81955bSIlya Yanok /\ -> RxReady
611eb81955bSIlya Yanok | -> if request queued, call rxstate
612eb81955bSIlya Yanok | /\ -> setup DMA
613eb81955bSIlya Yanok | | -> DMA interrupt on completion
614eb81955bSIlya Yanok | | -> RxReady
615eb81955bSIlya Yanok | | -> stop DMA
616eb81955bSIlya Yanok | | -> ack the read
617eb81955bSIlya Yanok | | -> if data recd = max expected
618eb81955bSIlya Yanok | | by the request, or host
619eb81955bSIlya Yanok | | sent a short packet,
620eb81955bSIlya Yanok | | complete the request,
621eb81955bSIlya Yanok | | and start the next one.
622eb81955bSIlya Yanok | |_____________________________________|
623eb81955bSIlya Yanok | else just wait for the host
624eb81955bSIlya Yanok | to send the next OUT token.
625eb81955bSIlya Yanok |__________________________________________________|
626eb81955bSIlya Yanok
627eb81955bSIlya Yanok * Non-Mentor DMA engines can of course work differently.
628eb81955bSIlya Yanok */
629eb81955bSIlya Yanok
630eb81955bSIlya Yanok #endif
631eb81955bSIlya Yanok
/*
 * Fill the current RX (OUT) request from the endpoint FIFO, programming
 * DMA when a controller is available and the buffer is mapped, falling
 * back to PIO otherwise.  Gives the request back once it is full or a
 * short packet ends the transfer.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		fifo_count = 0;
	u16			len;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	/* A shared-FIFO endpoint keeps its gadget state on the ep_in side */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	len = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	/* Don't touch the FIFO while a stall handshake is being sent */
	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE: CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		/* how many bytes the packet waiting in the FIFO holds */
		len = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */

		if (request->short_not_ok && len == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most these gadgets, end of transfer is signified either by a short
	 * packet, or filling the last byte of the buffer.  (Sending extra
	 * data in that last packet should trigger an overflow fault.)  But
	 * in mode 1, we don't get DMA completion interrupt for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (request->actual < request->length) {
					int transfer_size = 0;
					if (use_mode_1) {
						/* mode 1: as much as the channel allows */
						transfer_size = min(request->length - request->actual,
								channel->max_len);
						musb_ep->dma->desired_mode = 1;
					} else {
						/* mode 0: one packet at a time */
						transfer_size = min(request->length - request->actual,
								(unsigned)len);
						musb_ep->dma->desired_mode = 0;
					}

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				/* DMA accepted the job; completion IRQ resumes */
				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (len < musb_ep->packet_sz)
					transfer_size = len;
				else if (request->short_not_ok)
					transfer_size = min(request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min(request->length -
							request->actual,
							(unsigned)len);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}
#endif	/* Mentor's DMA */

			/* PIO fallback: copy by hand from the FIFO */
			fifo_count = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
901eb81955bSIlya Yanok
/*
 * Data ready for a request; called from IRQ.  Handles sent-stall,
 * overrun and incomplete-RX status, winds down a finished DMA transfer,
 * gives back completed requests, and (re)starts reception via rxstate().
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	/* A shared-FIFO endpoint keeps its gadget state on the ep_in side */
	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	/* nothing queued on this endpoint: nothing to do */
	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	/* a STALL handshake went out: just clear the status bit */
	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	/* ISO overrun: record the error on the in-progress request */
	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	/* DMA finished: turn it off, account the data, decide completion */
	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 **/
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after sometime. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);

		/* no further request queued: leave the endpoint idle */
		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}
1024eb81955bSIlya Yanok
1025eb81955bSIlya Yanok /* ------------------------------------------------------------ */
1026eb81955bSIlya Yanok
musb_gadget_enable(struct usb_ep * ep,const struct usb_endpoint_descriptor * desc)1027eb81955bSIlya Yanok static int musb_gadget_enable(struct usb_ep *ep,
1028eb81955bSIlya Yanok const struct usb_endpoint_descriptor *desc)
1029eb81955bSIlya Yanok {
1030eb81955bSIlya Yanok unsigned long flags;
1031eb81955bSIlya Yanok struct musb_ep *musb_ep;
1032eb81955bSIlya Yanok struct musb_hw_ep *hw_ep;
1033eb81955bSIlya Yanok void __iomem *regs;
1034eb81955bSIlya Yanok struct musb *musb;
1035eb81955bSIlya Yanok void __iomem *mbase;
1036eb81955bSIlya Yanok u8 epnum;
1037eb81955bSIlya Yanok u16 csr;
1038eb81955bSIlya Yanok unsigned tmp;
1039eb81955bSIlya Yanok int status = -EINVAL;
1040eb81955bSIlya Yanok
1041eb81955bSIlya Yanok if (!ep || !desc)
1042eb81955bSIlya Yanok return -EINVAL;
1043eb81955bSIlya Yanok
1044eb81955bSIlya Yanok musb_ep = to_musb_ep(ep);
1045eb81955bSIlya Yanok hw_ep = musb_ep->hw_ep;
1046eb81955bSIlya Yanok regs = hw_ep->regs;
1047eb81955bSIlya Yanok musb = musb_ep->musb;
1048eb81955bSIlya Yanok mbase = musb->mregs;
1049eb81955bSIlya Yanok epnum = musb_ep->current_epnum;
1050eb81955bSIlya Yanok
1051eb81955bSIlya Yanok spin_lock_irqsave(&musb->lock, flags);
1052eb81955bSIlya Yanok
1053eb81955bSIlya Yanok if (musb_ep->desc) {
1054eb81955bSIlya Yanok status = -EBUSY;
1055eb81955bSIlya Yanok goto fail;
1056eb81955bSIlya Yanok }
1057eb81955bSIlya Yanok musb_ep->type = usb_endpoint_type(desc);
1058eb81955bSIlya Yanok
1059eb81955bSIlya Yanok /* check direction and (later) maxpacket size against endpoint */
1060eb81955bSIlya Yanok if (usb_endpoint_num(desc) != epnum)
1061eb81955bSIlya Yanok goto fail;
1062eb81955bSIlya Yanok
1063eb81955bSIlya Yanok /* REVISIT this rules out high bandwidth periodic transfers */
1064eb81955bSIlya Yanok tmp = usb_endpoint_maxp(desc);
1065eb81955bSIlya Yanok if (tmp & ~0x07ff) {
1066eb81955bSIlya Yanok int ok;
1067eb81955bSIlya Yanok
1068eb81955bSIlya Yanok if (usb_endpoint_dir_in(desc))
1069eb81955bSIlya Yanok ok = musb->hb_iso_tx;
1070eb81955bSIlya Yanok else
1071eb81955bSIlya Yanok ok = musb->hb_iso_rx;
1072eb81955bSIlya Yanok
1073eb81955bSIlya Yanok if (!ok) {
1074eb81955bSIlya Yanok dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
1075eb81955bSIlya Yanok goto fail;
1076eb81955bSIlya Yanok }
1077eb81955bSIlya Yanok musb_ep->hb_mult = (tmp >> 11) & 3;
1078eb81955bSIlya Yanok } else {
1079eb81955bSIlya Yanok musb_ep->hb_mult = 0;
1080eb81955bSIlya Yanok }
1081eb81955bSIlya Yanok
1082eb81955bSIlya Yanok musb_ep->packet_sz = tmp & 0x7ff;
1083eb81955bSIlya Yanok tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
1084eb81955bSIlya Yanok
1085eb81955bSIlya Yanok /* enable the interrupts for the endpoint, set the endpoint
1086eb81955bSIlya Yanok * packet size (or fail), set the mode, clear the fifo
1087eb81955bSIlya Yanok */
1088eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1089eb81955bSIlya Yanok if (usb_endpoint_dir_in(desc)) {
1090eb81955bSIlya Yanok u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1091eb81955bSIlya Yanok
1092eb81955bSIlya Yanok if (hw_ep->is_shared_fifo)
1093eb81955bSIlya Yanok musb_ep->is_in = 1;
1094eb81955bSIlya Yanok if (!musb_ep->is_in)
1095eb81955bSIlya Yanok goto fail;
1096eb81955bSIlya Yanok
1097eb81955bSIlya Yanok if (tmp > hw_ep->max_packet_sz_tx) {
1098eb81955bSIlya Yanok dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1099eb81955bSIlya Yanok goto fail;
1100eb81955bSIlya Yanok }
1101eb81955bSIlya Yanok
1102eb81955bSIlya Yanok int_txe |= (1 << epnum);
1103eb81955bSIlya Yanok musb_writew(mbase, MUSB_INTRTXE, int_txe);
1104eb81955bSIlya Yanok
1105eb81955bSIlya Yanok /* REVISIT if can_bulk_split(), use by updating "tmp";
1106eb81955bSIlya Yanok * likewise high bandwidth periodic tx
1107eb81955bSIlya Yanok */
1108eb81955bSIlya Yanok /* Set TXMAXP with the FIFO size of the endpoint
1109eb81955bSIlya Yanok * to disable double buffering mode.
1110eb81955bSIlya Yanok */
1111eb81955bSIlya Yanok if (musb->double_buffer_not_ok)
1112eb81955bSIlya Yanok musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1113eb81955bSIlya Yanok else
1114eb81955bSIlya Yanok musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1115eb81955bSIlya Yanok | (musb_ep->hb_mult << 11));
1116eb81955bSIlya Yanok
1117eb81955bSIlya Yanok csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
1118eb81955bSIlya Yanok if (musb_readw(regs, MUSB_TXCSR)
1119eb81955bSIlya Yanok & MUSB_TXCSR_FIFONOTEMPTY)
1120eb81955bSIlya Yanok csr |= MUSB_TXCSR_FLUSHFIFO;
1121eb81955bSIlya Yanok if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1122eb81955bSIlya Yanok csr |= MUSB_TXCSR_P_ISO;
1123eb81955bSIlya Yanok
1124eb81955bSIlya Yanok /* set twice in case of double buffering */
1125eb81955bSIlya Yanok musb_writew(regs, MUSB_TXCSR, csr);
1126eb81955bSIlya Yanok /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1127eb81955bSIlya Yanok musb_writew(regs, MUSB_TXCSR, csr);
1128eb81955bSIlya Yanok
1129eb81955bSIlya Yanok } else {
1130eb81955bSIlya Yanok u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1131eb81955bSIlya Yanok
1132eb81955bSIlya Yanok if (hw_ep->is_shared_fifo)
1133eb81955bSIlya Yanok musb_ep->is_in = 0;
1134eb81955bSIlya Yanok if (musb_ep->is_in)
1135eb81955bSIlya Yanok goto fail;
1136eb81955bSIlya Yanok
1137eb81955bSIlya Yanok if (tmp > hw_ep->max_packet_sz_rx) {
1138eb81955bSIlya Yanok dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1139eb81955bSIlya Yanok goto fail;
1140eb81955bSIlya Yanok }
1141eb81955bSIlya Yanok
1142eb81955bSIlya Yanok int_rxe |= (1 << epnum);
1143eb81955bSIlya Yanok musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1144eb81955bSIlya Yanok
1145eb81955bSIlya Yanok /* REVISIT if can_bulk_combine() use by updating "tmp"
1146eb81955bSIlya Yanok * likewise high bandwidth periodic rx
1147eb81955bSIlya Yanok */
1148eb81955bSIlya Yanok /* Set RXMAXP with the FIFO size of the endpoint
1149eb81955bSIlya Yanok * to disable double buffering mode.
1150eb81955bSIlya Yanok */
1151eb81955bSIlya Yanok if (musb->double_buffer_not_ok)
1152eb81955bSIlya Yanok musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
1153eb81955bSIlya Yanok else
1154eb81955bSIlya Yanok musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1155eb81955bSIlya Yanok | (musb_ep->hb_mult << 11));
1156eb81955bSIlya Yanok
1157eb81955bSIlya Yanok /* force shared fifo to OUT-only mode */
1158eb81955bSIlya Yanok if (hw_ep->is_shared_fifo) {
1159eb81955bSIlya Yanok csr = musb_readw(regs, MUSB_TXCSR);
1160eb81955bSIlya Yanok csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1161eb81955bSIlya Yanok musb_writew(regs, MUSB_TXCSR, csr);
1162eb81955bSIlya Yanok }
1163eb81955bSIlya Yanok
1164eb81955bSIlya Yanok csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1165eb81955bSIlya Yanok if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1166eb81955bSIlya Yanok csr |= MUSB_RXCSR_P_ISO;
1167eb81955bSIlya Yanok else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1168eb81955bSIlya Yanok csr |= MUSB_RXCSR_DISNYET;
1169eb81955bSIlya Yanok
1170eb81955bSIlya Yanok /* set twice in case of double buffering */
1171eb81955bSIlya Yanok musb_writew(regs, MUSB_RXCSR, csr);
1172eb81955bSIlya Yanok musb_writew(regs, MUSB_RXCSR, csr);
1173eb81955bSIlya Yanok }
1174eb81955bSIlya Yanok
1175eb81955bSIlya Yanok /* NOTE: all the I/O code _should_ work fine without DMA, in case
1176eb81955bSIlya Yanok * for some reason you run out of channels here.
1177eb81955bSIlya Yanok */
1178eb81955bSIlya Yanok if (is_dma_capable() && musb->dma_controller) {
1179eb81955bSIlya Yanok struct dma_controller *c = musb->dma_controller;
1180eb81955bSIlya Yanok
1181eb81955bSIlya Yanok musb_ep->dma = c->channel_alloc(c, hw_ep,
1182eb81955bSIlya Yanok (desc->bEndpointAddress & USB_DIR_IN));
1183eb81955bSIlya Yanok } else
1184eb81955bSIlya Yanok musb_ep->dma = NULL;
1185eb81955bSIlya Yanok
1186eb81955bSIlya Yanok musb_ep->desc = desc;
1187eb81955bSIlya Yanok musb_ep->busy = 0;
1188eb81955bSIlya Yanok musb_ep->wedged = 0;
1189eb81955bSIlya Yanok status = 0;
1190eb81955bSIlya Yanok
1191eb81955bSIlya Yanok pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1192eb81955bSIlya Yanok musb_driver_name, musb_ep->end_point.name,
1193eb81955bSIlya Yanok ({ char *s; switch (musb_ep->type) {
1194eb81955bSIlya Yanok case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
1195eb81955bSIlya Yanok case USB_ENDPOINT_XFER_INT: s = "int"; break;
1196eb81955bSIlya Yanok default: s = "iso"; break;
1197eb81955bSIlya Yanok }; s; }),
1198eb81955bSIlya Yanok musb_ep->is_in ? "IN" : "OUT",
1199eb81955bSIlya Yanok musb_ep->dma ? "dma, " : "",
1200eb81955bSIlya Yanok musb_ep->packet_sz);
1201eb81955bSIlya Yanok
1202eb81955bSIlya Yanok schedule_work(&musb->irq_work);
1203eb81955bSIlya Yanok
1204eb81955bSIlya Yanok fail:
1205eb81955bSIlya Yanok spin_unlock_irqrestore(&musb->lock, flags);
1206eb81955bSIlya Yanok return status;
1207eb81955bSIlya Yanok }
1208eb81955bSIlya Yanok
1209eb81955bSIlya Yanok /*
1210eb81955bSIlya Yanok * Disable an endpoint flushing all requests queued.
1211eb81955bSIlya Yanok */
musb_gadget_disable(struct usb_ep * ep)1212eb81955bSIlya Yanok static int musb_gadget_disable(struct usb_ep *ep)
1213eb81955bSIlya Yanok {
1214eb81955bSIlya Yanok unsigned long flags;
1215eb81955bSIlya Yanok struct musb *musb;
1216eb81955bSIlya Yanok u8 epnum;
1217eb81955bSIlya Yanok struct musb_ep *musb_ep;
1218eb81955bSIlya Yanok void __iomem *epio;
1219eb81955bSIlya Yanok int status = 0;
1220eb81955bSIlya Yanok
1221eb81955bSIlya Yanok musb_ep = to_musb_ep(ep);
1222eb81955bSIlya Yanok musb = musb_ep->musb;
1223eb81955bSIlya Yanok epnum = musb_ep->current_epnum;
1224eb81955bSIlya Yanok epio = musb->endpoints[epnum].regs;
1225eb81955bSIlya Yanok
1226eb81955bSIlya Yanok spin_lock_irqsave(&musb->lock, flags);
1227eb81955bSIlya Yanok musb_ep_select(musb->mregs, epnum);
1228eb81955bSIlya Yanok
1229eb81955bSIlya Yanok /* zero the endpoint sizes */
1230eb81955bSIlya Yanok if (musb_ep->is_in) {
1231eb81955bSIlya Yanok u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1232eb81955bSIlya Yanok int_txe &= ~(1 << epnum);
1233eb81955bSIlya Yanok musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1234eb81955bSIlya Yanok musb_writew(epio, MUSB_TXMAXP, 0);
1235eb81955bSIlya Yanok } else {
1236eb81955bSIlya Yanok u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1237eb81955bSIlya Yanok int_rxe &= ~(1 << epnum);
1238eb81955bSIlya Yanok musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1239eb81955bSIlya Yanok musb_writew(epio, MUSB_RXMAXP, 0);
1240eb81955bSIlya Yanok }
1241eb81955bSIlya Yanok
1242eb81955bSIlya Yanok musb_ep->desc = NULL;
1243eb81955bSIlya Yanok #ifndef __UBOOT__
1244eb81955bSIlya Yanok musb_ep->end_point.desc = NULL;
1245eb81955bSIlya Yanok #endif
1246eb81955bSIlya Yanok
1247eb81955bSIlya Yanok /* abort all pending DMA and requests */
1248eb81955bSIlya Yanok nuke(musb_ep, -ESHUTDOWN);
1249eb81955bSIlya Yanok
1250eb81955bSIlya Yanok schedule_work(&musb->irq_work);
1251eb81955bSIlya Yanok
1252eb81955bSIlya Yanok spin_unlock_irqrestore(&(musb->lock), flags);
1253eb81955bSIlya Yanok
1254eb81955bSIlya Yanok dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
1255eb81955bSIlya Yanok
1256eb81955bSIlya Yanok return status;
1257eb81955bSIlya Yanok }
1258eb81955bSIlya Yanok
1259eb81955bSIlya Yanok /*
1260eb81955bSIlya Yanok * Allocate a request for an endpoint.
1261eb81955bSIlya Yanok * Reused by ep0 code.
1262eb81955bSIlya Yanok */
musb_alloc_request(struct usb_ep * ep,gfp_t gfp_flags)1263eb81955bSIlya Yanok struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1264eb81955bSIlya Yanok {
1265eb81955bSIlya Yanok struct musb_ep *musb_ep = to_musb_ep(ep);
1266eb81955bSIlya Yanok struct musb *musb = musb_ep->musb;
1267eb81955bSIlya Yanok struct musb_request *request = NULL;
1268eb81955bSIlya Yanok
1269eb81955bSIlya Yanok request = kzalloc(sizeof *request, gfp_flags);
1270eb81955bSIlya Yanok if (!request) {
1271eb81955bSIlya Yanok dev_dbg(musb->controller, "not enough memory\n");
1272eb81955bSIlya Yanok return NULL;
1273eb81955bSIlya Yanok }
1274eb81955bSIlya Yanok
1275eb81955bSIlya Yanok request->request.dma = DMA_ADDR_INVALID;
1276eb81955bSIlya Yanok request->epnum = musb_ep->current_epnum;
1277eb81955bSIlya Yanok request->ep = musb_ep;
1278eb81955bSIlya Yanok
1279eb81955bSIlya Yanok return &request->request;
1280eb81955bSIlya Yanok }
1281eb81955bSIlya Yanok
/*
 * Free a request previously obtained from musb_alloc_request().
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct musb_request *request = to_musb_request(req);

	kfree(request);
}
1290eb81955bSIlya Yanok
/* Buffers queued for deferred freeing.
 * NOTE(review): nothing in this part of the file adds to or drains this
 * list -- it looks like leftover infrastructure carried over from the
 * Linux driver; confirm usage elsewhere before relying on it.
 */
static LIST_HEAD(buffers);

/* Bookkeeping record for one buffer awaiting release. */
struct free_record {
	struct list_head	list;	/* link on the "buffers" list */
	struct device		*dev;	/* device the buffer was mapped for */
	unsigned		bytes;	/* size of the buffer in bytes */
	dma_addr_t		dma;	/* bus address of the buffer */
};
1299eb81955bSIlya Yanok
1300eb81955bSIlya Yanok /*
1301eb81955bSIlya Yanok * Context: controller locked, IRQs blocked.
1302eb81955bSIlya Yanok */
musb_ep_restart(struct musb * musb,struct musb_request * req)1303eb81955bSIlya Yanok void musb_ep_restart(struct musb *musb, struct musb_request *req)
1304eb81955bSIlya Yanok {
1305eb81955bSIlya Yanok dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1306eb81955bSIlya Yanok req->tx ? "TX/IN" : "RX/OUT",
1307eb81955bSIlya Yanok &req->request, req->request.length, req->epnum);
1308eb81955bSIlya Yanok
1309eb81955bSIlya Yanok musb_ep_select(musb->mregs, req->epnum);
1310eb81955bSIlya Yanok if (req->tx)
1311eb81955bSIlya Yanok txstate(musb, req);
1312eb81955bSIlya Yanok else
1313eb81955bSIlya Yanok rxstate(musb, req);
1314eb81955bSIlya Yanok }
1315eb81955bSIlya Yanok
/*
 * Queue a request on an endpoint (usb_ep_ops.queue) and kick off I/O
 * immediately when the queue was previously idle.
 * Takes musb->lock; the DMA mapping is done before the lock is held.
 */
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	/* the request must have been allocated for this endpoint */
	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
			req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
1370eb81955bSIlya Yanok
musb_gadget_dequeue(struct usb_ep * ep,struct usb_request * request)1371eb81955bSIlya Yanok static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1372eb81955bSIlya Yanok {
1373eb81955bSIlya Yanok struct musb_ep *musb_ep = to_musb_ep(ep);
1374eb81955bSIlya Yanok struct musb_request *req = to_musb_request(request);
1375eb81955bSIlya Yanok struct musb_request *r;
1376eb81955bSIlya Yanok unsigned long flags;
1377eb81955bSIlya Yanok int status = 0;
1378eb81955bSIlya Yanok struct musb *musb = musb_ep->musb;
1379eb81955bSIlya Yanok
1380eb81955bSIlya Yanok if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1381eb81955bSIlya Yanok return -EINVAL;
1382eb81955bSIlya Yanok
1383eb81955bSIlya Yanok spin_lock_irqsave(&musb->lock, flags);
1384eb81955bSIlya Yanok
1385eb81955bSIlya Yanok list_for_each_entry(r, &musb_ep->req_list, list) {
1386eb81955bSIlya Yanok if (r == req)
1387eb81955bSIlya Yanok break;
1388eb81955bSIlya Yanok }
1389eb81955bSIlya Yanok if (r != req) {
1390eb81955bSIlya Yanok dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1391eb81955bSIlya Yanok status = -EINVAL;
1392eb81955bSIlya Yanok goto done;
1393eb81955bSIlya Yanok }
1394eb81955bSIlya Yanok
1395eb81955bSIlya Yanok /* if the hardware doesn't have the request, easy ... */
1396eb81955bSIlya Yanok if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1397eb81955bSIlya Yanok musb_g_giveback(musb_ep, request, -ECONNRESET);
1398eb81955bSIlya Yanok
1399eb81955bSIlya Yanok /* ... else abort the dma transfer ... */
1400eb81955bSIlya Yanok else if (is_dma_capable() && musb_ep->dma) {
1401eb81955bSIlya Yanok struct dma_controller *c = musb->dma_controller;
1402eb81955bSIlya Yanok
1403eb81955bSIlya Yanok musb_ep_select(musb->mregs, musb_ep->current_epnum);
1404eb81955bSIlya Yanok if (c->channel_abort)
1405eb81955bSIlya Yanok status = c->channel_abort(musb_ep->dma);
1406eb81955bSIlya Yanok else
1407eb81955bSIlya Yanok status = -EBUSY;
1408eb81955bSIlya Yanok if (status == 0)
1409eb81955bSIlya Yanok musb_g_giveback(musb_ep, request, -ECONNRESET);
1410eb81955bSIlya Yanok } else {
1411eb81955bSIlya Yanok /* NOTE: by sticking to easily tested hardware/driver states,
1412eb81955bSIlya Yanok * we leave counting of in-flight packets imprecise.
1413eb81955bSIlya Yanok */
1414eb81955bSIlya Yanok musb_g_giveback(musb_ep, request, -ECONNRESET);
1415eb81955bSIlya Yanok }
1416eb81955bSIlya Yanok
1417eb81955bSIlya Yanok done:
1418eb81955bSIlya Yanok spin_unlock_irqrestore(&musb->lock, flags);
1419eb81955bSIlya Yanok return status;
1420eb81955bSIlya Yanok }
1421eb81955bSIlya Yanok
1422eb81955bSIlya Yanok /*
1423eb81955bSIlya Yanok * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
1424eb81955bSIlya Yanok * data but will queue requests.
1425eb81955bSIlya Yanok *
1426eb81955bSIlya Yanok * exported to ep0 code
1427eb81955bSIlya Yanok */
musb_gadget_set_halt(struct usb_ep * ep,int value)1428eb81955bSIlya Yanok static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1429eb81955bSIlya Yanok {
1430eb81955bSIlya Yanok struct musb_ep *musb_ep = to_musb_ep(ep);
1431eb81955bSIlya Yanok u8 epnum = musb_ep->current_epnum;
1432eb81955bSIlya Yanok struct musb *musb = musb_ep->musb;
1433eb81955bSIlya Yanok void __iomem *epio = musb->endpoints[epnum].regs;
1434eb81955bSIlya Yanok void __iomem *mbase;
1435eb81955bSIlya Yanok unsigned long flags;
1436eb81955bSIlya Yanok u16 csr;
1437eb81955bSIlya Yanok struct musb_request *request;
1438eb81955bSIlya Yanok int status = 0;
1439eb81955bSIlya Yanok
1440eb81955bSIlya Yanok if (!ep)
1441eb81955bSIlya Yanok return -EINVAL;
1442eb81955bSIlya Yanok mbase = musb->mregs;
1443eb81955bSIlya Yanok
1444eb81955bSIlya Yanok spin_lock_irqsave(&musb->lock, flags);
1445eb81955bSIlya Yanok
1446eb81955bSIlya Yanok if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1447eb81955bSIlya Yanok status = -EINVAL;
1448eb81955bSIlya Yanok goto done;
1449eb81955bSIlya Yanok }
1450eb81955bSIlya Yanok
1451eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1452eb81955bSIlya Yanok
1453eb81955bSIlya Yanok request = next_request(musb_ep);
1454eb81955bSIlya Yanok if (value) {
1455eb81955bSIlya Yanok if (request) {
1456eb81955bSIlya Yanok dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1457eb81955bSIlya Yanok ep->name);
1458eb81955bSIlya Yanok status = -EAGAIN;
1459eb81955bSIlya Yanok goto done;
1460eb81955bSIlya Yanok }
1461eb81955bSIlya Yanok /* Cannot portably stall with non-empty FIFO */
1462eb81955bSIlya Yanok if (musb_ep->is_in) {
1463eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_TXCSR);
1464eb81955bSIlya Yanok if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1465eb81955bSIlya Yanok dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1466eb81955bSIlya Yanok status = -EAGAIN;
1467eb81955bSIlya Yanok goto done;
1468eb81955bSIlya Yanok }
1469eb81955bSIlya Yanok }
1470eb81955bSIlya Yanok } else
1471eb81955bSIlya Yanok musb_ep->wedged = 0;
1472eb81955bSIlya Yanok
1473eb81955bSIlya Yanok /* set/clear the stall and toggle bits */
1474eb81955bSIlya Yanok dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1475eb81955bSIlya Yanok if (musb_ep->is_in) {
1476eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_TXCSR);
1477eb81955bSIlya Yanok csr |= MUSB_TXCSR_P_WZC_BITS
1478eb81955bSIlya Yanok | MUSB_TXCSR_CLRDATATOG;
1479eb81955bSIlya Yanok if (value)
1480eb81955bSIlya Yanok csr |= MUSB_TXCSR_P_SENDSTALL;
1481eb81955bSIlya Yanok else
1482eb81955bSIlya Yanok csr &= ~(MUSB_TXCSR_P_SENDSTALL
1483eb81955bSIlya Yanok | MUSB_TXCSR_P_SENTSTALL);
1484eb81955bSIlya Yanok csr &= ~MUSB_TXCSR_TXPKTRDY;
1485eb81955bSIlya Yanok musb_writew(epio, MUSB_TXCSR, csr);
1486eb81955bSIlya Yanok } else {
1487eb81955bSIlya Yanok csr = musb_readw(epio, MUSB_RXCSR);
1488eb81955bSIlya Yanok csr |= MUSB_RXCSR_P_WZC_BITS
1489eb81955bSIlya Yanok | MUSB_RXCSR_FLUSHFIFO
1490eb81955bSIlya Yanok | MUSB_RXCSR_CLRDATATOG;
1491eb81955bSIlya Yanok if (value)
1492eb81955bSIlya Yanok csr |= MUSB_RXCSR_P_SENDSTALL;
1493eb81955bSIlya Yanok else
1494eb81955bSIlya Yanok csr &= ~(MUSB_RXCSR_P_SENDSTALL
1495eb81955bSIlya Yanok | MUSB_RXCSR_P_SENTSTALL);
1496eb81955bSIlya Yanok musb_writew(epio, MUSB_RXCSR, csr);
1497eb81955bSIlya Yanok }
1498eb81955bSIlya Yanok
1499eb81955bSIlya Yanok /* maybe start the first request in the queue */
1500eb81955bSIlya Yanok if (!musb_ep->busy && !value && request) {
1501eb81955bSIlya Yanok dev_dbg(musb->controller, "restarting the request\n");
1502eb81955bSIlya Yanok musb_ep_restart(musb, request);
1503eb81955bSIlya Yanok }
1504eb81955bSIlya Yanok
1505eb81955bSIlya Yanok done:
1506eb81955bSIlya Yanok spin_unlock_irqrestore(&musb->lock, flags);
1507eb81955bSIlya Yanok return status;
1508eb81955bSIlya Yanok }
1509eb81955bSIlya Yanok
1510eb81955bSIlya Yanok #ifndef __UBOOT__
1511eb81955bSIlya Yanok /*
1512eb81955bSIlya Yanok * Sets the halt feature with the clear requests ignored
1513eb81955bSIlya Yanok */
musb_gadget_set_wedge(struct usb_ep * ep)1514eb81955bSIlya Yanok static int musb_gadget_set_wedge(struct usb_ep *ep)
1515eb81955bSIlya Yanok {
1516eb81955bSIlya Yanok struct musb_ep *musb_ep = to_musb_ep(ep);
1517eb81955bSIlya Yanok
1518eb81955bSIlya Yanok if (!ep)
1519eb81955bSIlya Yanok return -EINVAL;
1520eb81955bSIlya Yanok
1521eb81955bSIlya Yanok musb_ep->wedged = 1;
1522eb81955bSIlya Yanok
1523eb81955bSIlya Yanok return usb_ep_set_halt(ep);
1524eb81955bSIlya Yanok }
1525eb81955bSIlya Yanok #endif
1526eb81955bSIlya Yanok
musb_gadget_fifo_status(struct usb_ep * ep)1527eb81955bSIlya Yanok static int musb_gadget_fifo_status(struct usb_ep *ep)
1528eb81955bSIlya Yanok {
1529eb81955bSIlya Yanok struct musb_ep *musb_ep = to_musb_ep(ep);
1530eb81955bSIlya Yanok void __iomem *epio = musb_ep->hw_ep->regs;
1531eb81955bSIlya Yanok int retval = -EINVAL;
1532eb81955bSIlya Yanok
1533eb81955bSIlya Yanok if (musb_ep->desc && !musb_ep->is_in) {
1534eb81955bSIlya Yanok struct musb *musb = musb_ep->musb;
1535eb81955bSIlya Yanok int epnum = musb_ep->current_epnum;
1536eb81955bSIlya Yanok void __iomem *mbase = musb->mregs;
1537eb81955bSIlya Yanok unsigned long flags;
1538eb81955bSIlya Yanok
1539eb81955bSIlya Yanok spin_lock_irqsave(&musb->lock, flags);
1540eb81955bSIlya Yanok
1541eb81955bSIlya Yanok musb_ep_select(mbase, epnum);
1542eb81955bSIlya Yanok /* FIXME return zero unless RXPKTRDY is set */
1543eb81955bSIlya Yanok retval = musb_readw(epio, MUSB_RXCOUNT);
1544eb81955bSIlya Yanok
1545eb81955bSIlya Yanok spin_unlock_irqrestore(&musb->lock, flags);
1546eb81955bSIlya Yanok }
1547eb81955bSIlya Yanok return retval;
1548eb81955bSIlya Yanok }
1549eb81955bSIlya Yanok
/*
 * Discard any data sitting in an endpoint's FIFO (usb_ep_ops.fifo_flush).
 * The endpoint's TX interrupt is masked for the duration of the flush.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes controller
			 * to interrupt current FIFO loading, but not flushing
			 * the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		/* NOTE(review): written twice on purpose, presumably to
		 * cover double-buffered FIFOs -- confirm before changing */
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}
1594eb81955bSIlya Yanok
/* Endpoint operations exposed to the gadget core for all non-ep0
 * endpoints (ep0 uses musb_g_ep0_ops instead).
 */
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
#ifndef __UBOOT__
	.set_wedge	= musb_gadget_set_wedge,
#endif
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};
1609eb81955bSIlya Yanok
1610eb81955bSIlya Yanok /* ----------------------------------------------------------------------- */
1611eb81955bSIlya Yanok
musb_gadget_get_frame(struct usb_gadget * gadget)1612eb81955bSIlya Yanok static int musb_gadget_get_frame(struct usb_gadget *gadget)
1613eb81955bSIlya Yanok {
1614eb81955bSIlya Yanok struct musb *musb = gadget_to_musb(gadget);
1615eb81955bSIlya Yanok
1616eb81955bSIlya Yanok return (int)musb_readw(musb->mregs, MUSB_FRAME);
1617eb81955bSIlya Yanok }
1618eb81955bSIlya Yanok
/*
 * Initiate remote wakeup / SRP (usb_gadget_ops.wakeup).
 * In the U-Boot build this is stubbed out and always reports success.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
#ifndef __UBOOT__
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		/* bounded wait for the SESSION bit to latch ... */
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		/* ... then a bounded wait for it to clear again */
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* lock released across otg_start_srp();
		 * NOTE(review): presumably because it may sleep -- confirm */
		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			otg_state_string(musb->xceiv->state));
		goto done;
	}

	status = 0;

	/* pulse RESUME signaling on the bus */
	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	dev_dbg(musb->controller, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
#else
	return 0;
#endif
}
1696eb81955bSIlya Yanok
1697eb81955bSIlya Yanok static int
musb_gadget_set_self_powered(struct usb_gadget * gadget,int is_selfpowered)1698eb81955bSIlya Yanok musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1699eb81955bSIlya Yanok {
1700eb81955bSIlya Yanok struct musb *musb = gadget_to_musb(gadget);
1701eb81955bSIlya Yanok
1702eb81955bSIlya Yanok musb->is_self_powered = !!is_selfpowered;
1703eb81955bSIlya Yanok return 0;
1704eb81955bSIlya Yanok }
1705eb81955bSIlya Yanok
musb_pullup(struct musb * musb,int is_on)1706eb81955bSIlya Yanok static void musb_pullup(struct musb *musb, int is_on)
1707eb81955bSIlya Yanok {
1708eb81955bSIlya Yanok u8 power;
1709eb81955bSIlya Yanok
1710eb81955bSIlya Yanok power = musb_readb(musb->mregs, MUSB_POWER);
1711eb81955bSIlya Yanok if (is_on)
1712eb81955bSIlya Yanok power |= MUSB_POWER_SOFTCONN;
1713eb81955bSIlya Yanok else
1714eb81955bSIlya Yanok power &= ~MUSB_POWER_SOFTCONN;
1715eb81955bSIlya Yanok
1716eb81955bSIlya Yanok /* FIXME if on, HdrcStart; if off, HdrcStop */
1717eb81955bSIlya Yanok
1718eb81955bSIlya Yanok dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1719eb81955bSIlya Yanok is_on ? "on" : "off");
1720eb81955bSIlya Yanok musb_writeb(musb->mregs, MUSB_POWER, power);
1721eb81955bSIlya Yanok }
1722eb81955bSIlya Yanok
#if 0
/*
 * NOTE(review): dead code, compiled out by the #if 0.  It also references
 * "musb" without declaring it, so it would not build if re-enabled as-is.
 */
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif
1736eb81955bSIlya Yanok
musb_gadget_vbus_draw(struct usb_gadget * gadget,unsigned mA)1737eb81955bSIlya Yanok static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1738eb81955bSIlya Yanok {
1739eb81955bSIlya Yanok #ifndef __UBOOT__
1740eb81955bSIlya Yanok struct musb *musb = gadget_to_musb(gadget);
1741eb81955bSIlya Yanok
1742eb81955bSIlya Yanok if (!musb->xceiv->set_power)
1743eb81955bSIlya Yanok return -EOPNOTSUPP;
1744eb81955bSIlya Yanok return usb_phy_set_power(musb->xceiv, mA);
1745eb81955bSIlya Yanok #else
1746eb81955bSIlya Yanok return 0;
1747eb81955bSIlya Yanok #endif
1748eb81955bSIlya Yanok }
1749eb81955bSIlya Yanok
musb_gadget_pullup(struct usb_gadget * gadget,int is_on)1750eb81955bSIlya Yanok static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1751eb81955bSIlya Yanok {
1752eb81955bSIlya Yanok struct musb *musb = gadget_to_musb(gadget);
1753eb81955bSIlya Yanok unsigned long flags;
1754eb81955bSIlya Yanok
1755eb81955bSIlya Yanok is_on = !!is_on;
1756eb81955bSIlya Yanok
1757eb81955bSIlya Yanok pm_runtime_get_sync(musb->controller);
1758eb81955bSIlya Yanok
1759eb81955bSIlya Yanok /* NOTE: this assumes we are sensing vbus; we'd rather
1760eb81955bSIlya Yanok * not pullup unless the B-session is active.
1761eb81955bSIlya Yanok */
1762eb81955bSIlya Yanok spin_lock_irqsave(&musb->lock, flags);
1763eb81955bSIlya Yanok if (is_on != musb->softconnect) {
1764eb81955bSIlya Yanok musb->softconnect = is_on;
1765eb81955bSIlya Yanok musb_pullup(musb, is_on);
1766eb81955bSIlya Yanok }
1767eb81955bSIlya Yanok spin_unlock_irqrestore(&musb->lock, flags);
1768eb81955bSIlya Yanok
1769eb81955bSIlya Yanok pm_runtime_put(musb->controller);
1770eb81955bSIlya Yanok
1771eb81955bSIlya Yanok return 0;
1772eb81955bSIlya Yanok }
1773eb81955bSIlya Yanok
1774eb81955bSIlya Yanok #ifndef __UBOOT__
1775eb81955bSIlya Yanok static int musb_gadget_start(struct usb_gadget *g,
1776eb81955bSIlya Yanok struct usb_gadget_driver *driver);
1777eb81955bSIlya Yanok static int musb_gadget_stop(struct usb_gadget *g,
1778eb81955bSIlya Yanok struct usb_gadget_driver *driver);
1779eb81955bSIlya Yanok #endif
1780eb81955bSIlya Yanok
/* Controller-level operations handed to the gadget core. */
static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session	= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
#ifndef __UBOOT__
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
#endif
};
1793eb81955bSIlya Yanok
1794eb81955bSIlya Yanok /* ----------------------------------------------------------------------- */
1795eb81955bSIlya Yanok
1796eb81955bSIlya Yanok /* Registration */
1797eb81955bSIlya Yanok
1798eb81955bSIlya Yanok /* Only this registration code "knows" the rule (from USB standards)
1799eb81955bSIlya Yanok * about there being only one external upstream port. It assumes
1800eb81955bSIlya Yanok * all peripheral ports are external...
1801eb81955bSIlya Yanok */
1802eb81955bSIlya Yanok
1803eb81955bSIlya Yanok #ifndef __UBOOT__
/* Driver-model release hook for the virtual "gadget" device; currently
 * only logs -- there is nothing to free yet.
 */
static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}
1809eb81955bSIlya Yanok #endif
1810eb81955bSIlya Yanok
1811eb81955bSIlya Yanok
1812eb81955bSIlya Yanok static void __devinit
init_peripheral_ep(struct musb * musb,struct musb_ep * ep,u8 epnum,int is_in)1813eb81955bSIlya Yanok init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1814eb81955bSIlya Yanok {
1815eb81955bSIlya Yanok struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1816eb81955bSIlya Yanok
1817eb81955bSIlya Yanok memset(ep, 0, sizeof *ep);
1818eb81955bSIlya Yanok
1819eb81955bSIlya Yanok ep->current_epnum = epnum;
1820eb81955bSIlya Yanok ep->musb = musb;
1821eb81955bSIlya Yanok ep->hw_ep = hw_ep;
1822eb81955bSIlya Yanok ep->is_in = is_in;
1823eb81955bSIlya Yanok
1824eb81955bSIlya Yanok INIT_LIST_HEAD(&ep->req_list);
1825eb81955bSIlya Yanok
1826eb81955bSIlya Yanok sprintf(ep->name, "ep%d%s", epnum,
1827eb81955bSIlya Yanok (!epnum || hw_ep->is_shared_fifo) ? "" : (
1828eb81955bSIlya Yanok is_in ? "in" : "out"));
1829eb81955bSIlya Yanok ep->end_point.name = ep->name;
1830eb81955bSIlya Yanok INIT_LIST_HEAD(&ep->end_point.ep_list);
1831eb81955bSIlya Yanok if (!epnum) {
1832eb81955bSIlya Yanok ep->end_point.maxpacket = 64;
1833eb81955bSIlya Yanok ep->end_point.ops = &musb_g_ep0_ops;
1834eb81955bSIlya Yanok musb->g.ep0 = &ep->end_point;
1835eb81955bSIlya Yanok } else {
1836eb81955bSIlya Yanok if (is_in)
1837eb81955bSIlya Yanok ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1838eb81955bSIlya Yanok else
1839eb81955bSIlya Yanok ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1840eb81955bSIlya Yanok ep->end_point.ops = &musb_ep_ops;
1841eb81955bSIlya Yanok list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1842eb81955bSIlya Yanok }
1843eb81955bSIlya Yanok }
1844eb81955bSIlya Yanok
1845eb81955bSIlya Yanok /*
1846eb81955bSIlya Yanok * Initialize the endpoints exposed to peripheral drivers, with backlinks
1847eb81955bSIlya Yanok * to the rest of the driver state.
1848eb81955bSIlya Yanok */
musb_g_init_endpoints(struct musb * musb)1849eb81955bSIlya Yanok static inline void __devinit musb_g_init_endpoints(struct musb *musb)
1850eb81955bSIlya Yanok {
1851eb81955bSIlya Yanok u8 epnum;
1852eb81955bSIlya Yanok struct musb_hw_ep *hw_ep;
1853eb81955bSIlya Yanok unsigned count = 0;
1854eb81955bSIlya Yanok
1855eb81955bSIlya Yanok /* initialize endpoint list just once */
1856eb81955bSIlya Yanok INIT_LIST_HEAD(&(musb->g.ep_list));
1857eb81955bSIlya Yanok
1858eb81955bSIlya Yanok for (epnum = 0, hw_ep = musb->endpoints;
1859eb81955bSIlya Yanok epnum < musb->nr_endpoints;
1860eb81955bSIlya Yanok epnum++, hw_ep++) {
1861eb81955bSIlya Yanok if (hw_ep->is_shared_fifo /* || !epnum */) {
1862eb81955bSIlya Yanok init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1863eb81955bSIlya Yanok count++;
1864eb81955bSIlya Yanok } else {
1865eb81955bSIlya Yanok if (hw_ep->max_packet_sz_tx) {
1866eb81955bSIlya Yanok init_peripheral_ep(musb, &hw_ep->ep_in,
1867eb81955bSIlya Yanok epnum, 1);
1868eb81955bSIlya Yanok count++;
1869eb81955bSIlya Yanok }
1870eb81955bSIlya Yanok if (hw_ep->max_packet_sz_rx) {
1871eb81955bSIlya Yanok init_peripheral_ep(musb, &hw_ep->ep_out,
1872eb81955bSIlya Yanok epnum, 0);
1873eb81955bSIlya Yanok count++;
1874eb81955bSIlya Yanok }
1875eb81955bSIlya Yanok }
1876eb81955bSIlya Yanok }
1877eb81955bSIlya Yanok }
1878eb81955bSIlya Yanok
1879eb81955bSIlya Yanok /* called once during driver setup to initialize and link into
1880eb81955bSIlya Yanok * the driver model; memory is zeroed.
1881eb81955bSIlya Yanok */
musb_gadget_setup(struct musb * musb)1882eb81955bSIlya Yanok int __devinit musb_gadget_setup(struct musb *musb)
1883eb81955bSIlya Yanok {
1884eb81955bSIlya Yanok int status;
1885eb81955bSIlya Yanok
1886eb81955bSIlya Yanok /* REVISIT minor race: if (erroneously) setting up two
1887eb81955bSIlya Yanok * musb peripherals at the same time, only the bus lock
1888eb81955bSIlya Yanok * is probably held.
1889eb81955bSIlya Yanok */
1890eb81955bSIlya Yanok
1891eb81955bSIlya Yanok musb->g.ops = &musb_gadget_operations;
1892eb81955bSIlya Yanok #ifndef __UBOOT__
1893eb81955bSIlya Yanok musb->g.max_speed = USB_SPEED_HIGH;
1894eb81955bSIlya Yanok #endif
1895eb81955bSIlya Yanok musb->g.speed = USB_SPEED_UNKNOWN;
1896eb81955bSIlya Yanok
1897eb81955bSIlya Yanok #ifndef __UBOOT__
1898eb81955bSIlya Yanok /* this "gadget" abstracts/virtualizes the controller */
1899eb81955bSIlya Yanok dev_set_name(&musb->g.dev, "gadget");
1900eb81955bSIlya Yanok musb->g.dev.parent = musb->controller;
1901eb81955bSIlya Yanok musb->g.dev.dma_mask = musb->controller->dma_mask;
1902eb81955bSIlya Yanok musb->g.dev.release = musb_gadget_release;
1903eb81955bSIlya Yanok #endif
1904eb81955bSIlya Yanok musb->g.name = musb_driver_name;
1905eb81955bSIlya Yanok
1906eb81955bSIlya Yanok #ifndef __UBOOT__
1907eb81955bSIlya Yanok if (is_otg_enabled(musb))
1908eb81955bSIlya Yanok musb->g.is_otg = 1;
1909eb81955bSIlya Yanok #endif
1910eb81955bSIlya Yanok
1911eb81955bSIlya Yanok musb_g_init_endpoints(musb);
1912eb81955bSIlya Yanok
1913eb81955bSIlya Yanok musb->is_active = 0;
1914eb81955bSIlya Yanok musb_platform_try_idle(musb, 0);
1915eb81955bSIlya Yanok
1916eb81955bSIlya Yanok #ifndef __UBOOT__
1917eb81955bSIlya Yanok status = device_register(&musb->g.dev);
1918eb81955bSIlya Yanok if (status != 0) {
1919eb81955bSIlya Yanok put_device(&musb->g.dev);
1920eb81955bSIlya Yanok return status;
1921eb81955bSIlya Yanok }
1922eb81955bSIlya Yanok status = usb_add_gadget_udc(musb->controller, &musb->g);
1923eb81955bSIlya Yanok if (status)
1924eb81955bSIlya Yanok goto err;
1925eb81955bSIlya Yanok #endif
1926eb81955bSIlya Yanok
1927eb81955bSIlya Yanok return 0;
1928eb81955bSIlya Yanok #ifndef __UBOOT__
1929eb81955bSIlya Yanok err:
1930eb81955bSIlya Yanok musb->g.dev.parent = NULL;
1931eb81955bSIlya Yanok device_unregister(&musb->g.dev);
1932eb81955bSIlya Yanok return status;
1933eb81955bSIlya Yanok #endif
1934eb81955bSIlya Yanok }
1935eb81955bSIlya Yanok
/*
 * Undo musb_gadget_setup().  In the U-Boot build there is no
 * driver-model/UDC registration, so this is a no-op.
 */
void musb_gadget_cleanup(struct musb *musb)
{
#ifndef __UBOOT__
	usb_del_gadget_udc(&musb->g);
	/* only unregister if the err: path above didn't already */
	if (musb->g.dev.parent)
		device_unregister(&musb->g.dev);
#endif
}
1944eb81955bSIlya Yanok
1945eb81955bSIlya Yanok /*
1946eb81955bSIlya Yanok * Register the gadget driver. Used by gadget drivers when
1947eb81955bSIlya Yanok * registering themselves with the controller.
1948eb81955bSIlya Yanok *
1949eb81955bSIlya Yanok * -EINVAL something went wrong (not driver)
1950eb81955bSIlya Yanok * -EBUSY another gadget is already using the controller
1951eb81955bSIlya Yanok * -ENOMEM no memory to perform the operation
1952eb81955bSIlya Yanok *
1953eb81955bSIlya Yanok * @param driver the gadget driver
1954eb81955bSIlya Yanok * @return <0 if error, 0 if everything is fine
1955eb81955bSIlya Yanok */
#ifndef __UBOOT__
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
#else
int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
#endif
{
	struct musb	*musb = gadget_to_musb(g);
#ifndef __UBOOT__
	struct usb_otg	*otg = musb->xceiv->otg;
#endif
	unsigned long	flags;
	int		retval = -EINVAL;

#ifndef __UBOOT__
	/* MUSB is a high-speed core; refuse drivers that are limited
	 * to full/low speed.
	 */
	if (driver->max_speed < USB_SPEED_HIGH)
		goto err0;
#endif

	/* Take a runtime-PM reference for the duration of the bind;
	 * conditionally released below when no cable event is pending.
	 */
	pm_runtime_get_sync(musb->controller);

#ifndef __UBOOT__
	dev_dbg(musb->controller, "registering driver %s\n", driver->function);
#endif

	/* Start soft-disconnected; musb_pullup() connects us later. */
	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

#ifndef __UBOOT__
	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->state = OTG_STATE_B_IDLE;

	/*
	 * FIXME this ignores the softconnect flag.  Drivers are
	 * allowed to hold the peripheral inactive until for example
	 * userspace hooks up printer hardware or DSP codecs, so
	 * hosts only see fully functional devices.
	 */

	/* in non-OTG builds the controller is started unconditionally;
	 * in OTG builds the HCD startup below takes care of it
	 */
	if (!is_otg_enabled(musb))
#endif
		musb_start(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

#ifndef __UBOOT__
	if (is_otg_enabled(musb)) {
		struct usb_hcd	*hcd = musb_to_hcd(musb);

		dev_dbg(musb->controller, "OTG startup...\n");

		/* REVISIT:  funcall to other code, which also
		 * handles power budgeting ... this way also
		 * ensures HdrcStart is indirectly called.
		 */
		retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
		if (retval < 0) {
			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
			goto err2;
		}

		/* ID pin grounded (we are the A-device): drive VBUS */
		if ((musb->xceiv->last_event == USB_EVENT_ID)
					&& otg->set_vbus)
			otg_set_vbus(otg, 1);

		hcd->self.uses_pio_for_control = 1;
	}
	/* No cable event so far: drop the reference taken above and let
	 * the controller runtime-suspend until attach.
	 */
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);
#endif

	return 0;

#ifndef __UBOOT__
err2:
	if (!is_otg_enabled(musb))
		musb_stop(musb);
err0:
	return retval;
#endif
}
2041eb81955bSIlya Yanok
2042eb81955bSIlya Yanok #ifndef __UBOOT__
stop_activity(struct musb * musb,struct usb_gadget_driver * driver)2043eb81955bSIlya Yanok static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
2044eb81955bSIlya Yanok {
2045eb81955bSIlya Yanok int i;
2046eb81955bSIlya Yanok struct musb_hw_ep *hw_ep;
2047eb81955bSIlya Yanok
2048eb81955bSIlya Yanok /* don't disconnect if it's not connected */
2049eb81955bSIlya Yanok if (musb->g.speed == USB_SPEED_UNKNOWN)
2050eb81955bSIlya Yanok driver = NULL;
2051eb81955bSIlya Yanok else
2052eb81955bSIlya Yanok musb->g.speed = USB_SPEED_UNKNOWN;
2053eb81955bSIlya Yanok
2054eb81955bSIlya Yanok /* deactivate the hardware */
2055eb81955bSIlya Yanok if (musb->softconnect) {
2056eb81955bSIlya Yanok musb->softconnect = 0;
2057eb81955bSIlya Yanok musb_pullup(musb, 0);
2058eb81955bSIlya Yanok }
2059eb81955bSIlya Yanok musb_stop(musb);
2060eb81955bSIlya Yanok
2061eb81955bSIlya Yanok /* killing any outstanding requests will quiesce the driver;
2062eb81955bSIlya Yanok * then report disconnect
2063eb81955bSIlya Yanok */
2064eb81955bSIlya Yanok if (driver) {
2065eb81955bSIlya Yanok for (i = 0, hw_ep = musb->endpoints;
2066eb81955bSIlya Yanok i < musb->nr_endpoints;
2067eb81955bSIlya Yanok i++, hw_ep++) {
2068eb81955bSIlya Yanok musb_ep_select(musb->mregs, i);
2069eb81955bSIlya Yanok if (hw_ep->is_shared_fifo /* || !epnum */) {
2070eb81955bSIlya Yanok nuke(&hw_ep->ep_in, -ESHUTDOWN);
2071eb81955bSIlya Yanok } else {
2072eb81955bSIlya Yanok if (hw_ep->max_packet_sz_tx)
2073eb81955bSIlya Yanok nuke(&hw_ep->ep_in, -ESHUTDOWN);
2074eb81955bSIlya Yanok if (hw_ep->max_packet_sz_rx)
2075eb81955bSIlya Yanok nuke(&hw_ep->ep_out, -ESHUTDOWN);
2076eb81955bSIlya Yanok }
2077eb81955bSIlya Yanok }
2078eb81955bSIlya Yanok }
2079eb81955bSIlya Yanok }
2080eb81955bSIlya Yanok
2081eb81955bSIlya Yanok /*
2082eb81955bSIlya Yanok * Unregister the gadget driver. Used by gadget drivers when
2083eb81955bSIlya Yanok * unregistering themselves from the controller.
2084eb81955bSIlya Yanok *
2085eb81955bSIlya Yanok * @param driver the gadget driver to unregister
2086eb81955bSIlya Yanok */
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb	*musb = gadget_to_musb(g);
	unsigned long	flags;

	/* Resume the controller if it runtime-suspended with no cable
	 * attached; balanced by the pm_runtime_put() at the end.
	 */
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

	musb_hnp_stop(musb);

	/* stop drawing VBUS current */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->xceiv->state = OTG_STATE_UNDEFINED;
	/* flush endpoints and report the disconnect to the driver */
	stop_activity(musb, driver);
	otg_set_peripheral(musb->xceiv->otg, NULL);

	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	/* non-OTG builds never added an HCD, so stop the core directly */
	if (!is_otg_enabled(musb))
		musb_stop(musb);

	pm_runtime_put(musb->controller);

	return 0;
}
2132eb81955bSIlya Yanok #endif
2133eb81955bSIlya Yanok
2134eb81955bSIlya Yanok /* ----------------------------------------------------------------------- */
2135eb81955bSIlya Yanok
2136eb81955bSIlya Yanok /* lifecycle operations called through plat_uds.c */
2137eb81955bSIlya Yanok
musb_g_resume(struct musb * musb)2138eb81955bSIlya Yanok void musb_g_resume(struct musb *musb)
2139eb81955bSIlya Yanok {
2140eb81955bSIlya Yanok #ifndef __UBOOT__
2141eb81955bSIlya Yanok musb->is_suspended = 0;
2142eb81955bSIlya Yanok switch (musb->xceiv->state) {
2143eb81955bSIlya Yanok case OTG_STATE_B_IDLE:
2144eb81955bSIlya Yanok break;
2145eb81955bSIlya Yanok case OTG_STATE_B_WAIT_ACON:
2146eb81955bSIlya Yanok case OTG_STATE_B_PERIPHERAL:
2147eb81955bSIlya Yanok musb->is_active = 1;
2148eb81955bSIlya Yanok if (musb->gadget_driver && musb->gadget_driver->resume) {
2149eb81955bSIlya Yanok spin_unlock(&musb->lock);
2150eb81955bSIlya Yanok musb->gadget_driver->resume(&musb->g);
2151eb81955bSIlya Yanok spin_lock(&musb->lock);
2152eb81955bSIlya Yanok }
2153eb81955bSIlya Yanok break;
2154eb81955bSIlya Yanok default:
2155eb81955bSIlya Yanok WARNING("unhandled RESUME transition (%s)\n",
2156eb81955bSIlya Yanok otg_state_string(musb->xceiv->state));
2157eb81955bSIlya Yanok }
2158eb81955bSIlya Yanok #endif
2159eb81955bSIlya Yanok }
2160eb81955bSIlya Yanok
/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
#ifndef __UBOOT__
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		/* both VBUS bits set (full VBUS level): we are now
		 * operating as a B-peripheral
		 */
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			/* drop the lock around the driver callback */
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
				otg_state_string(musb->xceiv->state));
	}
#endif
}
2192eb81955bSIlya Yanok
/* Called during SRP; delegates to the gadget-level wakeup operation. */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}
2198eb81955bSIlya Yanok
/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	/* clear HR, preserving only the SESSION bit in DEVCTL */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		/* drop the lock around the driver callback */
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

#ifndef __UBOOT__
	/* move the OTG state machine to the matching idle/wait state */
	switch (musb->xceiv->state) {
	default:
		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
			otg_state_string(musb->xceiv->state));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		/* SRP in flight: leave the state alone */
		break;
	}
#endif

	musb->is_active = 0;
}
2245eb81955bSIlya Yanok
/*
 * Handle a USB bus reset in peripheral mode: report any prior
 * disconnect, latch the negotiated speed from MUSB_POWER, and put the
 * gadget back into USB_STATE_DEFAULT with address 0 and a fresh EP0
 * state machine.  Called with musb->lock held; musb_g_disconnect()
 * may drop and reacquire it, hence the sparse annotations.
 */
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8	devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8	power;

#ifndef __UBOOT__
	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MUSB_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);
#endif

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);


	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	/* a bus reset clears remote-wakeup and HNP feature selections */
	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

#ifndef __UBOOT__
	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
#endif
}
2309