// SPDX-License-Identifier: GPL-2.0
/*
 * ISP1362 HCD (Host Controller Driver) for USB.
 *
 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
 *
 * Derived from the SL811 HCD, rewritten for ISP116x.
 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
 *
 * Portions:
 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
 * Copyright (C) 2004 David Brownell
 */

/*
 * The ISP1362 chip requires a large delay (300ns and 462ns) between
 * accesses to the address and data register.
 * The following timing options exist:
 *
 * 1. Configure your memory controller to add such delays if it can (the best)
 * 2. Implement platform-specific delay function possibly
 *    combined with configuring the memory controller; see
 *    include/linux/usb_isp1362.h for more info.
 * 3. Use ndelay (easiest, poorest).
 *
 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
 * platform specific section of isp1362.h to select the appropriate variant.
 *
 * Also note that according to the Philips "ISP1362 Errata" document
 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
 * is reasserted (even with #CS deasserted) within 132ns after a
 * write cycle to any controller register. If the hardware doesn't
 * implement the recommended fix (gating the #WR with #CS) software
 * must ensure that no further write cycle (not necessarily to the chip!)
 * is issued by the CPU within this interval.
 *
 * For PXA25x this can be ensured by using VLIO with the maximum
 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
 */

#undef ISP1362_DEBUG

/*
 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
 * requests are carried out in separate frames. This will delay any SETUP
 * packets until the start of the next frame so that this situation is
 * unlikely to occur (and makes usbtest happy running with a PXA255 target
 * device).
 */
#undef BUGGY_PXA2XX_UDC_USBTEST

#undef PTD_TRACE
#undef URB_TRACE
#undef VERBOSE
#undef REGISTERS

/* This enables a memory test on the ISP1362 chip memory to make sure the
 * chip access timing is correct.
 */
#undef CHIP_BUFFER_TEST

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/isp1362.h>
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/bitmap.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static int dbg_level;
#ifdef ISP1362_DEBUG
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
#endif

#include "../core/usb.h"
#include "isp1362.h"


#define DRIVER_VERSION	"2005-04-04"
#define DRIVER_DESC	"ISP1362 USB Host Controller Driver"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char hcd_name[] = "isp1362-hcd";

static void isp1362_hc_stop(struct usb_hcd *hcd);
static int isp1362_hc_start(struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * When called from the interrupt handler, only isp1362_hcd->irqenb is
 * modified, since the interrupt handler will write isp1362_hcd->irqenb to
 * HCuPINTENB upon completion.
 * We don't need a 'disable' counterpart, since interrupts will be disabled
 * only by the interrupt handler.
 */
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
		return;
	if (mask & ~isp1362_hcd->irqenb)
		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
	isp1362_hcd->irqenb |= mask;
	if (isp1362_hcd->irq_active)
		return;
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}

/*-------------------------------------------------------------------------*/

static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
						     u16 offset)
{
	struct isp1362_ep_queue *epq = NULL;

	if (offset < isp1362_hcd->istl_queue[1].buf_start)
		epq = &isp1362_hcd->istl_queue[0];
	else if (offset < isp1362_hcd->intl_queue.buf_start)
		epq = &isp1362_hcd->istl_queue[1];
	else if (offset < isp1362_hcd->atl_queue.buf_start)
		epq = &isp1362_hcd->intl_queue;
	else if (offset < isp1362_hcd->atl_queue.buf_start +
			  isp1362_hcd->atl_queue.buf_size)
		epq = &isp1362_hcd->atl_queue;

	if (epq)
		DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
	else
		pr_warn("%s: invalid PTD $%04x\n", __func__, offset);

	return epq;
}

static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
{
	int offset;

	if (index * epq->blk_size > epq->buf_size) {
		pr_warn("%s: Bad %s index %d(%d)\n",
			__func__, epq->name, index,
			epq->buf_size / epq->blk_size);
		return -EINVAL;
	}
	offset = epq->buf_start + index * epq->blk_size;
	DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);

	return offset;
}

/*-------------------------------------------------------------------------*/

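/*
 * Limit a transfer to the controller's maximum transfer size and to the
 * buffer space still available in the queue (minus the PTD header); if the
 * request has to be split, round down to a multiple of the max packet size.
 */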
static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
				    int mps)
{
	u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);

	xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
	if (xfer_size < size && xfer_size % mps)
		xfer_size -= xfer_size % mps;

	return xfer_size;
}

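/*
 * Reserve a contiguous run of PTD buffer blocks for this endpoint in the
 * queue's allocation bitmap. Returns the first block index, -ENOMEM if the
 * queue is exhausted, or -EOVERFLOW if no large enough run is free.
 */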
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		       epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0,
					   num_ptds, 0);
	if (found >= epq->buf_count)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	bitmap_set(&epq->buf_map, found, num_ptds);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}

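/* Give an endpoint's PTD blocks back to the queue and mark them as skipped. */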
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int last = ep->ptd_index + ep->num_ptds;

	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		       __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		       ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		       epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds);
	bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds);
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}

/*-------------------------------------------------------------------------*/

/*
 * Set up PTDs.
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
				urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}

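/* Copy the PTD header and any outgoing payload into the chip's buffer memory. */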
static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			      struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;

	prefetch(ptd);
	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	if (len)
		isp1362_write_buffer(isp1362_hcd, ep->data,
				     ep->ptd_offset + PTD_HEADER_SIZE, len);

	dump_ptd(ptd);
	dump_ptd_out_data(ptd, ep->data);
}

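/*
 * Read back the PTD header after completion and, for IN transfers, copy the
 * received payload from chip buffer memory; also removes the endpoint from
 * the queue's active list.
 */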
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
		       ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the caller's transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}

/*
 * INT PTDs will stay in the chip until data is available.
 * This function will remove a PTD from the chip when the URB is dequeued.
 * Must be called with the spinlock held and IRQs disabled
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)

{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}

/*
 * Take done or failed requests out of the schedule. Give back
 * processed URBs.
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
		usb_pipeint(urb->pipe) ? "int" :
		usb_pipebulk(urb->pipe) ? "bulk" :
		"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);


	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}


	if (ep->interval) {
		/* periodic deschedule */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}

/*
 * Analyze transfer results, handle partial transfers and errors
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	if (cc == PTD_NOTACCESSED) {
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		       ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For an allowed underrun we
	 * clear the error and continue as normal. For a forbidden
	 * underrun we finish the DATA stage immediately, while for
	 * a control transfer we proceed with the STATUS stage.
	 */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			/* save the data underrun error code for later and
			 * proceed with the status stage
			 */
			urb->actual_length += PTD_GET_COUNT(ptd);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			       PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}

static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}

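/*
 * Kick off ATL processing for the given number of prepared PTDs; with no
 * PTDs pending, just arm the SOF interrupt so the schedule is revisited.
 */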
static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
{
	if (count > 0) {
		if (count < isp1362_hcd->atl_queue.ptd_count)
			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else
		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
}

static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}

static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
{
	isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
			   HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
}

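/*
 * Prepare a PTD for the next chunk of an URB, claim buffer space for it,
 * write it to the chip and clear its skip bit. Returns 0 on success or the
 * error from claim_ptd_buffers() so the caller can retry later.
 */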
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
	int index;

	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
	index = claim_ptd_buffers(epq, ep, ep->length);
	if (index == -ENOMEM) {
		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
		return index;
	} else if (index == -EOVERFLOW) {
		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
		    epq->buf_map, epq->skip_map);
		return index;
	} else
		BUG_ON(index < 0);
	list_add_tail(&ep->active, &epq->active);
	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
	    ep, ep->num_req, ep->length, &epq->active);
	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
	    ep->ptd_offset, ep, ep->num_req);
	isp1362_write_ptd(isp1362_hcd, ep, epq);
	__clear_bit(ep->ptd_index, &epq->skip_map);

	return 0;
}

static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}

static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		static int last_count;

		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}

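/*
 * Compute the chip buffer offset following this endpoint's PTD (used when
 * packing ISO PTDs back to back); returns -ENOMEM when the end of the ISTL
 * buffer would be exceeded.
 */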
static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	u16 ptd_offset = ep->ptd_offset;
	int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;

	DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
	    ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);

	ptd_offset += num_ptds * epq->blk_size;
	if (ptd_offset < epq->buf_start + epq->buf_size)
		return ptd_offset;
	else
		return -ENOMEM;
}

static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	if (!list_empty(&epq->active))
		return;

	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				       __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warn("%s: req %d No more %s PTD buffers available\n",
					__func__, ep->num_req, epq->name);
				break;
			}
		}
	}
	list_for_each_entry(ep, &epq->active, active) {
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}

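/*
 * Process all PTDs of a queue that the controller has marked done: read back
 * the results, release their buffer blocks and post-process the endpoints.
 */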
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		if (!done_map)
			break;
	}
	if (done_map)
		pr_warn("%s: done_map not clear: %08lx:%08lx\n",
			__func__, done_map, epq->skip_map);
	atomic_dec(&epq->finishing);
}

static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}

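/*
 * Top-level interrupt handler: dispatches SOF, ISTL0/1, INTL, ATL and OPR
 * events and restarts the corresponding transfer queues.
 */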
isp1362_irq(struct usb_hcd * hcd)1007*4882a593Smuzhiyun static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
1008*4882a593Smuzhiyun {
1009*4882a593Smuzhiyun int handled = 0;
1010*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1011*4882a593Smuzhiyun u16 irqstat;
1012*4882a593Smuzhiyun u16 svc_mask;
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun spin_lock(&isp1362_hcd->lock);
1015*4882a593Smuzhiyun
1016*4882a593Smuzhiyun BUG_ON(isp1362_hcd->irq_active++);
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
1021*4882a593Smuzhiyun DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);
1022*4882a593Smuzhiyun
1023*4882a593Smuzhiyun /* only handle interrupts that are currently enabled */
1024*4882a593Smuzhiyun irqstat &= isp1362_hcd->irqenb;
1025*4882a593Smuzhiyun isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
1026*4882a593Smuzhiyun svc_mask = irqstat;
1027*4882a593Smuzhiyun
1028*4882a593Smuzhiyun if (irqstat & HCuPINT_SOF) {
1029*4882a593Smuzhiyun isp1362_hcd->irqenb &= ~HCuPINT_SOF;
1030*4882a593Smuzhiyun isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
1031*4882a593Smuzhiyun handled = 1;
1032*4882a593Smuzhiyun svc_mask &= ~HCuPINT_SOF;
1033*4882a593Smuzhiyun DBG(3, "%s: SOF\n", __func__);
1034*4882a593Smuzhiyun isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1035*4882a593Smuzhiyun if (!list_empty(&isp1362_hcd->remove_list))
1036*4882a593Smuzhiyun finish_unlinks(isp1362_hcd);
1037*4882a593Smuzhiyun if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
1038*4882a593Smuzhiyun if (list_empty(&isp1362_hcd->atl_queue.active)) {
1039*4882a593Smuzhiyun start_atl_transfers(isp1362_hcd);
1040*4882a593Smuzhiyun } else {
1041*4882a593Smuzhiyun isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
1042*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
1043*4882a593Smuzhiyun isp1362_hcd->atl_queue.skip_map);
1044*4882a593Smuzhiyun isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1045*4882a593Smuzhiyun }
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun }
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun if (irqstat & HCuPINT_ISTL0) {
1050*4882a593Smuzhiyun isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
1051*4882a593Smuzhiyun handled = 1;
1052*4882a593Smuzhiyun svc_mask &= ~HCuPINT_ISTL0;
1053*4882a593Smuzhiyun isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
1054*4882a593Smuzhiyun DBG(1, "%s: ISTL0\n", __func__);
1055*4882a593Smuzhiyun WARN_ON((int)!!isp1362_hcd->istl_flip);
1056*4882a593Smuzhiyun WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1057*4882a593Smuzhiyun HCBUFSTAT_ISTL0_ACTIVE);
1058*4882a593Smuzhiyun WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1059*4882a593Smuzhiyun HCBUFSTAT_ISTL0_DONE));
1060*4882a593Smuzhiyun isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
1061*4882a593Smuzhiyun }
1062*4882a593Smuzhiyun
1063*4882a593Smuzhiyun if (irqstat & HCuPINT_ISTL1) {
1064*4882a593Smuzhiyun isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
1065*4882a593Smuzhiyun handled = 1;
1066*4882a593Smuzhiyun svc_mask &= ~HCuPINT_ISTL1;
1067*4882a593Smuzhiyun isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
1068*4882a593Smuzhiyun DBG(1, "%s: ISTL1\n", __func__);
1069*4882a593Smuzhiyun WARN_ON(!(int)isp1362_hcd->istl_flip);
1070*4882a593Smuzhiyun WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1071*4882a593Smuzhiyun HCBUFSTAT_ISTL1_ACTIVE);
1072*4882a593Smuzhiyun WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
1073*4882a593Smuzhiyun HCBUFSTAT_ISTL1_DONE));
1074*4882a593Smuzhiyun isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
1075*4882a593Smuzhiyun }
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
1078*4882a593Smuzhiyun WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
1079*4882a593Smuzhiyun (HCuPINT_ISTL0 | HCuPINT_ISTL1));
1080*4882a593Smuzhiyun finish_iso_transfers(isp1362_hcd,
1081*4882a593Smuzhiyun &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
1082*4882a593Smuzhiyun start_iso_transfers(isp1362_hcd);
1083*4882a593Smuzhiyun isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyun if (irqstat & HCuPINT_INTL) {
1087*4882a593Smuzhiyun u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1088*4882a593Smuzhiyun u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
1089*4882a593Smuzhiyun isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun DBG(2, "%s: INTL\n", __func__);
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun svc_mask &= ~HCuPINT_INTL;
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
1096*4882a593Smuzhiyun if (~(done_map | skip_map) == 0)
1097*4882a593Smuzhiyun /* All PTDs are finished, disable INTL processing entirely */
1098*4882a593Smuzhiyun isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
1099*4882a593Smuzhiyun
1100*4882a593Smuzhiyun handled = 1;
1101*4882a593Smuzhiyun WARN_ON(!done_map);
1102*4882a593Smuzhiyun if (done_map) {
1103*4882a593Smuzhiyun DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
1104*4882a593Smuzhiyun finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1105*4882a593Smuzhiyun start_intl_transfers(isp1362_hcd);
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun if (irqstat & HCuPINT_ATL) {
1110*4882a593Smuzhiyun u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1111*4882a593Smuzhiyun u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
1112*4882a593Smuzhiyun isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun DBG(2, "%s: ATL\n", __func__);
1115*4882a593Smuzhiyun
1116*4882a593Smuzhiyun svc_mask &= ~HCuPINT_ATL;
1117*4882a593Smuzhiyun
1118*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
1119*4882a593Smuzhiyun if (~(done_map | skip_map) == 0)
1120*4882a593Smuzhiyun isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
1121*4882a593Smuzhiyun if (done_map) {
1122*4882a593Smuzhiyun DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
1123*4882a593Smuzhiyun finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1124*4882a593Smuzhiyun start_atl_transfers(isp1362_hcd);
1125*4882a593Smuzhiyun }
1126*4882a593Smuzhiyun handled = 1;
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun if (irqstat & HCuPINT_OPR) {
1130*4882a593Smuzhiyun u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
1131*4882a593Smuzhiyun isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun svc_mask &= ~HCuPINT_OPR;
1134*4882a593Smuzhiyun DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
1135*4882a593Smuzhiyun intstat &= isp1362_hcd->intenb;
1136*4882a593Smuzhiyun if (intstat & OHCI_INTR_UE) {
1137*4882a593Smuzhiyun pr_err("Unrecoverable error\n");
1138*4882a593Smuzhiyun /* FIXME: reset or clean up the controller here */
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun if (intstat & OHCI_INTR_RHSC) {
1141*4882a593Smuzhiyun isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
1142*4882a593Smuzhiyun isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
1143*4882a593Smuzhiyun isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun if (intstat & OHCI_INTR_RD) {
1146*4882a593Smuzhiyun pr_info("%s: RESUME DETECTED\n", __func__);
1147*4882a593Smuzhiyun isp1362_show_reg(isp1362_hcd, HCCONTROL);
1148*4882a593Smuzhiyun usb_hcd_resume_root_hub(hcd);
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
1151*4882a593Smuzhiyun irqstat &= ~HCuPINT_OPR;
1152*4882a593Smuzhiyun handled = 1;
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun if (irqstat & HCuPINT_SUSP) {
1156*4882a593Smuzhiyun isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
1157*4882a593Smuzhiyun handled = 1;
1158*4882a593Smuzhiyun svc_mask &= ~HCuPINT_SUSP;
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun pr_info("%s: SUSPEND IRQ\n", __func__);
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun
1163*4882a593Smuzhiyun if (irqstat & HCuPINT_CLKRDY) {
1164*4882a593Smuzhiyun isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
1165*4882a593Smuzhiyun handled = 1;
1166*4882a593Smuzhiyun isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
1167*4882a593Smuzhiyun svc_mask &= ~HCuPINT_CLKRDY;
1168*4882a593Smuzhiyun pr_info("%s: CLKRDY IRQ\n", __func__);
1169*4882a593Smuzhiyun }
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun if (svc_mask)
1172*4882a593Smuzhiyun pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
1175*4882a593Smuzhiyun isp1362_hcd->irq_active--;
1176*4882a593Smuzhiyun spin_unlock(&isp1362_hcd->lock);
1177*4882a593Smuzhiyun
1178*4882a593Smuzhiyun return IRQ_RETVAL(handled);
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun #define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
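/*
 * Example of the branch selection done by balance() (numbers are
 * illustrative only): with PERIODIC_SIZE 32 and an endpoint interval of
 * 8 there are 8 candidate branches; branch i would claim frame slots
 * i, i+8, i+16 and i+24, so that branch is usable only if adding the
 * endpoint's load keeps each of those slots at or below
 * MAX_PERIODIC_LOAD (900 of 1000 us per frame).
 */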
1184*4882a593Smuzhiyun static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1185*4882a593Smuzhiyun {
1186*4882a593Smuzhiyun int i, branch = -ENOSPC;
1187*4882a593Smuzhiyun
1188*4882a593Smuzhiyun /* search for the least loaded schedule branch of that interval
1189*4882a593Smuzhiyun * which has enough bandwidth left unreserved.
1190*4882a593Smuzhiyun */
1191*4882a593Smuzhiyun for (i = 0; i < interval; i++) {
1192*4882a593Smuzhiyun if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1193*4882a593Smuzhiyun int j;
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun for (j = i; j < PERIODIC_SIZE; j += interval) {
1196*4882a593Smuzhiyun if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1197*4882a593Smuzhiyun pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1198*4882a593Smuzhiyun load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1199*4882a593Smuzhiyun break;
1200*4882a593Smuzhiyun }
1201*4882a593Smuzhiyun }
1202*4882a593Smuzhiyun if (j < PERIODIC_SIZE)
1203*4882a593Smuzhiyun continue;
1204*4882a593Smuzhiyun branch = i;
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun }
1207*4882a593Smuzhiyun return branch;
1208*4882a593Smuzhiyun }
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun /* NB! ALL the code above this point runs with isp1362_hcd->lock
1211*4882a593Smuzhiyun held, irqs off
1212*4882a593Smuzhiyun */
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1217*4882a593Smuzhiyun struct urb *urb,
1218*4882a593Smuzhiyun gfp_t mem_flags)
1219*4882a593Smuzhiyun {
1220*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1221*4882a593Smuzhiyun struct usb_device *udev = urb->dev;
1222*4882a593Smuzhiyun unsigned int pipe = urb->pipe;
1223*4882a593Smuzhiyun int is_out = !usb_pipein(pipe);
1224*4882a593Smuzhiyun int type = usb_pipetype(pipe);
1225*4882a593Smuzhiyun int epnum = usb_pipeendpoint(pipe);
1226*4882a593Smuzhiyun struct usb_host_endpoint *hep = urb->ep;
1227*4882a593Smuzhiyun struct isp1362_ep *ep = NULL;
1228*4882a593Smuzhiyun unsigned long flags;
1229*4882a593Smuzhiyun int retval = 0;
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun DBG(3, "%s: urb %p\n", __func__, urb);
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun if (type == PIPE_ISOCHRONOUS) {
1234*4882a593Smuzhiyun pr_err("Isochronous transfers not supported\n");
1235*4882a593Smuzhiyun return -ENOSPC;
1236*4882a593Smuzhiyun }
1237*4882a593Smuzhiyun
1238*4882a593Smuzhiyun URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1239*4882a593Smuzhiyun usb_pipedevice(pipe), epnum,
1240*4882a593Smuzhiyun is_out ? "out" : "in",
1241*4882a593Smuzhiyun usb_pipecontrol(pipe) ? "ctrl" :
1242*4882a593Smuzhiyun usb_pipeint(pipe) ? "int" :
1243*4882a593Smuzhiyun usb_pipebulk(pipe) ? "bulk" :
1244*4882a593Smuzhiyun "iso",
1245*4882a593Smuzhiyun urb->transfer_buffer_length,
1246*4882a593Smuzhiyun (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1247*4882a593Smuzhiyun !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1248*4882a593Smuzhiyun "short_ok" : "");
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun /* avoid all allocations within spinlocks: request or endpoint */
1251*4882a593Smuzhiyun if (!hep->hcpriv) {
1252*4882a593Smuzhiyun ep = kzalloc(sizeof *ep, mem_flags);
1253*4882a593Smuzhiyun if (!ep)
1254*4882a593Smuzhiyun return -ENOMEM;
1255*4882a593Smuzhiyun }
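/*
 * The endpoint is allocated above, before isp1362_hcd->lock is taken:
 * kzalloc() with the caller's mem_flags may sleep, which is not allowed
 * once the spinlock is held with interrupts disabled. If the port turns
 * out to be dead or the URB cannot be linked, the allocation is simply
 * freed again below.
 */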
1256*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1257*4882a593Smuzhiyun
1258*4882a593Smuzhiyun /* don't submit to a dead or disabled port */
1259*4882a593Smuzhiyun if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1260*4882a593Smuzhiyun USB_PORT_STAT_ENABLE) ||
1261*4882a593Smuzhiyun !HC_IS_RUNNING(hcd->state)) {
1262*4882a593Smuzhiyun kfree(ep);
1263*4882a593Smuzhiyun retval = -ENODEV;
1264*4882a593Smuzhiyun goto fail_not_linked;
1265*4882a593Smuzhiyun }
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun retval = usb_hcd_link_urb_to_ep(hcd, urb);
1268*4882a593Smuzhiyun if (retval) {
1269*4882a593Smuzhiyun kfree(ep);
1270*4882a593Smuzhiyun goto fail_not_linked;
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun if (hep->hcpriv) {
1274*4882a593Smuzhiyun ep = hep->hcpriv;
1275*4882a593Smuzhiyun } else {
1276*4882a593Smuzhiyun INIT_LIST_HEAD(&ep->schedule);
1277*4882a593Smuzhiyun INIT_LIST_HEAD(&ep->active);
1278*4882a593Smuzhiyun INIT_LIST_HEAD(&ep->remove_list);
1279*4882a593Smuzhiyun ep->udev = usb_get_dev(udev);
1280*4882a593Smuzhiyun ep->hep = hep;
1281*4882a593Smuzhiyun ep->epnum = epnum;
1282*4882a593Smuzhiyun ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1283*4882a593Smuzhiyun ep->ptd_offset = -EINVAL;
1284*4882a593Smuzhiyun ep->ptd_index = -EINVAL;
1285*4882a593Smuzhiyun usb_settoggle(udev, epnum, is_out, 0);
1286*4882a593Smuzhiyun
1287*4882a593Smuzhiyun if (type == PIPE_CONTROL)
1288*4882a593Smuzhiyun ep->nextpid = USB_PID_SETUP;
1289*4882a593Smuzhiyun else if (is_out)
1290*4882a593Smuzhiyun ep->nextpid = USB_PID_OUT;
1291*4882a593Smuzhiyun else
1292*4882a593Smuzhiyun ep->nextpid = USB_PID_IN;
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun switch (type) {
1295*4882a593Smuzhiyun case PIPE_ISOCHRONOUS:
1296*4882a593Smuzhiyun case PIPE_INTERRUPT:
1297*4882a593Smuzhiyun if (urb->interval > PERIODIC_SIZE)
1298*4882a593Smuzhiyun urb->interval = PERIODIC_SIZE;
1299*4882a593Smuzhiyun ep->interval = urb->interval;
1300*4882a593Smuzhiyun ep->branch = PERIODIC_SIZE;
1301*4882a593Smuzhiyun ep->load = usb_calc_bus_time(udev->speed, !is_out,
1302*4882a593Smuzhiyun (type == PIPE_ISOCHRONOUS),
1303*4882a593Smuzhiyun usb_maxpacket(udev, pipe, is_out)) / 1000;
1304*4882a593Smuzhiyun break;
1305*4882a593Smuzhiyun }
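/*
 * Note: usb_calc_bus_time() returns the approximate bus time in
 * nanoseconds, so ep->load above is kept in microseconds per frame to
 * match the MAX_PERIODIC_LOAD budget of 900 out of 1000 us.
 */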
1306*4882a593Smuzhiyun hep->hcpriv = ep;
1307*4882a593Smuzhiyun }
1308*4882a593Smuzhiyun ep->num_req = isp1362_hcd->req_serial++;
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun /* maybe put endpoint into schedule */
1311*4882a593Smuzhiyun switch (type) {
1312*4882a593Smuzhiyun case PIPE_CONTROL:
1313*4882a593Smuzhiyun case PIPE_BULK:
1314*4882a593Smuzhiyun if (list_empty(&ep->schedule)) {
1315*4882a593Smuzhiyun DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1316*4882a593Smuzhiyun __func__, ep, ep->num_req);
1317*4882a593Smuzhiyun list_add_tail(&ep->schedule, &isp1362_hcd->async);
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun break;
1320*4882a593Smuzhiyun case PIPE_ISOCHRONOUS:
1321*4882a593Smuzhiyun case PIPE_INTERRUPT:
1322*4882a593Smuzhiyun urb->interval = ep->interval;
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun /* URB submitted for an already existing EP */
1325*4882a593Smuzhiyun if (ep->branch < PERIODIC_SIZE)
1326*4882a593Smuzhiyun break;
1327*4882a593Smuzhiyun
1328*4882a593Smuzhiyun retval = balance(isp1362_hcd, ep->interval, ep->load);
1329*4882a593Smuzhiyun if (retval < 0) {
1330*4882a593Smuzhiyun pr_err("%s: balance returned %d\n", __func__, retval);
1331*4882a593Smuzhiyun goto fail;
1332*4882a593Smuzhiyun }
1333*4882a593Smuzhiyun ep->branch = retval;
1334*4882a593Smuzhiyun retval = 0;
1335*4882a593Smuzhiyun isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1336*4882a593Smuzhiyun DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1337*4882a593Smuzhiyun __func__, isp1362_hcd->fmindex, ep->branch,
1338*4882a593Smuzhiyun ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1339*4882a593Smuzhiyun ~(PERIODIC_SIZE - 1)) + ep->branch,
1340*4882a593Smuzhiyun (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1341*4882a593Smuzhiyun
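/*
 * For a new isochronous endpoint the first frame is chosen at least
 * eight frames in the future, aligned down to a multiple of the
 * endpoint's interval and offset by its branch; e.g. (hypothetical
 * numbers) with fmindex 0x123, interval 8 and branch 3 the start frame
 * would be ((0x123 + 8) & ~7) | 3 = 0x12b.
 */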
1342*4882a593Smuzhiyun if (list_empty(&ep->schedule)) {
1343*4882a593Smuzhiyun if (type == PIPE_ISOCHRONOUS) {
1344*4882a593Smuzhiyun u16 frame = isp1362_hcd->fmindex;
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun frame += max_t(u16, 8, ep->interval);
1347*4882a593Smuzhiyun frame &= ~(ep->interval - 1);
1348*4882a593Smuzhiyun frame |= ep->branch;
1349*4882a593Smuzhiyun if (frame_before(frame, isp1362_hcd->fmindex))
1350*4882a593Smuzhiyun frame += ep->interval;
1351*4882a593Smuzhiyun urb->start_frame = frame;
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1354*4882a593Smuzhiyun list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1355*4882a593Smuzhiyun } else {
1356*4882a593Smuzhiyun DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1357*4882a593Smuzhiyun list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun } else
1360*4882a593Smuzhiyun DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1363*4882a593Smuzhiyun ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1364*4882a593Smuzhiyun isp1362_hcd->load[ep->branch] + ep->load);
1365*4882a593Smuzhiyun isp1362_hcd->load[ep->branch] += ep->load;
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun
1368*4882a593Smuzhiyun urb->hcpriv = hep;
1369*4882a593Smuzhiyun ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun switch (type) {
1372*4882a593Smuzhiyun case PIPE_CONTROL:
1373*4882a593Smuzhiyun case PIPE_BULK:
1374*4882a593Smuzhiyun start_atl_transfers(isp1362_hcd);
1375*4882a593Smuzhiyun break;
1376*4882a593Smuzhiyun case PIPE_INTERRUPT:
1377*4882a593Smuzhiyun start_intl_transfers(isp1362_hcd);
1378*4882a593Smuzhiyun break;
1379*4882a593Smuzhiyun case PIPE_ISOCHRONOUS:
1380*4882a593Smuzhiyun start_iso_transfers(isp1362_hcd);
1381*4882a593Smuzhiyun break;
1382*4882a593Smuzhiyun default:
1383*4882a593Smuzhiyun BUG();
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun fail:
1386*4882a593Smuzhiyun if (retval)
1387*4882a593Smuzhiyun usb_hcd_unlink_urb_from_ep(hcd, urb);
1388*4882a593Smuzhiyun
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun fail_not_linked:
1391*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1392*4882a593Smuzhiyun if (retval)
1393*4882a593Smuzhiyun DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1394*4882a593Smuzhiyun return retval;
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun
1397*4882a593Smuzhiyun static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1400*4882a593Smuzhiyun struct usb_host_endpoint *hep;
1401*4882a593Smuzhiyun unsigned long flags;
1402*4882a593Smuzhiyun struct isp1362_ep *ep;
1403*4882a593Smuzhiyun int retval = 0;
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun DBG(3, "%s: urb %p\n", __func__, urb);
1406*4882a593Smuzhiyun
1407*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1408*4882a593Smuzhiyun retval = usb_hcd_check_unlink_urb(hcd, urb, status);
1409*4882a593Smuzhiyun if (retval)
1410*4882a593Smuzhiyun goto done;
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun hep = urb->hcpriv;
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun if (!hep) {
1415*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1416*4882a593Smuzhiyun return -EIDRM;
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun ep = hep->hcpriv;
1420*4882a593Smuzhiyun if (ep) {
1421*4882a593Smuzhiyun /* In front of queue? */
1422*4882a593Smuzhiyun if (ep->hep->urb_list.next == &urb->urb_list) {
1423*4882a593Smuzhiyun if (!list_empty(&ep->active)) {
1424*4882a593Smuzhiyun DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
1425*4882a593Smuzhiyun urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1426*4882a593Smuzhiyun /* disable processing and queue PTD for removal */
1427*4882a593Smuzhiyun remove_ptd(isp1362_hcd, ep);
1428*4882a593Smuzhiyun urb = NULL;
1429*4882a593Smuzhiyun }
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun if (urb) {
1432*4882a593Smuzhiyun DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
1433*4882a593Smuzhiyun ep->num_req);
1434*4882a593Smuzhiyun finish_request(isp1362_hcd, ep, urb, status);
1435*4882a593Smuzhiyun } else
1436*4882a593Smuzhiyun DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
1437*4882a593Smuzhiyun } else {
1438*4882a593Smuzhiyun pr_warn("%s: No EP in URB %p\n", __func__, urb);
1439*4882a593Smuzhiyun retval = -EINVAL;
1440*4882a593Smuzhiyun }
1441*4882a593Smuzhiyun done:
1442*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun DBG(3, "%s: exit\n", __func__);
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun return retval;
1447*4882a593Smuzhiyun }
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
1450*4882a593Smuzhiyun {
1451*4882a593Smuzhiyun struct isp1362_ep *ep = hep->hcpriv;
1452*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1453*4882a593Smuzhiyun unsigned long flags;
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun DBG(1, "%s: ep %p\n", __func__, ep);
1456*4882a593Smuzhiyun if (!ep)
1457*4882a593Smuzhiyun return;
1458*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1459*4882a593Smuzhiyun if (!list_empty(&hep->urb_list)) {
1460*4882a593Smuzhiyun if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
1461*4882a593Smuzhiyun DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
1462*4882a593Smuzhiyun ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
1463*4882a593Smuzhiyun remove_ptd(isp1362_hcd, ep);
1464*4882a593Smuzhiyun pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
1465*4882a593Smuzhiyun }
1466*4882a593Smuzhiyun }
1467*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1468*4882a593Smuzhiyun /* Wait for the interrupt handler to clear the active list */
1469*4882a593Smuzhiyun while (!list_empty(&ep->active))
1470*4882a593Smuzhiyun msleep(1);
1471*4882a593Smuzhiyun
1472*4882a593Smuzhiyun DBG(1, "%s: Freeing EP %p\n", __func__, ep);
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun usb_put_dev(ep->udev);
1475*4882a593Smuzhiyun kfree(ep);
1476*4882a593Smuzhiyun hep->hcpriv = NULL;
1477*4882a593Smuzhiyun }
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun static int isp1362_get_frame(struct usb_hcd *hcd)
1480*4882a593Smuzhiyun {
1481*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1482*4882a593Smuzhiyun u32 fmnum;
1483*4882a593Smuzhiyun unsigned long flags;
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1486*4882a593Smuzhiyun fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1487*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun return (int)fmnum;
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun
1492*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun /* Adapted from ohci-hub.c */
1495*4882a593Smuzhiyun static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1496*4882a593Smuzhiyun {
1497*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1498*4882a593Smuzhiyun int ports, i, changed = 0;
1499*4882a593Smuzhiyun unsigned long flags;
1500*4882a593Smuzhiyun
1501*4882a593Smuzhiyun if (!HC_IS_RUNNING(hcd->state))
1502*4882a593Smuzhiyun return -ESHUTDOWN;
1503*4882a593Smuzhiyun
1504*4882a593Smuzhiyun /* Report no status change now if we are scheduled to be
1505*4882a593Smuzhiyun called later */
1506*4882a593Smuzhiyun if (timer_pending(&hcd->rh_timer))
1507*4882a593Smuzhiyun return 0;
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun ports = isp1362_hcd->rhdesca & RH_A_NDP;
1510*4882a593Smuzhiyun BUG_ON(ports > 2);
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1513*4882a593Smuzhiyun /* init status */
1514*4882a593Smuzhiyun if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1515*4882a593Smuzhiyun buf[0] = changed = 1;
1516*4882a593Smuzhiyun else
1517*4882a593Smuzhiyun buf[0] = 0;
1518*4882a593Smuzhiyun
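/*
 * buf is the usual hub status-change bitmap: bit 0 flags a change on
 * the hub itself, bit (i + 1) flags a change on port i.
 */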
1519*4882a593Smuzhiyun for (i = 0; i < ports; i++) {
1520*4882a593Smuzhiyun u32 status = isp1362_hcd->rhport[i];
1521*4882a593Smuzhiyun
1522*4882a593Smuzhiyun if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1523*4882a593Smuzhiyun RH_PS_OCIC | RH_PS_PRSC)) {
1524*4882a593Smuzhiyun changed = 1;
1525*4882a593Smuzhiyun buf[0] |= 1 << (i + 1);
1526*4882a593Smuzhiyun continue;
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun
1529*4882a593Smuzhiyun if (!(status & RH_PS_CCS))
1530*4882a593Smuzhiyun continue;
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1533*4882a593Smuzhiyun return changed;
1534*4882a593Smuzhiyun }
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
1537*4882a593Smuzhiyun struct usb_hub_descriptor *desc)
1538*4882a593Smuzhiyun {
1539*4882a593Smuzhiyun u32 reg = isp1362_hcd->rhdesca;
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun DBG(3, "%s: enter\n", __func__);
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun desc->bDescriptorType = USB_DT_HUB;
1544*4882a593Smuzhiyun desc->bDescLength = 9;
1545*4882a593Smuzhiyun desc->bHubContrCurrent = 0;
1546*4882a593Smuzhiyun desc->bNbrPorts = reg & 0x3;
1547*4882a593Smuzhiyun /* Power switching, device type, overcurrent. */
1548*4882a593Smuzhiyun desc->wHubCharacteristics = cpu_to_le16((reg >> 8) &
1549*4882a593Smuzhiyun (HUB_CHAR_LPSM |
1550*4882a593Smuzhiyun HUB_CHAR_COMPOUND |
1551*4882a593Smuzhiyun HUB_CHAR_OCPM));
1552*4882a593Smuzhiyun DBG(0, "%s: hubcharacteristics = %02x\n", __func__,
1553*4882a593Smuzhiyun desc->wHubCharacteristics);
1554*4882a593Smuzhiyun desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;
1555*4882a593Smuzhiyun /* ports removable, and legacy PortPwrCtrlMask */
1556*4882a593Smuzhiyun desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
1557*4882a593Smuzhiyun desc->u.hs.DeviceRemovable[1] = ~0;
1558*4882a593Smuzhiyun
1559*4882a593Smuzhiyun DBG(3, "%s: exit\n", __func__);
1560*4882a593Smuzhiyun }
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun /* Adapted from ohci-hub.c */
1563*4882a593Smuzhiyun static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1564*4882a593Smuzhiyun u16 wIndex, char *buf, u16 wLength)
1565*4882a593Smuzhiyun {
1566*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1567*4882a593Smuzhiyun int retval = 0;
1568*4882a593Smuzhiyun unsigned long flags;
1569*4882a593Smuzhiyun unsigned long t1;
1570*4882a593Smuzhiyun int ports = isp1362_hcd->rhdesca & RH_A_NDP;
1571*4882a593Smuzhiyun u32 tmp = 0;
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun switch (typeReq) {
1574*4882a593Smuzhiyun case ClearHubFeature:
1575*4882a593Smuzhiyun DBG(0, "ClearHubFeature: ");
1576*4882a593Smuzhiyun switch (wValue) {
1577*4882a593Smuzhiyun case C_HUB_OVER_CURRENT:
1578*4882a593Smuzhiyun DBG(0, "C_HUB_OVER_CURRENT\n");
1579*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1580*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
1581*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1582*4882a593Smuzhiyun break;
1583*4882a593Smuzhiyun case C_HUB_LOCAL_POWER:
1584*4882a593Smuzhiyun DBG(0, "C_HUB_LOCAL_POWER\n");
1585*4882a593Smuzhiyun break;
1586*4882a593Smuzhiyun default:
1587*4882a593Smuzhiyun goto error;
1588*4882a593Smuzhiyun }
1589*4882a593Smuzhiyun break;
1590*4882a593Smuzhiyun case SetHubFeature:
1591*4882a593Smuzhiyun DBG(0, "SetHubFeature: ");
1592*4882a593Smuzhiyun switch (wValue) {
1593*4882a593Smuzhiyun case C_HUB_OVER_CURRENT:
1594*4882a593Smuzhiyun case C_HUB_LOCAL_POWER:
1595*4882a593Smuzhiyun DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
1596*4882a593Smuzhiyun break;
1597*4882a593Smuzhiyun default:
1598*4882a593Smuzhiyun goto error;
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun break;
1601*4882a593Smuzhiyun case GetHubDescriptor:
1602*4882a593Smuzhiyun DBG(0, "GetHubDescriptor\n");
1603*4882a593Smuzhiyun isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
1604*4882a593Smuzhiyun break;
1605*4882a593Smuzhiyun case GetHubStatus:
1606*4882a593Smuzhiyun DBG(0, "GetHubStatus\n");
1607*4882a593Smuzhiyun put_unaligned(cpu_to_le32(0), (__le32 *) buf);
1608*4882a593Smuzhiyun break;
1609*4882a593Smuzhiyun case GetPortStatus:
1610*4882a593Smuzhiyun #ifndef VERBOSE
1611*4882a593Smuzhiyun DBG(0, "GetPortStatus\n");
1612*4882a593Smuzhiyun #endif
1613*4882a593Smuzhiyun if (!wIndex || wIndex > ports)
1614*4882a593Smuzhiyun goto error;
1615*4882a593Smuzhiyun tmp = isp1362_hcd->rhport[--wIndex];
1616*4882a593Smuzhiyun put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
1617*4882a593Smuzhiyun break;
1618*4882a593Smuzhiyun case ClearPortFeature:
1619*4882a593Smuzhiyun DBG(0, "ClearPortFeature: ");
1620*4882a593Smuzhiyun if (!wIndex || wIndex > ports)
1621*4882a593Smuzhiyun goto error;
1622*4882a593Smuzhiyun wIndex--;
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun switch (wValue) {
1625*4882a593Smuzhiyun case USB_PORT_FEAT_ENABLE:
1626*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_ENABLE\n");
1627*4882a593Smuzhiyun tmp = RH_PS_CCS;
1628*4882a593Smuzhiyun break;
1629*4882a593Smuzhiyun case USB_PORT_FEAT_C_ENABLE:
1630*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
1631*4882a593Smuzhiyun tmp = RH_PS_PESC;
1632*4882a593Smuzhiyun break;
1633*4882a593Smuzhiyun case USB_PORT_FEAT_SUSPEND:
1634*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1635*4882a593Smuzhiyun tmp = RH_PS_POCI;
1636*4882a593Smuzhiyun break;
1637*4882a593Smuzhiyun case USB_PORT_FEAT_C_SUSPEND:
1638*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
1639*4882a593Smuzhiyun tmp = RH_PS_PSSC;
1640*4882a593Smuzhiyun break;
1641*4882a593Smuzhiyun case USB_PORT_FEAT_POWER:
1642*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_POWER\n");
1643*4882a593Smuzhiyun tmp = RH_PS_LSDA;
1644*4882a593Smuzhiyun
1645*4882a593Smuzhiyun break;
1646*4882a593Smuzhiyun case USB_PORT_FEAT_C_CONNECTION:
1647*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
1648*4882a593Smuzhiyun tmp = RH_PS_CSC;
1649*4882a593Smuzhiyun break;
1650*4882a593Smuzhiyun case USB_PORT_FEAT_C_OVER_CURRENT:
1651*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
1652*4882a593Smuzhiyun tmp = RH_PS_OCIC;
1653*4882a593Smuzhiyun break;
1654*4882a593Smuzhiyun case USB_PORT_FEAT_C_RESET:
1655*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_C_RESET\n");
1656*4882a593Smuzhiyun tmp = RH_PS_PRSC;
1657*4882a593Smuzhiyun break;
1658*4882a593Smuzhiyun default:
1659*4882a593Smuzhiyun goto error;
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun
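/*
 * As on OHCI root hubs, a port feature is cleared by writing the
 * matching RH_PS_* bit to the port status register (the *SC change bits
 * are write-one-to-clear, the others act as port commands); the
 * register is read back afterwards to refresh the cached rhport[]
 * value.
 */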
1662*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1663*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
1664*4882a593Smuzhiyun isp1362_hcd->rhport[wIndex] =
1665*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1666*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1667*4882a593Smuzhiyun break;
1668*4882a593Smuzhiyun case SetPortFeature:
1669*4882a593Smuzhiyun DBG(0, "SetPortFeature: ");
1670*4882a593Smuzhiyun if (!wIndex || wIndex > ports)
1671*4882a593Smuzhiyun goto error;
1672*4882a593Smuzhiyun wIndex--;
1673*4882a593Smuzhiyun switch (wValue) {
1674*4882a593Smuzhiyun case USB_PORT_FEAT_SUSPEND:
1675*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_SUSPEND\n");
1676*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1677*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
1678*4882a593Smuzhiyun isp1362_hcd->rhport[wIndex] =
1679*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1680*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1681*4882a593Smuzhiyun break;
1682*4882a593Smuzhiyun case USB_PORT_FEAT_POWER:
1683*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_POWER\n");
1684*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1685*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
1686*4882a593Smuzhiyun isp1362_hcd->rhport[wIndex] =
1687*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1688*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1689*4882a593Smuzhiyun break;
1690*4882a593Smuzhiyun case USB_PORT_FEAT_RESET:
1691*4882a593Smuzhiyun DBG(0, "USB_PORT_FEAT_RESET\n");
1692*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1693*4882a593Smuzhiyun
1694*4882a593Smuzhiyun t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
1695*4882a593Smuzhiyun while (time_before(jiffies, t1)) {
1696*4882a593Smuzhiyun /* spin until any current reset finishes */
1697*4882a593Smuzhiyun for (;;) {
1698*4882a593Smuzhiyun tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
1699*4882a593Smuzhiyun if (!(tmp & RH_PS_PRS))
1700*4882a593Smuzhiyun break;
1701*4882a593Smuzhiyun udelay(500);
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun if (!(tmp & RH_PS_CCS))
1704*4882a593Smuzhiyun break;
1705*4882a593Smuzhiyun /* A reset pulse lasts 10 ms, according to the datasheet */
1706*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));
1707*4882a593Smuzhiyun
1708*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1709*4882a593Smuzhiyun msleep(10);
1710*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun
1713*4882a593Smuzhiyun isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
1714*4882a593Smuzhiyun HCRHPORT1 + wIndex);
1715*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1716*4882a593Smuzhiyun break;
1717*4882a593Smuzhiyun default:
1718*4882a593Smuzhiyun goto error;
1719*4882a593Smuzhiyun }
1720*4882a593Smuzhiyun break;
1721*4882a593Smuzhiyun
1722*4882a593Smuzhiyun default:
1723*4882a593Smuzhiyun error:
1724*4882a593Smuzhiyun /* "protocol stall" on error */
1725*4882a593Smuzhiyun DBG(0, "PROTOCOL STALL\n");
1726*4882a593Smuzhiyun retval = -EPIPE;
1727*4882a593Smuzhiyun }
1728*4882a593Smuzhiyun
1729*4882a593Smuzhiyun return retval;
1730*4882a593Smuzhiyun }
1731*4882a593Smuzhiyun
1732*4882a593Smuzhiyun #ifdef CONFIG_PM
1733*4882a593Smuzhiyun static int isp1362_bus_suspend(struct usb_hcd *hcd)
1734*4882a593Smuzhiyun {
1735*4882a593Smuzhiyun int status = 0;
1736*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1737*4882a593Smuzhiyun unsigned long flags;
1738*4882a593Smuzhiyun
1739*4882a593Smuzhiyun if (time_before(jiffies, isp1362_hcd->next_statechange))
1740*4882a593Smuzhiyun msleep(5);
1741*4882a593Smuzhiyun
1742*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1743*4882a593Smuzhiyun
1744*4882a593Smuzhiyun isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1745*4882a593Smuzhiyun switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1746*4882a593Smuzhiyun case OHCI_USB_RESUME:
1747*4882a593Smuzhiyun DBG(0, "%s: resume/suspend?\n", __func__);
1748*4882a593Smuzhiyun isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1749*4882a593Smuzhiyun isp1362_hcd->hc_control |= OHCI_USB_RESET;
1750*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1751*4882a593Smuzhiyun fallthrough;
1752*4882a593Smuzhiyun case OHCI_USB_RESET:
1753*4882a593Smuzhiyun status = -EBUSY;
1754*4882a593Smuzhiyun pr_warn("%s: needs reinit!\n", __func__);
1755*4882a593Smuzhiyun goto done;
1756*4882a593Smuzhiyun case OHCI_USB_SUSPEND:
1757*4882a593Smuzhiyun pr_warn("%s: already suspended?\n", __func__);
1758*4882a593Smuzhiyun goto done;
1759*4882a593Smuzhiyun }
1760*4882a593Smuzhiyun DBG(0, "%s: suspend root hub\n", __func__);
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun /* First stop any processing */
1763*4882a593Smuzhiyun hcd->state = HC_STATE_QUIESCING;
1764*4882a593Smuzhiyun if (!list_empty(&isp1362_hcd->atl_queue.active) ||
1765*4882a593Smuzhiyun !list_empty(&isp1362_hcd->intl_queue.active) ||
1766*4882a593Smuzhiyun !list_empty(&isp1362_hcd->istl_queue[0].active) ||
1767*4882a593Smuzhiyun !list_empty(&isp1362_hcd->istl_queue[1].active)) {
1768*4882a593Smuzhiyun int limit;
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
1771*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
1772*4882a593Smuzhiyun isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
1773*4882a593Smuzhiyun isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
1774*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);
1775*4882a593Smuzhiyun
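/*
 * All PTDs have just been marked as skipped and the SOF status bit
 * cleared; polling for OHCI_INTR_SF below (up to ~2 ms in 250 us steps,
 * followed by a 7 ms settling delay) makes reasonably sure the
 * controller has finished the frame it was working on before any
 * leftover done maps are collected.
 */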
1776*4882a593Smuzhiyun DBG(0, "%s: stopping schedules ...\n", __func__);
1777*4882a593Smuzhiyun limit = 2000;
1778*4882a593Smuzhiyun while (limit > 0) {
1779*4882a593Smuzhiyun udelay(250);
1780*4882a593Smuzhiyun limit -= 250;
1781*4882a593Smuzhiyun if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
1782*4882a593Smuzhiyun break;
1783*4882a593Smuzhiyun }
1784*4882a593Smuzhiyun mdelay(7);
1785*4882a593Smuzhiyun if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
1786*4882a593Smuzhiyun u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
1787*4882a593Smuzhiyun finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
1790*4882a593Smuzhiyun u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
1791*4882a593Smuzhiyun finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
1794*4882a593Smuzhiyun finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
1795*4882a593Smuzhiyun if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
1796*4882a593Smuzhiyun finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
1797*4882a593Smuzhiyun }
1798*4882a593Smuzhiyun DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
1799*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1800*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
1801*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1802*4882a593Smuzhiyun
1803*4882a593Smuzhiyun /* Suspend hub */
1804*4882a593Smuzhiyun isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
1805*4882a593Smuzhiyun isp1362_show_reg(isp1362_hcd, HCCONTROL);
1806*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1807*4882a593Smuzhiyun isp1362_show_reg(isp1362_hcd, HCCONTROL);
1808*4882a593Smuzhiyun
1809*4882a593Smuzhiyun #if 1
1810*4882a593Smuzhiyun isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1811*4882a593Smuzhiyun if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
1812*4882a593Smuzhiyun pr_err("%s: controller won't suspend %08x\n", __func__,
1813*4882a593Smuzhiyun isp1362_hcd->hc_control);
1814*4882a593Smuzhiyun status = -EBUSY;
1815*4882a593Smuzhiyun } else
1816*4882a593Smuzhiyun #endif
1817*4882a593Smuzhiyun {
1818*4882a593Smuzhiyun /* no resumes until devices finish suspending */
1819*4882a593Smuzhiyun isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
1820*4882a593Smuzhiyun }
1821*4882a593Smuzhiyun done:
1822*4882a593Smuzhiyun if (status == 0) {
1823*4882a593Smuzhiyun hcd->state = HC_STATE_SUSPENDED;
1824*4882a593Smuzhiyun DBG(0, "%s: HCD suspended: %08x\n", __func__,
1825*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1826*4882a593Smuzhiyun }
1827*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1828*4882a593Smuzhiyun return status;
1829*4882a593Smuzhiyun }
1830*4882a593Smuzhiyun
1831*4882a593Smuzhiyun static int isp1362_bus_resume(struct usb_hcd *hcd)
1832*4882a593Smuzhiyun {
1833*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1834*4882a593Smuzhiyun u32 port;
1835*4882a593Smuzhiyun unsigned long flags;
1836*4882a593Smuzhiyun int status = -EINPROGRESS;
1837*4882a593Smuzhiyun
1838*4882a593Smuzhiyun if (time_before(jiffies, isp1362_hcd->next_statechange))
1839*4882a593Smuzhiyun msleep(5);
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1842*4882a593Smuzhiyun isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
1843*4882a593Smuzhiyun pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
1844*4882a593Smuzhiyun if (hcd->state == HC_STATE_RESUMING) {
1845*4882a593Smuzhiyun pr_warn("%s: duplicate resume\n", __func__);
1846*4882a593Smuzhiyun status = 0;
1847*4882a593Smuzhiyun } else
1848*4882a593Smuzhiyun switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
1849*4882a593Smuzhiyun case OHCI_USB_SUSPEND:
1850*4882a593Smuzhiyun DBG(0, "%s: resume root hub\n", __func__);
1851*4882a593Smuzhiyun isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
1852*4882a593Smuzhiyun isp1362_hcd->hc_control |= OHCI_USB_RESUME;
1853*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1854*4882a593Smuzhiyun break;
1855*4882a593Smuzhiyun case OHCI_USB_RESUME:
1856*4882a593Smuzhiyun /* HCFS changes sometime after INTR_RD */
1857*4882a593Smuzhiyun DBG(0, "%s: remote wakeup\n", __func__);
1858*4882a593Smuzhiyun break;
1859*4882a593Smuzhiyun case OHCI_USB_OPER:
1860*4882a593Smuzhiyun DBG(0, "%s: odd resume\n", __func__);
1861*4882a593Smuzhiyun status = 0;
1862*4882a593Smuzhiyun hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1863*4882a593Smuzhiyun break;
1864*4882a593Smuzhiyun default: /* RESET, we lost power */
1865*4882a593Smuzhiyun DBG(0, "%s: root hub hardware reset\n", __func__);
1866*4882a593Smuzhiyun status = -EBUSY;
1867*4882a593Smuzhiyun }
1868*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1869*4882a593Smuzhiyun if (status == -EBUSY) {
1870*4882a593Smuzhiyun DBG(0, "%s: Restarting HC\n", __func__);
1871*4882a593Smuzhiyun isp1362_hc_stop(hcd);
1872*4882a593Smuzhiyun return isp1362_hc_start(hcd);
1873*4882a593Smuzhiyun }
1874*4882a593Smuzhiyun if (status != -EINPROGRESS)
1875*4882a593Smuzhiyun return status;
1876*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1877*4882a593Smuzhiyun port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
1878*4882a593Smuzhiyun while (port--) {
1879*4882a593Smuzhiyun u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);
1880*4882a593Smuzhiyun
1881*4882a593Smuzhiyun /* force global, not selective, resume */
1882*4882a593Smuzhiyun if (!(stat & RH_PS_PSS)) {
1883*4882a593Smuzhiyun DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
1884*4882a593Smuzhiyun continue;
1885*4882a593Smuzhiyun }
1886*4882a593Smuzhiyun DBG(0, "%s: Resuming RH port %d\n", __func__, port);
1887*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
1888*4882a593Smuzhiyun }
1889*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun /* Some controllers (Lucent) need extra-long delays */
1892*4882a593Smuzhiyun hcd->state = HC_STATE_RESUMING;
1893*4882a593Smuzhiyun mdelay(20 /* usb 11.5.1.10 */ + 15);
1894*4882a593Smuzhiyun
1895*4882a593Smuzhiyun isp1362_hcd->hc_control = OHCI_USB_OPER;
1896*4882a593Smuzhiyun spin_lock_irqsave(&isp1362_hcd->lock, flags);
1897*4882a593Smuzhiyun isp1362_show_reg(isp1362_hcd, HCCONTROL);
1898*4882a593Smuzhiyun isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
1899*4882a593Smuzhiyun spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1900*4882a593Smuzhiyun /* TRSMRCY: 10 ms resume recovery time */
1901*4882a593Smuzhiyun msleep(10);
1902*4882a593Smuzhiyun
1903*4882a593Smuzhiyun /* keep it alive for ~5x suspend + resume costs */
1904*4882a593Smuzhiyun isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun hcd->self.root_hub->dev.power.power_state = PMSG_ON;
1907*4882a593Smuzhiyun hcd->state = HC_STATE_RUNNING;
1908*4882a593Smuzhiyun return 0;
1909*4882a593Smuzhiyun }
1910*4882a593Smuzhiyun #else
1911*4882a593Smuzhiyun #define isp1362_bus_suspend NULL
1912*4882a593Smuzhiyun #define isp1362_bus_resume NULL
1913*4882a593Smuzhiyun #endif
1914*4882a593Smuzhiyun
1915*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1916*4882a593Smuzhiyun
1917*4882a593Smuzhiyun static void dump_irq(struct seq_file *s, char *label, u16 mask)
1918*4882a593Smuzhiyun {
1919*4882a593Smuzhiyun seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1920*4882a593Smuzhiyun mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1921*4882a593Smuzhiyun mask & HCuPINT_SUSP ? " susp" : "",
1922*4882a593Smuzhiyun mask & HCuPINT_OPR ? " opr" : "",
1923*4882a593Smuzhiyun mask & HCuPINT_EOT ? " eot" : "",
1924*4882a593Smuzhiyun mask & HCuPINT_ATL ? " atl" : "",
1925*4882a593Smuzhiyun mask & HCuPINT_SOF ? " sof" : "");
1926*4882a593Smuzhiyun }
1927*4882a593Smuzhiyun
1928*4882a593Smuzhiyun static void dump_int(struct seq_file *s, char *label, u32 mask)
1929*4882a593Smuzhiyun {
1930*4882a593Smuzhiyun seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1931*4882a593Smuzhiyun mask & OHCI_INTR_MIE ? " MIE" : "",
1932*4882a593Smuzhiyun mask & OHCI_INTR_RHSC ? " rhsc" : "",
1933*4882a593Smuzhiyun mask & OHCI_INTR_FNO ? " fno" : "",
1934*4882a593Smuzhiyun mask & OHCI_INTR_UE ? " ue" : "",
1935*4882a593Smuzhiyun mask & OHCI_INTR_RD ? " rd" : "",
1936*4882a593Smuzhiyun mask & OHCI_INTR_SF ? " sof" : "",
1937*4882a593Smuzhiyun mask & OHCI_INTR_SO ? " so" : "");
1938*4882a593Smuzhiyun }
1939*4882a593Smuzhiyun
1940*4882a593Smuzhiyun static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1941*4882a593Smuzhiyun {
1942*4882a593Smuzhiyun seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1943*4882a593Smuzhiyun mask & OHCI_CTRL_RWC ? " rwc" : "",
1944*4882a593Smuzhiyun mask & OHCI_CTRL_RWE ? " rwe" : "",
1945*4882a593Smuzhiyun ({
1946*4882a593Smuzhiyun char *hcfs;
1947*4882a593Smuzhiyun switch (mask & OHCI_CTRL_HCFS) {
1948*4882a593Smuzhiyun case OHCI_USB_OPER:
1949*4882a593Smuzhiyun hcfs = " oper";
1950*4882a593Smuzhiyun break;
1951*4882a593Smuzhiyun case OHCI_USB_RESET:
1952*4882a593Smuzhiyun hcfs = " reset";
1953*4882a593Smuzhiyun break;
1954*4882a593Smuzhiyun case OHCI_USB_RESUME:
1955*4882a593Smuzhiyun hcfs = " resume";
1956*4882a593Smuzhiyun break;
1957*4882a593Smuzhiyun case OHCI_USB_SUSPEND:
1958*4882a593Smuzhiyun hcfs = " suspend";
1959*4882a593Smuzhiyun break;
1960*4882a593Smuzhiyun default:
1961*4882a593Smuzhiyun hcfs = " ?";
1962*4882a593Smuzhiyun }
1963*4882a593Smuzhiyun hcfs;
1964*4882a593Smuzhiyun }));
1965*4882a593Smuzhiyun }
1966*4882a593Smuzhiyun
1967*4882a593Smuzhiyun static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
1968*4882a593Smuzhiyun {
1969*4882a593Smuzhiyun seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
1970*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCREVISION));
1971*4882a593Smuzhiyun seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
1972*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCCONTROL));
1973*4882a593Smuzhiyun seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
1974*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
1975*4882a593Smuzhiyun seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
1976*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
1977*4882a593Smuzhiyun seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
1978*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCINTENB));
1979*4882a593Smuzhiyun seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
1980*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
1981*4882a593Smuzhiyun seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
1982*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCFMREM));
1983*4882a593Smuzhiyun seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
1984*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCFMNUM));
1985*4882a593Smuzhiyun seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
1986*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
1987*4882a593Smuzhiyun seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
1988*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
1989*4882a593Smuzhiyun seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
1990*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
1991*4882a593Smuzhiyun seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
1992*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
1993*4882a593Smuzhiyun seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
1994*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
1995*4882a593Smuzhiyun seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
1996*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
1997*4882a593Smuzhiyun seq_printf(s, "\n");
1998*4882a593Smuzhiyun seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
1999*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCHWCFG));
2000*4882a593Smuzhiyun seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
2001*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCDMACFG));
2002*4882a593Smuzhiyun seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
2003*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
2004*4882a593Smuzhiyun seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
2005*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCuPINT));
2006*4882a593Smuzhiyun seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
2007*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2008*4882a593Smuzhiyun seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
2009*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCCHIPID));
2010*4882a593Smuzhiyun seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
2011*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
2012*4882a593Smuzhiyun seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
2013*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
2014*4882a593Smuzhiyun seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
2015*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
2016*4882a593Smuzhiyun #if 0
2017*4882a593Smuzhiyun seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
2018*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
2019*4882a593Smuzhiyun #endif
2020*4882a593Smuzhiyun seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
2021*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
2022*4882a593Smuzhiyun seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
2023*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
2024*4882a593Smuzhiyun seq_printf(s, "\n");
2025*4882a593Smuzhiyun seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
2026*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
2027*4882a593Smuzhiyun seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
2028*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
2029*4882a593Smuzhiyun seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
2030*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
2031*4882a593Smuzhiyun seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
2032*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
2033*4882a593Smuzhiyun seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
2034*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
2035*4882a593Smuzhiyun seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
2036*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
2037*4882a593Smuzhiyun seq_printf(s, "\n");
2038*4882a593Smuzhiyun seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
2039*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
2040*4882a593Smuzhiyun seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
2041*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
2042*4882a593Smuzhiyun #if 0
2043*4882a593Smuzhiyun seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
2044*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCATLDONE));
2045*4882a593Smuzhiyun #endif
2046*4882a593Smuzhiyun seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
2047*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
2048*4882a593Smuzhiyun seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
2049*4882a593Smuzhiyun isp1362_read_reg32(isp1362_hcd, HCATLLAST));
2050*4882a593Smuzhiyun seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
2051*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCATLCURR));
2052*4882a593Smuzhiyun seq_printf(s, "\n");
2053*4882a593Smuzhiyun seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
2054*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCATLDTC));
2055*4882a593Smuzhiyun seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
2056*4882a593Smuzhiyun isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun
2059*4882a593Smuzhiyun static int isp1362_show(struct seq_file *s, void *unused)
2060*4882a593Smuzhiyun {
2061*4882a593Smuzhiyun struct isp1362_hcd *isp1362_hcd = s->private;
2062*4882a593Smuzhiyun struct isp1362_ep *ep;
2063*4882a593Smuzhiyun int i;
2064*4882a593Smuzhiyun
2065*4882a593Smuzhiyun seq_printf(s, "%s\n%s version %s\n",
2066*4882a593Smuzhiyun isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);
2067*4882a593Smuzhiyun
2068*4882a593Smuzhiyun /* collect statistics to help estimate potential win for
2069*4882a593Smuzhiyun * DMA engines that care about alignment (PXA)
2070*4882a593Smuzhiyun */
2071*4882a593Smuzhiyun seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
2072*4882a593Smuzhiyun isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
2073*4882a593Smuzhiyun isp1362_hcd->stat2, isp1362_hcd->stat1);
2074*4882a593Smuzhiyun seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
2075*4882a593Smuzhiyun seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
2076*4882a593Smuzhiyun seq_printf(s, "max # ptds in ISTL fifo: %d\n",
2077*4882a593Smuzhiyun max(isp1362_hcd->istl_queue[0].stat_maxptds,
2078*4882a593Smuzhiyun isp1362_hcd->istl_queue[1].stat_maxptds));
2079*4882a593Smuzhiyun
2080*4882a593Smuzhiyun /* FIXME: don't show the following in suspended state */
2081*4882a593Smuzhiyun spin_lock_irq(&isp1362_hcd->lock);
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
2084*4882a593Smuzhiyun dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
2085*4882a593Smuzhiyun dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
2086*4882a593Smuzhiyun dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
2087*4882a593Smuzhiyun dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun for (i = 0; i < NUM_ISP1362_IRQS; i++)
2090*4882a593Smuzhiyun if (isp1362_hcd->irq_stat[i])
2091*4882a593Smuzhiyun seq_printf(s, "%-15s: %d\n",
2092*4882a593Smuzhiyun ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);
2093*4882a593Smuzhiyun
2094*4882a593Smuzhiyun dump_regs(s, isp1362_hcd);
2095*4882a593Smuzhiyun list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
2096*4882a593Smuzhiyun struct urb *urb;
2097*4882a593Smuzhiyun
2098*4882a593Smuzhiyun seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
2099*4882a593Smuzhiyun ({
2100*4882a593Smuzhiyun char *s;
2101*4882a593Smuzhiyun switch (ep->nextpid) {
2102*4882a593Smuzhiyun case USB_PID_IN:
2103*4882a593Smuzhiyun s = "in";
2104*4882a593Smuzhiyun break;
2105*4882a593Smuzhiyun case USB_PID_OUT:
2106*4882a593Smuzhiyun s = "out";
2107*4882a593Smuzhiyun break;
2108*4882a593Smuzhiyun case USB_PID_SETUP:
2109*4882a593Smuzhiyun s = "setup";
2110*4882a593Smuzhiyun break;
2111*4882a593Smuzhiyun case USB_PID_ACK:
2112*4882a593Smuzhiyun s = "status";
2113*4882a593Smuzhiyun break;
2114*4882a593Smuzhiyun default:
2115*4882a593Smuzhiyun s = "?";
2116*4882a593Smuzhiyun break;
2117*4882a593Smuzhiyun }
2118*4882a593Smuzhiyun s;}), ep->maxpacket);
2119*4882a593Smuzhiyun list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
2120*4882a593Smuzhiyun seq_printf(s, " urb%p, %d/%d\n", urb,
2121*4882a593Smuzhiyun urb->actual_length,
2122*4882a593Smuzhiyun urb->transfer_buffer_length);
2123*4882a593Smuzhiyun }
2124*4882a593Smuzhiyun }
2125*4882a593Smuzhiyun if (!list_empty(&isp1362_hcd->async))
2126*4882a593Smuzhiyun seq_printf(s, "\n");
2127*4882a593Smuzhiyun dump_ptd_queue(&isp1362_hcd->atl_queue);
2128*4882a593Smuzhiyun
2129*4882a593Smuzhiyun seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);
2130*4882a593Smuzhiyun
2131*4882a593Smuzhiyun list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
2132*4882a593Smuzhiyun seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
2133*4882a593Smuzhiyun isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);
2134*4882a593Smuzhiyun
2135*4882a593Smuzhiyun seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2136*4882a593Smuzhiyun ep->interval, ep,
2137*4882a593Smuzhiyun (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2138*4882a593Smuzhiyun ep->udev->devnum, ep->epnum,
2139*4882a593Smuzhiyun (ep->epnum == 0) ? "" :
2140*4882a593Smuzhiyun ((ep->nextpid == USB_PID_IN) ?
2141*4882a593Smuzhiyun "in" : "out"), ep->maxpacket);
2142*4882a593Smuzhiyun }
2143*4882a593Smuzhiyun dump_ptd_queue(&isp1362_hcd->intl_queue);
2144*4882a593Smuzhiyun
2145*4882a593Smuzhiyun seq_printf(s, "ISO:\n");
2146*4882a593Smuzhiyun
2147*4882a593Smuzhiyun list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
2148*4882a593Smuzhiyun seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n",
2149*4882a593Smuzhiyun ep->interval, ep,
2150*4882a593Smuzhiyun (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
2151*4882a593Smuzhiyun ep->udev->devnum, ep->epnum,
2152*4882a593Smuzhiyun (ep->epnum == 0) ? "" :
2153*4882a593Smuzhiyun ((ep->nextpid == USB_PID_IN) ?
2154*4882a593Smuzhiyun "in" : "out"), ep->maxpacket);
2155*4882a593Smuzhiyun }
2156*4882a593Smuzhiyun
2157*4882a593Smuzhiyun spin_unlock_irq(&isp1362_hcd->lock);
2158*4882a593Smuzhiyun seq_printf(s, "\n");
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun return 0;
2161*4882a593Smuzhiyun }
2162*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(isp1362);
2163*4882a593Smuzhiyun
/* expect just one isp1362_hcd per system */
static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
						      usb_debug_root,
						      isp1362_hcd,
						      &isp1362_fops);
}

static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	debugfs_remove(isp1362_hcd->debug_file);
}

/*-------------------------------------------------------------------------*/

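/*
 * Issue a software reset: write the HCSWRES magic value, set the OHCI
 * host controller reset bit and poll HCCMDSTAT (up to ~20 ms) until the
 * chip clears OHCI_HCR. Callers are expected to hold the HCD lock.
 */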
static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	int tmp = 20;

	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
	while (--tmp) {
		mdelay(1);
		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
			break;
	}
	if (!tmp)
		pr_err("Software reset timeout\n");
}

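/* Locked wrapper around __isp1362_sw_reset() for callers not holding the HCD lock */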
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	__isp1362_sw_reset(isp1362_hcd);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}

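/*
 * Partition the ISP1362's internal buffer memory into the ISTL
 * (isochronous), INTL (interrupt) and ATL (asynchronous, i.e. control/bulk)
 * regions, initialize the queue bookkeeping and program the buffer size,
 * block size, skip and last-PTD registers accordingly.
 */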
static int isp1362_mem_config(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 total;
	u16 istl_size = ISP1362_ISTL_BUFSIZE;
	u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
	u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
	u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
	u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
	u16 atl_size;
	int i;

	WARN_ON(istl_size & 3);
	WARN_ON(atl_blksize & 3);
	WARN_ON(intl_blksize & 3);
	WARN_ON(atl_blksize < PTD_HEADER_SIZE);
	WARN_ON(intl_blksize < PTD_HEADER_SIZE);

	BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
	if (atl_buffers > 32)
		atl_buffers = 32;
	atl_size = atl_buffers * atl_blksize;
	total = atl_size + intl_size + istl_size;
	dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
	dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
		 istl_size / 2, istl_size, 0, istl_size / 2);
	dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n",
		 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
		 intl_size, istl_size);
	dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n",
		 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
		 atl_size, istl_size + intl_size);
	dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
		 ISP1362_BUF_SIZE - total);

	if (total > ISP1362_BUF_SIZE) {
		dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
			__func__, total, ISP1362_BUF_SIZE);
		return -ENOMEM;
	}

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	for (i = 0; i < 2; i++) {
		isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2;
		isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
		isp1362_hcd->istl_queue[i].blk_size = 4;
		INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
		snprintf(isp1362_hcd->istl_queue[i].name,
			 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
		DBG(3, "%s: %5s buf $%04x %d\n", __func__,
		    isp1362_hcd->istl_queue[i].name,
		    isp1362_hcd->istl_queue[i].buf_start,
		    isp1362_hcd->istl_queue[i].buf_size);
	}
	isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);

	isp1362_hcd->intl_queue.buf_start = istl_size;
	isp1362_hcd->intl_queue.buf_size = intl_size;
	isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
	isp1362_hcd->intl_queue.blk_size = intl_blksize;
	isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
	isp1362_hcd->intl_queue.skip_map = ~0;
	INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);

	isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
			    isp1362_hcd->intl_queue.buf_size);
	isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
			    isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
	isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
			    1 << (ISP1362_INTL_BUFFERS - 1));

	isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
	isp1362_hcd->atl_queue.buf_size = atl_size;
	isp1362_hcd->atl_queue.buf_count = atl_buffers;
	isp1362_hcd->atl_queue.blk_size = atl_blksize;
	isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
	isp1362_hcd->atl_queue.skip_map = ~0;
	INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);

	isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
			    isp1362_hcd->atl_queue.buf_size);
	isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
			    isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
	isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
	isp1362_write_reg32(isp1362_hcd, HCATLLAST,
			    1 << (atl_buffers - 1));

	snprintf(isp1362_hcd->atl_queue.name,
		 sizeof(isp1362_hcd->atl_queue.name), "ATL");
	snprintf(isp1362_hcd->intl_queue.name,
		 sizeof(isp1362_hcd->intl_queue.name), "INTL");
	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
	    isp1362_hcd->intl_queue.name,
	    isp1362_hcd->intl_queue.buf_start,
	    ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
	    isp1362_hcd->intl_queue.buf_size);
	DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
	    isp1362_hcd->atl_queue.name,
	    isp1362_hcd->atl_queue.buf_start,
	    atl_buffers, isp1362_hcd->atl_queue.blk_size,
	    isp1362_hcd->atl_queue.buf_size);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}

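/*
 * Reset the controller: use the board-specific reset/clock hooks if the
 * platform provides them, otherwise fall back to the software reset, then
 * wait (up to 100 ms) for the chip to report a stable clock (HCuPINT_CLKRDY).
 */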
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;
	unsigned long flags;
	int clkrdy = 0;

	pr_debug("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}

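/*
 * Stop the controller: disable all interrupt sources, switch off root hub
 * port power, and put the chip back into reset (board hook or software
 * reset); finally stop the board clock if a hook is provided.
 */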
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_debug("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		__isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}

#ifdef CHIP_BUFFER_TEST
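/*
 * Optional sanity test of the on-chip buffer memory (enabled via
 * CHIP_BUFFER_TEST): write known patterns at various sizes and offsets,
 * read them back and compare, to catch incorrect bus access timing.
 */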
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					       __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		for (offset = 0; offset < 256; offset++) {
			int test_size = 0;

			yield();

			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					       __func__, offset);
					break;
				}
				pr_warn("%s: memory check with offset %02x ok after second read\n",
					__func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
#endif

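/*
 * Bring the controller up: verify the chip ID, apply the board-specific
 * hardware configuration (HCHWCFG), lay out the buffer memory, program the
 * root hub descriptors and frame interval, then enable interrupts, switch
 * the OHCI state machine to USB_OPER and turn on global port power.
 */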
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static const struct hc_driver isp1362_hc_driver = {
	.description = hcd_name,
	.product_desc = "ISP1362 Host Controller",
	.hcd_priv_size = sizeof(struct isp1362_hcd),

	.irq = isp1362_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	.reset = isp1362_hc_reset,
	.start = isp1362_hc_start,
	.stop = isp1362_hc_stop,

	.urb_enqueue = isp1362_urb_enqueue,
	.urb_dequeue = isp1362_urb_dequeue,
	.endpoint_disable = isp1362_endpoint_disable,

	.get_frame_number = isp1362_get_frame,

	.hub_status_data = isp1362_hub_status_data,
	.hub_control = isp1362_hub_control,
	.bus_suspend = isp1362_bus_suspend,
	.bus_resume = isp1362_bus_resume,
};

/*-------------------------------------------------------------------------*/

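/*
 * Platform device removal: drop the debugfs entry, then unregister and
 * release the HCD.
 */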
static int isp1362_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);

	remove_debug_file(isp1362_hcd);
	DBG(0, "%s: Removing HCD\n", __func__);
	usb_remove_hcd(hcd);
	DBG(0, "%s: put_hcd\n", __func__);
	usb_put_hcd(hcd);
	DBG(0, "%s: Done\n", __func__);

	return 0;
}

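/*
 * Platform device probe: map the address and data register resources,
 * create and initialize the HCD, derive the IRQ trigger mode from the IRQ
 * resource flags and register the HCD with the USB core.
 *
 * For illustration only (addresses, IRQ number and field values below are
 * hypothetical, not taken from any particular board file), a board is
 * expected to register something along these lines:
 *
 *	static struct resource isp1362_resources[] = {
 *		[0] = DEFINE_RES_MEM(0x08000000, 2),	// data register
 *		[1] = DEFINE_RES_MEM(0x08000002, 2),	// address register
 *		[2] = DEFINE_RES_IRQ(42),
 *	};
 *
 *	static struct isp1362_platform_data isp1362_pdata = {
 *		.sel15Kres	= 1,
 *		.oc_enable	= 1,
 *		.int_act_high	= 0,
 *	};
 *
 * with the IORESOURCE_MEM entries at index 0 (data) and index 1 (address)
 * matching the platform_get_resource()/devm_platform_ioremap_resource()
 * calls below.
 */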
static int isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *data, *irq_res;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	unsigned int irq_flags = 0;

	if (usb_disabled())
		return -ENODEV;

	/* basic sanity checks first. board-specific init logic should
	 * have initialized these three resources and probably board-specific
	 * platform_data. we don't probe for IRQs, and do only minimal
	 * sanity checking.
	 */
	if (pdev->num_resources < 3)
		return -ENODEV;

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	irq = irq_res->start;

	addr_reg = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(addr_reg))
		return PTR_ERR(addr_reg);

	data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data_reg = devm_ioremap_resource(&pdev->dev, data);
	if (IS_ERR(data_reg))
		return PTR_ERR(data_reg);

	/* allocate and initialize hcd */
	hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = data->start;
	isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	isp1362_hcd->data_reg = data_reg;
	isp1362_hcd->addr_reg = addr_reg;

	isp1362_hcd->next_statechange = jiffies;
	spin_lock_init(&isp1362_hcd->lock);
	INIT_LIST_HEAD(&isp1362_hcd->async);
	INIT_LIST_HEAD(&isp1362_hcd->periodic);
	INIT_LIST_HEAD(&isp1362_hcd->isoc);
	INIT_LIST_HEAD(&isp1362_hcd->remove_list);
	isp1362_hcd->board = dev_get_platdata(&pdev->dev);
#if USE_PLATFORM_DELAY
	if (!isp1362_hcd->board->delay) {
		dev_err(hcd->self.controller, "No platform delay function given\n");
		retval = -ENODEV;
		goto err;
	}
#endif

	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irq_flags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irq_flags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irq_flags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irq_flags |= IRQF_TRIGGER_LOW;

	retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED);
	if (retval != 0)
		goto err;
	device_wakeup_enable(hcd->self.controller);

	dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq);

	create_debug_file(isp1362_hcd);

	return 0;

 err:
	usb_put_hcd(hcd);

	return retval;
}

#ifdef CONFIG_PM
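/*
 * Legacy platform PM hooks: on a freeze event the root hub is suspended
 * through isp1362_bus_suspend(); otherwise only global port power is
 * switched off. Resume reverses this, re-enabling port power or resuming
 * the bus depending on the recorded power state.
 */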
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	int retval = 0;

	DBG(0, "%s: Suspending device\n", __func__);

	if (state.event == PM_EVENT_FREEZE) {
		DBG(0, "%s: Suspending root hub\n", __func__);
		retval = isp1362_bus_suspend(hcd);
	} else {
		DBG(0, "%s: Suspending RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	}
	if (retval == 0)
		pdev->dev.power.power_state = state;
	return retval;
}

static int isp1362_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(0, "%s: Resuming\n", __func__);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		DBG(0, "%s: Resume RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return 0;
	}

	pdev->dev.power.power_state = PMSG_ON;

	return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
}
#else
#define isp1362_suspend NULL
#define isp1362_resume NULL
#endif

static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = isp1362_remove,

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		.name = hcd_name,
	},
};

module_platform_driver(isp1362_driver);