// SPDX-License-Identifier: GPL-2.0
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 *
 * Intel documents this fairly well, and as far as I know there
 * are no royalties or anything like that, but even so there are
 * people who decided that they want to do the same thing in a
 * completely different way.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/pm.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/bitops.h>
#include <linux/dmi.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "uhci-hcd.h"

/*
 * Version Information
 */
#define DRIVER_AUTHOR							\
	"Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, "		\
	"Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, "	\
	"Roman Weissgaerber, Alan Stern"
#define DRIVER_DESC "USB Universal Host Controller Interface driver"

/* for flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");

/*
 * debug = 0, no debugging messages
 * debug = 1, dump failed URBs except for stalls
 * debug = 2, dump all failed URBs (including stalls)
 *            show all queues in /sys/kernel/debug/uhci/[pci_addr]
 * debug = 3, show all TDs in URBs when dumping
 */
#ifdef CONFIG_DYNAMIC_DEBUG

static int debug = 1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");
static char *errbuf;

#else

#define debug 0
#define errbuf NULL

#endif


#define ERRBUF_LEN    (32 * 1024)

static struct kmem_cache *uhci_up_cachep;	/* urb_priv */

static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
static void wakeup_rh(struct uhci_hcd *uhci);
static void uhci_get_current_frame_number(struct uhci_hcd *uhci);

/*
 * Calculate the link pointer DMA value for the first Skeleton QH in a frame.
 */
static __hc32 uhci_frame_skel_link(struct uhci_hcd *uhci, int frame)
{
	int skelnum;

	/*
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 *
	 * ffs (Find First bit Set) does exactly what we need:
	 * 1,3,5,...  => ffs = 0 => use period-2 QH = skelqh[8],
	 * 2,6,10,... => ffs = 1 => use period-4 QH = skelqh[7], etc.
	 * ffs >= 7 => not on any high-period queue, so use
	 *	period-1 QH = skelqh[9].
	 * Add in UHCI_NUMFRAMES to insure at least one bit is set.
	 */
	skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES);
	if (skelnum <= 1)
		skelnum = 9;
	return LINK_TO_QH(uhci, uhci->skelqh[skelnum]);
}
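
/*
 * Illustrative spot checks of the mapping above (assuming the usual
 * UHCI_NUMFRAMES value of 1024 and the skelqh layout described in the
 * comment):
 *
 *	frame 1   -> __ffs = 0  -> skelnum 8  (period-2 queue)
 *	frame 2   -> __ffs = 1  -> skelnum 7  (period-4 queue)
 *	frame 64  -> __ffs = 6  -> skelnum 2  (period-128 queue)
 *	frame 0   -> __ffs = 10 -> skelnum clamped to 9 (period-1 queue)
 *	frame 128 -> __ffs = 7  -> skelnum 1, clamped to 9 (period-1 queue)
 */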

#include "uhci-debug.c"
#include "uhci-q.c"
#include "uhci-hub.c"

/*
 * Finish up a host controller reset and update the recorded state.
 */
static void finish_reset(struct uhci_hcd *uhci)
{
	int port;

	/* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
	 * bits in the port status and control registers.
	 * We have to clear them by hand.
	 */
	for (port = 0; port < uhci->rh_numports; ++port)
		uhci_writew(uhci, 0, USBPORTSC1 + (port * 2));

	uhci->port_c_suspend = uhci->resuming_ports = 0;
	uhci->rh_state = UHCI_RH_RESET;
	uhci->is_stopped = UHCI_IS_STOPPED;
	clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
}

/*
 * Last rites for a defunct/nonfunctional controller
 * or one we don't want to use any more.
 */
static void uhci_hc_died(struct uhci_hcd *uhci)
{
	uhci_get_current_frame_number(uhci);
	uhci->reset_hc(uhci);
	finish_reset(uhci);
	uhci->dead = 1;

	/* The current frame may already be partway finished */
	++uhci->frame_number;
}

/*
 * Initialize a controller that was newly discovered or has lost power
 * or otherwise been reset while it was suspended.  In none of these cases
 * can we be sure of its previous state.
 */
static void check_and_reset_hc(struct uhci_hcd *uhci)
{
	if (uhci->check_and_reset_hc(uhci))
		finish_reset(uhci);
}

#if defined(CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC)
/*
 * The two functions below are generic reset functions that are used on systems
 * that do not have keyboard and mouse legacy support. We assume that we are
 * running on such a system if CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC is defined.
 */

/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
static void uhci_generic_reset_hc(struct uhci_hcd *uhci)
{
	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	uhci_writew(uhci, USBCMD_HCRESET, USBCMD);
	mb();
	udelay(5);
	if (uhci_readw(uhci, USBCMD) & USBCMD_HCRESET)
		dev_warn(uhci_dev(uhci), "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	uhci_writew(uhci, 0, USBINTR);
	uhci_writew(uhci, 0, USBCMD);
}

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
static int uhci_generic_check_and_reset_hc(struct uhci_hcd *uhci)
{
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */

	cmd = uhci_readw(uhci, USBCMD);
	if ((cmd & USBCMD_RS) || !(cmd & USBCMD_CF) || !(cmd & USBCMD_EGSM)) {
		dev_dbg(uhci_dev(uhci), "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = uhci_readw(uhci, USBINTR);
	if (intr & (~USBINTR_RESUME)) {
		dev_dbg(uhci_dev(uhci), "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	dev_dbg(uhci_dev(uhci), "Performing full reset\n");
	uhci_generic_reset_hc(uhci);
	return 1;
}
#endif /* CONFIG_USB_UHCI_SUPPORT_NON_PCI_HC */

/*
 * Store the basic register settings needed by the controller.
 */
static void configure_hc(struct uhci_hcd *uhci)
{
	/* Set the frame length to the default: 1 ms exactly */
	uhci_writeb(uhci, USBSOF_DEFAULT, USBSOF);

	/* Store the frame list base address */
	uhci_writel(uhci, uhci->frame_dma_handle, USBFLBASEADD);

	/* Set the current frame number */
	uhci_writew(uhci, uhci->frame_number & UHCI_MAX_SOF_NUMBER,
			USBFRNUM);

	/* perform any arch/bus specific configuration */
	if (uhci->configure_hc)
		uhci->configure_hc(uhci);
}

static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
{
	/*
	 * If we have to ignore overcurrent events then almost by definition
	 * we can't depend on resume-detect interrupts.
	 *
	 * Those interrupts also don't seem to work on ASpeed SoCs.
	 */
	if (ignore_oc || uhci_is_aspeed(uhci))
		return 1;

	return uhci->resume_detect_interrupts_are_broken ?
		uhci->resume_detect_interrupts_are_broken(uhci) : 0;
}

static int global_suspend_mode_is_broken(struct uhci_hcd *uhci)
{
	return uhci->global_suspend_mode_is_broken ?
		uhci->global_suspend_mode_is_broken(uhci) : 0;
}

static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	int auto_stop;
	int int_enable, egsm_enable, wakeup_enable;
	struct usb_device *rhdev = uhci_to_hcd(uhci)->self.root_hub;

	auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
	dev_dbg(&rhdev->dev, "%s%s\n", __func__,
			(auto_stop ? " (auto-stop)" : ""));

	/* Start off by assuming Resume-Detect interrupts and EGSM work
	 * and that remote wakeups should be enabled.
	 */
	egsm_enable = USBCMD_EGSM;
	int_enable = USBINTR_RESUME;
	wakeup_enable = 1;

	/*
	 * In auto-stop mode, we must be able to detect new connections.
	 * The user can force us to poll by disabling remote wakeup;
	 * otherwise we will use the EGSM/RD mechanism.
	 */
	if (auto_stop) {
		if (!device_may_wakeup(&rhdev->dev))
			egsm_enable = int_enable = 0;
	}

#ifdef CONFIG_PM
	/*
	 * In bus-suspend mode, we use the wakeup setting specified
	 * for the root hub.
	 */
	else {
		if (!rhdev->do_remote_wakeup)
			wakeup_enable = 0;
	}
#endif

	/*
	 * UHCI doesn't distinguish between wakeup requests from downstream
	 * devices and local connect/disconnect events.  There's no way to
	 * enable one without the other; both are controlled by EGSM.  Thus
	 * if wakeups are disallowed then EGSM must be turned off -- in which
	 * case remote wakeup requests from downstream during system sleep
	 * will be lost.
	 *
	 * In addition, if EGSM is broken then we can't use it.  Likewise,
	 * if Resume-Detect interrupts are broken then we can't use them.
	 *
	 * Finally, neither EGSM nor RD is useful by itself.  Without EGSM,
	 * the RD status bit will never get set.  Without RD, the controller
	 * won't generate interrupts to tell the system about wakeup events.
	 */
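	/*
	 * In short: EGSM and RD are cleared together whenever wakeups are
	 * disallowed or either mechanism is broken.  For example, with the
	 * ignore_oc module parameter set, RD interrupts are treated as
	 * broken, so a wakeup-enabled suspend falls back to root-hub
	 * polling (see the HCD_FLAG_POLL_RH logic below).
	 */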
	if (!wakeup_enable || global_suspend_mode_is_broken(uhci) ||
			resume_detect_interrupts_are_broken(uhci))
		egsm_enable = int_enable = 0;

	uhci->RD_enable = !!int_enable;
	uhci_writew(uhci, int_enable, USBINTR);
	uhci_writew(uhci, egsm_enable | USBCMD_CF, USBCMD);
	mb();
	udelay(5);

	/* If we're auto-stopping then no devices have been attached
	 * for a while, so there shouldn't be any active URBs and the
	 * controller should stop after a few microseconds.  Otherwise
	 * we will give the controller one frame to stop.
	 */
	if (!auto_stop && !(uhci_readw(uhci, USBSTS) & USBSTS_HCH)) {
		uhci->rh_state = UHCI_RH_SUSPENDING;
		spin_unlock_irq(&uhci->lock);
		msleep(1);
		spin_lock_irq(&uhci->lock);
		if (uhci->dead)
			return;
	}
	if (!(uhci_readw(uhci, USBSTS) & USBSTS_HCH))
		dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");

	uhci_get_current_frame_number(uhci);

	uhci->rh_state = new_state;
	uhci->is_stopped = UHCI_IS_STOPPED;

	/*
	 * If remote wakeup is enabled but either EGSM or RD interrupts
	 * doesn't work, then we won't get an interrupt when a wakeup event
	 * occurs.  Thus the suspended root hub needs to be polled.
	 */
	if (wakeup_enable && (!int_enable || !egsm_enable))
		set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
	else
		clear_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);

	uhci_scan_schedule(uhci);
	uhci_fsbr_off(uhci);
}

static void start_rh(struct uhci_hcd *uhci)
{
	uhci->is_stopped = 0;

	/*
	 * Clear stale status bits on Aspeed as we get a stale HCH
	 * which causes problems later on
	 */
	if (uhci_is_aspeed(uhci))
		uhci_writew(uhci, uhci_readw(uhci, USBSTS), USBSTS);

	/* Mark it configured and running with a 64-byte max packet.
	 * All interrupts are enabled, even though RESUME won't do anything.
	 */
	uhci_writew(uhci, USBCMD_RS | USBCMD_CF | USBCMD_MAXP, USBCMD);
	uhci_writew(uhci, USBINTR_TIMEOUT | USBINTR_RESUME |
		USBINTR_IOC | USBINTR_SP, USBINTR);
	mb();
	uhci->rh_state = UHCI_RH_RUNNING;
	set_bit(HCD_FLAG_POLL_RH, &uhci_to_hcd(uhci)->flags);
}

static void wakeup_rh(struct uhci_hcd *uhci)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	dev_dbg(&uhci_to_hcd(uhci)->self.root_hub->dev,
			"%s%s\n", __func__,
			uhci->rh_state == UHCI_RH_AUTO_STOPPED ?
				" (auto-start)" : "");

	/* If we are auto-stopped then no devices are attached so there's
	 * no need for wakeup signals.  Otherwise we send Global Resume
	 * for 20 ms.
	 */
	if (uhci->rh_state == UHCI_RH_SUSPENDED) {
		unsigned egsm;

		/* Keep EGSM on if it was set before */
		egsm = uhci_readw(uhci, USBCMD) & USBCMD_EGSM;
		uhci->rh_state = UHCI_RH_RESUMING;
		uhci_writew(uhci, USBCMD_FGR | USBCMD_CF | egsm, USBCMD);
		spin_unlock_irq(&uhci->lock);
		msleep(20);
		spin_lock_irq(&uhci->lock);
		if (uhci->dead)
			return;

		/* End Global Resume and wait for EOP to be sent */
		uhci_writew(uhci, USBCMD_CF, USBCMD);
		mb();
		udelay(4);
		if (uhci_readw(uhci, USBCMD) & USBCMD_FGR)
			dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n");
	}

	start_rh(uhci);

	/* Restart root hub polling */
	mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
}

static irqreturn_t uhci_irq(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned short status;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause.  Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
	 */
	status = uhci_readw(uhci, USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	uhci_writew(uhci, status, USBSTS);		/* Clear it */

	spin_lock(&uhci->lock);
	if (unlikely(!uhci->is_initialized))	/* not yet configured */
		goto done;

	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci),
				"host system error, PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci),
				"host controller process error, something bad happened!\n");
		if (status & USBSTS_HCH) {
			if (uhci->rh_state >= UHCI_RH_RUNNING) {
				dev_err(uhci_dev(uhci),
					"host controller halted, very bad!\n");
				if (debug > 1 && errbuf) {
					/* Print the schedule for debugging */
					uhci_sprint_schedule(uhci, errbuf,
						ERRBUF_LEN - EXTRA_SPACE);
					lprintk(errbuf);
				}
				uhci_hc_died(uhci);
				usb_hc_died(hcd);

				/* Force a callback in case there are
				 * pending unlinks */
				mod_timer(&hcd->rh_timer, jiffies);
			}
		}
	}

	if (status & USBSTS_RD) {
		spin_unlock(&uhci->lock);
		usb_hcd_poll_rh_status(hcd);
	} else {
		uhci_scan_schedule(uhci);
 done:
		spin_unlock(&uhci->lock);
	}

	return IRQ_HANDLED;
}

/*
 * Store the current frame number in uhci->frame_number if the controller
 * is running.  Expand from 11 bits (of which we use only 10) to a
 * full-sized integer.
 *
 * Like many other parts of the driver, this code relies on being polled
 * more than once per second as long as the controller is running.
 */
static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
	if (!uhci->is_stopped) {
		unsigned delta;

		delta = (uhci_readw(uhci, USBFRNUM) - uhci->frame_number) &
				(UHCI_NUMFRAMES - 1);
		uhci->frame_number += delta;
	}
}
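
/*
 * A worked example of the wraparound arithmetic above (illustrative
 * numbers only): if the low bits of uhci->frame_number are 1022 and the
 * hardware counter has wrapped around to read 5, then
 * delta = (5 - 1022) & 1023 = 7, so frame_number advances by 7.
 */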

/*
 * De-allocate all resources
 */
static void release_uhci(struct uhci_hcd *uhci)
{
	int i;


	spin_lock_irq(&uhci->lock);
	uhci->is_initialized = 0;
	spin_unlock_irq(&uhci->lock);

	debugfs_remove(uhci->dentry);

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		uhci_free_qh(uhci, uhci->skelqh[i]);

	uhci_free_td(uhci, uhci->term_td);

	dma_pool_destroy(uhci->qh_pool);

	dma_pool_destroy(uhci->td_pool);

	kfree(uhci->frame_cpu);

	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);
}

/*
 * Allocate a frame list, and then setup the skeleton
 *
 * The hardware doesn't really know any difference
 * in the queues, but the order does matter for the
 * protocols higher up.  The order in which the queues
 * are encountered by the hardware is:
 *
 *  - All isochronous events are handled before any
 *    of the queues. We don't do that here, because
 *    we'll create the actual TD entries on demand.
 *  - The first queue is the high-period interrupt queue.
 *  - The second queue is the period-1 interrupt and async
 *    (low-speed control, full-speed control, then bulk) queue.
 *  - The third queue is the terminating bandwidth reclamation queue,
 *    which contains no members, loops back to itself, and is present
 *    only when FSBR is on and there are no full-speed control or bulk QHs.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i;
	struct dentry __maybe_unused *dentry;

	hcd->uses_new_polling = 1;
	/* Accept arbitrarily long scatter-gather lists */
	if (!hcd->localmem_pool)
		hcd->self.sg_tablesize = ~0;

	spin_lock_init(&uhci->lock);
	timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
	INIT_LIST_HEAD(&uhci->idle_qh_list);
	init_waitqueue_head(&uhci->waitqh);

#ifdef UHCI_DEBUG_OPS
	uhci->dentry = debugfs_create_file(hcd->self.bus_name,
					   S_IFREG|S_IRUGO|S_IWUSR,
					   uhci_debugfs_root, uhci,
					   &uhci_debug_operations);
#endif

	uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
					 UHCI_NUMFRAMES * sizeof(*uhci->frame),
					 &uhci->frame_dma_handle, GFP_KERNEL);
	if (!uhci->frame) {
		dev_err(uhci_dev(uhci),
			"unable to allocate consistent memory for frame list\n");
		goto err_alloc_frame;
	}

	uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
			GFP_KERNEL);
	if (!uhci->frame_cpu)
		goto err_alloc_frame_cpu;

	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	uhci->term_td = uhci_alloc_td(uhci);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1 = async
	 */
	for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i)
		uhci->skelqh[i]->link = LINK_TO_QH(uhci, uhci->skel_async_qh);
	uhci->skel_async_qh->link = UHCI_PTR_TERM(uhci);
	uhci->skel_term_qh->link = LINK_TO_QH(uhci, uhci->skel_term_qh);

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci, uhci->term_td, 0, uhci_explen(0) |
			(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = UHCI_PTR_TERM(uhci);
	uhci->skel_async_qh->element = uhci->skel_term_qh->element =
		LINK_TO_TD(uhci, uhci->term_td);
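
	/*
	 * A sketch of the resulting skeleton schedule (iso TDs and the
	 * per-URB queues are added at run time by uhci-q.c):
	 *
	 *	frame[i] -> interrupt skeleton QH (period 128 ... 2, or
	 *	    directly the period-1 queue) -> skel_async_qh -> end
	 *
	 * skel_term_qh loops back to itself and is put into the schedule
	 * only while FSBR is active (see the comment before uhci_start);
	 * both the async and terminating QHs point their element links
	 * at the dummy term_td set up above.
	 */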

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {

		/* Only place we don't use the frame list routines */
		uhci->frame[i] = uhci_frame_skel_link(uhci, i);
	}

	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in configure_hc().
	 */
	mb();

	spin_lock_irq(&uhci->lock);
	configure_hc(uhci);
	uhci->is_initialized = 1;
	start_rh(uhci);
	spin_unlock_irq(&uhci->lock);
	return 0;

/*
 * error exits:
 */
err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		if (uhci->skelqh[i])
			uhci_free_qh(uhci, uhci->skelqh[i]);
	}

	uhci_free_td(uhci, uhci->term_td);

err_alloc_term_td:
	dma_pool_destroy(uhci->qh_pool);

err_create_qh_pool:
	dma_pool_destroy(uhci->td_pool);

err_create_td_pool:
	kfree(uhci->frame_cpu);

err_alloc_frame_cpu:
	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);

err_alloc_frame:
	debugfs_remove(uhci->dentry);

	return retval;
}

static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	spin_lock_irq(&uhci->lock);
	if (HCD_HW_ACCESSIBLE(hcd) && !uhci->dead)
		uhci_hc_died(uhci);
	uhci_scan_schedule(uhci);
	spin_unlock_irq(&uhci->lock);
	synchronize_irq(hcd->irq);

	del_timer_sync(&uhci->fsbr_timer);
	release_uhci(uhci);
}

#ifdef CONFIG_PM
static int uhci_rh_suspend(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;

	spin_lock_irq(&uhci->lock);
	if (!HCD_HW_ACCESSIBLE(hcd))
		rc = -ESHUTDOWN;
	else if (uhci->dead)
		;		/* Dead controllers tell no tales */

	/* Once the controller is stopped, port resumes that are already
	 * in progress won't complete.  Hence if remote wakeup is enabled
	 * for the root hub and any ports are in the middle of a resume or
	 * remote wakeup, we must fail the suspend.
	 */
	else if (hcd->self.root_hub->do_remote_wakeup &&
			uhci->resuming_ports) {
		dev_dbg(uhci_dev(uhci),
			"suspend failed because a port is resuming\n");
		rc = -EBUSY;
	} else
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	spin_unlock_irq(&uhci->lock);
	return rc;
}

static int uhci_rh_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;

	spin_lock_irq(&uhci->lock);
	if (!HCD_HW_ACCESSIBLE(hcd))
		rc = -ESHUTDOWN;
	else if (!uhci->dead)
		wakeup_rh(uhci);
	spin_unlock_irq(&uhci->lock);
	return rc;
}

#endif

/* Wait until a particular device/endpoint's QH is idle, and free it */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct uhci_qh *qh;

	spin_lock_irq(&uhci->lock);
	qh = (struct uhci_qh *) hep->hcpriv;
	if (qh == NULL)
		goto done;

	while (qh->state != QH_STATE_IDLE) {
		++uhci->num_waiting;
		spin_unlock_irq(&uhci->lock);
		wait_event_interruptible(uhci->waitqh,
				qh->state == QH_STATE_IDLE);
		spin_lock_irq(&uhci->lock);
		--uhci->num_waiting;
	}

	uhci_free_qh(uhci, qh);
done:
	spin_unlock_irq(&uhci->lock);
}

static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned frame_number;
	unsigned delta;

	/* Minimize latency by avoiding the spinlock */
	frame_number = uhci->frame_number;
	barrier();
	delta = (uhci_readw(uhci, USBFRNUM) - frame_number) &
			(UHCI_NUMFRAMES - 1);
	return frame_number + delta;
}

/* Determines number of ports on controller */
static int uhci_count_ports(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned io_size = (unsigned) hcd->rsrc_len;
	int port;

	/* The UHCI spec says devices must have 2 ports, and goes on to say
	 * they may have more but gives no way to determine how many there
	 * are.  However according to the UHCI spec, Bit 7 of the port
	 * status and control register is always set to 1.  So we try to
	 * use this to our advantage.  Another common failure mode when
	 * a nonexistent register is addressed is to return all ones, so
	 * we test for that also.
	 */
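	/* As a ballpark figure: a typical UHCI controller decodes a
	 * 32-byte I/O region with USBPORTSC1 at offset 0x10, so the loop
	 * below examines at most (0x20 - 0x10) / 2 = 8 candidate port
	 * registers; the real bound comes from hcd->rsrc_len.
	 */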
	for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
		unsigned int portstatus;

		portstatus = uhci_readw(uhci, USBPORTSC1 + (port * 2));
		if (!(portstatus & 0x0080) || portstatus == 0xffff)
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* Anything greater than 7 is weird so we'll ignore it. */
	if (port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci),
			"port count misdetected? forcing to 2 ports\n");
		port = 2;
	}

	return port;
}

static const char hcd_name[] = "uhci_hcd";

#ifdef CONFIG_USB_PCI
#include "uhci-pci.c"
#define	PCI_DRIVER		uhci_pci_driver
#endif

#ifdef CONFIG_SPARC_LEON
#include "uhci-grlib.c"
#define PLATFORM_DRIVER		uhci_grlib_driver
#endif

#ifdef CONFIG_USB_UHCI_PLATFORM
#include "uhci-platform.c"
#define PLATFORM_DRIVER		uhci_platform_driver
#endif

#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER)
#error "missing bus glue for uhci-hcd"
#endif

static int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	if (usb_disabled())
		return -ENODEV;

	printk(KERN_INFO "uhci_hcd: " DRIVER_DESC "%s\n",
			ignore_oc ? ", overcurrent ignored" : "");
	set_bit(USB_UHCI_LOADED, &usb_hcds_loaded);

#ifdef CONFIG_DYNAMIC_DEBUG
	errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
	if (!errbuf)
		goto errbuf_failed;
	uhci_debugfs_root = debugfs_create_dir("uhci", usb_debug_root);
#endif

	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
		sizeof(struct urb_priv), 0, 0, NULL);
	if (!uhci_up_cachep)
		goto up_failed;

#ifdef PLATFORM_DRIVER
	retval = platform_driver_register(&PLATFORM_DRIVER);
	if (retval < 0)
		goto clean0;
#endif

#ifdef PCI_DRIVER
	retval = pci_register_driver(&PCI_DRIVER);
	if (retval < 0)
		goto clean1;
#endif

	return 0;

#ifdef PCI_DRIVER
clean1:
#endif
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
	kmem_cache_destroy(uhci_up_cachep);

up_failed:
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
	debugfs_remove(uhci_debugfs_root);

	kfree(errbuf);

errbuf_failed:
#endif

	clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
	return retval;
}

static void __exit uhci_hcd_cleanup(void)
{
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
#endif
	kmem_cache_destroy(uhci_up_cachep);
	debugfs_remove(uhci_debugfs_root);
#ifdef CONFIG_DYNAMIC_DEBUG
	kfree(errbuf);
#endif
	clear_bit(USB_UHCI_LOADED, &usb_hcds_loaded);
}

module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");