// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright Linus Torvalds 1999
 * (C) Copyright Johannes Erdfelt 1999-2001
 * (C) Copyright Andreas Gal 1999
 * (C) Copyright Gregory P. Smith 1999
 * (C) Copyright Deti Fliegl 1999
 * (C) Copyright Randy Dunlap 2000
 * (C) Copyright David Brownell 2000-2002
 */

#include <linux/bcd.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/utsname.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kcov.h>

#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/otg.h>

#include "usb.h"
#include "phy.h"


/*-------------------------------------------------------------------------*/

/*
 * USB Host Controller Driver framework
 *
 * Plugs into usbcore (usb_bus) and lets HCDs share code, minimizing
 * HCD-specific behaviors/bugs.
 *
 * This does error checks, tracks devices and urbs, and delegates to a
 * "hc_driver" only for code (and data) that really needs to know about
 * hardware differences. That includes root hub registers, i/o queues,
 * and so on ... but as little else as possible.
 *
 * Shared code includes most of the "root hub" code (these are emulated,
 * though each HC's hardware works differently) and PCI glue, plus request
 * tracking overhead. The HCD code should only block on spinlocks or on
 * hardware handshaking; blocking on software events (such as other kernel
 * threads releasing resources, or completing actions) is all generic.
 *
 * As it happens, the USB 2.0 spec says this would be invisible inside the
 * "USBD", and would consist mostly of an "HCDI" (HCD Interface) along with
 * some APIs used only by the hub driver ... and that neither should be
 * seen or used by usb client device drivers.
 *
 * Contributors of ideas or unattributed patches include: David Brownell,
 * Roman Weissgaerber, Rory Bolt, Greg Kroah-Hartman, ...
 *
 * HISTORY:
 * 2002-02-21	Pull in most of the usb_bus support from usb.c; some
 *		associated cleanup. "usb_hcd" still != "usb_bus".
 * 2001-12-12	Initial patch version for Linux 2.5.1 kernel.
 */
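
/*
 * For orientation only (a minimal sketch; the foo_* names are hypothetical
 * and the field list is not exhaustive): an HCD plugs into this framework
 * by filling in an hc_driver with its hardware-specific callbacks and
 * handing it to usbcore:
 *
 *	static const struct hc_driver foo_hc_driver = {
 *		.description	= "foo-hcd",
 *		.product_desc	= "Foo Host Controller",
 *		.hcd_priv_size	= sizeof(struct foo_hcd),
 *		.flags		= HCD_MEMORY | HCD_USB2,
 *		.urb_enqueue	= foo_urb_enqueue,
 *		.urb_dequeue	= foo_urb_dequeue,
 *		.hub_status_data = foo_hub_status_data,
 *		.hub_control	= foo_hub_control,
 *	};
 *
 *	hcd = usb_create_hcd(&foo_hc_driver, dev, dev_name(dev));
 *	retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
 *
 * Everything else (root-hub emulation, URB bookkeeping, bus registration)
 * is handled by the shared code below.
 */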

/*-------------------------------------------------------------------------*/

/* Keep track of which host controller drivers are loaded */
unsigned long usb_hcds_loaded;
EXPORT_SYMBOL_GPL(usb_hcds_loaded);

/* host controllers we manage */
DEFINE_IDR (usb_bus_idr);
EXPORT_SYMBOL_GPL (usb_bus_idr);

/* used when allocating bus numbers */
#define USB_MAXBUS		64

/* used when updating list of hcds */
DEFINE_MUTEX(usb_bus_idr_lock);	/* exported only for usbfs */
EXPORT_SYMBOL_GPL (usb_bus_idr_lock);

/* used for controlling access to virtual root hubs */
static DEFINE_SPINLOCK(hcd_root_hub_lock);

/* used when updating an endpoint's URB list */
static DEFINE_SPINLOCK(hcd_urb_list_lock);

/* used to protect against unlinking URBs after the device is gone */
static DEFINE_SPINLOCK(hcd_urb_unlink_lock);

/* wait queue for synchronous unlinks */
DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);

/*-------------------------------------------------------------------------*/

/*
 * Sharable chunks of root hub code.
 */

/*-------------------------------------------------------------------------*/
#define KERNEL_REL	bin2bcd(((LINUX_VERSION_CODE >> 16) & 0x0ff))
#define KERNEL_VER	bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff))
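
/*
 * Example (illustrative): on a 5.10.z kernel, LINUX_VERSION_CODE encodes
 * major 5 and minor 10, so KERNEL_REL = bin2bcd(5) = 0x05 and
 * KERNEL_VER = bin2bcd(10) = 0x10; stored little-endian below as
 * "KERNEL_VER, KERNEL_REL", the root hubs report bcdDevice 0x0510.
 */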

/* usb 3.1 root hub device descriptor */
static const u8 usb31_rh_dev_descriptor[18] = {
	0x12, /* __u8 bLength; */
	USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
	0x10, 0x03, /* __le16 bcdUSB; v3.1 */

	0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
	0x00, /* __u8 bDeviceSubClass; */
	0x03, /* __u8 bDeviceProtocol; USB 3 hub */
	0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */

	0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
	0x03, 0x00, /* __le16 idProduct; device 0x0003 */
	KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */

	0x03, /* __u8 iManufacturer; */
	0x02, /* __u8 iProduct; */
	0x01, /* __u8 iSerialNumber; */
	0x01  /* __u8 bNumConfigurations; */
};

/* usb 3.0 root hub device descriptor */
static const u8 usb3_rh_dev_descriptor[18] = {
	0x12, /* __u8 bLength; */
	USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
	0x00, 0x03, /* __le16 bcdUSB; v3.0 */

	0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
	0x00, /* __u8 bDeviceSubClass; */
	0x03, /* __u8 bDeviceProtocol; USB 3.0 hub */
	0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */

	0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
	0x03, 0x00, /* __le16 idProduct; device 0x0003 */
	KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */

	0x03, /* __u8 iManufacturer; */
	0x02, /* __u8 iProduct; */
	0x01, /* __u8 iSerialNumber; */
	0x01  /* __u8 bNumConfigurations; */
};

/* usb 2.5 (wireless USB 1.0) root hub device descriptor */
static const u8 usb25_rh_dev_descriptor[18] = {
	0x12, /* __u8 bLength; */
	USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
	0x50, 0x02, /* __le16 bcdUSB; v2.5 */

	0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
	0x00, /* __u8 bDeviceSubClass; */
	0x00, /* __u8 bDeviceProtocol; [ usb 2.0 no TT ] */
	0xFF, /* __u8 bMaxPacketSize0; always 0xFF (WUSB Spec 7.4.1). */

	0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
	0x02, 0x00, /* __le16 idProduct; device 0x0002 */
	KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */

	0x03, /* __u8 iManufacturer; */
	0x02, /* __u8 iProduct; */
	0x01, /* __u8 iSerialNumber; */
	0x01  /* __u8 bNumConfigurations; */
};

/* usb 2.0 root hub device descriptor */
static const u8 usb2_rh_dev_descriptor[18] = {
	0x12, /* __u8 bLength; */
	USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
	0x00, 0x02, /* __le16 bcdUSB; v2.0 */

	0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
	0x00, /* __u8 bDeviceSubClass; */
	0x00, /* __u8 bDeviceProtocol; [ usb 2.0 no TT ] */
	0x40, /* __u8 bMaxPacketSize0; 64 Bytes */

	0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
	0x02, 0x00, /* __le16 idProduct; device 0x0002 */
	KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */

	0x03, /* __u8 iManufacturer; */
	0x02, /* __u8 iProduct; */
	0x01, /* __u8 iSerialNumber; */
	0x01  /* __u8 bNumConfigurations; */
};

/* no usb 2.0 root hub "device qualifier" descriptor: one speed only */

/* usb 1.1 root hub device descriptor */
static const u8 usb11_rh_dev_descriptor[18] = {
	0x12, /* __u8 bLength; */
	USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
	0x10, 0x01, /* __le16 bcdUSB; v1.1 */

	0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
	0x00, /* __u8 bDeviceSubClass; */
	0x00, /* __u8 bDeviceProtocol; [ low/full speeds only ] */
	0x40, /* __u8 bMaxPacketSize0; 64 Bytes */

	0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
	0x01, 0x00, /* __le16 idProduct; device 0x0001 */
	KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */

	0x03, /* __u8 iManufacturer; */
	0x02, /* __u8 iProduct; */
	0x01, /* __u8 iSerialNumber; */
	0x01  /* __u8 bNumConfigurations; */
};


/*-------------------------------------------------------------------------*/

/* Configuration descriptors for our root hubs */

static const u8 fs_rh_config_descriptor[] = {

	/* one configuration */
	0x09, /* __u8 bLength; */
	USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
	0x19, 0x00, /* __le16 wTotalLength; */
	0x01, /* __u8 bNumInterfaces; (1) */
	0x01, /* __u8 bConfigurationValue; */
	0x00, /* __u8 iConfiguration; */
	0xc0, /* __u8 bmAttributes;
		 Bit 7: must be set,
		     6: Self-powered,
		     5: Remote wakeup,
		  4..0: resvd */
	0x00, /* __u8 MaxPower; */

	/* USB 1.1:
	 * USB 2.0, single TT organization (mandatory):
	 *	one interface, protocol 0
	 *
	 * USB 2.0, multiple TT organization (optional):
	 *	two interfaces, protocols 1 (like single TT)
	 *	and 2 (multiple TT mode) ... config is
	 *	sometimes settable
	 *	NOT IMPLEMENTED
	 */

	/* one interface */
	0x09, /* __u8 if_bLength; */
	USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
	0x00, /* __u8 if_bInterfaceNumber; */
	0x00, /* __u8 if_bAlternateSetting; */
	0x01, /* __u8 if_bNumEndpoints; */
	0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
	0x00, /* __u8 if_bInterfaceSubClass; */
	0x00, /* __u8 if_bInterfaceProtocol; [usb1.1 or single tt] */
	0x00, /* __u8 if_iInterface; */

	/* one endpoint (status change endpoint) */
	0x07, /* __u8 ep_bLength; */
	USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
	0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
	0x03, /* __u8 ep_bmAttributes; Interrupt */
	0x02, 0x00, /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) */
	0xff  /* __u8 ep_bInterval; (255ms -- usb 2.0 spec) */
};

static const u8 hs_rh_config_descriptor[] = {

	/* one configuration */
	0x09, /* __u8 bLength; */
	USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
	0x19, 0x00, /* __le16 wTotalLength; */
	0x01, /* __u8 bNumInterfaces; (1) */
	0x01, /* __u8 bConfigurationValue; */
	0x00, /* __u8 iConfiguration; */
	0xc0, /* __u8 bmAttributes;
		 Bit 7: must be set,
		     6: Self-powered,
		     5: Remote wakeup,
		  4..0: resvd */
	0x00, /* __u8 MaxPower; */

	/* USB 1.1:
	 * USB 2.0, single TT organization (mandatory):
	 *	one interface, protocol 0
	 *
	 * USB 2.0, multiple TT organization (optional):
	 *	two interfaces, protocols 1 (like single TT)
	 *	and 2 (multiple TT mode) ... config is
	 *	sometimes settable
	 *	NOT IMPLEMENTED
	 */

	/* one interface */
	0x09, /* __u8 if_bLength; */
	USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
	0x00, /* __u8 if_bInterfaceNumber; */
	0x00, /* __u8 if_bAlternateSetting; */
	0x01, /* __u8 if_bNumEndpoints; */
	0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
	0x00, /* __u8 if_bInterfaceSubClass; */
	0x00, /* __u8 if_bInterfaceProtocol; [usb1.1 or single tt] */
	0x00, /* __u8 if_iInterface; */

	/* one endpoint (status change endpoint) */
	0x07, /* __u8 ep_bLength; */
	USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
	0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
	0x03, /* __u8 ep_bmAttributes; Interrupt */
	/* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
	 * see hub.c:hub_configure() for details. */
	(USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
	0x0c  /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */
};

static const u8 ss_rh_config_descriptor[] = {
	/* one configuration */
	0x09, /* __u8 bLength; */
	USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
	0x1f, 0x00, /* __le16 wTotalLength; */
	0x01, /* __u8 bNumInterfaces; (1) */
	0x01, /* __u8 bConfigurationValue; */
	0x00, /* __u8 iConfiguration; */
	0xc0, /* __u8 bmAttributes;
		 Bit 7: must be set,
		     6: Self-powered,
		     5: Remote wakeup,
		  4..0: resvd */
	0x00, /* __u8 MaxPower; */

	/* one interface */
	0x09, /* __u8 if_bLength; */
	USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
	0x00, /* __u8 if_bInterfaceNumber; */
	0x00, /* __u8 if_bAlternateSetting; */
	0x01, /* __u8 if_bNumEndpoints; */
	0x09, /* __u8 if_bInterfaceClass; HUB_CLASSCODE */
	0x00, /* __u8 if_bInterfaceSubClass; */
	0x00, /* __u8 if_bInterfaceProtocol; */
	0x00, /* __u8 if_iInterface; */

	/* one endpoint (status change endpoint) */
	0x07, /* __u8 ep_bLength; */
	USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
	0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
	0x03, /* __u8 ep_bmAttributes; Interrupt */
	/* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
	 * see hub.c:hub_configure() for details. */
	(USB_MAXCHILDREN + 1 + 7) / 8, 0x00,
	0x0c, /* __u8 ep_bInterval; (256ms -- usb 2.0 spec) */

	/* one SuperSpeed endpoint companion descriptor */
	0x06, /* __u8 ss_bLength */
	USB_DT_SS_ENDPOINT_COMP, /* __u8 ss_bDescriptorType; SuperSpeed EP */
				 /* Companion */
	0x00, /* __u8 ss_bMaxBurst; allows 1 TX between ACKs */
	0x00, /* __u8 ss_bmAttributes; 1 packet per service interval */
	0x02, 0x00 /* __le16 ss_wBytesPerInterval; 15 bits for max 15 ports */
};
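
/*
 * Note on the wTotalLength values above: the full- and high-speed
 * configurations are 9 (config) + 9 (interface) + 7 (endpoint) = 25 = 0x19
 * bytes; the SuperSpeed configuration adds a 6-byte endpoint companion
 * descriptor, giving 31 = 0x1f bytes.
 */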

/* authorized_default behaviour:
 * -1 is authorized for all devices except wireless (old behaviour)
 * 0 is unauthorized for all devices
 * 1 is authorized for all devices
 * 2 is authorized for internal devices
 */
#define USB_AUTHORIZE_WIRED	-1
#define USB_AUTHORIZE_NONE	0
#define USB_AUTHORIZE_ALL	1
#define USB_AUTHORIZE_INTERNAL	2

static int authorized_default = USB_AUTHORIZE_WIRED;
module_param(authorized_default, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(authorized_default,
		"Default USB device authorization: 0 is not authorized, 1 is "
		"authorized, 2 is authorized for internal devices, -1 is "
		"authorized except for wireless USB (default, old behaviour)");
/*-------------------------------------------------------------------------*/

/**
 * ascii2desc() - Helper routine for producing UTF-16LE string descriptors
 * @s: Null-terminated ASCII (actually ISO-8859-1) string
 * @buf: Buffer for USB string descriptor (header + UTF-16LE)
 * @len: Length (in bytes; may be odd) of descriptor buffer.
 *
 * Return: The number of bytes filled in: 2 + 2*strlen(s) or @len,
 * whichever is less.
 *
 * Note:
 * USB String descriptors can contain at most 126 characters; input
 * strings longer than that are truncated.
 */
static unsigned
ascii2desc(char const *s, u8 *buf, unsigned len)
{
	unsigned n, t = 2 + 2*strlen(s);

	if (t > 254)
		t = 254; /* Longest possible UTF string descriptor */
	if (len > t)
		len = t;

	t += USB_DT_STRING << 8; /* Now t is first 16 bits to store */

	n = len;
	while (n--) {
		*buf++ = t;
		if (!n--)
			break;
		*buf++ = t >> 8;
		t = (unsigned char)*s++;
	}
	return len;
}
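
/*
 * Worked example (illustrative): ascii2desc("ABC", buf, 64) computes a
 * descriptor length of 2 + 2*3 = 8 and fills buf with
 * 0x08 0x03 'A' 0x00 'B' 0x00 'C' 0x00 -- bLength = 8, bDescriptorType =
 * USB_DT_STRING, then "ABC" encoded as UTF-16LE -- and returns 8.
 */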

/**
 * rh_string() - provides string descriptors for root hub
 * @id: the string ID number (0: langids, 1: serial #, 2: product, 3: vendor)
 * @hcd: the host controller for this root hub
 * @data: buffer for output packet
 * @len: length of the provided buffer
 *
 * Produces either a manufacturer, product or serial number string for the
 * virtual root hub device.
 *
 * Return: The number of bytes filled in: the length of the descriptor or
 * of the provided buffer, whichever is less.
 */
static unsigned
rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
{
	char buf[100];
	char const *s;
	static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};

	/* language ids */
	switch (id) {
	case 0:
		/* Array of LANGID codes (0x0409 is MSFT-speak for "en-us") */
		/* See http://www.usb.org/developers/docs/USB_LANGIDs.pdf */
		if (len > 4)
			len = 4;
		memcpy(data, langids, len);
		return len;
	case 1:
		/* Serial number */
		s = hcd->self.bus_name;
		break;
	case 2:
		/* Product name */
		s = hcd->product_desc;
		break;
	case 3:
		/* Manufacturer */
		snprintf (buf, sizeof buf, "%s %s %s", init_utsname()->sysname,
			init_utsname()->release, hcd->driver->description);
		s = buf;
		break;
	default:
		/* Can't happen; caller guarantees it */
		return 0;
	}

	return ascii2desc(s, data, len);
}


/* Root hub control transfers execute synchronously */
static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
{
	struct usb_ctrlrequest *cmd;
	u16 typeReq, wValue, wIndex, wLength;
	u8 *ubuf = urb->transfer_buffer;
	unsigned len = 0;
	int status;
	u8 patch_wakeup = 0;
	u8 patch_protocol = 0;
	u16 tbuf_size;
	u8 *tbuf = NULL;
	const u8 *bufp;

	might_sleep();

	spin_lock_irq(&hcd_root_hub_lock);
	status = usb_hcd_link_urb_to_ep(hcd, urb);
	spin_unlock_irq(&hcd_root_hub_lock);
	if (status)
		return status;
	urb->hcpriv = hcd;	/* Indicate it's queued */

	cmd = (struct usb_ctrlrequest *) urb->setup_packet;
	typeReq = (cmd->bRequestType << 8) | cmd->bRequest;
	wValue = le16_to_cpu (cmd->wValue);
	wIndex = le16_to_cpu (cmd->wIndex);
	wLength = le16_to_cpu (cmd->wLength);

	if (wLength > urb->transfer_buffer_length)
		goto error;

	/*
	 * tbuf should be at least as big as the
	 * USB hub descriptor.
	 */
	tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
	tbuf = kzalloc(tbuf_size, GFP_KERNEL);
	if (!tbuf) {
		status = -ENOMEM;
		goto err_alloc;
	}

	bufp = tbuf;


	urb->actual_length = 0;
	switch (typeReq) {

	/* DEVICE REQUESTS */

	/* The root hub's remote wakeup enable bit is implemented using
	 * driver model wakeup flags. If this system supports wakeup
	 * through USB, userspace may change the default "allow wakeup"
	 * policy through sysfs or these calls.
	 *
	 * Most root hubs support wakeup from downstream devices, for
	 * runtime power management (disabling USB clocks and reducing
	 * VBUS power usage). However, not all of them do so; silicon,
	 * board, and BIOS bugs here are not uncommon, so these can't
	 * be treated quite like external hubs.
	 *
	 * Likewise, not all root hubs will pass wakeup events upstream,
	 * to wake up the whole system. So don't assume root hub and
	 * controller capabilities are identical.
	 */

	case DeviceRequest | USB_REQ_GET_STATUS:
		tbuf[0] = (device_may_wakeup(&hcd->self.root_hub->dev)
					<< USB_DEVICE_REMOTE_WAKEUP)
				| (1 << USB_DEVICE_SELF_POWERED);
		tbuf[1] = 0;
		len = 2;
		break;
	case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
		if (wValue == USB_DEVICE_REMOTE_WAKEUP)
			device_set_wakeup_enable(&hcd->self.root_hub->dev, 0);
		else
			goto error;
		break;
	case DeviceOutRequest | USB_REQ_SET_FEATURE:
		if (device_can_wakeup(&hcd->self.root_hub->dev)
				&& wValue == USB_DEVICE_REMOTE_WAKEUP)
			device_set_wakeup_enable(&hcd->self.root_hub->dev, 1);
		else
			goto error;
		break;
	case DeviceRequest | USB_REQ_GET_CONFIGURATION:
		tbuf[0] = 1;
		len = 1;
		fallthrough;
	case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
		break;
	case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
		switch (wValue & 0xff00) {
		case USB_DT_DEVICE << 8:
			switch (hcd->speed) {
			case HCD_USB32:
			case HCD_USB31:
				bufp = usb31_rh_dev_descriptor;
				break;
			case HCD_USB3:
				bufp = usb3_rh_dev_descriptor;
				break;
			case HCD_USB25:
				bufp = usb25_rh_dev_descriptor;
				break;
			case HCD_USB2:
				bufp = usb2_rh_dev_descriptor;
				break;
			case HCD_USB11:
				bufp = usb11_rh_dev_descriptor;
				break;
			default:
				goto error;
			}
			len = 18;
			if (hcd->has_tt)
				patch_protocol = 1;
			break;
		case USB_DT_CONFIG << 8:
			switch (hcd->speed) {
			case HCD_USB32:
			case HCD_USB31:
			case HCD_USB3:
				bufp = ss_rh_config_descriptor;
				len = sizeof ss_rh_config_descriptor;
				break;
			case HCD_USB25:
			case HCD_USB2:
				bufp = hs_rh_config_descriptor;
				len = sizeof hs_rh_config_descriptor;
				break;
			case HCD_USB11:
				bufp = fs_rh_config_descriptor;
				len = sizeof fs_rh_config_descriptor;
				break;
			default:
				goto error;
			}
			if (device_can_wakeup(&hcd->self.root_hub->dev))
				patch_wakeup = 1;
			break;
		case USB_DT_STRING << 8:
			if ((wValue & 0xff) < 4)
				urb->actual_length = rh_string(wValue & 0xff,
						hcd, ubuf, wLength);
			else /* unsupported IDs --> "protocol stall" */
				goto error;
			break;
		case USB_DT_BOS << 8:
			goto nongeneric;
		default:
			goto error;
		}
		break;
	case DeviceRequest | USB_REQ_GET_INTERFACE:
		tbuf[0] = 0;
		len = 1;
		fallthrough;
	case DeviceOutRequest | USB_REQ_SET_INTERFACE:
		break;
	case DeviceOutRequest | USB_REQ_SET_ADDRESS:
		/* wValue == urb->dev->devaddr */
		dev_dbg (hcd->self.controller, "root hub device address %d\n",
			wValue);
		break;

	/* INTERFACE REQUESTS (no defined feature/status flags) */

	/* ENDPOINT REQUESTS */

	case EndpointRequest | USB_REQ_GET_STATUS:
		/* ENDPOINT_HALT flag */
		tbuf[0] = 0;
		tbuf[1] = 0;
		len = 2;
		fallthrough;
	case EndpointOutRequest | USB_REQ_CLEAR_FEATURE:
	case EndpointOutRequest | USB_REQ_SET_FEATURE:
		dev_dbg (hcd->self.controller, "no endpoint features yet\n");
		break;

	/* CLASS REQUESTS (and errors) */

	default:
nongeneric:
		/* non-generic request */
		switch (typeReq) {
		case GetHubStatus:
			len = 4;
			break;
		case GetPortStatus:
			if (wValue == HUB_PORT_STATUS)
				len = 4;
			else
				/* other port status types return 8 bytes */
				len = 8;
			break;
		case GetHubDescriptor:
			len = sizeof (struct usb_hub_descriptor);
			break;
		case DeviceRequest | USB_REQ_GET_DESCRIPTOR:
			/* len is returned by hub_control */
			break;
		}
		status = hcd->driver->hub_control (hcd,
			typeReq, wValue, wIndex,
			tbuf, wLength);

		if (typeReq == GetHubDescriptor)
			usb_hub_adjust_deviceremovable(hcd->self.root_hub,
				(struct usb_hub_descriptor *)tbuf);
		break;
error:
		/* "protocol stall" on error */
		status = -EPIPE;
	}

	if (status < 0) {
		len = 0;
		if (status != -EPIPE) {
			dev_dbg (hcd->self.controller,
				"CTRL: TypeReq=0x%x val=0x%x "
				"idx=0x%x len=%d ==> %d\n",
				typeReq, wValue, wIndex,
				wLength, status);
		}
	} else if (status > 0) {
		/* hub_control may return the length of data copied. */
		len = status;
		status = 0;
	}
	if (len) {
		if (urb->transfer_buffer_length < len)
			len = urb->transfer_buffer_length;
		urb->actual_length = len;
		/* always USB_DIR_IN, toward host */
		memcpy (ubuf, bufp, len);

		/* report whether RH hardware supports remote wakeup */
		if (patch_wakeup &&
				len > offsetof (struct usb_config_descriptor,
						bmAttributes))
			((struct usb_config_descriptor *)ubuf)->bmAttributes
				|= USB_CONFIG_ATT_WAKEUP;

		/* report whether RH hardware has an integrated TT */
		if (patch_protocol &&
				len > offsetof(struct usb_device_descriptor,
						bDeviceProtocol))
			((struct usb_device_descriptor *) ubuf)->
				bDeviceProtocol = USB_HUB_PR_HS_SINGLE_TT;
	}

	kfree(tbuf);
 err_alloc:

	/* any errors get returned through the urb completion */
	spin_lock_irq(&hcd_root_hub_lock);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	usb_hcd_giveback_urb(hcd, urb, status);
	spin_unlock_irq(&hcd_root_hub_lock);
	return 0;
}

/*-------------------------------------------------------------------------*/

/*
 * Root Hub interrupt transfers are polled using a timer if the
 * driver requests it; otherwise the driver is responsible for
 * calling usb_hcd_poll_rh_status() when an event occurs.
 *
 * Completions are called in_interrupt(), but they may or may not
 * be in_irq().
 */
void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
{
	struct urb *urb;
	int length;
	int status;
	unsigned long flags;
	char buffer[6];	/* Any root hubs with > 31 ports? */

	if (unlikely(!hcd->rh_pollable))
		return;
	if (!hcd->uses_new_polling && !hcd->status_urb)
		return;

	length = hcd->driver->hub_status_data(hcd, buffer);
	if (length > 0) {

		/* try to complete the status urb */
		spin_lock_irqsave(&hcd_root_hub_lock, flags);
		urb = hcd->status_urb;
		if (urb) {
			clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
			hcd->status_urb = NULL;
			if (urb->transfer_buffer_length >= length) {
				status = 0;
			} else {
				status = -EOVERFLOW;
				length = urb->transfer_buffer_length;
			}
			urb->actual_length = length;
			memcpy(urb->transfer_buffer, buffer, length);

			usb_hcd_unlink_urb_from_ep(hcd, urb);
			usb_hcd_giveback_urb(hcd, urb, status);
		} else {
			length = 0;
			set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
		}
		spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
	}

	/* The USB 2.0 spec says 256 ms.  This is close enough and won't
	 * exceed that limit if HZ is 100.  The math is clunkier than one
	 * might expect because it rounds jiffies up to the next HZ/4
	 * boundary, so that the timers for all USB devices fire at the
	 * same time and give the CPU a break in between. */
	if (hcd->uses_new_polling ? HCD_POLL_RH(hcd) :
			(length == 0 && hcd->status_urb != NULL))
		mod_timer (&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
}
EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
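
/*
 * Usage sketch (illustrative, not taken from any particular HCD): a driver
 * that sets hcd->uses_new_polling typically kicks this routine from its
 * interrupt handler when it notices a port-change event, instead of
 * relying on the periodic timer:
 *
 *	static irqreturn_t foo_irq(struct usb_hcd *hcd)
 *	{
 *		...
 *		if (port_change_pending)
 *			usb_hcd_poll_rh_status(hcd);
 *		return IRQ_HANDLED;
 *	}
 */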

/* timer callback */
static void rh_timer_func (struct timer_list *t)
{
	struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);

	usb_hcd_poll_rh_status(_hcd);
}

/*-------------------------------------------------------------------------*/

static int rh_queue_status (struct usb_hcd *hcd, struct urb *urb)
{
	int retval;
	unsigned long flags;
	unsigned len = 1 + (urb->dev->maxchild / 8);

	spin_lock_irqsave (&hcd_root_hub_lock, flags);
	if (hcd->status_urb || urb->transfer_buffer_length < len) {
		dev_dbg (hcd->self.controller, "not queuing rh status urb\n");
		retval = -EINVAL;
		goto done;
	}

	retval = usb_hcd_link_urb_to_ep(hcd, urb);
	if (retval)
		goto done;

	hcd->status_urb = urb;
	urb->hcpriv = hcd;	/* indicate it's queued */
	if (!hcd->uses_new_polling)
		mod_timer(&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));

	/* If a status change has already occurred, report it ASAP */
	else if (HCD_POLL_PENDING(hcd))
		mod_timer(&hcd->rh_timer, jiffies);
	retval = 0;
 done:
	spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
	return retval;
}

static int rh_urb_enqueue (struct usb_hcd *hcd, struct urb *urb)
{
	if (usb_endpoint_xfer_int(&urb->ep->desc))
		return rh_queue_status (hcd, urb);
	if (usb_endpoint_xfer_control(&urb->ep->desc))
		return rh_call_control (hcd, urb);
	return -EINVAL;
}

/*-------------------------------------------------------------------------*/

/* Unlinks of root-hub control URBs are legal, but they don't do anything
 * since these URBs always execute synchronously.
 */
static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&hcd_root_hub_lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	if (usb_endpoint_num(&urb->ep->desc) == 0) {	/* Control URB */
		;	/* Do nothing */

	} else {				/* Status URB */
		if (!hcd->uses_new_polling)
			del_timer (&hcd->rh_timer);
		if (urb == hcd->status_urb) {
			hcd->status_urb = NULL;
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			usb_hcd_giveback_urb(hcd, urb, status);
		}
	}
 done:
	spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
	return rc;
}


/*-------------------------------------------------------------------------*/

/**
 * usb_bus_init - shared initialization code
 * @bus: the bus structure being initialized
 *
 * This code is used to initialize a usb_bus structure, memory for which is
 * separately managed.
 */
static void usb_bus_init (struct usb_bus *bus)
{
	memset (&bus->devmap, 0, sizeof(struct usb_devmap));

	bus->devnum_next = 1;

	bus->root_hub = NULL;
	bus->busnum = -1;
	bus->bandwidth_allocated = 0;
	bus->bandwidth_int_reqs = 0;
	bus->bandwidth_isoc_reqs = 0;
	mutex_init(&bus->devnum_next_mutex);
}

/*-------------------------------------------------------------------------*/

/**
 * usb_register_bus - registers the USB host controller with the usb core
 * @bus: pointer to the bus to register
 * Context: !in_interrupt()
 *
 * Assigns a bus number, and links the controller into usbcore data
 * structures so that it can be seen by scanning the bus list.
 *
 * Return: 0 if successful. A negative error code otherwise.
 */
static int usb_register_bus(struct usb_bus *bus)
{
	int result = -E2BIG;
	int busnum;

	mutex_lock(&usb_bus_idr_lock);
	busnum = idr_alloc(&usb_bus_idr, bus, 1, USB_MAXBUS, GFP_KERNEL);
	if (busnum < 0) {
		pr_err("%s: failed to get bus number\n", usbcore_name);
		goto error_find_busnum;
	}
	bus->busnum = busnum;
	mutex_unlock(&usb_bus_idr_lock);

	usb_notify_add_bus(bus);

	dev_info (bus->controller, "new USB bus registered, assigned bus "
			"number %d\n", bus->busnum);
	return 0;

error_find_busnum:
	mutex_unlock(&usb_bus_idr_lock);
	return result;
}

/**
 * usb_deregister_bus - deregisters the USB host controller
 * @bus: pointer to the bus to deregister
 * Context: !in_interrupt()
 *
 * Recycles the bus number, and unlinks the controller from usbcore data
 * structures so that it won't be seen by scanning the bus list.
 */
static void usb_deregister_bus (struct usb_bus *bus)
{
	dev_info (bus->controller, "USB bus %d deregistered\n", bus->busnum);

	/*
	 * NOTE: make sure that all the devices are removed by the
	 * controller code, as well as having it call this when cleaning
	 * itself up
	 */
	mutex_lock(&usb_bus_idr_lock);
	idr_remove(&usb_bus_idr, bus->busnum);
	mutex_unlock(&usb_bus_idr_lock);

	usb_notify_remove_bus(bus);
}

/**
 * register_root_hub - called by usb_add_hcd() to register a root hub
 * @hcd: host controller for this root hub
 *
 * This function registers the root hub with the USB subsystem. It sets up
 * the device properly in the device tree and then calls usb_new_device()
 * to register the usb device. It also assigns the root hub's USB address
 * (always 1).
 *
 * Return: 0 if successful. A negative error code otherwise.
 */
static int register_root_hub(struct usb_hcd *hcd)
{
	struct device *parent_dev = hcd->self.controller;
	struct usb_device *usb_dev = hcd->self.root_hub;
	const int devnum = 1;
	int retval;

	usb_dev->devnum = devnum;
	usb_dev->bus->devnum_next = devnum + 1;
	set_bit (devnum, usb_dev->bus->devmap.devicemap);
	usb_set_device_state(usb_dev, USB_STATE_ADDRESS);

	mutex_lock(&usb_bus_idr_lock);

	usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
	retval = usb_get_device_descriptor(usb_dev, USB_DT_DEVICE_SIZE);
	if (retval != sizeof usb_dev->descriptor) {
		mutex_unlock(&usb_bus_idr_lock);
		dev_dbg (parent_dev, "can't read %s device descriptor %d\n",
				dev_name(&usb_dev->dev), retval);
		return (retval < 0) ? retval : -EMSGSIZE;
	}

	if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
		retval = usb_get_bos_descriptor(usb_dev);
		if (!retval) {
			usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
		} else if (usb_dev->speed >= USB_SPEED_SUPER) {
			mutex_unlock(&usb_bus_idr_lock);
			dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
					dev_name(&usb_dev->dev), retval);
			return retval;
		}
	}

	retval = usb_new_device (usb_dev);
	if (retval) {
		dev_err (parent_dev, "can't register root hub for %s, %d\n",
				dev_name(&usb_dev->dev), retval);
	} else {
		spin_lock_irq (&hcd_root_hub_lock);
		hcd->rh_registered = 1;
		spin_unlock_irq (&hcd_root_hub_lock);

		/* Did the HC die before the root hub was registered? */
		if (HCD_DEAD(hcd))
			usb_hc_died (hcd);	/* This time clean up */
	}
	mutex_unlock(&usb_bus_idr_lock);

	return retval;
}

/*
 * usb_hcd_start_port_resume - a root-hub port is sending a resume signal
 * @bus: the bus which the root hub belongs to
 * @portnum: the port which is being resumed
 *
 * HCDs should call this function when they know that a resume signal is
 * being sent to a root-hub port. The root hub will be prevented from
 * going into autosuspend until usb_hcd_end_port_resume() is called.
 *
 * The bus's private lock must be held by the caller.
 */
void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum)
{
	unsigned bit = 1 << portnum;

	if (!(bus->resuming_ports & bit)) {
		bus->resuming_ports |= bit;
		pm_runtime_get_noresume(&bus->root_hub->dev);
	}
}
EXPORT_SYMBOL_GPL(usb_hcd_start_port_resume);

/*
 * usb_hcd_end_port_resume - a root-hub port has stopped sending a resume signal
 * @bus: the bus which the root hub belongs to
 * @portnum: the port which is being resumed
 *
 * HCDs should call this function when they know that a resume signal has
 * stopped being sent to a root-hub port. The root hub will be allowed to
 * autosuspend again.
 *
 * The bus's private lock must be held by the caller.
 */
void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum)
{
	unsigned bit = 1 << portnum;

	if (bus->resuming_ports & bit) {
		bus->resuming_ports &= ~bit;
		pm_runtime_put_noidle(&bus->root_hub->dev);
	}
}
EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);
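
/*
 * Illustrative pairing (hypothetical HCD code, not taken from a real
 * driver): each call is made with the bus's private lock held while the
 * port is driving resume signaling:
 *
 *	usb_hcd_start_port_resume(&hcd->self, portnum);
 *	... drive and complete resume signaling on the port ...
 *	usb_hcd_end_port_resume(&hcd->self, portnum);
 */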
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun /**
1081*4882a593Smuzhiyun * usb_calc_bus_time - approximate periodic transaction time in nanoseconds
1082*4882a593Smuzhiyun * @speed: from dev->speed; USB_SPEED_{LOW,FULL,HIGH}
1083*4882a593Smuzhiyun * @is_input: true iff the transaction sends data to the host
1084*4882a593Smuzhiyun * @isoc: true for isochronous transactions, false for interrupt ones
1085*4882a593Smuzhiyun * @bytecount: how many bytes in the transaction.
1086*4882a593Smuzhiyun *
1087*4882a593Smuzhiyun * Return: Approximate bus time in nanoseconds for a periodic transaction.
1088*4882a593Smuzhiyun *
1089*4882a593Smuzhiyun * Note:
1090*4882a593Smuzhiyun * See USB 2.0 spec section 5.11.3; only periodic transfers need to be
1091*4882a593Smuzhiyun * scheduled in software, this function is only used for such scheduling.
1092*4882a593Smuzhiyun */
usb_calc_bus_time(int speed,int is_input,int isoc,int bytecount)1093*4882a593Smuzhiyun long usb_calc_bus_time (int speed, int is_input, int isoc, int bytecount)
1094*4882a593Smuzhiyun {
1095*4882a593Smuzhiyun unsigned long tmp;
1096*4882a593Smuzhiyun
1097*4882a593Smuzhiyun switch (speed) {
1098*4882a593Smuzhiyun case USB_SPEED_LOW: /* INTR only */
1099*4882a593Smuzhiyun if (is_input) {
1100*4882a593Smuzhiyun tmp = (67667L * (31L + 10L * BitTime (bytecount))) / 1000L;
1101*4882a593Smuzhiyun return 64060L + (2 * BW_HUB_LS_SETUP) + BW_HOST_DELAY + tmp;
1102*4882a593Smuzhiyun } else {
1103*4882a593Smuzhiyun tmp = (66700L * (31L + 10L * BitTime (bytecount))) / 1000L;
1104*4882a593Smuzhiyun return 64107L + (2 * BW_HUB_LS_SETUP) + BW_HOST_DELAY + tmp;
1105*4882a593Smuzhiyun }
1106*4882a593Smuzhiyun case USB_SPEED_FULL: /* ISOC or INTR */
1107*4882a593Smuzhiyun if (isoc) {
1108*4882a593Smuzhiyun tmp = (8354L * (31L + 10L * BitTime (bytecount))) / 1000L;
1109*4882a593Smuzhiyun return ((is_input) ? 7268L : 6265L) + BW_HOST_DELAY + tmp;
1110*4882a593Smuzhiyun } else {
1111*4882a593Smuzhiyun tmp = (8354L * (31L + 10L * BitTime (bytecount))) / 1000L;
1112*4882a593Smuzhiyun return 9107L + BW_HOST_DELAY + tmp;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun case USB_SPEED_HIGH: /* ISOC or INTR */
1115*4882a593Smuzhiyun /* FIXME adjust for input vs output */
1116*4882a593Smuzhiyun if (isoc)
1117*4882a593Smuzhiyun tmp = HS_NSECS_ISO (bytecount);
1118*4882a593Smuzhiyun else
1119*4882a593Smuzhiyun tmp = HS_NSECS (bytecount);
1120*4882a593Smuzhiyun return tmp;
1121*4882a593Smuzhiyun default:
1122*4882a593Smuzhiyun pr_debug ("%s: bogus device speed!\n", usbcore_name);
1123*4882a593Smuzhiyun return -1;
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun }
1126*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_calc_bus_time);
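
/*
 * A minimal sketch of how a periodic scheduler might use this helper when
 * budgeting an interrupt or isochronous endpoint.  The foo_* names and the
 * FOO_PERIODIC_BUDGET_NS limit are hypothetical; a real HCD also has to
 * account for split transactions and per-(micro)frame packing:
 *
 *	static int foo_claim_periodic(struct usb_device *udev,
 *				      struct usb_host_endpoint *ep)
 *	{
 *		long ns = usb_calc_bus_time(udev->speed,
 *				usb_endpoint_dir_in(&ep->desc),
 *				usb_endpoint_xfer_isoc(&ep->desc),
 *				usb_endpoint_maxp(&ep->desc));
 *
 *		if (ns < 0)
 *			return -EINVAL;
 *		return ns <= FOO_PERIODIC_BUDGET_NS ? 0 : -ENOSPC;
 *	}
 */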
1127*4882a593Smuzhiyun
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun /*
1132*4882a593Smuzhiyun * Generic HC operations.
1133*4882a593Smuzhiyun */
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1136*4882a593Smuzhiyun
1137*4882a593Smuzhiyun /**
1138*4882a593Smuzhiyun * usb_hcd_link_urb_to_ep - add an URB to its endpoint queue
1139*4882a593Smuzhiyun * @hcd: host controller to which @urb was submitted
1140*4882a593Smuzhiyun * @urb: URB being submitted
1141*4882a593Smuzhiyun *
1142*4882a593Smuzhiyun * Host controller drivers should call this routine in their enqueue()
1143*4882a593Smuzhiyun * method. The HCD's private spinlock must be held and interrupts must
1144*4882a593Smuzhiyun * be disabled. The actions carried out here are required for URB
1145*4882a593Smuzhiyun * submission, as well as for endpoint shutdown and for usb_kill_urb.
1146*4882a593Smuzhiyun *
1147*4882a593Smuzhiyun * Return: 0 for no error, otherwise a negative error code (in which case
1148*4882a593Smuzhiyun * the enqueue() method must fail). If no error occurs but enqueue() fails
1149*4882a593Smuzhiyun * anyway, it must call usb_hcd_unlink_urb_from_ep() before releasing
1150*4882a593Smuzhiyun * the private spinlock and returning.
1151*4882a593Smuzhiyun */
1152*4882a593Smuzhiyun int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
1153*4882a593Smuzhiyun {
1154*4882a593Smuzhiyun int rc = 0;
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun spin_lock(&hcd_urb_list_lock);
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun /* Check that the URB isn't being killed */
1159*4882a593Smuzhiyun if (unlikely(atomic_read(&urb->reject))) {
1160*4882a593Smuzhiyun rc = -EPERM;
1161*4882a593Smuzhiyun goto done;
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun if (unlikely(!urb->ep->enabled)) {
1165*4882a593Smuzhiyun rc = -ENOENT;
1166*4882a593Smuzhiyun goto done;
1167*4882a593Smuzhiyun }
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun if (unlikely(!urb->dev->can_submit)) {
1170*4882a593Smuzhiyun rc = -EHOSTUNREACH;
1171*4882a593Smuzhiyun goto done;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun /*
1175*4882a593Smuzhiyun * Check the host controller's state and add the URB to the
1176*4882a593Smuzhiyun * endpoint's queue.
1177*4882a593Smuzhiyun */
1178*4882a593Smuzhiyun if (HCD_RH_RUNNING(hcd)) {
1179*4882a593Smuzhiyun urb->unlinked = 0;
1180*4882a593Smuzhiyun list_add_tail(&urb->urb_list, &urb->ep->urb_list);
1181*4882a593Smuzhiyun } else {
1182*4882a593Smuzhiyun rc = -ESHUTDOWN;
1183*4882a593Smuzhiyun goto done;
1184*4882a593Smuzhiyun }
1185*4882a593Smuzhiyun done:
1186*4882a593Smuzhiyun spin_unlock(&hcd_urb_list_lock);
1187*4882a593Smuzhiyun return rc;
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_link_urb_to_ep);
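
/*
 * A minimal enqueue() sketch, assuming a hypothetical foo_hcd driver with its
 * own spinlock and queueing primitive.  It shows the required pairing with
 * usb_hcd_unlink_urb_from_ep() when queueing fails after the URB was linked:
 *
 *	static int foo_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 *				   gfp_t mem_flags)
 *	{
 *		struct foo_hcd *foo = hcd_to_foo(hcd);
 *		unsigned long flags;
 *		int rc;
 *
 *		spin_lock_irqsave(&foo->lock, flags);
 *		rc = usb_hcd_link_urb_to_ep(hcd, urb);
 *		if (rc == 0) {
 *			rc = foo_queue_urb(foo, urb, mem_flags);
 *			if (rc)
 *				usb_hcd_unlink_urb_from_ep(hcd, urb);
 *		}
 *		spin_unlock_irqrestore(&foo->lock, flags);
 *		return rc;
 *	}
 */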
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun /**
1192*4882a593Smuzhiyun * usb_hcd_check_unlink_urb - check whether an URB may be unlinked
1193*4882a593Smuzhiyun * @hcd: host controller to which @urb was submitted
1194*4882a593Smuzhiyun * @urb: URB being checked for unlinkability
1195*4882a593Smuzhiyun * @status: error code to store in @urb if the unlink succeeds
1196*4882a593Smuzhiyun *
1197*4882a593Smuzhiyun * Host controller drivers should call this routine in their dequeue()
1198*4882a593Smuzhiyun * method. The HCD's private spinlock must be held and interrupts must
1199*4882a593Smuzhiyun * be disabled. The actions carried out here are required for making
1200*4882a593Smuzhiyun * sure that an unlink is valid.
1201*4882a593Smuzhiyun *
1202*4882a593Smuzhiyun * Return: 0 for no error, otherwise a negative error code (in which case
1203*4882a593Smuzhiyun * the dequeue() method must fail). The possible error codes are:
1204*4882a593Smuzhiyun *
1205*4882a593Smuzhiyun * -EIDRM: @urb was not submitted or has already completed.
1206*4882a593Smuzhiyun * The completion function may not have been called yet.
1207*4882a593Smuzhiyun *
1208*4882a593Smuzhiyun * -EBUSY: @urb has already been unlinked.
1209*4882a593Smuzhiyun */
1210*4882a593Smuzhiyun int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
1211*4882a593Smuzhiyun int status)
1212*4882a593Smuzhiyun {
1213*4882a593Smuzhiyun struct list_head *tmp;
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun /* insist the urb is still queued */
1216*4882a593Smuzhiyun list_for_each(tmp, &urb->ep->urb_list) {
1217*4882a593Smuzhiyun if (tmp == &urb->urb_list)
1218*4882a593Smuzhiyun break;
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun if (tmp != &urb->urb_list)
1221*4882a593Smuzhiyun return -EIDRM;
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun /* Any status except -EINPROGRESS means something already started to
1224*4882a593Smuzhiyun * unlink this URB from the hardware. So there's no more work to do.
1225*4882a593Smuzhiyun */
1226*4882a593Smuzhiyun if (urb->unlinked)
1227*4882a593Smuzhiyun return -EBUSY;
1228*4882a593Smuzhiyun urb->unlinked = status;
1229*4882a593Smuzhiyun return 0;
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb);
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun /**
1234*4882a593Smuzhiyun * usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue
1235*4882a593Smuzhiyun * @hcd: host controller to which @urb was submitted
1236*4882a593Smuzhiyun * @urb: URB being unlinked
1237*4882a593Smuzhiyun *
1238*4882a593Smuzhiyun * Host controller drivers should call this routine before calling
1239*4882a593Smuzhiyun * usb_hcd_giveback_urb(). The HCD's private spinlock must be held and
1240*4882a593Smuzhiyun * interrupts must be disabled. The actions carried out here are required
1241*4882a593Smuzhiyun * for URB completion.
1242*4882a593Smuzhiyun */
1243*4882a593Smuzhiyun void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun /* clear all state linking urb to this dev (and hcd) */
1246*4882a593Smuzhiyun spin_lock(&hcd_urb_list_lock);
1247*4882a593Smuzhiyun list_del_init(&urb->urb_list);
1248*4882a593Smuzhiyun spin_unlock(&hcd_urb_list_lock);
1249*4882a593Smuzhiyun }
1250*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
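
/*
 * A minimal dequeue() sketch for the same hypothetical foo_hcd driver,
 * showing the usual ordering: usb_hcd_check_unlink_urb() under the private
 * lock, hardware cleanup, usb_hcd_unlink_urb_from_ep(), and finally
 * usb_hcd_giveback_urb() once the lock has been dropped:
 *
 *	static int foo_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
 *				   int status)
 *	{
 *		struct foo_hcd *foo = hcd_to_foo(hcd);
 *		unsigned long flags;
 *		int rc;
 *
 *		spin_lock_irqsave(&foo->lock, flags);
 *		rc = usb_hcd_check_unlink_urb(hcd, urb, status);
 *		if (rc == 0) {
 *			foo_remove_from_hw_queue(foo, urb);
 *			usb_hcd_unlink_urb_from_ep(hcd, urb);
 *		}
 *		spin_unlock_irqrestore(&foo->lock, flags);
 *
 *		if (rc == 0)
 *			usb_hcd_giveback_urb(hcd, urb, status);
 *		return rc;
 *	}
 */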
1251*4882a593Smuzhiyun
1252*4882a593Smuzhiyun /*
1253*4882a593Smuzhiyun * Some usb host controllers can only perform dma using a small SRAM area.
1254*4882a593Smuzhiyun * The usb core itself is however optimized for host controllers that can dma
1255*4882a593Smuzhiyun * using regular system memory - like pci devices doing bus mastering.
1256*4882a593Smuzhiyun *
1257*4882a593Smuzhiyun * To support host controllers with limited dma capabilities we provide dma
1258*4882a593Smuzhiyun * bounce buffers. This feature can be enabled by initializing
1259*4882a593Smuzhiyun * hcd->localmem_pool using usb_hcd_setup_local_mem().
1260*4882a593Smuzhiyun *
1261*4882a593Smuzhiyun * The initialized hcd->localmem_pool then tells the usb code to allocate all
1262*4882a593Smuzhiyun * data for dma using the genalloc API.
1263*4882a593Smuzhiyun *
1264*4882a593Smuzhiyun * So, to summarize...
1265*4882a593Smuzhiyun *
1266*4882a593Smuzhiyun * - We need "local" memory, the canonical example being
1267*4882a593Smuzhiyun * a small SRAM on a discrete controller that is the
1268*4882a593Smuzhiyun * only memory the controller can read ...
1269*4882a593Smuzhiyun * (a) "normal" kernel memory is no good, and
1270*4882a593Smuzhiyun * (b) there's not enough to share
1271*4882a593Smuzhiyun *
1272*4882a593Smuzhiyun * - So we use that, even though the primary requirement
1273*4882a593Smuzhiyun * is that the memory be "local" (hence addressable
1274*4882a593Smuzhiyun * by that device), not "coherent".
1275*4882a593Smuzhiyun *
1276*4882a593Smuzhiyun */
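
/*
 * A minimal sketch of enabling this bounce-buffer path from a platform
 * driver's probe routine, assuming it knows the SRAM's CPU physical address,
 * the device-visible (dma) address and the size; sram_res, foo_dma_offset
 * and the error label are hypothetical:
 *
 *	retval = usb_hcd_setup_local_mem(hcd, sram_res->start,
 *					 sram_res->start - foo_dma_offset,
 *					 resource_size(sram_res));
 *	if (retval)
 *		goto err_put_hcd;
 *
 * After this, hcd->localmem_pool is set and the hcd_alloc_coherent()/
 * hcd_free_coherent() helpers below stage transfer data through the SRAM.
 */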
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun static int hcd_alloc_coherent(struct usb_bus *bus,
1279*4882a593Smuzhiyun gfp_t mem_flags, dma_addr_t *dma_handle,
1280*4882a593Smuzhiyun void **vaddr_handle, size_t size,
1281*4882a593Smuzhiyun enum dma_data_direction dir)
1282*4882a593Smuzhiyun {
1283*4882a593Smuzhiyun unsigned char *vaddr;
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun if (*vaddr_handle == NULL) {
1286*4882a593Smuzhiyun WARN_ON_ONCE(1);
1287*4882a593Smuzhiyun return -EFAULT;
1288*4882a593Smuzhiyun }
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun vaddr = hcd_buffer_alloc(bus, size + sizeof(vaddr),
1291*4882a593Smuzhiyun mem_flags, dma_handle);
1292*4882a593Smuzhiyun if (!vaddr)
1293*4882a593Smuzhiyun return -ENOMEM;
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun /*
1296*4882a593Smuzhiyun * Store the virtual address of the buffer at the end
1297*4882a593Smuzhiyun * of the allocated dma buffer. The size of the buffer
1298*4882a593Smuzhiyun * may be uneven so use unaligned functions instead
1299*4882a593Smuzhiyun * of just rounding up. It makes sense to optimize for
1300*4882a593Smuzhiyun * memory footprint over access speed since the amount
1301*4882a593Smuzhiyun * of memory available for dma may be limited.
1302*4882a593Smuzhiyun */
1303*4882a593Smuzhiyun put_unaligned((unsigned long)*vaddr_handle,
1304*4882a593Smuzhiyun (unsigned long *)(vaddr + size));
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun if (dir == DMA_TO_DEVICE)
1307*4882a593Smuzhiyun memcpy(vaddr, *vaddr_handle, size);
1308*4882a593Smuzhiyun
1309*4882a593Smuzhiyun *vaddr_handle = vaddr;
1310*4882a593Smuzhiyun return 0;
1311*4882a593Smuzhiyun }
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
1314*4882a593Smuzhiyun void **vaddr_handle, size_t size,
1315*4882a593Smuzhiyun enum dma_data_direction dir)
1316*4882a593Smuzhiyun {
1317*4882a593Smuzhiyun unsigned char *vaddr = *vaddr_handle;
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun vaddr = (void *)get_unaligned((unsigned long *)(vaddr + size));
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun if (dir == DMA_FROM_DEVICE)
1322*4882a593Smuzhiyun memcpy(vaddr, *vaddr_handle, size);
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun hcd_buffer_free(bus, size + sizeof(vaddr), *vaddr_handle, *dma_handle);
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun *vaddr_handle = vaddr;
1327*4882a593Smuzhiyun *dma_handle = 0;
1328*4882a593Smuzhiyun }
1329*4882a593Smuzhiyun
1330*4882a593Smuzhiyun void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb)
1331*4882a593Smuzhiyun {
1332*4882a593Smuzhiyun if (IS_ENABLED(CONFIG_HAS_DMA) &&
1333*4882a593Smuzhiyun (urb->transfer_flags & URB_SETUP_MAP_SINGLE))
1334*4882a593Smuzhiyun dma_unmap_single(hcd->self.sysdev,
1335*4882a593Smuzhiyun urb->setup_dma,
1336*4882a593Smuzhiyun sizeof(struct usb_ctrlrequest),
1337*4882a593Smuzhiyun DMA_TO_DEVICE);
1338*4882a593Smuzhiyun else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
1339*4882a593Smuzhiyun hcd_free_coherent(urb->dev->bus,
1340*4882a593Smuzhiyun &urb->setup_dma,
1341*4882a593Smuzhiyun (void **) &urb->setup_packet,
1342*4882a593Smuzhiyun sizeof(struct usb_ctrlrequest),
1343*4882a593Smuzhiyun DMA_TO_DEVICE);
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun /* Make it safe to call this routine more than once */
1346*4882a593Smuzhiyun urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL);
1347*4882a593Smuzhiyun }
1348*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_setup_for_dma);
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1351*4882a593Smuzhiyun {
1352*4882a593Smuzhiyun if (hcd->driver->unmap_urb_for_dma)
1353*4882a593Smuzhiyun hcd->driver->unmap_urb_for_dma(hcd, urb);
1354*4882a593Smuzhiyun else
1355*4882a593Smuzhiyun usb_hcd_unmap_urb_for_dma(hcd, urb);
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
1359*4882a593Smuzhiyun {
1360*4882a593Smuzhiyun enum dma_data_direction dir;
1361*4882a593Smuzhiyun
1362*4882a593Smuzhiyun usb_hcd_unmap_urb_setup_for_dma(hcd, urb);
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1365*4882a593Smuzhiyun if (IS_ENABLED(CONFIG_HAS_DMA) &&
1366*4882a593Smuzhiyun (urb->transfer_flags & URB_DMA_MAP_SG))
1367*4882a593Smuzhiyun dma_unmap_sg(hcd->self.sysdev,
1368*4882a593Smuzhiyun urb->sg,
1369*4882a593Smuzhiyun urb->num_sgs,
1370*4882a593Smuzhiyun dir);
1371*4882a593Smuzhiyun else if (IS_ENABLED(CONFIG_HAS_DMA) &&
1372*4882a593Smuzhiyun (urb->transfer_flags & URB_DMA_MAP_PAGE))
1373*4882a593Smuzhiyun dma_unmap_page(hcd->self.sysdev,
1374*4882a593Smuzhiyun urb->transfer_dma,
1375*4882a593Smuzhiyun urb->transfer_buffer_length,
1376*4882a593Smuzhiyun dir);
1377*4882a593Smuzhiyun else if (IS_ENABLED(CONFIG_HAS_DMA) &&
1378*4882a593Smuzhiyun (urb->transfer_flags & URB_DMA_MAP_SINGLE))
1379*4882a593Smuzhiyun dma_unmap_single(hcd->self.sysdev,
1380*4882a593Smuzhiyun urb->transfer_dma,
1381*4882a593Smuzhiyun urb->transfer_buffer_length,
1382*4882a593Smuzhiyun dir);
1383*4882a593Smuzhiyun else if (urb->transfer_flags & URB_MAP_LOCAL)
1384*4882a593Smuzhiyun hcd_free_coherent(urb->dev->bus,
1385*4882a593Smuzhiyun &urb->transfer_dma,
1386*4882a593Smuzhiyun &urb->transfer_buffer,
1387*4882a593Smuzhiyun urb->transfer_buffer_length,
1388*4882a593Smuzhiyun dir);
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun /* Make it safe to call this routine more than once */
1391*4882a593Smuzhiyun urb->transfer_flags &= ~(URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
1392*4882a593Smuzhiyun URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_unmap_urb_for_dma);
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1397*4882a593Smuzhiyun gfp_t mem_flags)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun if (hcd->driver->map_urb_for_dma)
1400*4882a593Smuzhiyun return hcd->driver->map_urb_for_dma(hcd, urb, mem_flags);
1401*4882a593Smuzhiyun else
1402*4882a593Smuzhiyun return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
1403*4882a593Smuzhiyun }
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1406*4882a593Smuzhiyun gfp_t mem_flags)
1407*4882a593Smuzhiyun {
1408*4882a593Smuzhiyun enum dma_data_direction dir;
1409*4882a593Smuzhiyun int ret = 0;
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun /* Map the URB's buffers for DMA access.
1412*4882a593Smuzhiyun * Lower level HCD code should use *_dma exclusively,
1413*4882a593Smuzhiyun * unless it uses pio or talks to another transport,
1414*4882a593Smuzhiyun * or uses the provided scatter gather list for bulk.
1415*4882a593Smuzhiyun */
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1418*4882a593Smuzhiyun if (hcd->self.uses_pio_for_control)
1419*4882a593Smuzhiyun return ret;
1420*4882a593Smuzhiyun if (hcd->localmem_pool) {
1421*4882a593Smuzhiyun ret = hcd_alloc_coherent(
1422*4882a593Smuzhiyun urb->dev->bus, mem_flags,
1423*4882a593Smuzhiyun &urb->setup_dma,
1424*4882a593Smuzhiyun (void **)&urb->setup_packet,
1425*4882a593Smuzhiyun sizeof(struct usb_ctrlrequest),
1426*4882a593Smuzhiyun DMA_TO_DEVICE);
1427*4882a593Smuzhiyun if (ret)
1428*4882a593Smuzhiyun return ret;
1429*4882a593Smuzhiyun urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
1430*4882a593Smuzhiyun } else if (hcd_uses_dma(hcd)) {
1431*4882a593Smuzhiyun if (object_is_on_stack(urb->setup_packet)) {
1432*4882a593Smuzhiyun WARN_ONCE(1, "setup packet is on stack\n");
1433*4882a593Smuzhiyun return -EAGAIN;
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun urb->setup_dma = dma_map_single(
1437*4882a593Smuzhiyun hcd->self.sysdev,
1438*4882a593Smuzhiyun urb->setup_packet,
1439*4882a593Smuzhiyun sizeof(struct usb_ctrlrequest),
1440*4882a593Smuzhiyun DMA_TO_DEVICE);
1441*4882a593Smuzhiyun if (dma_mapping_error(hcd->self.sysdev,
1442*4882a593Smuzhiyun urb->setup_dma))
1443*4882a593Smuzhiyun return -EAGAIN;
1444*4882a593Smuzhiyun urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun }
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1449*4882a593Smuzhiyun if (urb->transfer_buffer_length != 0
1450*4882a593Smuzhiyun && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
1451*4882a593Smuzhiyun if (hcd->localmem_pool) {
1452*4882a593Smuzhiyun ret = hcd_alloc_coherent(
1453*4882a593Smuzhiyun urb->dev->bus, mem_flags,
1454*4882a593Smuzhiyun &urb->transfer_dma,
1455*4882a593Smuzhiyun &urb->transfer_buffer,
1456*4882a593Smuzhiyun urb->transfer_buffer_length,
1457*4882a593Smuzhiyun dir);
1458*4882a593Smuzhiyun if (ret == 0)
1459*4882a593Smuzhiyun urb->transfer_flags |= URB_MAP_LOCAL;
1460*4882a593Smuzhiyun } else if (hcd_uses_dma(hcd)) {
1461*4882a593Smuzhiyun if (urb->num_sgs) {
1462*4882a593Smuzhiyun int n;
1463*4882a593Smuzhiyun
1464*4882a593Smuzhiyun /* We don't support sg for isoc transfers ! */
1465*4882a593Smuzhiyun if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
1466*4882a593Smuzhiyun WARN_ON(1);
1467*4882a593Smuzhiyun return -EINVAL;
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun n = dma_map_sg(
1471*4882a593Smuzhiyun hcd->self.sysdev,
1472*4882a593Smuzhiyun urb->sg,
1473*4882a593Smuzhiyun urb->num_sgs,
1474*4882a593Smuzhiyun dir);
1475*4882a593Smuzhiyun if (n <= 0)
1476*4882a593Smuzhiyun ret = -EAGAIN;
1477*4882a593Smuzhiyun else
1478*4882a593Smuzhiyun urb->transfer_flags |= URB_DMA_MAP_SG;
1479*4882a593Smuzhiyun urb->num_mapped_sgs = n;
1480*4882a593Smuzhiyun if (n != urb->num_sgs)
1481*4882a593Smuzhiyun urb->transfer_flags |=
1482*4882a593Smuzhiyun URB_DMA_SG_COMBINED;
1483*4882a593Smuzhiyun } else if (urb->sg) {
1484*4882a593Smuzhiyun struct scatterlist *sg = urb->sg;
1485*4882a593Smuzhiyun urb->transfer_dma = dma_map_page(
1486*4882a593Smuzhiyun hcd->self.sysdev,
1487*4882a593Smuzhiyun sg_page(sg),
1488*4882a593Smuzhiyun sg->offset,
1489*4882a593Smuzhiyun urb->transfer_buffer_length,
1490*4882a593Smuzhiyun dir);
1491*4882a593Smuzhiyun if (dma_mapping_error(hcd->self.sysdev,
1492*4882a593Smuzhiyun urb->transfer_dma))
1493*4882a593Smuzhiyun ret = -EAGAIN;
1494*4882a593Smuzhiyun else
1495*4882a593Smuzhiyun urb->transfer_flags |= URB_DMA_MAP_PAGE;
1496*4882a593Smuzhiyun } else if (object_is_on_stack(urb->transfer_buffer)) {
1497*4882a593Smuzhiyun WARN_ONCE(1, "transfer buffer is on stack\n");
1498*4882a593Smuzhiyun ret = -EAGAIN;
1499*4882a593Smuzhiyun } else {
1500*4882a593Smuzhiyun urb->transfer_dma = dma_map_single(
1501*4882a593Smuzhiyun hcd->self.sysdev,
1502*4882a593Smuzhiyun urb->transfer_buffer,
1503*4882a593Smuzhiyun urb->transfer_buffer_length,
1504*4882a593Smuzhiyun dir);
1505*4882a593Smuzhiyun if (dma_mapping_error(hcd->self.sysdev,
1506*4882a593Smuzhiyun urb->transfer_dma))
1507*4882a593Smuzhiyun ret = -EAGAIN;
1508*4882a593Smuzhiyun else
1509*4882a593Smuzhiyun urb->transfer_flags |= URB_DMA_MAP_SINGLE;
1510*4882a593Smuzhiyun }
1511*4882a593Smuzhiyun }
1512*4882a593Smuzhiyun if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
1513*4882a593Smuzhiyun URB_SETUP_MAP_LOCAL)))
1514*4882a593Smuzhiyun usb_hcd_unmap_urb_for_dma(hcd, urb);
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun return ret;
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_map_urb_for_dma);
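
/*
 * Most HCDs leave hc_driver->map_urb_for_dma/unmap_urb_for_dma unset and get
 * the generic behavior above.  A minimal sketch of a driver that needs extra
 * per-URB work but still delegates the actual mapping to the generic helpers
 * (the foo_* bounce helpers are hypothetical):
 *
 *	static int foo_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 *				       gfp_t mem_flags)
 *	{
 *		int ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
 *
 *		if (ret == 0)
 *			ret = foo_prepare_bounce(hcd, urb, mem_flags);
 *		return ret;
 *	}
 *
 *	static void foo_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
 *	{
 *		foo_release_bounce(hcd, urb);
 *		usb_hcd_unmap_urb_for_dma(hcd, urb);
 *	}
 */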
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1521*4882a593Smuzhiyun
1522*4882a593Smuzhiyun /* may be called in any context with a valid urb->dev usecount
1523*4882a593Smuzhiyun * caller surrenders "ownership" of urb
1524*4882a593Smuzhiyun * expects usb_submit_urb() to have sanity checked and conditioned all
1525*4882a593Smuzhiyun * inputs in the urb
1526*4882a593Smuzhiyun */
1527*4882a593Smuzhiyun int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
1528*4882a593Smuzhiyun {
1529*4882a593Smuzhiyun int status;
1530*4882a593Smuzhiyun struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun /* increment urb's reference count as part of giving it to the HCD
1533*4882a593Smuzhiyun * (which will control it). HCD guarantees that it either returns
1534*4882a593Smuzhiyun * an error or calls giveback(), but not both.
1535*4882a593Smuzhiyun */
1536*4882a593Smuzhiyun usb_get_urb(urb);
1537*4882a593Smuzhiyun atomic_inc(&urb->use_count);
1538*4882a593Smuzhiyun atomic_inc(&urb->dev->urbnum);
1539*4882a593Smuzhiyun usbmon_urb_submit(&hcd->self, urb);
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun /* NOTE requirements on root-hub callers (usbfs and the hub
1542*4882a593Smuzhiyun * driver, for now): URBs' urb->transfer_buffer must be
1543*4882a593Smuzhiyun * valid and usb_buffer_{sync,unmap}() not be needed, since
1544*4882a593Smuzhiyun * they could clobber root hub response data. Also, control
1545*4882a593Smuzhiyun * URBs must be submitted in process context with interrupts
1546*4882a593Smuzhiyun * enabled.
1547*4882a593Smuzhiyun */
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun if (is_root_hub(urb->dev)) {
1550*4882a593Smuzhiyun status = rh_urb_enqueue(hcd, urb);
1551*4882a593Smuzhiyun } else {
1552*4882a593Smuzhiyun status = map_urb_for_dma(hcd, urb, mem_flags);
1553*4882a593Smuzhiyun if (likely(status == 0)) {
1554*4882a593Smuzhiyun status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
1555*4882a593Smuzhiyun if (unlikely(status))
1556*4882a593Smuzhiyun unmap_urb_for_dma(hcd, urb);
1557*4882a593Smuzhiyun }
1558*4882a593Smuzhiyun }
1559*4882a593Smuzhiyun
1560*4882a593Smuzhiyun if (unlikely(status)) {
1561*4882a593Smuzhiyun usbmon_urb_submit_error(&hcd->self, urb, status);
1562*4882a593Smuzhiyun urb->hcpriv = NULL;
1563*4882a593Smuzhiyun INIT_LIST_HEAD(&urb->urb_list);
1564*4882a593Smuzhiyun atomic_dec(&urb->use_count);
1565*4882a593Smuzhiyun /*
1566*4882a593Smuzhiyun * Order the write of urb->use_count above before the read
1567*4882a593Smuzhiyun * of urb->reject below. Pairs with the memory barriers in
1568*4882a593Smuzhiyun * usb_kill_urb() and usb_poison_urb().
1569*4882a593Smuzhiyun */
1570*4882a593Smuzhiyun smp_mb__after_atomic();
1571*4882a593Smuzhiyun
1572*4882a593Smuzhiyun atomic_dec(&urb->dev->urbnum);
1573*4882a593Smuzhiyun if (atomic_read(&urb->reject))
1574*4882a593Smuzhiyun wake_up(&usb_kill_urb_queue);
1575*4882a593Smuzhiyun usb_put_urb(urb);
1576*4882a593Smuzhiyun }
1577*4882a593Smuzhiyun return status;
1578*4882a593Smuzhiyun }
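
/*
 * Device drivers never call usb_hcd_submit_urb() directly; they reach it
 * through usb_submit_urb(), which performs the sanity checks mentioned
 * above.  A minimal caller-side sketch (the endpoint address, buffer and
 * foo_complete handler are assumed to have been set up elsewhere):
 *
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep_addr),
 *			  buf, len, foo_complete, ctx);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval)
 *		dev_err(&udev->dev, "submit failed: %d\n", retval);
 */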
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun /* this makes the hcd giveback() the urb more quickly, by kicking it
1583*4882a593Smuzhiyun * off hardware queues (which may take a while) and returning it as
1584*4882a593Smuzhiyun * soon as practical. we've already set up the urb's return status,
1585*4882a593Smuzhiyun * but we can't know if the callback completed already.
1586*4882a593Smuzhiyun */
1587*4882a593Smuzhiyun static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
1588*4882a593Smuzhiyun {
1589*4882a593Smuzhiyun int value;
1590*4882a593Smuzhiyun
1591*4882a593Smuzhiyun if (is_root_hub(urb->dev))
1592*4882a593Smuzhiyun value = usb_rh_urb_dequeue(hcd, urb, status);
1593*4882a593Smuzhiyun else {
1594*4882a593Smuzhiyun
1595*4882a593Smuzhiyun /* The only reason an HCD might fail this call is if
1596*4882a593Smuzhiyun * it has not yet fully queued the urb to begin with.
1597*4882a593Smuzhiyun * Such failures should be harmless. */
1598*4882a593Smuzhiyun value = hcd->driver->urb_dequeue(hcd, urb, status);
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun return value;
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun /*
1604*4882a593Smuzhiyun * called in any context
1605*4882a593Smuzhiyun *
1606*4882a593Smuzhiyun * caller guarantees urb won't be recycled till both unlink()
1607*4882a593Smuzhiyun * and the urb's completion function return
1608*4882a593Smuzhiyun */
1609*4882a593Smuzhiyun int usb_hcd_unlink_urb (struct urb *urb, int status)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun struct usb_hcd *hcd;
1612*4882a593Smuzhiyun struct usb_device *udev = urb->dev;
1613*4882a593Smuzhiyun int retval = -EIDRM;
1614*4882a593Smuzhiyun unsigned long flags;
1615*4882a593Smuzhiyun
1616*4882a593Smuzhiyun /* Prevent the device and bus from going away while
1617*4882a593Smuzhiyun * the unlink is carried out. If they are already gone
1618*4882a593Smuzhiyun * then urb->use_count must be 0, since disconnected
1619*4882a593Smuzhiyun * devices can't have any active URBs.
1620*4882a593Smuzhiyun */
1621*4882a593Smuzhiyun spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
1622*4882a593Smuzhiyun if (atomic_read(&urb->use_count) > 0) {
1623*4882a593Smuzhiyun retval = 0;
1624*4882a593Smuzhiyun usb_get_dev(udev);
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
1627*4882a593Smuzhiyun if (retval == 0) {
1628*4882a593Smuzhiyun hcd = bus_to_hcd(urb->dev->bus);
1629*4882a593Smuzhiyun retval = unlink1(hcd, urb, status);
1630*4882a593Smuzhiyun if (retval == 0)
1631*4882a593Smuzhiyun retval = -EINPROGRESS;
1632*4882a593Smuzhiyun else if (retval != -EIDRM && retval != -EBUSY)
1633*4882a593Smuzhiyun dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
1634*4882a593Smuzhiyun urb, retval);
1635*4882a593Smuzhiyun usb_put_dev(udev);
1636*4882a593Smuzhiyun }
1637*4882a593Smuzhiyun return retval;
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1641*4882a593Smuzhiyun
1642*4882a593Smuzhiyun static void __usb_hcd_giveback_urb(struct urb *urb)
1643*4882a593Smuzhiyun {
1644*4882a593Smuzhiyun struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
1645*4882a593Smuzhiyun struct usb_anchor *anchor = urb->anchor;
1646*4882a593Smuzhiyun int status = urb->unlinked;
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun urb->hcpriv = NULL;
1649*4882a593Smuzhiyun if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
1650*4882a593Smuzhiyun urb->actual_length < urb->transfer_buffer_length &&
1651*4882a593Smuzhiyun !status))
1652*4882a593Smuzhiyun status = -EREMOTEIO;
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun unmap_urb_for_dma(hcd, urb);
1655*4882a593Smuzhiyun usbmon_urb_complete(&hcd->self, urb, status);
1656*4882a593Smuzhiyun usb_anchor_suspend_wakeups(anchor);
1657*4882a593Smuzhiyun usb_unanchor_urb(urb);
1658*4882a593Smuzhiyun if (likely(status == 0))
1659*4882a593Smuzhiyun usb_led_activity(USB_LED_EVENT_HOST);
1660*4882a593Smuzhiyun
1661*4882a593Smuzhiyun /* pass ownership to the completion handler */
1662*4882a593Smuzhiyun urb->status = status;
1663*4882a593Smuzhiyun /*
1664*4882a593Smuzhiyun * This function can be called in task context inside another remote
1665*4882a593Smuzhiyun * coverage collection section, but KCOV doesn't support that kind of
1666*4882a593Smuzhiyun * recursion yet. Only collect coverage in softirq context for now.
1667*4882a593Smuzhiyun */
1668*4882a593Smuzhiyun if (in_serving_softirq())
1669*4882a593Smuzhiyun kcov_remote_start_usb((u64)urb->dev->bus->busnum);
1670*4882a593Smuzhiyun urb->complete(urb);
1671*4882a593Smuzhiyun if (in_serving_softirq())
1672*4882a593Smuzhiyun kcov_remote_stop();
1673*4882a593Smuzhiyun
1674*4882a593Smuzhiyun usb_anchor_resume_wakeups(anchor);
1675*4882a593Smuzhiyun atomic_dec(&urb->use_count);
1676*4882a593Smuzhiyun /*
1677*4882a593Smuzhiyun * Order the write of urb->use_count above before the read
1678*4882a593Smuzhiyun * of urb->reject below. Pairs with the memory barriers in
1679*4882a593Smuzhiyun * usb_kill_urb() and usb_poison_urb().
1680*4882a593Smuzhiyun */
1681*4882a593Smuzhiyun smp_mb__after_atomic();
1682*4882a593Smuzhiyun
1683*4882a593Smuzhiyun if (unlikely(atomic_read(&urb->reject)))
1684*4882a593Smuzhiyun wake_up(&usb_kill_urb_queue);
1685*4882a593Smuzhiyun usb_put_urb(urb);
1686*4882a593Smuzhiyun }
1687*4882a593Smuzhiyun
1688*4882a593Smuzhiyun static void usb_giveback_urb_bh(struct tasklet_struct *t)
1689*4882a593Smuzhiyun {
1690*4882a593Smuzhiyun struct giveback_urb_bh *bh = from_tasklet(bh, t, bh);
1691*4882a593Smuzhiyun struct list_head local_list;
1692*4882a593Smuzhiyun
1693*4882a593Smuzhiyun spin_lock_irq(&bh->lock);
1694*4882a593Smuzhiyun bh->running = true;
1695*4882a593Smuzhiyun restart:
1696*4882a593Smuzhiyun list_replace_init(&bh->head, &local_list);
1697*4882a593Smuzhiyun spin_unlock_irq(&bh->lock);
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun while (!list_empty(&local_list)) {
1700*4882a593Smuzhiyun struct urb *urb;
1701*4882a593Smuzhiyun
1702*4882a593Smuzhiyun urb = list_entry(local_list.next, struct urb, urb_list);
1703*4882a593Smuzhiyun list_del_init(&urb->urb_list);
1704*4882a593Smuzhiyun bh->completing_ep = urb->ep;
1705*4882a593Smuzhiyun __usb_hcd_giveback_urb(urb);
1706*4882a593Smuzhiyun bh->completing_ep = NULL;
1707*4882a593Smuzhiyun }
1708*4882a593Smuzhiyun
1709*4882a593Smuzhiyun /* check if there are new URBs to giveback */
1710*4882a593Smuzhiyun spin_lock_irq(&bh->lock);
1711*4882a593Smuzhiyun if (!list_empty(&bh->head))
1712*4882a593Smuzhiyun goto restart;
1713*4882a593Smuzhiyun bh->running = false;
1714*4882a593Smuzhiyun spin_unlock_irq(&bh->lock);
1715*4882a593Smuzhiyun }
1716*4882a593Smuzhiyun
1717*4882a593Smuzhiyun /**
1718*4882a593Smuzhiyun * usb_hcd_giveback_urb - return URB from HCD to device driver
1719*4882a593Smuzhiyun * @hcd: host controller returning the URB
1720*4882a593Smuzhiyun * @urb: urb being returned to the USB device driver.
1721*4882a593Smuzhiyun * @status: completion status code for the URB.
1722*4882a593Smuzhiyun * Context: in_interrupt()
1723*4882a593Smuzhiyun *
1724*4882a593Smuzhiyun * This hands the URB from HCD to its USB device driver, using its
1725*4882a593Smuzhiyun * completion function. The HCD has freed all per-urb resources
1726*4882a593Smuzhiyun * (and is done using urb->hcpriv). It also released all HCD locks;
1727*4882a593Smuzhiyun * the device driver won't cause problems if it frees, modifies,
1728*4882a593Smuzhiyun * or resubmits this URB.
1729*4882a593Smuzhiyun *
1730*4882a593Smuzhiyun * If @urb was unlinked, the value of @status will be overridden by
1731*4882a593Smuzhiyun * @urb->unlinked. Erroneous short transfers are detected in case
1732*4882a593Smuzhiyun * the HCD hasn't checked for them.
1733*4882a593Smuzhiyun */
1734*4882a593Smuzhiyun void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
1735*4882a593Smuzhiyun {
1736*4882a593Smuzhiyun struct giveback_urb_bh *bh;
1737*4882a593Smuzhiyun bool running, high_prio_bh;
1738*4882a593Smuzhiyun
1739*4882a593Smuzhiyun /* pass status to tasklet via unlinked */
1740*4882a593Smuzhiyun if (likely(!urb->unlinked))
1741*4882a593Smuzhiyun urb->unlinked = status;
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun if (!hcd_giveback_urb_in_bh(hcd) && !is_root_hub(urb->dev)) {
1744*4882a593Smuzhiyun __usb_hcd_giveback_urb(urb);
1745*4882a593Smuzhiyun return;
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun
1748*4882a593Smuzhiyun if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
1749*4882a593Smuzhiyun bh = &hcd->high_prio_bh;
1750*4882a593Smuzhiyun high_prio_bh = true;
1751*4882a593Smuzhiyun } else {
1752*4882a593Smuzhiyun bh = &hcd->low_prio_bh;
1753*4882a593Smuzhiyun high_prio_bh = false;
1754*4882a593Smuzhiyun }
1755*4882a593Smuzhiyun
1756*4882a593Smuzhiyun spin_lock(&bh->lock);
1757*4882a593Smuzhiyun list_add_tail(&urb->urb_list, &bh->head);
1758*4882a593Smuzhiyun running = bh->running;
1759*4882a593Smuzhiyun spin_unlock(&bh->lock);
1760*4882a593Smuzhiyun
1761*4882a593Smuzhiyun if (running)
1762*4882a593Smuzhiyun ;
1763*4882a593Smuzhiyun else if (high_prio_bh)
1764*4882a593Smuzhiyun tasklet_hi_schedule(&bh->bh);
1765*4882a593Smuzhiyun else
1766*4882a593Smuzhiyun tasklet_schedule(&bh->bh);
1767*4882a593Smuzhiyun }
1768*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_giveback_urb);
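
/*
 * On the device-driver side, the completion handler owns the URB once it
 * runs and may inspect urb->status, free the URB, or resubmit it.  A minimal
 * sketch of a handler that resubmits on success (the foo_* names are
 * hypothetical):
 *
 *	static void foo_read_complete(struct urb *urb)
 *	{
 *		struct foo_dev *foo = urb->context;
 *
 *		if (urb->status == 0) {
 *			foo_consume(foo, urb->transfer_buffer,
 *				    urb->actual_length);
 *			if (usb_submit_urb(urb, GFP_ATOMIC))
 *				dev_err(&foo->intf->dev, "resubmit failed\n");
 *		}
 *	}
 */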
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1771*4882a593Smuzhiyun
1772*4882a593Smuzhiyun /* Cancel all URBs pending on this endpoint and wait for the endpoint's
1773*4882a593Smuzhiyun * queue to drain completely. The caller must first ensure that no more
1774*4882a593Smuzhiyun * URBs can be submitted for this endpoint.
1775*4882a593Smuzhiyun */
1776*4882a593Smuzhiyun void usb_hcd_flush_endpoint(struct usb_device *udev,
1777*4882a593Smuzhiyun struct usb_host_endpoint *ep)
1778*4882a593Smuzhiyun {
1779*4882a593Smuzhiyun struct usb_hcd *hcd;
1780*4882a593Smuzhiyun struct urb *urb;
1781*4882a593Smuzhiyun
1782*4882a593Smuzhiyun if (!ep)
1783*4882a593Smuzhiyun return;
1784*4882a593Smuzhiyun might_sleep();
1785*4882a593Smuzhiyun hcd = bus_to_hcd(udev->bus);
1786*4882a593Smuzhiyun
1787*4882a593Smuzhiyun /* No more submits can occur */
1788*4882a593Smuzhiyun spin_lock_irq(&hcd_urb_list_lock);
1789*4882a593Smuzhiyun rescan:
1790*4882a593Smuzhiyun list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
1791*4882a593Smuzhiyun int is_in;
1792*4882a593Smuzhiyun
1793*4882a593Smuzhiyun if (urb->unlinked)
1794*4882a593Smuzhiyun continue;
1795*4882a593Smuzhiyun usb_get_urb (urb);
1796*4882a593Smuzhiyun is_in = usb_urb_dir_in(urb);
1797*4882a593Smuzhiyun spin_unlock(&hcd_urb_list_lock);
1798*4882a593Smuzhiyun
1799*4882a593Smuzhiyun /* kick hcd */
1800*4882a593Smuzhiyun unlink1(hcd, urb, -ESHUTDOWN);
1801*4882a593Smuzhiyun dev_dbg (hcd->self.controller,
1802*4882a593Smuzhiyun "shutdown urb %pK ep%d%s-%s\n",
1803*4882a593Smuzhiyun urb, usb_endpoint_num(&ep->desc),
1804*4882a593Smuzhiyun is_in ? "in" : "out",
1805*4882a593Smuzhiyun usb_ep_type_string(usb_endpoint_type(&ep->desc)));
1806*4882a593Smuzhiyun usb_put_urb (urb);
1807*4882a593Smuzhiyun
1808*4882a593Smuzhiyun /* list contents may have changed */
1809*4882a593Smuzhiyun spin_lock(&hcd_urb_list_lock);
1810*4882a593Smuzhiyun goto rescan;
1811*4882a593Smuzhiyun }
1812*4882a593Smuzhiyun spin_unlock_irq(&hcd_urb_list_lock);
1813*4882a593Smuzhiyun
1814*4882a593Smuzhiyun /* Wait until the endpoint queue is completely empty */
1815*4882a593Smuzhiyun while (!list_empty (&ep->urb_list)) {
1816*4882a593Smuzhiyun spin_lock_irq(&hcd_urb_list_lock);
1817*4882a593Smuzhiyun
1818*4882a593Smuzhiyun /* The list may have changed while we acquired the spinlock */
1819*4882a593Smuzhiyun urb = NULL;
1820*4882a593Smuzhiyun if (!list_empty (&ep->urb_list)) {
1821*4882a593Smuzhiyun urb = list_entry (ep->urb_list.prev, struct urb,
1822*4882a593Smuzhiyun urb_list);
1823*4882a593Smuzhiyun usb_get_urb (urb);
1824*4882a593Smuzhiyun }
1825*4882a593Smuzhiyun spin_unlock_irq(&hcd_urb_list_lock);
1826*4882a593Smuzhiyun
1827*4882a593Smuzhiyun if (urb) {
1828*4882a593Smuzhiyun usb_kill_urb (urb);
1829*4882a593Smuzhiyun usb_put_urb (urb);
1830*4882a593Smuzhiyun }
1831*4882a593Smuzhiyun }
1832*4882a593Smuzhiyun }
1833*4882a593Smuzhiyun
1834*4882a593Smuzhiyun /**
1835*4882a593Smuzhiyun * usb_hcd_alloc_bandwidth - check whether a new bandwidth setting exceeds
1836*4882a593Smuzhiyun * the bus bandwidth
1837*4882a593Smuzhiyun * @udev: target &usb_device
1838*4882a593Smuzhiyun * @new_config: new configuration to install
1839*4882a593Smuzhiyun * @cur_alt: the current alternate interface setting
1840*4882a593Smuzhiyun * @new_alt: alternate interface setting that is being installed
1841*4882a593Smuzhiyun *
1842*4882a593Smuzhiyun * To change configurations, pass in the new configuration in new_config,
1843*4882a593Smuzhiyun * and pass NULL for cur_alt and new_alt.
1844*4882a593Smuzhiyun *
1845*4882a593Smuzhiyun * To reset a device's configuration (put the device in the ADDRESSED state),
1846*4882a593Smuzhiyun * pass in NULL for new_config, cur_alt, and new_alt.
1847*4882a593Smuzhiyun *
1848*4882a593Smuzhiyun * To change alternate interface settings, pass in NULL for new_config,
1849*4882a593Smuzhiyun * pass in the current alternate interface setting in cur_alt,
1850*4882a593Smuzhiyun * and pass in the new alternate interface setting in new_alt.
1851*4882a593Smuzhiyun *
1852*4882a593Smuzhiyun * Return: An error if the requested bandwidth change exceeds the
1853*4882a593Smuzhiyun * bus bandwidth or host controller internal resources.
1854*4882a593Smuzhiyun */
1855*4882a593Smuzhiyun int usb_hcd_alloc_bandwidth(struct usb_device *udev,
1856*4882a593Smuzhiyun struct usb_host_config *new_config,
1857*4882a593Smuzhiyun struct usb_host_interface *cur_alt,
1858*4882a593Smuzhiyun struct usb_host_interface *new_alt)
1859*4882a593Smuzhiyun {
1860*4882a593Smuzhiyun int num_intfs, i, j;
1861*4882a593Smuzhiyun struct usb_host_interface *alt = NULL;
1862*4882a593Smuzhiyun int ret = 0;
1863*4882a593Smuzhiyun struct usb_hcd *hcd;
1864*4882a593Smuzhiyun struct usb_host_endpoint *ep;
1865*4882a593Smuzhiyun
1866*4882a593Smuzhiyun hcd = bus_to_hcd(udev->bus);
1867*4882a593Smuzhiyun if (!hcd->driver->check_bandwidth)
1868*4882a593Smuzhiyun return 0;
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun /* Configuration is being removed - set configuration 0 */
1871*4882a593Smuzhiyun if (!new_config && !cur_alt) {
1872*4882a593Smuzhiyun for (i = 1; i < 16; ++i) {
1873*4882a593Smuzhiyun ep = udev->ep_out[i];
1874*4882a593Smuzhiyun if (ep)
1875*4882a593Smuzhiyun hcd->driver->drop_endpoint(hcd, udev, ep);
1876*4882a593Smuzhiyun ep = udev->ep_in[i];
1877*4882a593Smuzhiyun if (ep)
1878*4882a593Smuzhiyun hcd->driver->drop_endpoint(hcd, udev, ep);
1879*4882a593Smuzhiyun }
1880*4882a593Smuzhiyun hcd->driver->check_bandwidth(hcd, udev);
1881*4882a593Smuzhiyun return 0;
1882*4882a593Smuzhiyun }
1883*4882a593Smuzhiyun /* Check if the HCD says there's enough bandwidth. Enable all endpoints
1884*4882a593Smuzhiyun * in each interface's alt setting 0 and ask the HCD to check the bandwidth
1885*4882a593Smuzhiyun * of the bus. There will always be bandwidth for endpoint 0, so it's
1886*4882a593Smuzhiyun * ok to exclude it.
1887*4882a593Smuzhiyun */
1888*4882a593Smuzhiyun if (new_config) {
1889*4882a593Smuzhiyun num_intfs = new_config->desc.bNumInterfaces;
1890*4882a593Smuzhiyun /* Remove the old config's endpoints (except endpoint 0, which is
1891*4882a593Smuzhiyun * always on the schedule) from the schedule
1892*4882a593Smuzhiyun */
1893*4882a593Smuzhiyun for (i = 1; i < 16; ++i) {
1894*4882a593Smuzhiyun ep = udev->ep_out[i];
1895*4882a593Smuzhiyun if (ep) {
1896*4882a593Smuzhiyun ret = hcd->driver->drop_endpoint(hcd, udev, ep);
1897*4882a593Smuzhiyun if (ret < 0)
1898*4882a593Smuzhiyun goto reset;
1899*4882a593Smuzhiyun }
1900*4882a593Smuzhiyun ep = udev->ep_in[i];
1901*4882a593Smuzhiyun if (ep) {
1902*4882a593Smuzhiyun ret = hcd->driver->drop_endpoint(hcd, udev, ep);
1903*4882a593Smuzhiyun if (ret < 0)
1904*4882a593Smuzhiyun goto reset;
1905*4882a593Smuzhiyun }
1906*4882a593Smuzhiyun }
1907*4882a593Smuzhiyun for (i = 0; i < num_intfs; ++i) {
1908*4882a593Smuzhiyun struct usb_host_interface *first_alt;
1909*4882a593Smuzhiyun int iface_num;
1910*4882a593Smuzhiyun
1911*4882a593Smuzhiyun first_alt = &new_config->intf_cache[i]->altsetting[0];
1912*4882a593Smuzhiyun iface_num = first_alt->desc.bInterfaceNumber;
1913*4882a593Smuzhiyun /* Set up endpoints for alternate interface setting 0 */
1914*4882a593Smuzhiyun alt = usb_find_alt_setting(new_config, iface_num, 0);
1915*4882a593Smuzhiyun if (!alt)
1916*4882a593Smuzhiyun /* No alt setting 0? Pick the first setting. */
1917*4882a593Smuzhiyun alt = first_alt;
1918*4882a593Smuzhiyun
1919*4882a593Smuzhiyun for (j = 0; j < alt->desc.bNumEndpoints; j++) {
1920*4882a593Smuzhiyun ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
1921*4882a593Smuzhiyun if (ret < 0)
1922*4882a593Smuzhiyun goto reset;
1923*4882a593Smuzhiyun }
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun }
1926*4882a593Smuzhiyun if (cur_alt && new_alt) {
1927*4882a593Smuzhiyun struct usb_interface *iface = usb_ifnum_to_if(udev,
1928*4882a593Smuzhiyun cur_alt->desc.bInterfaceNumber);
1929*4882a593Smuzhiyun
1930*4882a593Smuzhiyun if (!iface)
1931*4882a593Smuzhiyun return -EINVAL;
1932*4882a593Smuzhiyun if (iface->resetting_device) {
1933*4882a593Smuzhiyun /*
1934*4882a593Smuzhiyun * The USB core just reset the device, so the xHCI host
1935*4882a593Smuzhiyun * and the device will think alt setting 0 is installed.
1936*4882a593Smuzhiyun * However, the USB core will pass in the alternate
1937*4882a593Smuzhiyun * setting installed before the reset as cur_alt. Dig
1938*4882a593Smuzhiyun * out the alternate setting 0 structure, or the first
1939*4882a593Smuzhiyun * alternate setting if a broken device doesn't have alt
1940*4882a593Smuzhiyun * setting 0.
1941*4882a593Smuzhiyun */
1942*4882a593Smuzhiyun cur_alt = usb_altnum_to_altsetting(iface, 0);
1943*4882a593Smuzhiyun if (!cur_alt)
1944*4882a593Smuzhiyun cur_alt = &iface->altsetting[0];
1945*4882a593Smuzhiyun }
1946*4882a593Smuzhiyun
1947*4882a593Smuzhiyun /* Drop all the endpoints in the current alt setting */
1948*4882a593Smuzhiyun for (i = 0; i < cur_alt->desc.bNumEndpoints; i++) {
1949*4882a593Smuzhiyun ret = hcd->driver->drop_endpoint(hcd, udev,
1950*4882a593Smuzhiyun &cur_alt->endpoint[i]);
1951*4882a593Smuzhiyun if (ret < 0)
1952*4882a593Smuzhiyun goto reset;
1953*4882a593Smuzhiyun }
1954*4882a593Smuzhiyun /* Add all the endpoints in the new alt setting */
1955*4882a593Smuzhiyun for (i = 0; i < new_alt->desc.bNumEndpoints; i++) {
1956*4882a593Smuzhiyun ret = hcd->driver->add_endpoint(hcd, udev,
1957*4882a593Smuzhiyun &new_alt->endpoint[i]);
1958*4882a593Smuzhiyun if (ret < 0)
1959*4882a593Smuzhiyun goto reset;
1960*4882a593Smuzhiyun }
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun ret = hcd->driver->check_bandwidth(hcd, udev);
1963*4882a593Smuzhiyun reset:
1964*4882a593Smuzhiyun if (ret < 0)
1965*4882a593Smuzhiyun hcd->driver->reset_bandwidth(hcd, udev);
1966*4882a593Smuzhiyun return ret;
1967*4882a593Smuzhiyun }
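
/*
 * The three ways usbcore invokes this routine, matching the kernel-doc above
 * (illustrative only; the real callers live in message.c and hub.c):
 *
 *	// install a whole new configuration
 *	ret = usb_hcd_alloc_bandwidth(udev, new_config, NULL, NULL);
 *
 *	// return the device to the ADDRESSED state (deconfigure)
 *	ret = usb_hcd_alloc_bandwidth(udev, NULL, NULL, NULL);
 *
 *	// switch one interface from cur_alt to new_alt
 *	ret = usb_hcd_alloc_bandwidth(udev, NULL, cur_alt, new_alt);
 */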
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun /* Disables the endpoint: synchronizes with the hcd to make sure all
1970*4882a593Smuzhiyun * endpoint state is gone from hardware. usb_hcd_flush_endpoint() must
1971*4882a593Smuzhiyun * have been called previously. Use for set_configuration, set_interface,
1972*4882a593Smuzhiyun * driver removal, physical disconnect.
1973*4882a593Smuzhiyun *
1974*4882a593Smuzhiyun * example: a qh stored in ep->hcpriv, holding state related to endpoint
1975*4882a593Smuzhiyun * type, maxpacket size, toggle, halt status, and scheduling.
1976*4882a593Smuzhiyun */
1977*4882a593Smuzhiyun void usb_hcd_disable_endpoint(struct usb_device *udev,
1978*4882a593Smuzhiyun struct usb_host_endpoint *ep)
1979*4882a593Smuzhiyun {
1980*4882a593Smuzhiyun struct usb_hcd *hcd;
1981*4882a593Smuzhiyun
1982*4882a593Smuzhiyun might_sleep();
1983*4882a593Smuzhiyun hcd = bus_to_hcd(udev->bus);
1984*4882a593Smuzhiyun if (hcd->driver->endpoint_disable)
1985*4882a593Smuzhiyun hcd->driver->endpoint_disable(hcd, ep);
1986*4882a593Smuzhiyun }
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun /**
1989*4882a593Smuzhiyun * usb_hcd_reset_endpoint - reset host endpoint state
1990*4882a593Smuzhiyun * @udev: USB device.
1991*4882a593Smuzhiyun * @ep: the endpoint to reset.
1992*4882a593Smuzhiyun *
1993*4882a593Smuzhiyun * Resets any host endpoint state such as the toggle bit, sequence
1994*4882a593Smuzhiyun * number and current window.
1995*4882a593Smuzhiyun */
1996*4882a593Smuzhiyun void usb_hcd_reset_endpoint(struct usb_device *udev,
1997*4882a593Smuzhiyun struct usb_host_endpoint *ep)
1998*4882a593Smuzhiyun {
1999*4882a593Smuzhiyun struct usb_hcd *hcd = bus_to_hcd(udev->bus);
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun if (hcd->driver->endpoint_reset)
2002*4882a593Smuzhiyun hcd->driver->endpoint_reset(hcd, ep);
2003*4882a593Smuzhiyun else {
2004*4882a593Smuzhiyun int epnum = usb_endpoint_num(&ep->desc);
2005*4882a593Smuzhiyun int is_out = usb_endpoint_dir_out(&ep->desc);
2006*4882a593Smuzhiyun int is_control = usb_endpoint_xfer_control(&ep->desc);
2007*4882a593Smuzhiyun
2008*4882a593Smuzhiyun usb_settoggle(udev, epnum, is_out, 0);
2009*4882a593Smuzhiyun if (is_control)
2010*4882a593Smuzhiyun usb_settoggle(udev, epnum, !is_out, 0);
2011*4882a593Smuzhiyun }
2012*4882a593Smuzhiyun }
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun /**
2015*4882a593Smuzhiyun * usb_alloc_streams - allocate bulk endpoint stream IDs.
2016*4882a593Smuzhiyun * @interface: alternate setting that includes all endpoints.
2017*4882a593Smuzhiyun * @eps: array of endpoints that need streams.
2018*4882a593Smuzhiyun * @num_eps: number of endpoints in the array.
2019*4882a593Smuzhiyun * @num_streams: number of streams to allocate.
2020*4882a593Smuzhiyun * @mem_flags: flags hcd should use to allocate memory.
2021*4882a593Smuzhiyun *
2022*4882a593Smuzhiyun * Sets up a group of bulk endpoints to have @num_streams stream IDs available.
2023*4882a593Smuzhiyun * Drivers may queue multiple transfers to different stream IDs, which may
2024*4882a593Smuzhiyun * complete in a different order than they were queued.
2025*4882a593Smuzhiyun *
2026*4882a593Smuzhiyun * Return: On success, the number of allocated streams. On failure, a negative
2027*4882a593Smuzhiyun * error code.
2028*4882a593Smuzhiyun */
2029*4882a593Smuzhiyun int usb_alloc_streams(struct usb_interface *interface,
2030*4882a593Smuzhiyun struct usb_host_endpoint **eps, unsigned int num_eps,
2031*4882a593Smuzhiyun unsigned int num_streams, gfp_t mem_flags)
2032*4882a593Smuzhiyun {
2033*4882a593Smuzhiyun struct usb_hcd *hcd;
2034*4882a593Smuzhiyun struct usb_device *dev;
2035*4882a593Smuzhiyun int i, ret;
2036*4882a593Smuzhiyun
2037*4882a593Smuzhiyun dev = interface_to_usbdev(interface);
2038*4882a593Smuzhiyun hcd = bus_to_hcd(dev->bus);
2039*4882a593Smuzhiyun if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
2040*4882a593Smuzhiyun return -EINVAL;
2041*4882a593Smuzhiyun if (dev->speed < USB_SPEED_SUPER)
2042*4882a593Smuzhiyun return -EINVAL;
2043*4882a593Smuzhiyun if (dev->state < USB_STATE_CONFIGURED)
2044*4882a593Smuzhiyun return -ENODEV;
2045*4882a593Smuzhiyun
2046*4882a593Smuzhiyun for (i = 0; i < num_eps; i++) {
2047*4882a593Smuzhiyun /* Streams only apply to bulk endpoints. */
2048*4882a593Smuzhiyun if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
2049*4882a593Smuzhiyun return -EINVAL;
2050*4882a593Smuzhiyun /* Re-alloc is not allowed */
2051*4882a593Smuzhiyun if (eps[i]->streams)
2052*4882a593Smuzhiyun return -EINVAL;
2053*4882a593Smuzhiyun }
2054*4882a593Smuzhiyun
2055*4882a593Smuzhiyun ret = hcd->driver->alloc_streams(hcd, dev, eps, num_eps,
2056*4882a593Smuzhiyun num_streams, mem_flags);
2057*4882a593Smuzhiyun if (ret < 0)
2058*4882a593Smuzhiyun return ret;
2059*4882a593Smuzhiyun
2060*4882a593Smuzhiyun for (i = 0; i < num_eps; i++)
2061*4882a593Smuzhiyun eps[i]->streams = ret;
2062*4882a593Smuzhiyun
2063*4882a593Smuzhiyun return ret;
2064*4882a593Smuzhiyun }
2065*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_alloc_streams);
2066*4882a593Smuzhiyun
2067*4882a593Smuzhiyun /**
2068*4882a593Smuzhiyun * usb_free_streams - free bulk endpoint stream IDs.
2069*4882a593Smuzhiyun * @interface: alternate setting that includes all endpoints.
2070*4882a593Smuzhiyun * @eps: array of endpoints to remove streams from.
2071*4882a593Smuzhiyun * @num_eps: number of endpoints in the array.
2072*4882a593Smuzhiyun * @mem_flags: flags hcd should use to allocate memory.
2073*4882a593Smuzhiyun *
2074*4882a593Smuzhiyun * Reverts a group of bulk endpoints back to not using stream IDs.
2075*4882a593Smuzhiyun * Can fail if we are given bad arguments, or HCD is broken.
2076*4882a593Smuzhiyun *
2077*4882a593Smuzhiyun * Return: 0 on success. On failure, a negative error code.
2078*4882a593Smuzhiyun */
2079*4882a593Smuzhiyun int usb_free_streams(struct usb_interface *interface,
2080*4882a593Smuzhiyun struct usb_host_endpoint **eps, unsigned int num_eps,
2081*4882a593Smuzhiyun gfp_t mem_flags)
2082*4882a593Smuzhiyun {
2083*4882a593Smuzhiyun struct usb_hcd *hcd;
2084*4882a593Smuzhiyun struct usb_device *dev;
2085*4882a593Smuzhiyun int i, ret;
2086*4882a593Smuzhiyun
2087*4882a593Smuzhiyun dev = interface_to_usbdev(interface);
2088*4882a593Smuzhiyun hcd = bus_to_hcd(dev->bus);
2089*4882a593Smuzhiyun if (dev->speed < USB_SPEED_SUPER)
2090*4882a593Smuzhiyun return -EINVAL;
2091*4882a593Smuzhiyun
2092*4882a593Smuzhiyun /* Double-free is not allowed */
2093*4882a593Smuzhiyun for (i = 0; i < num_eps; i++)
2094*4882a593Smuzhiyun if (!eps[i] || !eps[i]->streams)
2095*4882a593Smuzhiyun return -EINVAL;
2096*4882a593Smuzhiyun
2097*4882a593Smuzhiyun ret = hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
2098*4882a593Smuzhiyun if (ret < 0)
2099*4882a593Smuzhiyun return ret;
2100*4882a593Smuzhiyun
2101*4882a593Smuzhiyun for (i = 0; i < num_eps; i++)
2102*4882a593Smuzhiyun eps[i]->streams = 0;
2103*4882a593Smuzhiyun
2104*4882a593Smuzhiyun return ret;
2105*4882a593Smuzhiyun }
2106*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_free_streams);
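
/*
 * A minimal sketch of an interface driver using bulk streams on a pair of
 * endpoints, in the style of a UAS-like driver.  Endpoint lookup and error
 * unwinding are elided and the names are hypothetical:
 *
 *	struct usb_host_endpoint *eps[2] = { data_in_ep, data_out_ep };
 *	int streams;
 *
 *	streams = usb_alloc_streams(intf, eps, 2, 16, GFP_KERNEL);
 *	if (streams < 0)
 *		return streams;
 *
 *	// each queued URB then selects a stream via urb->stream_id
 *	// (1 .. streams); transfers on different streams may complete
 *	// out of order
 *
 *	usb_free_streams(intf, eps, 2, GFP_KERNEL);
 */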
2107*4882a593Smuzhiyun
2108*4882a593Smuzhiyun /* Protect against drivers that try to unlink URBs after the device
2109*4882a593Smuzhiyun * is gone, by waiting until all unlinks for @udev are finished.
2110*4882a593Smuzhiyun * Since we don't currently track URBs by device, simply wait until
2111*4882a593Smuzhiyun * nothing is running in the locked region of usb_hcd_unlink_urb().
2112*4882a593Smuzhiyun */
2113*4882a593Smuzhiyun void usb_hcd_synchronize_unlinks(struct usb_device *udev)
2114*4882a593Smuzhiyun {
2115*4882a593Smuzhiyun spin_lock_irq(&hcd_urb_unlink_lock);
2116*4882a593Smuzhiyun spin_unlock_irq(&hcd_urb_unlink_lock);
2117*4882a593Smuzhiyun }
2118*4882a593Smuzhiyun
2119*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2120*4882a593Smuzhiyun
2121*4882a593Smuzhiyun /* called in any context */
2122*4882a593Smuzhiyun int usb_hcd_get_frame_number (struct usb_device *udev)
2123*4882a593Smuzhiyun {
2124*4882a593Smuzhiyun struct usb_hcd *hcd = bus_to_hcd(udev->bus);
2125*4882a593Smuzhiyun
2126*4882a593Smuzhiyun if (!HCD_RH_RUNNING(hcd))
2127*4882a593Smuzhiyun return -ESHUTDOWN;
2128*4882a593Smuzhiyun return hcd->driver->get_frame_number (hcd);
2129*4882a593Smuzhiyun }
2130*4882a593Smuzhiyun
2131*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2132*4882a593Smuzhiyun
2133*4882a593Smuzhiyun #ifdef CONFIG_PM
2134*4882a593Smuzhiyun
2135*4882a593Smuzhiyun int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
2136*4882a593Smuzhiyun {
2137*4882a593Smuzhiyun struct usb_hcd *hcd = bus_to_hcd(rhdev->bus);
2138*4882a593Smuzhiyun int status;
2139*4882a593Smuzhiyun int old_state = hcd->state;
2140*4882a593Smuzhiyun
2141*4882a593Smuzhiyun dev_dbg(&rhdev->dev, "bus %ssuspend, wakeup %d\n",
2142*4882a593Smuzhiyun (PMSG_IS_AUTO(msg) ? "auto-" : ""),
2143*4882a593Smuzhiyun rhdev->do_remote_wakeup);
2144*4882a593Smuzhiyun if (HCD_DEAD(hcd)) {
2145*4882a593Smuzhiyun dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend");
2146*4882a593Smuzhiyun return 0;
2147*4882a593Smuzhiyun }
2148*4882a593Smuzhiyun
2149*4882a593Smuzhiyun if (!hcd->driver->bus_suspend) {
2150*4882a593Smuzhiyun status = -ENOENT;
2151*4882a593Smuzhiyun } else {
2152*4882a593Smuzhiyun clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2153*4882a593Smuzhiyun hcd->state = HC_STATE_QUIESCING;
2154*4882a593Smuzhiyun status = hcd->driver->bus_suspend(hcd);
2155*4882a593Smuzhiyun }
2156*4882a593Smuzhiyun if (status == 0) {
2157*4882a593Smuzhiyun usb_set_device_state(rhdev, USB_STATE_SUSPENDED);
2158*4882a593Smuzhiyun hcd->state = HC_STATE_SUSPENDED;
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun if (!PMSG_IS_AUTO(msg))
2161*4882a593Smuzhiyun usb_phy_roothub_suspend(hcd->self.sysdev,
2162*4882a593Smuzhiyun hcd->phy_roothub);
2163*4882a593Smuzhiyun
2164*4882a593Smuzhiyun /* Did we race with a root-hub wakeup event? */
2165*4882a593Smuzhiyun if (rhdev->do_remote_wakeup) {
2166*4882a593Smuzhiyun char buffer[6];
2167*4882a593Smuzhiyun
2168*4882a593Smuzhiyun status = hcd->driver->hub_status_data(hcd, buffer);
2169*4882a593Smuzhiyun if (status != 0) {
2170*4882a593Smuzhiyun dev_dbg(&rhdev->dev, "suspend raced with wakeup event\n");
2171*4882a593Smuzhiyun hcd_bus_resume(rhdev, PMSG_AUTO_RESUME);
2172*4882a593Smuzhiyun status = -EBUSY;
2173*4882a593Smuzhiyun }
2174*4882a593Smuzhiyun }
2175*4882a593Smuzhiyun } else {
2176*4882a593Smuzhiyun spin_lock_irq(&hcd_root_hub_lock);
2177*4882a593Smuzhiyun if (!HCD_DEAD(hcd)) {
2178*4882a593Smuzhiyun set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2179*4882a593Smuzhiyun hcd->state = old_state;
2180*4882a593Smuzhiyun }
2181*4882a593Smuzhiyun spin_unlock_irq(&hcd_root_hub_lock);
2182*4882a593Smuzhiyun dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
2183*4882a593Smuzhiyun "suspend", status);
2184*4882a593Smuzhiyun }
2185*4882a593Smuzhiyun return status;
2186*4882a593Smuzhiyun }
2187*4882a593Smuzhiyun
2188*4882a593Smuzhiyun int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
2189*4882a593Smuzhiyun {
2190*4882a593Smuzhiyun struct usb_hcd *hcd = bus_to_hcd(rhdev->bus);
2191*4882a593Smuzhiyun int status;
2192*4882a593Smuzhiyun int old_state = hcd->state;
2193*4882a593Smuzhiyun
2194*4882a593Smuzhiyun dev_dbg(&rhdev->dev, "usb %sresume\n",
2195*4882a593Smuzhiyun (PMSG_IS_AUTO(msg) ? "auto-" : ""));
2196*4882a593Smuzhiyun if (HCD_DEAD(hcd)) {
2197*4882a593Smuzhiyun dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume");
2198*4882a593Smuzhiyun return 0;
2199*4882a593Smuzhiyun }
2200*4882a593Smuzhiyun
2201*4882a593Smuzhiyun if (!PMSG_IS_AUTO(msg)) {
2202*4882a593Smuzhiyun status = usb_phy_roothub_resume(hcd->self.sysdev,
2203*4882a593Smuzhiyun hcd->phy_roothub);
2204*4882a593Smuzhiyun if (status)
2205*4882a593Smuzhiyun return status;
2206*4882a593Smuzhiyun }
2207*4882a593Smuzhiyun
2208*4882a593Smuzhiyun if (!hcd->driver->bus_resume)
2209*4882a593Smuzhiyun return -ENOENT;
2210*4882a593Smuzhiyun if (HCD_RH_RUNNING(hcd))
2211*4882a593Smuzhiyun return 0;
2212*4882a593Smuzhiyun
2213*4882a593Smuzhiyun hcd->state = HC_STATE_RESUMING;
2214*4882a593Smuzhiyun status = hcd->driver->bus_resume(hcd);
2215*4882a593Smuzhiyun clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
2216*4882a593Smuzhiyun if (status == 0)
2217*4882a593Smuzhiyun status = usb_phy_roothub_calibrate(hcd->phy_roothub);
2218*4882a593Smuzhiyun
2219*4882a593Smuzhiyun if (status == 0) {
2220*4882a593Smuzhiyun struct usb_device *udev;
2221*4882a593Smuzhiyun int port1;
2222*4882a593Smuzhiyun
2223*4882a593Smuzhiyun spin_lock_irq(&hcd_root_hub_lock);
2224*4882a593Smuzhiyun if (!HCD_DEAD(hcd)) {
2225*4882a593Smuzhiyun usb_set_device_state(rhdev, rhdev->actconfig
2226*4882a593Smuzhiyun ? USB_STATE_CONFIGURED
2227*4882a593Smuzhiyun : USB_STATE_ADDRESS);
2228*4882a593Smuzhiyun set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2229*4882a593Smuzhiyun hcd->state = HC_STATE_RUNNING;
2230*4882a593Smuzhiyun }
2231*4882a593Smuzhiyun spin_unlock_irq(&hcd_root_hub_lock);
2232*4882a593Smuzhiyun
2233*4882a593Smuzhiyun /*
2234*4882a593Smuzhiyun * Check whether any of the enabled ports on the root hub are
2235*4882a593Smuzhiyun * unsuspended. If they are, then a TRSMRCY delay is needed
2236*4882a593Smuzhiyun * (this is what the USB-2 spec calls a "global resume").
2237*4882a593Smuzhiyun * Otherwise we can skip the delay.
2238*4882a593Smuzhiyun */
2239*4882a593Smuzhiyun usb_hub_for_each_child(rhdev, port1, udev) {
2240*4882a593Smuzhiyun if (udev->state != USB_STATE_NOTATTACHED &&
2241*4882a593Smuzhiyun !udev->port_is_suspended) {
2242*4882a593Smuzhiyun usleep_range(10000, 11000); /* TRSMRCY */
2243*4882a593Smuzhiyun break;
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun }
2246*4882a593Smuzhiyun } else {
2247*4882a593Smuzhiyun hcd->state = old_state;
2248*4882a593Smuzhiyun usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub);
2249*4882a593Smuzhiyun dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
2250*4882a593Smuzhiyun "resume", status);
2251*4882a593Smuzhiyun if (status != -ESHUTDOWN)
2252*4882a593Smuzhiyun usb_hc_died(hcd);
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun return status;
2255*4882a593Smuzhiyun }
2256*4882a593Smuzhiyun
2257*4882a593Smuzhiyun /* Workqueue routine for root-hub remote wakeup */
2258*4882a593Smuzhiyun static void hcd_resume_work(struct work_struct *work)
2259*4882a593Smuzhiyun {
2260*4882a593Smuzhiyun struct usb_hcd *hcd = container_of(work, struct usb_hcd, wakeup_work);
2261*4882a593Smuzhiyun struct usb_device *udev = hcd->self.root_hub;
2262*4882a593Smuzhiyun
2263*4882a593Smuzhiyun usb_remote_wakeup(udev);
2264*4882a593Smuzhiyun }
2265*4882a593Smuzhiyun
2266*4882a593Smuzhiyun /**
2267*4882a593Smuzhiyun * usb_hcd_resume_root_hub - called by HCD to resume its root hub
2268*4882a593Smuzhiyun * @hcd: host controller for this root hub
2269*4882a593Smuzhiyun *
2270*4882a593Smuzhiyun * The USB host controller calls this function when its root hub is
2271*4882a593Smuzhiyun * suspended (with the remote wakeup feature enabled) and a remote
2272*4882a593Smuzhiyun * wakeup request is received. The routine submits a workqueue request
2273*4882a593Smuzhiyun * to resume the root hub (that is, manage its downstream ports again).
2274*4882a593Smuzhiyun */
2275*4882a593Smuzhiyun void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
2276*4882a593Smuzhiyun {
2277*4882a593Smuzhiyun unsigned long flags;
2278*4882a593Smuzhiyun
2279*4882a593Smuzhiyun spin_lock_irqsave (&hcd_root_hub_lock, flags);
2280*4882a593Smuzhiyun if (hcd->rh_registered) {
2281*4882a593Smuzhiyun pm_wakeup_event(&hcd->self.root_hub->dev, 0);
2282*4882a593Smuzhiyun set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
2283*4882a593Smuzhiyun queue_work(pm_wq, &hcd->wakeup_work);
2284*4882a593Smuzhiyun }
2285*4882a593Smuzhiyun spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
2286*4882a593Smuzhiyun }
2287*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
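
/*
 * Illustrative sketch, not part of this file: an HCD's ->irq callback
 * (the hc_driver method that usb_hcd_irq() below dispatches to) would
 * typically call usb_hcd_resume_root_hub() when the controller reports
 * a remote-wakeup event.  EXAMPLE_STS, EXAMPLE_STS_RESUME and the
 * example_readl()/example_writel() accessors are hypothetical
 * controller-specific names; only usb_hcd_resume_root_hub() is real.
 */
static irqreturn_t example_hcd_irq_handler(struct usb_hcd *hcd)
{
	u32 status = example_readl(hcd->regs + EXAMPLE_STS);

	if (!status)
		return IRQ_NONE;

	/* acknowledge the event first, then hand it to usbcore */
	example_writel(status, hcd->regs + EXAMPLE_STS);
	if (status & EXAMPLE_STS_RESUME)
		usb_hcd_resume_root_hub(hcd);

	return IRQ_HANDLED;
}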
2288*4882a593Smuzhiyun
2289*4882a593Smuzhiyun #endif /* CONFIG_PM */
2290*4882a593Smuzhiyun
2291*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2292*4882a593Smuzhiyun
2293*4882a593Smuzhiyun #ifdef CONFIG_USB_OTG
2294*4882a593Smuzhiyun
2295*4882a593Smuzhiyun /**
2296*4882a593Smuzhiyun * usb_bus_start_enum - start immediate enumeration (for OTG)
2297*4882a593Smuzhiyun * @bus: the bus (must use hcd framework)
2298*4882a593Smuzhiyun * @port_num: 1-based number of port; usually bus->otg_port
2299*4882a593Smuzhiyun * Context: in_interrupt()
2300*4882a593Smuzhiyun *
2301*4882a593Smuzhiyun * Starts enumeration, with an immediate reset followed later by
2302*4882a593Smuzhiyun * hub_wq identifying and possibly configuring the device.
2303*4882a593Smuzhiyun * This is needed by OTG controller drivers, where it helps meet
2304*4882a593Smuzhiyun * HNP protocol timing requirements for starting a port reset.
2305*4882a593Smuzhiyun *
2306*4882a593Smuzhiyun * Return: 0 if successful.
2307*4882a593Smuzhiyun */
2308*4882a593Smuzhiyun int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num)
2309*4882a593Smuzhiyun {
2310*4882a593Smuzhiyun struct usb_hcd *hcd;
2311*4882a593Smuzhiyun int status = -EOPNOTSUPP;
2312*4882a593Smuzhiyun
2313*4882a593Smuzhiyun /* NOTE: since HNP can't start by grabbing the bus's address0_sem,
2314*4882a593Smuzhiyun * boards with root hubs hooked up to internal devices (instead of
2315*4882a593Smuzhiyun * just the OTG port) may need more attention to resetting...
2316*4882a593Smuzhiyun */
2317*4882a593Smuzhiyun hcd = bus_to_hcd(bus);
2318*4882a593Smuzhiyun if (port_num && hcd->driver->start_port_reset)
2319*4882a593Smuzhiyun status = hcd->driver->start_port_reset(hcd, port_num);
2320*4882a593Smuzhiyun
2321*4882a593Smuzhiyun /* kick hub_wq shortly after the (first) root port reset finishes;
2322*4882a593Smuzhiyun * it may issue further resets until at least 50 msecs have passed.
2323*4882a593Smuzhiyun */
2324*4882a593Smuzhiyun if (status == 0)
2325*4882a593Smuzhiyun mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(10));
2326*4882a593Smuzhiyun return status;
2327*4882a593Smuzhiyun }
2328*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_bus_start_enum);
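
/*
 * Illustrative sketch, not part of this file: an OTG controller driver
 * that has just been granted the host role via HNP can start enumerating
 * the peer device immediately instead of waiting for hub_wq.  Only
 * usb_bus_start_enum() and the otg_port field of struct usb_bus are
 * real; the surrounding function is hypothetical.
 */
static void example_otg_start_host(struct usb_hcd *hcd)
{
	struct usb_bus *bus = &hcd->self;

	/* reset the OTG port right away to meet HNP timing limits */
	if (usb_bus_start_enum(bus, bus->otg_port))
		dev_err(bus->controller,
			"could not start reset of OTG port %d\n",
			bus->otg_port);
}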
2329*4882a593Smuzhiyun
2330*4882a593Smuzhiyun #endif
2331*4882a593Smuzhiyun
2332*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2333*4882a593Smuzhiyun
2334*4882a593Smuzhiyun /**
2335*4882a593Smuzhiyun * usb_hcd_irq - hook IRQs to HCD framework (bus glue)
2336*4882a593Smuzhiyun * @irq: the IRQ being raised
2337*4882a593Smuzhiyun * @__hcd: pointer to the HCD whose IRQ is being signaled
2338*4882a593Smuzhiyun *
2339*4882a593Smuzhiyun * If the controller isn't HALTed, calls the driver's irq handler.
2340*4882a593Smuzhiyun * Checks whether the controller is now dead.
2341*4882a593Smuzhiyun *
2342*4882a593Smuzhiyun * Return: %IRQ_HANDLED if the IRQ was handled. %IRQ_NONE otherwise.
2343*4882a593Smuzhiyun */
2344*4882a593Smuzhiyun irqreturn_t usb_hcd_irq (int irq, void *__hcd)
2345*4882a593Smuzhiyun {
2346*4882a593Smuzhiyun struct usb_hcd *hcd = __hcd;
2347*4882a593Smuzhiyun irqreturn_t rc;
2348*4882a593Smuzhiyun
2349*4882a593Smuzhiyun if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
2350*4882a593Smuzhiyun rc = IRQ_NONE;
2351*4882a593Smuzhiyun else if (hcd->driver->irq(hcd) == IRQ_NONE)
2352*4882a593Smuzhiyun rc = IRQ_NONE;
2353*4882a593Smuzhiyun else
2354*4882a593Smuzhiyun rc = IRQ_HANDLED;
2355*4882a593Smuzhiyun
2356*4882a593Smuzhiyun return rc;
2357*4882a593Smuzhiyun }
2358*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_irq);
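
/*
 * Illustrative sketch, not part of this file: bus glue that manages its
 * own interrupt line (for example because it passes irqnum == 0 to
 * usb_add_hcd()) can wire that line into the HCD framework by handing
 * usb_hcd_irq() directly to devm_request_irq().  The helper below and
 * the origin of @irq are hypothetical.
 */
static int example_glue_request_irq(struct device *dev, struct usb_hcd *hcd,
				    int irq)
{
	/* usb_hcd_irq() expects the usb_hcd pointer as its dev_id */
	return devm_request_irq(dev, irq, usb_hcd_irq, IRQF_SHARED,
				dev_name(dev), hcd);
}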
2359*4882a593Smuzhiyun
2360*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2361*4882a593Smuzhiyun
2362*4882a593Smuzhiyun /* Workqueue routine for when the root-hub has died. */
2363*4882a593Smuzhiyun static void hcd_died_work(struct work_struct *work)
2364*4882a593Smuzhiyun {
2365*4882a593Smuzhiyun struct usb_hcd *hcd = container_of(work, struct usb_hcd, died_work);
2366*4882a593Smuzhiyun static char *env[] = {
2367*4882a593Smuzhiyun "ERROR=DEAD",
2368*4882a593Smuzhiyun NULL
2369*4882a593Smuzhiyun };
2370*4882a593Smuzhiyun
2371*4882a593Smuzhiyun /* Notify user space that the host controller has died */
2372*4882a593Smuzhiyun kobject_uevent_env(&hcd->self.root_hub->dev.kobj, KOBJ_OFFLINE, env);
2373*4882a593Smuzhiyun }
2374*4882a593Smuzhiyun
2375*4882a593Smuzhiyun /**
2376*4882a593Smuzhiyun * usb_hc_died - report abnormal shutdown of a host controller (bus glue)
2377*4882a593Smuzhiyun * @hcd: pointer to the HCD representing the controller
2378*4882a593Smuzhiyun *
2379*4882a593Smuzhiyun * This is called by bus glue to report a USB host controller that died
2380*4882a593Smuzhiyun * while operations may still have been pending. It's called automatically
2381*4882a593Smuzhiyun * by the PCI glue, so only glue for non-PCI busses should need to call it.
2382*4882a593Smuzhiyun *
2383*4882a593Smuzhiyun * Only call this function with the primary HCD.
2384*4882a593Smuzhiyun */
2385*4882a593Smuzhiyun void usb_hc_died (struct usb_hcd *hcd)
2386*4882a593Smuzhiyun {
2387*4882a593Smuzhiyun unsigned long flags;
2388*4882a593Smuzhiyun
2389*4882a593Smuzhiyun dev_err (hcd->self.controller, "HC died; cleaning up\n");
2390*4882a593Smuzhiyun
2391*4882a593Smuzhiyun spin_lock_irqsave (&hcd_root_hub_lock, flags);
2392*4882a593Smuzhiyun clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2393*4882a593Smuzhiyun set_bit(HCD_FLAG_DEAD, &hcd->flags);
2394*4882a593Smuzhiyun if (hcd->rh_registered) {
2395*4882a593Smuzhiyun clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2396*4882a593Smuzhiyun
2397*4882a593Smuzhiyun /* make hub_wq clean up old urbs and devices */
2398*4882a593Smuzhiyun usb_set_device_state (hcd->self.root_hub,
2399*4882a593Smuzhiyun USB_STATE_NOTATTACHED);
2400*4882a593Smuzhiyun usb_kick_hub_wq(hcd->self.root_hub);
2401*4882a593Smuzhiyun }
2402*4882a593Smuzhiyun if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
2403*4882a593Smuzhiyun hcd = hcd->shared_hcd;
2404*4882a593Smuzhiyun clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2405*4882a593Smuzhiyun set_bit(HCD_FLAG_DEAD, &hcd->flags);
2406*4882a593Smuzhiyun if (hcd->rh_registered) {
2407*4882a593Smuzhiyun clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun /* make hub_wq clean up old urbs and devices */
2410*4882a593Smuzhiyun usb_set_device_state(hcd->self.root_hub,
2411*4882a593Smuzhiyun USB_STATE_NOTATTACHED);
2412*4882a593Smuzhiyun usb_kick_hub_wq(hcd->self.root_hub);
2413*4882a593Smuzhiyun }
2414*4882a593Smuzhiyun }
2415*4882a593Smuzhiyun
2416*4882a593Smuzhiyun /* Handle the case where this function gets called with a shared HCD */
2417*4882a593Smuzhiyun if (usb_hcd_is_primary_hcd(hcd))
2418*4882a593Smuzhiyun schedule_work(&hcd->died_work);
2419*4882a593Smuzhiyun else
2420*4882a593Smuzhiyun schedule_work(&hcd->primary_hcd->died_work);
2421*4882a593Smuzhiyun
2422*4882a593Smuzhiyun spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
2423*4882a593Smuzhiyun /* Make sure that the other roothub is also deallocated. */
2424*4882a593Smuzhiyun }
2425*4882a593Smuzhiyun EXPORT_SYMBOL_GPL (usb_hc_died);
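
/*
 * Illustrative sketch, not part of this file: non-PCI bus glue calls
 * usb_hc_died() when it finds that the controller has stopped
 * responding, e.g. on surprise removal of the underlying device.  As
 * documented above, the call must be made on the primary HCD; the
 * helper below is hypothetical.
 */
static void example_glue_report_dead_controller(struct usb_hcd *hcd)
{
	/* usb_hc_died() also marks a shared (e.g. USB 3.x) peer as dead */
	usb_hc_died(usb_hcd_is_primary_hcd(hcd) ? hcd : hcd->primary_hcd);
}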
2426*4882a593Smuzhiyun
2427*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2428*4882a593Smuzhiyun
2429*4882a593Smuzhiyun static void init_giveback_urb_bh(struct giveback_urb_bh *bh)
2430*4882a593Smuzhiyun {
2431*4882a593Smuzhiyun
2432*4882a593Smuzhiyun spin_lock_init(&bh->lock);
2433*4882a593Smuzhiyun INIT_LIST_HEAD(&bh->head);
2434*4882a593Smuzhiyun tasklet_setup(&bh->bh, usb_giveback_urb_bh);
2435*4882a593Smuzhiyun }
2436*4882a593Smuzhiyun
2437*4882a593Smuzhiyun struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
2438*4882a593Smuzhiyun struct device *sysdev, struct device *dev, const char *bus_name,
2439*4882a593Smuzhiyun struct usb_hcd *primary_hcd)
2440*4882a593Smuzhiyun {
2441*4882a593Smuzhiyun struct usb_hcd *hcd;
2442*4882a593Smuzhiyun
2443*4882a593Smuzhiyun hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL);
2444*4882a593Smuzhiyun if (!hcd)
2445*4882a593Smuzhiyun return NULL;
2446*4882a593Smuzhiyun if (primary_hcd == NULL) {
2447*4882a593Smuzhiyun hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
2448*4882a593Smuzhiyun GFP_KERNEL);
2449*4882a593Smuzhiyun if (!hcd->address0_mutex) {
2450*4882a593Smuzhiyun kfree(hcd);
2451*4882a593Smuzhiyun dev_dbg(dev, "hcd address0 mutex alloc failed\n");
2452*4882a593Smuzhiyun return NULL;
2453*4882a593Smuzhiyun }
2454*4882a593Smuzhiyun mutex_init(hcd->address0_mutex);
2455*4882a593Smuzhiyun hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
2456*4882a593Smuzhiyun GFP_KERNEL);
2457*4882a593Smuzhiyun if (!hcd->bandwidth_mutex) {
2458*4882a593Smuzhiyun kfree(hcd->address0_mutex);
2459*4882a593Smuzhiyun kfree(hcd);
2460*4882a593Smuzhiyun dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
2461*4882a593Smuzhiyun return NULL;
2462*4882a593Smuzhiyun }
2463*4882a593Smuzhiyun mutex_init(hcd->bandwidth_mutex);
2464*4882a593Smuzhiyun dev_set_drvdata(dev, hcd);
2465*4882a593Smuzhiyun } else {
2466*4882a593Smuzhiyun mutex_lock(&usb_port_peer_mutex);
2467*4882a593Smuzhiyun hcd->address0_mutex = primary_hcd->address0_mutex;
2468*4882a593Smuzhiyun hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
2469*4882a593Smuzhiyun hcd->primary_hcd = primary_hcd;
2470*4882a593Smuzhiyun primary_hcd->primary_hcd = primary_hcd;
2471*4882a593Smuzhiyun hcd->shared_hcd = primary_hcd;
2472*4882a593Smuzhiyun primary_hcd->shared_hcd = hcd;
2473*4882a593Smuzhiyun mutex_unlock(&usb_port_peer_mutex);
2474*4882a593Smuzhiyun }
2475*4882a593Smuzhiyun
2476*4882a593Smuzhiyun kref_init(&hcd->kref);
2477*4882a593Smuzhiyun
2478*4882a593Smuzhiyun usb_bus_init(&hcd->self);
2479*4882a593Smuzhiyun hcd->self.controller = dev;
2480*4882a593Smuzhiyun hcd->self.sysdev = sysdev;
2481*4882a593Smuzhiyun hcd->self.bus_name = bus_name;
2482*4882a593Smuzhiyun
2483*4882a593Smuzhiyun timer_setup(&hcd->rh_timer, rh_timer_func, 0);
2484*4882a593Smuzhiyun #ifdef CONFIG_PM
2485*4882a593Smuzhiyun INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
2486*4882a593Smuzhiyun #endif
2487*4882a593Smuzhiyun
2488*4882a593Smuzhiyun INIT_WORK(&hcd->died_work, hcd_died_work);
2489*4882a593Smuzhiyun
2490*4882a593Smuzhiyun hcd->driver = driver;
2491*4882a593Smuzhiyun hcd->speed = driver->flags & HCD_MASK;
2492*4882a593Smuzhiyun hcd->product_desc = (driver->product_desc) ? driver->product_desc :
2493*4882a593Smuzhiyun "USB Host Controller";
2494*4882a593Smuzhiyun return hcd;
2495*4882a593Smuzhiyun }
2496*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(__usb_create_hcd);
2497*4882a593Smuzhiyun
2498*4882a593Smuzhiyun /**
2499*4882a593Smuzhiyun * usb_create_shared_hcd - create and initialize an HCD structure
2500*4882a593Smuzhiyun * @driver: HC driver that will use this hcd
2501*4882a593Smuzhiyun * @dev: device for this HC, stored in hcd->self.controller
2502*4882a593Smuzhiyun * @bus_name: value to store in hcd->self.bus_name
2503*4882a593Smuzhiyun * @primary_hcd: a pointer to the usb_hcd structure that is sharing the
2504*4882a593Smuzhiyun * PCI device. Certain resources are allocated only for the primary HCD.
2505*4882a593Smuzhiyun * Context: !in_interrupt()
2506*4882a593Smuzhiyun *
2507*4882a593Smuzhiyun * Allocate a struct usb_hcd, with extra space at the end for the
2508*4882a593Smuzhiyun * HC driver's private data. Initialize the generic members of the
2509*4882a593Smuzhiyun * hcd structure.
2510*4882a593Smuzhiyun *
2511*4882a593Smuzhiyun * Return: On success, a pointer to the created and initialized HCD structure.
2512*4882a593Smuzhiyun * On failure (e.g. if memory is unavailable), %NULL.
2513*4882a593Smuzhiyun */
2514*4882a593Smuzhiyun struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
2515*4882a593Smuzhiyun struct device *dev, const char *bus_name,
2516*4882a593Smuzhiyun struct usb_hcd *primary_hcd)
2517*4882a593Smuzhiyun {
2518*4882a593Smuzhiyun return __usb_create_hcd(driver, dev, dev, bus_name, primary_hcd);
2519*4882a593Smuzhiyun }
2520*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
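
/*
 * Illustrative sketch, not part of this file: a controller that exposes
 * both a USB 2.0 and a USB 3.x root hub (as xHCI does) creates its
 * primary HCD first and then a shared HCD bound to the same device.
 * example_hc_driver is a hypothetical, fully populated hc_driver.
 */
extern const struct hc_driver example_hc_driver;	/* hypothetical */

static int example_create_both_hcds(struct device *dev,
				    struct usb_hcd **primary,
				    struct usb_hcd **shared)
{
	*primary = usb_create_hcd(&example_hc_driver, dev, dev_name(dev));
	if (!*primary)
		return -ENOMEM;

	*shared = usb_create_shared_hcd(&example_hc_driver, dev,
					dev_name(dev), *primary);
	if (!*shared) {
		usb_put_hcd(*primary);
		return -ENOMEM;
	}
	return 0;
}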
2521*4882a593Smuzhiyun
2522*4882a593Smuzhiyun /**
2523*4882a593Smuzhiyun * usb_create_hcd - create and initialize an HCD structure
2524*4882a593Smuzhiyun * @driver: HC driver that will use this hcd
2525*4882a593Smuzhiyun * @dev: device for this HC, stored in hcd->self.controller
2526*4882a593Smuzhiyun * @bus_name: value to store in hcd->self.bus_name
2527*4882a593Smuzhiyun * Context: !in_interrupt()
2528*4882a593Smuzhiyun *
2529*4882a593Smuzhiyun * Allocate a struct usb_hcd, with extra space at the end for the
2530*4882a593Smuzhiyun * HC driver's private data. Initialize the generic members of the
2531*4882a593Smuzhiyun * hcd structure.
2532*4882a593Smuzhiyun *
2533*4882a593Smuzhiyun * Return: On success, a pointer to the created and initialized HCD
2534*4882a593Smuzhiyun * structure. On failure (e.g. if memory is unavailable), %NULL.
2535*4882a593Smuzhiyun */
2536*4882a593Smuzhiyun struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
2537*4882a593Smuzhiyun struct device *dev, const char *bus_name)
2538*4882a593Smuzhiyun {
2539*4882a593Smuzhiyun return __usb_create_hcd(driver, dev, dev, bus_name, NULL);
2540*4882a593Smuzhiyun }
2541*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_create_hcd);
2542*4882a593Smuzhiyun
2543*4882a593Smuzhiyun /*
2544*4882a593Smuzhiyun * Roothubs that share one PCI device must also share the bandwidth mutex.
2545*4882a593Smuzhiyun * Don't deallocate the address0_mutex or the bandwidth_mutex until the
2546*4882a593Smuzhiyun * last shared usb_hcd in the peer set is deallocated.
2547*4882a593Smuzhiyun *
2548*4882a593Smuzhiyun * When hcd_release() is called for either hcd in a peer set, invalidate
2549*4882a593Smuzhiyun * the peer's ->shared_hcd and ->primary_hcd pointers, so that the mutexes
2550*4882a593Smuzhiyun * are freed exactly once, by whichever HCD is released last.
2551*4882a593Smuzhiyun */
2552*4882a593Smuzhiyun static void hcd_release(struct kref *kref)
2553*4882a593Smuzhiyun {
2554*4882a593Smuzhiyun struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
2555*4882a593Smuzhiyun
2556*4882a593Smuzhiyun mutex_lock(&usb_port_peer_mutex);
2557*4882a593Smuzhiyun if (hcd->shared_hcd) {
2558*4882a593Smuzhiyun struct usb_hcd *peer = hcd->shared_hcd;
2559*4882a593Smuzhiyun
2560*4882a593Smuzhiyun peer->shared_hcd = NULL;
2561*4882a593Smuzhiyun peer->primary_hcd = NULL;
2562*4882a593Smuzhiyun } else {
2563*4882a593Smuzhiyun kfree(hcd->address0_mutex);
2564*4882a593Smuzhiyun kfree(hcd->bandwidth_mutex);
2565*4882a593Smuzhiyun }
2566*4882a593Smuzhiyun mutex_unlock(&usb_port_peer_mutex);
2567*4882a593Smuzhiyun kfree(hcd);
2568*4882a593Smuzhiyun }
2569*4882a593Smuzhiyun
2570*4882a593Smuzhiyun struct usb_hcd *usb_get_hcd (struct usb_hcd *hcd)
2571*4882a593Smuzhiyun {
2572*4882a593Smuzhiyun if (hcd)
2573*4882a593Smuzhiyun kref_get (&hcd->kref);
2574*4882a593Smuzhiyun return hcd;
2575*4882a593Smuzhiyun }
2576*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_get_hcd);
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun void usb_put_hcd (struct usb_hcd *hcd)
2579*4882a593Smuzhiyun {
2580*4882a593Smuzhiyun if (hcd)
2581*4882a593Smuzhiyun kref_put (&hcd->kref, hcd_release);
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_put_hcd);
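
/*
 * Illustrative sketch, not part of this file: code that caches an hcd
 * pointer outside the normal probe/remove flow must balance the kref
 * itself.  Only usb_get_hcd()/usb_put_hcd() and bus_to_hcd() are real;
 * the helper below is hypothetical.
 */
static struct usb_hcd *example_borrow_hcd(struct usb_device *rhdev)
{
	/* take a reference; the caller must drop it with usb_put_hcd() */
	return usb_get_hcd(bus_to_hcd(rhdev->bus));
}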
2584*4882a593Smuzhiyun
2585*4882a593Smuzhiyun int usb_hcd_is_primary_hcd(struct usb_hcd *hcd)
2586*4882a593Smuzhiyun {
2587*4882a593Smuzhiyun if (!hcd->primary_hcd)
2588*4882a593Smuzhiyun return 1;
2589*4882a593Smuzhiyun return hcd == hcd->primary_hcd;
2590*4882a593Smuzhiyun }
2591*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd);
2592*4882a593Smuzhiyun
2593*4882a593Smuzhiyun int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1)
2594*4882a593Smuzhiyun {
2595*4882a593Smuzhiyun if (!hcd->driver->find_raw_port_number)
2596*4882a593Smuzhiyun return port1;
2597*4882a593Smuzhiyun
2598*4882a593Smuzhiyun return hcd->driver->find_raw_port_number(hcd, port1);
2599*4882a593Smuzhiyun }
2600*4882a593Smuzhiyun
2601*4882a593Smuzhiyun static int usb_hcd_request_irqs(struct usb_hcd *hcd,
2602*4882a593Smuzhiyun unsigned int irqnum, unsigned long irqflags)
2603*4882a593Smuzhiyun {
2604*4882a593Smuzhiyun int retval;
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun if (hcd->driver->irq) {
2607*4882a593Smuzhiyun
2608*4882a593Smuzhiyun snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
2609*4882a593Smuzhiyun hcd->driver->description, hcd->self.busnum);
2610*4882a593Smuzhiyun retval = request_irq(irqnum, &usb_hcd_irq, irqflags,
2611*4882a593Smuzhiyun hcd->irq_descr, hcd);
2612*4882a593Smuzhiyun if (retval != 0) {
2613*4882a593Smuzhiyun dev_err(hcd->self.controller,
2614*4882a593Smuzhiyun "request interrupt %d failed\n",
2615*4882a593Smuzhiyun irqnum);
2616*4882a593Smuzhiyun return retval;
2617*4882a593Smuzhiyun }
2618*4882a593Smuzhiyun hcd->irq = irqnum;
2619*4882a593Smuzhiyun dev_info(hcd->self.controller, "irq %d, %s 0x%08llx\n", irqnum,
2620*4882a593Smuzhiyun (hcd->driver->flags & HCD_MEMORY) ?
2621*4882a593Smuzhiyun "io mem" : "io base",
2622*4882a593Smuzhiyun (unsigned long long)hcd->rsrc_start);
2623*4882a593Smuzhiyun } else {
2624*4882a593Smuzhiyun hcd->irq = 0;
2625*4882a593Smuzhiyun if (hcd->rsrc_start)
2626*4882a593Smuzhiyun dev_info(hcd->self.controller, "%s 0x%08llx\n",
2627*4882a593Smuzhiyun (hcd->driver->flags & HCD_MEMORY) ?
2628*4882a593Smuzhiyun "io mem" : "io base",
2629*4882a593Smuzhiyun (unsigned long long)hcd->rsrc_start);
2630*4882a593Smuzhiyun }
2631*4882a593Smuzhiyun return 0;
2632*4882a593Smuzhiyun }
2633*4882a593Smuzhiyun
2634*4882a593Smuzhiyun /*
2635*4882a593Smuzhiyun * Before we free this root hub, flush in-flight peering attempts
2636*4882a593Smuzhiyun * and disable peer lookups
2637*4882a593Smuzhiyun */
2638*4882a593Smuzhiyun static void usb_put_invalidate_rhdev(struct usb_hcd *hcd)
2639*4882a593Smuzhiyun {
2640*4882a593Smuzhiyun struct usb_device *rhdev;
2641*4882a593Smuzhiyun
2642*4882a593Smuzhiyun mutex_lock(&usb_port_peer_mutex);
2643*4882a593Smuzhiyun rhdev = hcd->self.root_hub;
2644*4882a593Smuzhiyun hcd->self.root_hub = NULL;
2645*4882a593Smuzhiyun mutex_unlock(&usb_port_peer_mutex);
2646*4882a593Smuzhiyun usb_put_dev(rhdev);
2647*4882a593Smuzhiyun }
2648*4882a593Smuzhiyun
2649*4882a593Smuzhiyun /**
2650*4882a593Smuzhiyun * usb_add_hcd - finish generic HCD structure initialization and register
2651*4882a593Smuzhiyun * @hcd: the usb_hcd structure to initialize
2652*4882a593Smuzhiyun * @irqnum: Interrupt line to allocate
2653*4882a593Smuzhiyun * @irqflags: Interrupt type flags
2654*4882a593Smuzhiyun *
2655*4882a593Smuzhiyun * Finish the remaining parts of generic HCD initialization: allocate the
2656*4882a593Smuzhiyun * buffers of consistent memory, register the bus, request the IRQ line,
2657*4882a593Smuzhiyun * and call the driver's reset() and start() routines.
2658*4882a593Smuzhiyun */
2659*4882a593Smuzhiyun int usb_add_hcd(struct usb_hcd *hcd,
2660*4882a593Smuzhiyun unsigned int irqnum, unsigned long irqflags)
2661*4882a593Smuzhiyun {
2662*4882a593Smuzhiyun int retval;
2663*4882a593Smuzhiyun struct usb_device *rhdev;
2664*4882a593Smuzhiyun struct usb_hcd *shared_hcd;
2665*4882a593Smuzhiyun
2666*4882a593Smuzhiyun if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
2667*4882a593Smuzhiyun hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
2668*4882a593Smuzhiyun if (IS_ERR(hcd->phy_roothub))
2669*4882a593Smuzhiyun return PTR_ERR(hcd->phy_roothub);
2670*4882a593Smuzhiyun
2671*4882a593Smuzhiyun retval = usb_phy_roothub_init(hcd->phy_roothub);
2672*4882a593Smuzhiyun if (retval)
2673*4882a593Smuzhiyun return retval;
2674*4882a593Smuzhiyun
2675*4882a593Smuzhiyun retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
2676*4882a593Smuzhiyun PHY_MODE_USB_HOST_SS);
2677*4882a593Smuzhiyun if (retval)
2678*4882a593Smuzhiyun retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
2679*4882a593Smuzhiyun PHY_MODE_USB_HOST);
2680*4882a593Smuzhiyun if (retval)
2681*4882a593Smuzhiyun goto err_usb_phy_roothub_power_on;
2682*4882a593Smuzhiyun
2683*4882a593Smuzhiyun retval = usb_phy_roothub_power_on(hcd->phy_roothub);
2684*4882a593Smuzhiyun if (retval)
2685*4882a593Smuzhiyun goto err_usb_phy_roothub_power_on;
2686*4882a593Smuzhiyun }
2687*4882a593Smuzhiyun
2688*4882a593Smuzhiyun dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
2689*4882a593Smuzhiyun
2690*4882a593Smuzhiyun switch (authorized_default) {
2691*4882a593Smuzhiyun case USB_AUTHORIZE_NONE:
2692*4882a593Smuzhiyun hcd->dev_policy = USB_DEVICE_AUTHORIZE_NONE;
2693*4882a593Smuzhiyun break;
2694*4882a593Smuzhiyun
2695*4882a593Smuzhiyun case USB_AUTHORIZE_ALL:
2696*4882a593Smuzhiyun hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL;
2697*4882a593Smuzhiyun break;
2698*4882a593Smuzhiyun
2699*4882a593Smuzhiyun case USB_AUTHORIZE_INTERNAL:
2700*4882a593Smuzhiyun hcd->dev_policy = USB_DEVICE_AUTHORIZE_INTERNAL;
2701*4882a593Smuzhiyun break;
2702*4882a593Smuzhiyun
2703*4882a593Smuzhiyun case USB_AUTHORIZE_WIRED:
2704*4882a593Smuzhiyun default:
2705*4882a593Smuzhiyun hcd->dev_policy = hcd->wireless ?
2706*4882a593Smuzhiyun USB_DEVICE_AUTHORIZE_NONE : USB_DEVICE_AUTHORIZE_ALL;
2707*4882a593Smuzhiyun break;
2708*4882a593Smuzhiyun }
2709*4882a593Smuzhiyun
2710*4882a593Smuzhiyun set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
2711*4882a593Smuzhiyun
2712*4882a593Smuzhiyun /* by default, all interfaces are authorized */
2713*4882a593Smuzhiyun set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
2714*4882a593Smuzhiyun
2715*4882a593Smuzhiyun /* HC is in reset state, but accessible. Now do the one-time init,
2716*4882a593Smuzhiyun * bottom up so that hcds can customize the root hubs before hub_wq
2717*4882a593Smuzhiyun * starts talking to them. (Note, bus id is assigned early too.)
2718*4882a593Smuzhiyun */
2719*4882a593Smuzhiyun retval = hcd_buffer_create(hcd);
2720*4882a593Smuzhiyun if (retval != 0) {
2721*4882a593Smuzhiyun dev_dbg(hcd->self.sysdev, "pool alloc failed\n");
2722*4882a593Smuzhiyun goto err_create_buf;
2723*4882a593Smuzhiyun }
2724*4882a593Smuzhiyun
2725*4882a593Smuzhiyun retval = usb_register_bus(&hcd->self);
2726*4882a593Smuzhiyun if (retval < 0)
2727*4882a593Smuzhiyun goto err_register_bus;
2728*4882a593Smuzhiyun
2729*4882a593Smuzhiyun rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
2730*4882a593Smuzhiyun if (rhdev == NULL) {
2731*4882a593Smuzhiyun dev_err(hcd->self.sysdev, "unable to allocate root hub\n");
2732*4882a593Smuzhiyun retval = -ENOMEM;
2733*4882a593Smuzhiyun goto err_allocate_root_hub;
2734*4882a593Smuzhiyun }
2735*4882a593Smuzhiyun mutex_lock(&usb_port_peer_mutex);
2736*4882a593Smuzhiyun hcd->self.root_hub = rhdev;
2737*4882a593Smuzhiyun mutex_unlock(&usb_port_peer_mutex);
2738*4882a593Smuzhiyun
2739*4882a593Smuzhiyun rhdev->rx_lanes = 1;
2740*4882a593Smuzhiyun rhdev->tx_lanes = 1;
2741*4882a593Smuzhiyun
2742*4882a593Smuzhiyun switch (hcd->speed) {
2743*4882a593Smuzhiyun case HCD_USB11:
2744*4882a593Smuzhiyun rhdev->speed = USB_SPEED_FULL;
2745*4882a593Smuzhiyun break;
2746*4882a593Smuzhiyun case HCD_USB2:
2747*4882a593Smuzhiyun rhdev->speed = USB_SPEED_HIGH;
2748*4882a593Smuzhiyun break;
2749*4882a593Smuzhiyun case HCD_USB25:
2750*4882a593Smuzhiyun rhdev->speed = USB_SPEED_WIRELESS;
2751*4882a593Smuzhiyun break;
2752*4882a593Smuzhiyun case HCD_USB3:
2753*4882a593Smuzhiyun rhdev->speed = USB_SPEED_SUPER;
2754*4882a593Smuzhiyun break;
2755*4882a593Smuzhiyun case HCD_USB32:
2756*4882a593Smuzhiyun rhdev->rx_lanes = 2;
2757*4882a593Smuzhiyun rhdev->tx_lanes = 2;
2758*4882a593Smuzhiyun fallthrough;
2759*4882a593Smuzhiyun case HCD_USB31:
2760*4882a593Smuzhiyun rhdev->speed = USB_SPEED_SUPER_PLUS;
2761*4882a593Smuzhiyun break;
2762*4882a593Smuzhiyun default:
2763*4882a593Smuzhiyun retval = -EINVAL;
2764*4882a593Smuzhiyun goto err_set_rh_speed;
2765*4882a593Smuzhiyun }
2766*4882a593Smuzhiyun
2767*4882a593Smuzhiyun /* wakeup flag init defaults to "everything works" for root hubs,
2768*4882a593Smuzhiyun * but drivers can override it in reset() if needed, along with
2769*4882a593Smuzhiyun * recording the overall controller's system wakeup capability.
2770*4882a593Smuzhiyun */
2771*4882a593Smuzhiyun device_set_wakeup_capable(&rhdev->dev, 1);
2772*4882a593Smuzhiyun
2773*4882a593Smuzhiyun /* HCD_FLAG_RH_RUNNING doesn't matter until the root hub is
2774*4882a593Smuzhiyun * registered. But since the controller can die at any time,
2775*4882a593Smuzhiyun * let's initialize the flag before touching the hardware.
2776*4882a593Smuzhiyun */
2777*4882a593Smuzhiyun set_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2778*4882a593Smuzhiyun
2779*4882a593Smuzhiyun /* "reset" is misnamed; its role is now one-time init. The controller
2780*4882a593Smuzhiyun * should already have been reset (and boot firmware kicked off etc).
2781*4882a593Smuzhiyun */
2782*4882a593Smuzhiyun if (hcd->driver->reset) {
2783*4882a593Smuzhiyun retval = hcd->driver->reset(hcd);
2784*4882a593Smuzhiyun if (retval < 0) {
2785*4882a593Smuzhiyun dev_err(hcd->self.controller, "can't setup: %d\n",
2786*4882a593Smuzhiyun retval);
2787*4882a593Smuzhiyun goto err_hcd_driver_setup;
2788*4882a593Smuzhiyun }
2789*4882a593Smuzhiyun }
2790*4882a593Smuzhiyun hcd->rh_pollable = 1;
2791*4882a593Smuzhiyun
2792*4882a593Smuzhiyun retval = usb_phy_roothub_calibrate(hcd->phy_roothub);
2793*4882a593Smuzhiyun if (retval)
2794*4882a593Smuzhiyun goto err_hcd_driver_setup;
2795*4882a593Smuzhiyun
2796*4882a593Smuzhiyun /* NOTE: root hub and controller capabilities may not be the same */
2797*4882a593Smuzhiyun if (device_can_wakeup(hcd->self.controller)
2798*4882a593Smuzhiyun && device_can_wakeup(&hcd->self.root_hub->dev))
2799*4882a593Smuzhiyun dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
2800*4882a593Smuzhiyun
2801*4882a593Smuzhiyun /* initialize tasklets */
2802*4882a593Smuzhiyun init_giveback_urb_bh(&hcd->high_prio_bh);
2803*4882a593Smuzhiyun init_giveback_urb_bh(&hcd->low_prio_bh);
2804*4882a593Smuzhiyun
2805*4882a593Smuzhiyun /* enable irqs just before we start the controller,
2806*4882a593Smuzhiyun * if the BIOS provides legacy PCI irqs.
2807*4882a593Smuzhiyun */
2808*4882a593Smuzhiyun if (usb_hcd_is_primary_hcd(hcd) && irqnum) {
2809*4882a593Smuzhiyun retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
2810*4882a593Smuzhiyun if (retval)
2811*4882a593Smuzhiyun goto err_request_irq;
2812*4882a593Smuzhiyun }
2813*4882a593Smuzhiyun
2814*4882a593Smuzhiyun hcd->state = HC_STATE_RUNNING;
2815*4882a593Smuzhiyun retval = hcd->driver->start(hcd);
2816*4882a593Smuzhiyun if (retval < 0) {
2817*4882a593Smuzhiyun dev_err(hcd->self.controller, "startup error %d\n", retval);
2818*4882a593Smuzhiyun goto err_hcd_driver_start;
2819*4882a593Smuzhiyun }
2820*4882a593Smuzhiyun
2821*4882a593Smuzhiyun /* starting here, usbcore will pay attention to the shared HCD roothub */
2822*4882a593Smuzhiyun shared_hcd = hcd->shared_hcd;
2823*4882a593Smuzhiyun if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
2824*4882a593Smuzhiyun retval = register_root_hub(shared_hcd);
2825*4882a593Smuzhiyun if (retval != 0)
2826*4882a593Smuzhiyun goto err_register_root_hub;
2827*4882a593Smuzhiyun
2828*4882a593Smuzhiyun if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
2829*4882a593Smuzhiyun usb_hcd_poll_rh_status(shared_hcd);
2830*4882a593Smuzhiyun }
2831*4882a593Smuzhiyun
2832*4882a593Smuzhiyun /* starting here, usbcore will pay attention to this root hub */
2833*4882a593Smuzhiyun if (!HCD_DEFER_RH_REGISTER(hcd)) {
2834*4882a593Smuzhiyun retval = register_root_hub(hcd);
2835*4882a593Smuzhiyun if (retval != 0)
2836*4882a593Smuzhiyun goto err_register_root_hub;
2837*4882a593Smuzhiyun
2838*4882a593Smuzhiyun if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
2839*4882a593Smuzhiyun usb_hcd_poll_rh_status(hcd);
2840*4882a593Smuzhiyun }
2841*4882a593Smuzhiyun
2842*4882a593Smuzhiyun return retval;
2843*4882a593Smuzhiyun
2844*4882a593Smuzhiyun err_register_root_hub:
2845*4882a593Smuzhiyun hcd->rh_pollable = 0;
2846*4882a593Smuzhiyun clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2847*4882a593Smuzhiyun del_timer_sync(&hcd->rh_timer);
2848*4882a593Smuzhiyun hcd->driver->stop(hcd);
2849*4882a593Smuzhiyun hcd->state = HC_STATE_HALT;
2850*4882a593Smuzhiyun clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2851*4882a593Smuzhiyun del_timer_sync(&hcd->rh_timer);
2852*4882a593Smuzhiyun err_hcd_driver_start:
2853*4882a593Smuzhiyun if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
2854*4882a593Smuzhiyun free_irq(irqnum, hcd);
2855*4882a593Smuzhiyun err_request_irq:
2856*4882a593Smuzhiyun err_hcd_driver_setup:
2857*4882a593Smuzhiyun err_set_rh_speed:
2858*4882a593Smuzhiyun usb_put_invalidate_rhdev(hcd);
2859*4882a593Smuzhiyun err_allocate_root_hub:
2860*4882a593Smuzhiyun usb_deregister_bus(&hcd->self);
2861*4882a593Smuzhiyun err_register_bus:
2862*4882a593Smuzhiyun hcd_buffer_destroy(hcd);
2863*4882a593Smuzhiyun err_create_buf:
2864*4882a593Smuzhiyun usb_phy_roothub_power_off(hcd->phy_roothub);
2865*4882a593Smuzhiyun err_usb_phy_roothub_power_on:
2866*4882a593Smuzhiyun usb_phy_roothub_exit(hcd->phy_roothub);
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun return retval;
2869*4882a593Smuzhiyun }
2870*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_add_hcd);
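
/*
 * Illustrative sketch, not part of this file: the usual shape of a
 * platform bus-glue probe() around usb_create_hcd()/usb_add_hcd().
 * example_plat_hc_driver stands in for a hypothetical hc_driver filled
 * with the controller's callbacks; the rest uses only generic APIs.
 */
extern const struct hc_driver example_plat_hc_driver;	/* hypothetical */

static int example_plat_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	hcd = usb_create_hcd(&example_plat_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hcd->regs)) {
		ret = PTR_ERR(hcd->regs);
		goto put_hcd;
	}
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	/* requests the IRQ, registers the bus and the root hub */
	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_hcd;

	platform_set_drvdata(pdev, hcd);
	return 0;

put_hcd:
	usb_put_hcd(hcd);
	return ret;
}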
2871*4882a593Smuzhiyun
2872*4882a593Smuzhiyun /**
2873*4882a593Smuzhiyun * usb_remove_hcd - shutdown processing for generic HCDs
2874*4882a593Smuzhiyun * @hcd: the usb_hcd structure to remove
2875*4882a593Smuzhiyun * Context: !in_interrupt()
2876*4882a593Smuzhiyun *
2877*4882a593Smuzhiyun * Disconnects the root hub, then reverses the effects of usb_add_hcd(),
2878*4882a593Smuzhiyun * invoking the HCD's stop() method.
2879*4882a593Smuzhiyun */
2880*4882a593Smuzhiyun void usb_remove_hcd(struct usb_hcd *hcd)
2881*4882a593Smuzhiyun {
2882*4882a593Smuzhiyun struct usb_device *rhdev = hcd->self.root_hub;
2883*4882a593Smuzhiyun bool rh_registered;
2884*4882a593Smuzhiyun
2885*4882a593Smuzhiyun dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
2886*4882a593Smuzhiyun
2887*4882a593Smuzhiyun usb_get_dev(rhdev);
2888*4882a593Smuzhiyun clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2889*4882a593Smuzhiyun if (HC_IS_RUNNING (hcd->state))
2890*4882a593Smuzhiyun hcd->state = HC_STATE_QUIESCING;
2891*4882a593Smuzhiyun
2892*4882a593Smuzhiyun dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
2893*4882a593Smuzhiyun spin_lock_irq (&hcd_root_hub_lock);
2894*4882a593Smuzhiyun rh_registered = hcd->rh_registered;
2895*4882a593Smuzhiyun hcd->rh_registered = 0;
2896*4882a593Smuzhiyun spin_unlock_irq (&hcd_root_hub_lock);
2897*4882a593Smuzhiyun
2898*4882a593Smuzhiyun #ifdef CONFIG_PM
2899*4882a593Smuzhiyun cancel_work_sync(&hcd->wakeup_work);
2900*4882a593Smuzhiyun #endif
2901*4882a593Smuzhiyun cancel_work_sync(&hcd->died_work);
2902*4882a593Smuzhiyun
2903*4882a593Smuzhiyun mutex_lock(&usb_bus_idr_lock);
2904*4882a593Smuzhiyun if (rh_registered)
2905*4882a593Smuzhiyun usb_disconnect(&rhdev); /* Sets rhdev to NULL */
2906*4882a593Smuzhiyun mutex_unlock(&usb_bus_idr_lock);
2907*4882a593Smuzhiyun
2908*4882a593Smuzhiyun /*
2909*4882a593Smuzhiyun * tasklet_kill() isn't needed here because:
2910*4882a593Smuzhiyun * - driver's disconnect() called from usb_disconnect() should
2911*4882a593Smuzhiyun * make sure its URBs are completed during the disconnect()
2912*4882a593Smuzhiyun * callback
2913*4882a593Smuzhiyun *
2914*4882a593Smuzhiyun * - it is too late to run complete() here, since the driver may
2915*4882a593Smuzhiyun * already have been removed by now
2916*4882a593Smuzhiyun */
2917*4882a593Smuzhiyun
2918*4882a593Smuzhiyun /* Prevent any more root-hub status calls from the timer.
2919*4882a593Smuzhiyun * The HCD might still restart the timer (if a port status change
2920*4882a593Smuzhiyun * interrupt occurs), but usb_hcd_poll_rh_status() won't invoke
2921*4882a593Smuzhiyun * the hub_status_data() callback.
2922*4882a593Smuzhiyun */
2923*4882a593Smuzhiyun hcd->rh_pollable = 0;
2924*4882a593Smuzhiyun clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2925*4882a593Smuzhiyun del_timer_sync(&hcd->rh_timer);
2926*4882a593Smuzhiyun
2927*4882a593Smuzhiyun hcd->driver->stop(hcd);
2928*4882a593Smuzhiyun hcd->state = HC_STATE_HALT;
2929*4882a593Smuzhiyun
2930*4882a593Smuzhiyun /* In case the HCD restarted the timer, stop it again. */
2931*4882a593Smuzhiyun clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2932*4882a593Smuzhiyun del_timer_sync(&hcd->rh_timer);
2933*4882a593Smuzhiyun
2934*4882a593Smuzhiyun if (usb_hcd_is_primary_hcd(hcd)) {
2935*4882a593Smuzhiyun if (hcd->irq > 0)
2936*4882a593Smuzhiyun free_irq(hcd->irq, hcd);
2937*4882a593Smuzhiyun }
2938*4882a593Smuzhiyun
2939*4882a593Smuzhiyun usb_deregister_bus(&hcd->self);
2940*4882a593Smuzhiyun hcd_buffer_destroy(hcd);
2941*4882a593Smuzhiyun
2942*4882a593Smuzhiyun usb_phy_roothub_power_off(hcd->phy_roothub);
2943*4882a593Smuzhiyun usb_phy_roothub_exit(hcd->phy_roothub);
2944*4882a593Smuzhiyun
2945*4882a593Smuzhiyun usb_put_invalidate_rhdev(hcd);
2946*4882a593Smuzhiyun hcd->flags = 0;
2947*4882a593Smuzhiyun }
2948*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_remove_hcd);
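
/*
 * Illustrative sketch, not part of this file: the remove() that matches
 * the probe sketch after usb_add_hcd() above.  usb_remove_hcd() must run
 * before the final usb_put_hcd(), and before the glue shuts off clocks,
 * PHYs or regulators the controller still needs.
 */
static int example_plat_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	usb_remove_hcd(hcd);	/* disconnects the root hub, stops the HC */
	usb_put_hcd(hcd);	/* drops the reference from usb_create_hcd() */
	return 0;
}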
2949*4882a593Smuzhiyun
2950*4882a593Smuzhiyun void
2951*4882a593Smuzhiyun usb_hcd_platform_shutdown(struct platform_device *dev)
2952*4882a593Smuzhiyun {
2953*4882a593Smuzhiyun struct usb_hcd *hcd = platform_get_drvdata(dev);
2954*4882a593Smuzhiyun
2955*4882a593Smuzhiyun /* No need for pm_runtime_put(), we're shutting down */
2956*4882a593Smuzhiyun pm_runtime_get_sync(&dev->dev);
2957*4882a593Smuzhiyun
2958*4882a593Smuzhiyun if (hcd->driver->shutdown)
2959*4882a593Smuzhiyun hcd->driver->shutdown(hcd);
2960*4882a593Smuzhiyun }
2961*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
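
/*
 * Illustrative sketch, not part of this file: platform glue normally
 * just points its .shutdown hook at usb_hcd_platform_shutdown() so the
 * controller stops DMA before reboot or kexec.  The .probe and .remove
 * entries refer to the hypothetical sketches shown earlier.
 */
static struct platform_driver example_plat_driver = {
	.probe		= example_plat_probe,
	.remove		= example_plat_remove,
	.shutdown	= usb_hcd_platform_shutdown,
	.driver		= {
		.name	= "example-hcd",
	},
};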
2962*4882a593Smuzhiyun
2963*4882a593Smuzhiyun int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
2964*4882a593Smuzhiyun dma_addr_t dma, size_t size)
2965*4882a593Smuzhiyun {
2966*4882a593Smuzhiyun int err;
2967*4882a593Smuzhiyun void *local_mem;
2968*4882a593Smuzhiyun
2969*4882a593Smuzhiyun hcd->localmem_pool = devm_gen_pool_create(hcd->self.sysdev, 4,
2970*4882a593Smuzhiyun dev_to_node(hcd->self.sysdev),
2971*4882a593Smuzhiyun dev_name(hcd->self.sysdev));
2972*4882a593Smuzhiyun if (IS_ERR(hcd->localmem_pool))
2973*4882a593Smuzhiyun return PTR_ERR(hcd->localmem_pool);
2974*4882a593Smuzhiyun
2975*4882a593Smuzhiyun local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
2976*4882a593Smuzhiyun size, MEMREMAP_WC);
2977*4882a593Smuzhiyun if (IS_ERR(local_mem))
2978*4882a593Smuzhiyun return PTR_ERR(local_mem);
2979*4882a593Smuzhiyun
2980*4882a593Smuzhiyun /*
2981*4882a593Smuzhiyun * Here we pass a dma_addr_t but the arg type is a phys_addr_t.
2982*4882a593Smuzhiyun * It's not backed by system memory and thus there's no kernel mapping
2983*4882a593Smuzhiyun * for it.
2984*4882a593Smuzhiyun */
2985*4882a593Smuzhiyun err = gen_pool_add_virt(hcd->localmem_pool, (unsigned long)local_mem,
2986*4882a593Smuzhiyun dma, size, dev_to_node(hcd->self.sysdev));
2987*4882a593Smuzhiyun if (err < 0) {
2988*4882a593Smuzhiyun dev_err(hcd->self.sysdev, "gen_pool_add_virt failed with %d\n",
2989*4882a593Smuzhiyun err);
2990*4882a593Smuzhiyun return err;
2991*4882a593Smuzhiyun }
2992*4882a593Smuzhiyun
2993*4882a593Smuzhiyun return 0;
2994*4882a593Smuzhiyun }
2995*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_hcd_setup_local_mem);
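
/*
 * Illustrative sketch, not part of this file: a controller that can only
 * DMA from a dedicated on-chip SRAM calls usb_hcd_setup_local_mem()
 * before usb_add_hcd(), so hcd_buffer_alloc() and friends draw from that
 * pool instead of the DMA API.  Which bus address to pass as @dma is
 * controller specific; this hypothetical example assumes the SRAM is the
 * platform device's second MEM resource and that the CPU physical
 * address doubles as the bus address.
 */
static int example_setup_sram(struct platform_device *pdev,
			      struct usb_hcd *hcd)
{
	struct resource *sram;

	sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!sram)
		return -ENODEV;

	return usb_hcd_setup_local_mem(hcd, sram->start, sram->start,
				       resource_size(sram));
}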
2996*4882a593Smuzhiyun
2997*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_USB_MON)
3000*4882a593Smuzhiyun
3001*4882a593Smuzhiyun const struct usb_mon_operations *mon_ops;
3002*4882a593Smuzhiyun
3003*4882a593Smuzhiyun /*
3004*4882a593Smuzhiyun * The registration is unlocked.
3005*4882a593Smuzhiyun * We do it this way because we do not want to lock in hot paths.
3006*4882a593Smuzhiyun *
3007*4882a593Smuzhiyun * Notice that the code is minimally error-proof. Because usbmon needs
3008*4882a593Smuzhiyun * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
3009*4882a593Smuzhiyun */
3010*4882a593Smuzhiyun
3011*4882a593Smuzhiyun int usb_mon_register(const struct usb_mon_operations *ops)
3012*4882a593Smuzhiyun {
3013*4882a593Smuzhiyun
3014*4882a593Smuzhiyun if (mon_ops)
3015*4882a593Smuzhiyun return -EBUSY;
3016*4882a593Smuzhiyun
3017*4882a593Smuzhiyun mon_ops = ops;
3018*4882a593Smuzhiyun mb();
3019*4882a593Smuzhiyun return 0;
3020*4882a593Smuzhiyun }
3021*4882a593Smuzhiyun EXPORT_SYMBOL_GPL (usb_mon_register);
3022*4882a593Smuzhiyun
3023*4882a593Smuzhiyun void usb_mon_deregister (void)
3024*4882a593Smuzhiyun {
3025*4882a593Smuzhiyun
3026*4882a593Smuzhiyun if (mon_ops == NULL) {
3027*4882a593Smuzhiyun printk(KERN_ERR "USB: monitor was not registered\n");
3028*4882a593Smuzhiyun return;
3029*4882a593Smuzhiyun }
3030*4882a593Smuzhiyun mon_ops = NULL;
3031*4882a593Smuzhiyun mb();
3032*4882a593Smuzhiyun }
3033*4882a593Smuzhiyun EXPORT_SYMBOL_GPL (usb_mon_deregister);
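
/*
 * Illustrative sketch, not part of this file: the usbmon module fills a
 * struct usb_mon_operations with its tracing hooks and hands it to
 * usb_mon_register() at module init, then detaches with
 * usb_mon_deregister() on exit.  The mon_example_* callbacks are
 * hypothetical stubs; their prototypes follow the usb_mon_operations
 * fields declared in <linux/usb/hcd.h>.
 */
static void mon_example_submit(struct usb_bus *bus, struct urb *urb)
{
	/* a real monitor would record the submitted URB here */
}

static void mon_example_submit_error(struct usb_bus *bus, struct urb *urb,
				     int err)
{
}

static void mon_example_complete(struct usb_bus *bus, struct urb *urb,
				 int status)
{
}

static const struct usb_mon_operations mon_example_ops = {
	.urb_submit		= mon_example_submit,
	.urb_submit_error	= mon_example_submit_error,
	.urb_complete		= mon_example_complete,
};

static int __init mon_example_init(void)
{
	return usb_mon_register(&mon_example_ops);
}

static void __exit mon_example_exit(void)
{
	usb_mon_deregister();
}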
3034*4882a593Smuzhiyun
3035*4882a593Smuzhiyun #endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */