// SPDX-License-Identifier: GPL-2.0
/*
 * Released under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->urb_list);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
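
/*
 * Example: a minimal sketch of hypothetical caller code (not part of this
 * file) showing when usb_init_urb() is needed, i.e. when the struct urb is
 * embedded in a driver-private structure rather than obtained from
 * usb_alloc_urb().  "struct my_dev" is an assumption for illustration.
 *
 *	struct my_dev {
 *		struct urb urb;		// embedded, driver owns the memory
 *	};
 *
 *	static void my_dev_setup(struct my_dev *md)
 *	{
 *		usb_init_urb(&md->urb);	// make the embedded urb usable
 *	}
 *
 * The driver must then make sure the USB core no longer holds a reference
 * before the containing structure is freed.
 */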

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 *
 * Return: A pointer to the new urb, or %NULL if no memory is available.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(struct_size(urb, iso_frame_desc, iso_packets),
		      mem_flags);
	if (!urb)
		return NULL;
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
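
/*
 * Example: a minimal sketch of the usual allocate/submit/free pairing for
 * a non-isochronous urb.  udev, buf, len, ctx and my_complete() are
 * hypothetical names, not part of this file.
 *
 *	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *
 *	if (!urb)
 *		return -ENOMEM;
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
 *			  buf, len, my_complete, ctx);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	usb_free_urb(urb);	// drop our reference; on successful submission
 *				// the core holds its own until completion
 */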

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last
 * user of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver
 * to a host controller driver.  This allows proper reference counting to
 * happen for urbs.
 *
 * Return: A pointer to the urb with the incremented reference counter.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);
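
/*
 * Example: a minimal sketch of pinning an urb across a window in which
 * another thread might drop the last reference with usb_free_urb().
 * some_urb is a hypothetical name.
 *
 *	urb = usb_get_urb(some_urb);	// take an extra reference
 *	status = urb->status;		// fields stay valid while pinned
 *	usb_put_urb(urb);		// drop the extra reference
 */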

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them.
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned))
		atomic_inc(&urb->reject);

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
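
/*
 * Example: a minimal sketch of the common anchoring pattern, so that all
 * in-flight urbs can later be cancelled through the anchor alone.  The
 * anchor (md->submitted) is a hypothetical field initialized elsewhere
 * with init_usb_anchor().
 *
 *	usb_anchor_urb(urb, &md->submitted);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval)
 *		usb_unanchor_urb(urb);	// failed submit: take it back off
 */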

static int usb_anchor_check_wakeup(struct usb_anchor *anchor)
{
	return atomic_read(&anchor->suspend_wakeups) == 0 &&
		list_empty(&anchor->urb_list);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB.
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention.  To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

static const int pipetypes[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

/**
 * usb_pipe_type_check - sanity check of a specific pipe for a usb device
 * @dev: struct usb_device to be checked
 * @pipe: pipe to check
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given usb device.  It returns 0 if the pipe is valid for the specific usb
 * device, otherwise a negative error code.
 */
int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe)
{
	const struct usb_host_endpoint *ep;

	ep = usb_pipe_endpoint(dev, pipe);
	if (!ep)
		return -EINVAL;
	if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(usb_pipe_type_check);
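
/*
 * Example: a minimal sketch of validating a pipe up front, e.g. in probe()
 * when the endpoint number comes from device descriptors.  udev and ep_num
 * are hypothetical names.
 *
 *	if (usb_pipe_type_check(udev, usb_rcvintpipe(udev, ep_num)))
 *		return -ENODEV;	// no such endpoint, or not interrupt-in
 */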

/**
 * usb_urb_ep_type_check - sanity check of endpoint in the given urb
 * @urb: urb to be checked
 *
 * This performs a light-weight sanity check for the endpoint in the
 * given urb.  It returns 0 if the urb contains a valid endpoint, otherwise
 * a negative error code.
 */
int usb_urb_ep_type_check(const struct urb *urb)
{
	return usb_pipe_type_check(urb->dev, urb->pipe);
}
EXPORT_SYMBOL_GPL(usb_urb_ep_type_check);

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * If the submission is successful, the complete() callback from the URB
 * will be called exactly once, when the USB core and Host Controller Driver
 * (HCD) are finished with the URB.  When the completion function is called,
 * control of the URB is returned to the device driver which issued the
 * request.  The completion handler may then immediately free or reuse that
 * URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future.  Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added.  If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule.  If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past.  When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV.  If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Return:
 * 0 on successful submissions.  A negative error number otherwise.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers.  Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected.  If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail.  Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int xfertype, max;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;
	unsigned int allowed;

	if (!urb || !urb->complete)
		return -EINVAL;
	if (urb->hcpriv) {
		WARN_ONCE(1, "URB %pK submitted while active\n", urb);
		return -EBUSY;
	}

	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
	    dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = usb_endpoint_maxp(&ep->desc);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed >= USB_SPEED_SUPER) {
			int burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		if (dev->speed == USB_SPEED_SUPER_PLUS &&
		    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes)) {
			struct usb_ssp_isoc_ep_comp_descriptor *isoc_ep_comp;

			isoc_ep_comp = &ep->ssp_isoc_ep_comp;
			max = le32_to_cpu(isoc_ep_comp->dwBytesPerInterval);
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH)
			max *= usb_endpoint_maxp_mult(&ep->desc);

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	} else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
			dev->speed != USB_SPEED_WIRELESS) {
		struct scatterlist *sg;
		int i;

		for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
			if (sg->length % max)
				return -EINVAL;
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

	/*
	 * stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */

	/* Check that the pipe's type matches the endpoint's type */
	if (usb_pipe_type_check(urb->dev, urb->pipe))
		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
			usb_pipetype(urb->pipe), pipetypes[xfertype]);

	/* Check against a simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
			URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		fallthrough;
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	allowed &= urb->transfer_flags;

	/* warn if submitter gave bogus flags */
	if (allowed != urb->transfer_flags)
		dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			urb->transfer_flags, allowed);

	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if ((urb->interval < 6)
				&& (xfertype == USB_ENDPOINT_XFER_INT))
				return -EINVAL;
			fallthrough;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER_PLUS:
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
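
/*
 * Example: a minimal sketch of an interrupt-in urb that resubmits itself
 * from its completion handler, the usual way to keep a periodic endpoint's
 * bandwidth reservation alive (see "Reserved Bandwidth Transfers" above).
 * All names other than the USB API calls are illustrative assumptions.
 *
 *	static void my_int_complete(struct urb *urb)
 *	{
 *		if (urb->status)	// e.g. -ENOENT/-ECONNRESET: give up
 *			return;
 *		my_process(urb->transfer_buffer, urb->actual_length);
 *		if (usb_submit_urb(urb, GFP_ATOMIC))	// atomic context here
 *			dev_err(&urb->dev->dev, "resubmit failed\n");
 *	}
 *
 *	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, ep_addr),
 *			 buf, buf_len, my_int_complete, ctx, ep_interval);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 */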

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned.  The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous, however the HCD might call the ->complete()
 * callback during unlink.  Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver.  When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Return: -EINPROGRESS on success.  See description for other values on
 * failure.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);
	/*
	 * Order the write of urb->reject above before the read
	 * of urb->use_count below.  Pairs with the barriers in
	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
	 */
	smp_mb__after_atomic();

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
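
/*
 * Example: a minimal sketch of synchronous teardown in disconnect(), where
 * usb_kill_urb() guarantees the completion handler has finished before the
 * urb is released.  struct my_dev and int_urb are hypothetical.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_dev *md = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(md->int_urb);	// idle once this returns
 *		usb_free_urb(md->int_urb);
 *	}
 */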

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused.  These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!urb)
		return;
	atomic_inc(&urb->reject);
	/*
	 * Order the write of urb->reject above before the read
	 * of urb->use_count below.  Pairs with the barriers in
	 * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
	 */
	smp_mb__after_atomic();

	if (!urb->dev || !urb->ep)
		return;

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
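
/*
 * Example: a minimal sketch of a poison/unpoison pairing around a device
 * reset, keeping a self-resubmitting urb quiescent in between.  The
 * pre_reset/post_reset driver callbacks are real; md and int_urb are
 * hypothetical.
 *
 *	static int my_pre_reset(struct usb_interface *intf)
 *	{
 *		struct my_dev *md = usb_get_intfdata(intf);
 *
 *		usb_poison_urb(md->int_urb);	// idle and blocked from reuse
 *		return 0;
 *	}
 *
 *	static int my_post_reset(struct usb_interface *intf)
 *	{
 *		struct my_dev *md = usb_get_intfdata(intf);
 *
 *		usb_unpoison_urb(md->int_urb);	// allow resubmission again
 *		return usb_submit_urb(md->int_urb, GFP_NOIO);
 *	}
 */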

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running.  In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);

/**
 * usb_kill_anchored_urbs - kill all URBs associated with an anchor
 * @anchor: anchor the requests are bound to
 *
 * This kills all outstanding URBs starting from the back of the queue,
 * with the guarantee that no completion callbacks will take place from
 * the anchor after this function returns.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	int surely_empty;

	do {
		spin_lock_irq(&anchor->lock);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			/* make sure the URB isn't freed before we kill it */
			usb_get_urb(victim);
			spin_unlock_irq(&anchor->lock);
			/* this will unanchor the URB */
			usb_kill_urb(victim);
			usb_put_urb(victim);
			spin_lock_irq(&anchor->lock);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irq(&anchor->lock);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
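
/*
 * Example: a minimal sketch of anchor-based teardown.  If every submitted
 * urb was anchored (see usb_anchor_urb() above), disconnect() can cancel
 * them all with a single synchronous call.  md->submitted is hypothetical.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_dev *md = usb_get_intfdata(intf);
 *
 *		usb_kill_anchored_urbs(&md->submitted);
 *	}
 */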

/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be poisoned starting
 * from the back of the queue.  Newly added URBs will also be
 * poisoned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	int surely_empty;

	do {
		spin_lock_irq(&anchor->lock);
		anchor->poisoned = 1;
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			/* make sure the URB isn't freed before we kill it */
			usb_get_urb(victim);
			spin_unlock_irq(&anchor->lock);
			/* this will unanchor the URB */
			usb_poison_urb(victim);
			usb_put_urb(victim);
			spin_lock_irq(&anchor->lock);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irq(&anchor->lock);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs().
 * The anchor can be used normally after it returns.
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);
/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be unlinked starting
 * from the back of the queue.  This function is asynchronous.
 * The unlinking is just triggered.  It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_anchor_suspend_wakeups
 * @anchor: the anchor you want to suspend wakeups on
 *
 * Call this to stop the last urb being unanchored from waking up any
 * usb_wait_anchor_empty_timeout waiters.  This is used in the hcd urb
 * give-back path to delay waking up until after the completion handler
 * has run.
 */
void usb_anchor_suspend_wakeups(struct usb_anchor *anchor)
{
	if (anchor)
		atomic_inc(&anchor->suspend_wakeups);
}
EXPORT_SYMBOL_GPL(usb_anchor_suspend_wakeups);

/**
 * usb_anchor_resume_wakeups
 * @anchor: the anchor you want to resume wakeups on
 *
 * Allow usb_wait_anchor_empty_timeout waiters to be woken up again, and
 * wake up any current waiters if the anchor is empty.
 */
void usb_anchor_resume_wakeups(struct usb_anchor *anchor)
{
	if (!anchor)
		return;

	atomic_dec(&anchor->suspend_wakeups);
	if (usb_anchor_check_wakeup(anchor))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_anchor_resume_wakeups);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure that all of an anchor's
 * URBs have finished.
 *
 * Return: Non-zero if the anchor became unused.  Zero on timeout.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait,
				  usb_anchor_check_wakeup(anchor),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
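
/*
 * Example: a minimal sketch of a graceful drain with a forced fallback;
 * the 1000 ms budget is an arbitrary assumption, md->submitted hypothetical.
 *
 *	if (!usb_wait_anchor_empty_timeout(&md->submitted, 1000))
 *		usb_kill_anchored_urbs(&md->submitted);	// timed out: force it
 */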

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * This will take the oldest urb from an anchor,
 * unanchor and return it.
 *
 * Return: The oldest urb from @anchor, or %NULL if @anchor has no
 * urbs associated with it.
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}
EXPORT_SYMBOL_GPL(usb_get_from_anchor);

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * Use this to get rid of all an anchor's urbs.
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;
	int surely_empty;

	do {
		spin_lock_irqsave(&anchor->lock, flags);
		while (!list_empty(&anchor->urb_list)) {
			victim = list_entry(anchor->urb_list.prev,
					    struct urb, anchor_list);
			__usb_unanchor_urb(victim, anchor);
		}
		surely_empty = usb_anchor_check_wakeup(anchor);

		spin_unlock_irqrestore(&anchor->lock, flags);
		cpu_relax();
	} while (!surely_empty);
}
EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * Return: 1 if the anchor has no urbs associated with it.
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}
EXPORT_SYMBOL_GPL(usb_anchor_empty);