#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function. Only use this if you allocate the
 * space for a struct urb on your own. If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
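/*
 * Illustrative sketch (not part of the original file): usb_init_urb() is
 * meant for urbs whose storage the driver itself owns, for example a urb
 * embedded in a hypothetical per-device structure instead of one obtained
 * from usb_alloc_urb(). "struct my_device", "mydev", "my_complete", "buf",
 * "len" and "interval" are made-up names supplied by such a driver:
 *
 *	struct my_device {
 *		struct urb int_urb;	(storage owned by the driver)
 *	};
 *
 *	usb_init_urb(&mydev->int_urb);
 *	usb_fill_int_urb(&mydev->int_urb, udev, usb_rcvintpipe(udev, 1),
 *			 buf, len, my_complete, mydev, interval);
 *
 * The driver must still make sure the urb is idle before freeing mydev,
 * since the USB core has no reference of its own to this storage.
 */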

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb) {
		printk(KERN_ERR "alloc_urb: kmalloc failed\n");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it. When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed unless the
 * URB_FREE_BUFFER transfer flag is set.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);
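/*
 * Illustrative sketch (not part of the original file): the usual allocate,
 * fill, submit, free life cycle for a non-isochronous urb. "udev", "ep_addr",
 * "buf", "len", "my_complete" and "context" are hypothetical caller-supplied
 * names, not part of this file.
 *
 *	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
 *	if (!urb)
 *		return -ENOMEM;
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep_addr),
 *			  buf, len, my_complete, context);
 *	ret = usb_submit_urb(urb, GFP_KERNEL);
 *	usb_free_urb(urb);
 *
 * Dropping the local reference right after submission is the "fire and
 * forget" style: the host controller core holds its own reference while the
 * transfer is in flight, so the urb is actually freed only after completion
 * (or immediately, if the submission failed).
 */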

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver. This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;

	if (unlikely(anchor->poisoned))
		atomic_inc(&urb->reject);

	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
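/*
 * Illustrative sketch (not part of the original file): anchoring each urb
 * before submission so that all in-flight transfers can later be cancelled
 * as a group. "priv" and its "submitted" anchor are hypothetical driver
 * state initialized elsewhere with init_usb_anchor().
 *
 *	usb_anchor_urb(urb, &priv->submitted);
 *	ret = usb_submit_urb(urb, GFP_ATOMIC);
 *	if (ret)
 *		usb_unanchor_urb(urb);
 *
 * On shutdown or disconnect the driver can then call
 * usb_kill_anchored_urbs() or usb_unlink_anchored_urbs() (see below)
 * instead of tracking every urb individually.
 */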

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	/*
	 * At this point, we could be competing with another thread which
	 * has the same intention. To protect the urb from being unanchored
	 * twice, only the winner of the race gets the job.
	 */
	if (likely(anchor == urb->anchor))
		__usb_unanchor_urb(urb, anchor);
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem. Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number. If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB. When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request. The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling. For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units). And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.
 *
 * Not all isochronous transfer scheduling policies will work, but most
 * host controller drivers should easily handle ISO queues going from now
 * until 10-200 msec into the future. Drivers should try to keep at
 * least one or two msec of data in the queue; many controllers require
 * that new transfers start at least 1 msec in the future when they are
 * added. If the driver is unable to keep up and the queue empties out,
 * the behavior for new submissions is governed by the URB_ISO_ASAP flag.
 * If the flag is set, or if the queue is idle, then the URB is always
 * assigned to the first available (and not yet expired) slot in the
 * endpoint's schedule. If the flag is not set and the queue is active
 * then the URB is always assigned to the next slot in the schedule
 * following the end of the endpoint's previous URB, even if that slot is
 * in the past. When a packet is assigned in this way to a slot that has
 * already expired, the packet is not transmitted and the corresponding
 * usb_iso_packet_descriptor's status field will return -EXDEV. If this
 * would happen to all the packets in the URB, submission fails with a
 * -EXDEV error code.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification. For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput. With that queuing policy, an endpoint's queue would never
 * be empty. This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one. This was previously a HCD-specific behavior, except for ISO
 * transfers. Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb. Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * For devices under xHCI, the bandwidth is reserved at configuration time, or
 * when the alt setting is selected. If there is not enough bus bandwidth, the
 * configuration/alt setting request will fail. Therefore, submissions to
 * periodic endpoints on devices under xHCI should never fail due to bandwidth
 * constraints.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks). When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled. This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc. There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 * (a) you are inside a completion handler, an interrupt, bottom half,
 *     tasklet or timer, or
 * (b) you are holding a spinlock or rwlock (does not apply to
 *     semaphores), or
 * (c) current->state != TASK_RUNNING, this is the case only after
 *     you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 * (1) start_xmit, timeout, and receive methods of network drivers must
 *     use GFP_ATOMIC (they are called with a spinlock held);
 * (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *     called with a spinlock held);
 * (3) If you use a kernel thread with a network driver you must use
 *     GFP_NOIO, unless (b) or (c) apply;
 * (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *     apply or you are in a storage driver's block io path;
 * (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 * (6) changing firmware on a running storage or net device uses
 *     GFP_NOIO, unless (b) or (c) apply
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int xfertype, max;
	struct usb_device *dev;
	struct usb_host_endpoint *ep;
	int is_out;

	if (!urb || !urb->complete)
		return -EINVAL;
	if (urb->hcpriv) {
		WARN_ONCE(1, "URB %p submitted while active\n", urb);
		return -EBUSY;
	}

	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
		return -ENODEV;

	/* For now, get the endpoint from the pipe. Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = usb_pipe_endpoint(dev, urb->pipe);
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
			(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
			!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Clear the internal flags and cache the direction for later use */
	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
			URB_DMA_SG_COMBINED);
	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
	    dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = usb_endpoint_maxp(&ep->desc);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int n, len;

		/* SuperSpeed isoc endpoints have up to 16 bursts of up to
		 * 3 packets each
		 */
		if (dev->speed == USB_SPEED_SUPER) {
			int burst = 1 + ep->ss_ep_comp.bMaxBurst;
			int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
			max *= burst;
			max *= mult;
		}

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length > INT_MAX)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
	unsigned int allowed;
	static int pipetypes[4] = {
		PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
	};

	/* Check that the pipe's type matches the endpoint's type */
	if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
		dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
			usb_pipetype(urb->pipe), pipetypes[xfertype]);

	/* Check against a simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
			URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	allowed &= urb->transfer_flags;

	/* warn if submitter gave bogus flags */
	if (allowed != urb->transfer_flags)
		dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
			urb->transfer_flags, allowed);
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		switch (dev->speed) {
		case USB_SPEED_WIRELESS:
			if (urb->interval < 6)
				return -EINVAL;
			break;
		default:
			if (urb->interval <= 0)
				return -EINVAL;
			break;
		}
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_SUPER:	/* units are 125us */
			/* Handle up to 2^(16-1) microframes */
			if (urb->interval > (1 << 15))
				return -EINVAL;
			max = 1 << 15;
			break;
		case USB_SPEED_WIRELESS:
			if (urb->interval > 16)
				return -EINVAL;
			break;
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		if (dev->speed != USB_SPEED_WIRELESS) {
			/* Round down to a power of 2, no more than max */
			urb->interval = min(max, 1 << ilog2(urb->interval));
		}
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
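/*
 * Illustrative sketch (not part of the original file): an interrupt-in urb
 * whose completion handler resubmits it with GFP_ATOMIC, since the handler
 * runs in interrupt context (rule (a) above), while the initial submission
 * from process context can use GFP_KERNEL. "my_intr_complete", "process_data",
 * "priv", "buf", "len", "ep_addr" and "ep_interval" are hypothetical
 * driver-side names.
 *
 *	static void my_intr_complete(struct urb *urb)
 *	{
 *		if (urb->status == 0) {
 *			process_data(urb->transfer_buffer, urb->actual_length);
 *			if (usb_submit_urb(urb, GFP_ATOMIC))
 *				dev_err(&urb->dev->dev, "resubmit failed\n");
 *		}
 *	}
 *
 *	usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, ep_addr),
 *			 buf, len, my_intr_complete, priv, ep_interval);
 *	ret = usb_submit_urb(urb, GFP_KERNEL);
 *
 * After a successful submission the driver must not touch the urb again
 * until my_intr_complete() has been called.
 */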

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * Drivers should not call this routine or related routines, such as
 * usb_kill_urb() or usb_unlink_anchored_urbs(), after their disconnect
 * method has returned. The disconnect function should synchronize with
 * a driver's I/O routines to ensure that all URB-related activity has
 * completed before it returns.
 *
 * This request is asynchronous, however the HCD might call the ->complete()
 * callback during unlink. Therefore when drivers call usb_unlink_urb(), they
 * must not hold any locks that may be taken by the completion function.
 * Success is indicated by returning -EINPROGRESS, at which time the URB will
 * probably not yet have been given back to the device driver. When it is
 * eventually called, the completion function will see @urb->status ==
 * -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue. Normally the queue advances as the controller
 * hardware processes each request. But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns. It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns. The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO. Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors. Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates. Such queues do not stop when an URB
 * encounters an error or is unlinked. An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set. By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
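/*
 * Illustrative sketch (not part of the original file): asynchronously
 * cancelling a previously submitted urb, for example from a timer or other
 * atomic context where the sleeping usb_kill_urb() cannot be used.
 * "priv->tx_urb" is a hypothetical driver field.
 *
 *	ret = usb_unlink_urb(priv->tx_urb);
 *
 * A return of -EINPROGRESS means the cancel was queued; the completion
 * handler will run later with urb->status == -ECONNRESET, and the urb must
 * stay allocated until then. Any other return value means the urb was not
 * currently linked (never submitted, already unlinked, or already finished).
 */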

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse. These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function. If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_urb(struct urb *urb)
{
	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	atomic_inc(&urb->reject);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
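/*
 * Illustrative sketch (not part of the original file): stopping I/O in a
 * disconnect() callback. usb_kill_urb() sleeps, so this must run in process
 * context; "struct my_priv", "priv" and "int_urb" are hypothetical driver
 * state.
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(priv->int_urb);
 *		usb_free_urb(priv->int_urb);
 *	}
 *
 * usb_kill_urb() returns only after the completion handler has finished and
 * the urb is idle, so freeing the urb afterwards is safe.
 */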

/**
 * usb_poison_urb - reliably kill a transfer and prevent further use of an URB
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request. It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and cannot be reused. These features make
 * this an ideal way to stop I/O in a disconnect() callback.
 * If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * After and while the routine runs, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_urb(struct urb *urb)
{
	might_sleep();
	if (!urb)
		return;
	atomic_inc(&urb->reject);

	if (!urb->dev || !urb->ep)
		return;

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}
EXPORT_SYMBOL_GPL(usb_poison_urb);

void usb_unpoison_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_dec(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_unpoison_urb);
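/*
 * Illustrative sketch (not part of the original file): poisoning an urb
 * across a quiescent period (for example a suspend-like path) and reviving
 * it afterwards. "priv->bulk_urb" is a hypothetical driver field.
 *
 *	usb_poison_urb(priv->bulk_urb);
 *	... device is quiescent here, resubmission attempts fail with -EPERM ...
 *	usb_unpoison_urb(priv->bulk_urb);
 *
 * Each usb_poison_urb() call must be balanced by exactly one
 * usb_unpoison_urb() call before the urb can be submitted again.
 */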

/**
 * usb_block_urb - reliably prevent further use of an URB
 * @urb: pointer to URB to be blocked, may be NULL
 *
 * After the routine has run, attempts to resubmit the URB will fail
 * with error -EPERM. Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * The URB must not be deallocated while this routine is running. In
 * particular, when a driver calls this routine, it must ensure that the
 * completion handler cannot deallocate the URB.
 */
void usb_block_urb(struct urb *urb)
{
	if (!urb)
		return;

	atomic_inc(&urb->reject);
}
EXPORT_SYMBOL_GPL(usb_block_urb);

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be killed starting
 * from the back of the queue
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
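/*
 * Illustrative sketch (not part of the original file): combining an anchor
 * with usb_kill_anchored_urbs() to stop every in-flight transfer of a
 * hypothetical driver in one call. "struct my_priv", "priv" and "submitted"
 * are made-up names; the urbs were anchored at submission time as shown in
 * the usb_anchor_urb() sketch above.
 *
 *	static void my_stop(struct my_priv *priv)
 *	{
 *		usb_kill_anchored_urbs(&priv->submitted);
 *	}
 *
 * Every urb still anchored to priv->submitted is killed and unanchored; the
 * call returns only when all of them are idle.
 */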


/**
 * usb_poison_anchored_urbs - cease all traffic from an anchor
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be poisoned starting
 * from the back of the queue. Newly added URBs will also be
 * poisoned
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	anchor->poisoned = 1;
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_poison_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs);

/**
 * usb_unpoison_anchored_urbs - let an anchor be used successfully again
 * @anchor: anchor the requests are bound to
 *
 * Reverses the effect of usb_poison_anchored_urbs
 * the anchor can be used normally after it returns
 */
void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
	unsigned long flags;
	struct urb *lazarus;

	spin_lock_irqsave(&anchor->lock, flags);
	list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
		usb_unpoison_urb(lazarus);
	}
	anchor->poisoned = 0;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs);

/**
 * usb_unlink_anchored_urbs - asynchronously cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * this allows all outstanding URBs to be unlinked starting
 * from the back of the queue. This function is asynchronous.
 * The unlinking is just triggered. It may happen after this
 * function has returned.
 *
 * This routine should not be called by a driver after its disconnect
 * method has returned.
 */
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	while ((victim = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(victim);
		usb_put_urb(victim);
	}
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all an anchor's
 * URBs have finished
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
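/*
 * Illustrative sketch (not part of the original file): a bounded shutdown
 * sequence that first waits for outstanding urbs, then forcibly kills
 * whatever is left. The 1000 ms value and "priv->submitted" are arbitrary
 * placeholders.
 *
 *	if (!usb_wait_anchor_empty_timeout(&priv->submitted, 1000))
 *		usb_kill_anchored_urbs(&priv->submitted);
 *
 * The return value is 0 if the timeout elapsed while urbs were still
 * outstanding, and non-zero if the anchor drained in time.
 */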

/**
 * usb_get_from_anchor - get an anchor's oldest urb
 * @anchor: the anchor whose urb you want
 *
 * this will take the oldest urb from an anchor,
 * unanchor and return it
 */
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	if (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.next, struct urb,
				    anchor_list);
		usb_get_urb(victim);
		__usb_unanchor_urb(victim, anchor);
	} else {
		victim = NULL;
	}
	spin_unlock_irqrestore(&anchor->lock, flags);

	return victim;
}

EXPORT_SYMBOL_GPL(usb_get_from_anchor);
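/*
 * Illustrative sketch (not part of the original file): draining an anchor
 * one urb at a time, oldest first. The returned urb carries an extra
 * reference taken by usb_get_from_anchor(), so the caller must drop it with
 * usb_put_urb(). "priv->pending" is a hypothetical anchor.
 *
 *	struct urb *urb;
 *
 *	while ((urb = usb_get_from_anchor(&priv->pending)) != NULL) {
 *		usb_unlink_urb(urb);
 *		usb_put_urb(urb);
 *	}
 *
 * This is essentially what usb_unlink_anchored_urbs() above does.
 */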

/**
 * usb_scuttle_anchored_urbs - unanchor all an anchor's urbs
 * @anchor: the anchor whose urbs you want to unanchor
 *
 * use this to get rid of all an anchor's urbs
 */
void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		__usb_unanchor_urb(victim, anchor);
	}
	spin_unlock_irqrestore(&anchor->lock, flags);
}

EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);

/**
 * usb_anchor_empty - is an anchor empty
 * @anchor: the anchor you want to query
 *
 * returns 1 if the anchor has no urbs associated with it
 */
int usb_anchor_empty(struct usb_anchor *anchor)
{
	return list_empty(&anchor->urb_list);
}

EXPORT_SYMBOL_GPL(usb_anchor_empty);
