1 /* $FreeBSD: releng/12.2/sys/dev/usb/usb_transfer.c 363664 2020-07-29 14:30:42Z markj $ */
2 /*-
3 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 *
5 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "implementation/global_implementation.h"
30
31 #undef USB_DEBUG_VAR
32 #define USB_DEBUG_VAR usb_debug
33
34 SPIN_LOCK_INIT(g_usb_wait_queue_spinlock);
35
36 struct usb_std_packet_size {
37 struct {
38 uint16_t min; /* inclusive */
39 uint16_t max; /* inclusive */
40 } range;
41
42 uint16_t fixed[4];
43 };
44
45 static usb_callback_t usb_request_callback;
46
47 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
48
49 /* This transfer is used for generic control endpoint transfers */
50
51 [0] = {
52 .type = UE_CONTROL,
53 .endpoint = 0x00, /* Control endpoint */
54 .direction = UE_DIR_ANY,
55 .bufsize = USB_EP0_BUFSIZE, /* bytes */
56 .flags = {.proxy_buffer = 1,},
57 .callback = &usb_request_callback,
58 .usb_mode = USB_MODE_DUAL, /* both modes */
59 },
60
61 /* This transfer is used for generic clear stall only */
62
63 [1] = {
64 .type = UE_CONTROL,
65 .endpoint = 0x00, /* Control pipe */
66 .direction = UE_DIR_ANY,
67 .bufsize = sizeof(struct usb_device_request),
68 .callback = &usb_do_clear_stall_callback,
69 .timeout = 1000, /* 1 second */
70 .interval = 50, /* 50ms */
71 .usb_mode = USB_MODE_HOST,
72 },
73 };
74
75 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = {
76
77 /* This transfer is used for generic control endpoint transfers */
78
79 [0] = {
80 .type = UE_CONTROL,
81 .endpoint = 0x00, /* Control endpoint */
82 .direction = UE_DIR_ANY,
83 .bufsize = 65535, /* bytes */
84 .callback = &usb_request_callback,
85 .usb_mode = USB_MODE_DUAL, /* both modes */
86 },
87
88 /* This transfer is used for generic clear stall only */
89
90 [1] = {
91 .type = UE_CONTROL,
92 .endpoint = 0x00, /* Control pipe */
93 .direction = UE_DIR_ANY,
94 .bufsize = sizeof(struct usb_device_request),
95 .callback = &usb_do_clear_stall_callback,
96 .timeout = 1000, /* 1 second */
97 .interval = 50, /* 50ms */
98 .usb_mode = USB_MODE_HOST,
99 },
100 };
101
102 /* function prototypes */
103
104 static void usbd_update_max_frame_size(struct usb_xfer *);
105 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
106 static void usbd_control_transfer_init(struct usb_xfer *);
107 static int usbd_setup_ctrl_transfer(struct usb_xfer *);
108 static void usb_callback_proc(struct usb_proc_msg *);
109 static void usbd_callback_ss_done_defer(struct usb_xfer *);
110 static void usbd_callback_wrapper(struct usb_xfer_queue *);
111 static void usbd_transfer_start_cb(void *);
112 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
113 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
114 uint8_t type, enum usb_dev_speed speed);
115
116 /*------------------------------------------------------------------------*
117 * usb_request_callback
118 *------------------------------------------------------------------------*/
119 static void
120 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
121 {
122 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
123 usb_handle_request_callback(xfer, error);
124 else
125 usbd_do_request_callback(xfer, error);
126 }
127
128 /*------------------------------------------------------------------------*
129 * usbd_update_max_frame_size
130 *
131 * This function updates the maximum frame size, because high speed USB
132 * can transfer multiple consecutive packets.
133 *------------------------------------------------------------------------*/
134 static void
135 usbd_update_max_frame_size(struct usb_xfer *xfer)
136 {
137 /* compute maximum frame size */
138 /* this computation should not overflow 16-bit */
139 /* max = 15 * 1024 */
140
141 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
142 }
143
144 /*------------------------------------------------------------------------*
145 * usbd_get_dma_delay
146 *
147 * The following function is called when we need to
148 * synchronize with DMA hardware.
149 *
150 * Returns:
151 * 0: no DMA delay required
152 * Else: milliseconds of DMA delay
153 *------------------------------------------------------------------------*/
154 usb_timeout_t
155 usbd_get_dma_delay(struct usb_device *udev)
156 {
157 const struct usb_bus_methods *mtod;
158 uint32_t temp;
159
160 mtod = udev->bus->methods;
161 temp = 0;
162
163 if (mtod->get_dma_delay) {
164 (mtod->get_dma_delay) (udev, &temp);
165 /*
166 * Round up and convert to milliseconds. Note that we
167 * divide by 1024 instead of 1000 to save a real division.
168 */
169 temp += 0x3FF;
170 temp /= 0x400;
171 }
172 return (temp);
173 }
174
175 /*------------------------------------------------------------------------*
176 * usbd_transfer_setup_sub_malloc
177 *
178 * This function will allocate one or more DMA'able memory chunks
179 * according to the "size", "align" and "count" arguments. On return,
180 * "ppc" points to a linear array of USB page caches.
181 *
182 * If the "align" argument is equal to "1" a non-contiguous allocation
183 * can happen. Else if the "align" argument is greater than "1", the
184 * allocation will always be contiguous in memory.
185 *
186 * Returns:
187 * 0: Success
188 * Else: Failure
189 *------------------------------------------------------------------------*/
190 #if USB_HAVE_BUSDMA
191 uint8_t
192 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
193 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
194 usb_size_t count)
195 {
196 struct usb_page_cache *pc;
197 struct usb_page *pg;
198 void *buf;
199 usb_size_t n_dma_pc;
200 usb_size_t n_dma_pg;
201 usb_size_t n_obj;
202 usb_size_t x;
203 usb_size_t y;
204 usb_size_t r;
205 usb_size_t z;
206
207 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
208 align));
209 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
210
211 if (count == 0) {
212 return (0); /* nothing to allocate */
213 }
214 /*
215 * Round "size" up to a multiple of "align" ("align" is a power of two here).
216 */
217 size = -((-size) & (-align));
218
219 /*
220 * Try multi-allocation chunks to reduce the number of DMA
221 * allocations, because DMA allocations are slow.
222 */
223 if (align == 1) {
224 /* special case - non-cached multi page DMA memory */
225 n_dma_pc = count;
226 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
227 n_obj = 1;
228 } else if (size >= USB_PAGE_SIZE) {
229 n_dma_pc = count;
230 n_dma_pg = 1;
231 n_obj = 1;
232 } else {
233 /* compute number of objects per page */
234 n_obj = (USB_PAGE_SIZE / size);
235 /*
236 * Compute number of DMA chunks, rounded up
237 * to nearest one:
238 */
239 n_dma_pc = ((count + n_obj - 1) / n_obj);
240 n_dma_pg = 1;
241 }
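	/*
	 * Worked example for the computation above (hypothetical numbers):
	 * with USB_PAGE_SIZE = 4096, size = 64, align = 64 and count = 100,
	 * we get n_obj = 64 objects per page and
	 * n_dma_pc = (100 + 63) / 64 = 2 single-page DMA chunks.
	 */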
242
243 /*
244 * DMA memory is allocated once, but mapped twice. That's why
245 * there is one list for auto-free and another list for
246 * non-auto-free which only holds the mapping and not the
247 * allocation.
248 */
249 if (parm->buf == NULL) {
250 /* reserve memory (auto-free) */
251 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
252 parm->dma_page_cache_ptr += n_dma_pc;
253
254 /* reserve memory (no-auto-free) */
255 parm->dma_page_ptr += count * n_dma_pg;
256 parm->xfer_page_cache_ptr += count;
257 return (0);
258 }
259 for (x = 0; x != n_dma_pc; x++) {
260 /* need to initialize the page cache */
261 parm->dma_page_cache_ptr[x].tag_parent =
262 &parm->curr_xfer->xroot->dma_parent_tag;
263 }
264 for (x = 0; x != count; x++) {
265 /* need to initialize the page cache */
266 parm->xfer_page_cache_ptr[x].tag_parent =
267 &parm->curr_xfer->xroot->dma_parent_tag;
268 }
269
270 if (ppc != NULL) {
271 *ppc = parm->xfer_page_cache_ptr;
272 }
273 r = count; /* set remainder count */
274 z = n_obj * size; /* set allocation size */
275 pc = parm->xfer_page_cache_ptr;
276 pg = parm->dma_page_ptr;
277
278 for (x = 0; x != n_dma_pc; x++) {
279
280 if (r < n_obj) {
281 /* compute last remainder */
282 z = r * size;
283 n_obj = r;
284 }
285 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
286 pg, z, align)) {
287 return (1); /* failure */
288 }
289 /* Set beginning of current buffer */
290 buf = parm->dma_page_cache_ptr->buffer;
291 /* Make room for one DMA page cache and one page */
292 parm->dma_page_cache_ptr++;
293 pg += n_dma_pg;
294
295 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
296
297 /* Load sub-chunk into DMA */
298 if (usb_pc_dmamap_create(pc, size)) {
299 return (1); /* failure */
300 }
301 pc->buffer = USB_ADD_BYTES(buf, y * size);
302 pc->page_start = pg;
303
304 USB_MTX_LOCK(pc->tag_parent->mtx);
305 (void)usb_pc_load_mem(pc, size, 1 /* synchronous */ );
306 USB_MTX_UNLOCK(pc->tag_parent->mtx);
307 }
308 }
309
310 parm->xfer_page_cache_ptr = pc;
311 parm->dma_page_ptr = pg;
312 return (0);
313 }
314 #endif
315
316 /*------------------------------------------------------------------------*
317 * usbd_transfer_setup_sub - transfer setup subroutine
318 *
319 * This function must be called from the "xfer_setup" callback of the
320 * USB Host or Device controller driver when setting up an USB
321 * transfer. This function will setup correct packet sizes, buffer
322 * sizes, flags and more, that are stored in the "usb_xfer"
323 * structure.
324 *------------------------------------------------------------------------*/
325 void
326 usbd_transfer_setup_sub(struct usb_setup_params *parm)
327 {
328 enum {
329 REQ_SIZE = 8,
330 MIN_PKT = 8,
331 };
332 struct usb_xfer *xfer = parm->curr_xfer;
333 const struct usb_config *setup = parm->curr_setup;
334 struct usb_endpoint_ss_comp_descriptor *ecomp;
335 struct usb_endpoint_descriptor *edesc;
336 struct usb_std_packet_size std_size;
337 usb_frcount_t n_frlengths;
338 usb_frcount_t n_frbuffers;
339 usb_frcount_t x;
340 uint16_t maxp_old;
341 uint8_t type;
342 uint8_t zmps;
343
344 /*
345 * Sanity check. The following parameters must be initialized before
346 * calling this function.
347 */
348 if ((parm->hc_max_packet_size == 0) ||
349 (parm->hc_max_packet_count == 0) ||
350 (parm->hc_max_frame_size == 0)) {
351 parm->err = USB_ERR_INVAL;
352 goto done;
353 }
354 edesc = xfer->endpoint->edesc;
355 ecomp = xfer->endpoint->ecomp;
356
357 type = (edesc->bmAttributes & UE_XFERTYPE);
358
359 xfer->flags = setup->flags;
360 xfer->nframes = setup->frames;
361 xfer->timeout = setup->timeout;
362 xfer->callback = setup->callback;
363 xfer->interval = setup->interval;
364 xfer->endpointno = edesc->bEndpointAddress;
365 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
366 xfer->max_packet_count = 1;
367 /* make a shadow copy: */
368 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
369
370 parm->bufsize = setup->bufsize;
371
372 switch (parm->speed) {
373 case USB_SPEED_HIGH:
374 switch (type) {
375 case UE_ISOCHRONOUS:
376 case UE_INTERRUPT:
377 xfer->max_packet_count +=
378 (xfer->max_packet_size >> 11) & 3;
379
380 /* check for invalid max packet count */
381 if (xfer->max_packet_count > 3)
382 xfer->max_packet_count = 3;
383 break;
384 default:
385 break;
386 }
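		/*
		 * Example (hypothetical value): a high-bandwidth endpoint
		 * reporting wMaxPacketSize = 0x1400 yields max_packet_count
		 * = 3 above and, after the mask below, max_packet_size =
		 * 0x400 (1024 bytes).
		 */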
387 xfer->max_packet_size &= 0x7FF;
388 break;
389 case USB_SPEED_SUPER:
390 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
391
392 if (ecomp != NULL)
393 xfer->max_packet_count += ecomp->bMaxBurst;
394
395 if ((xfer->max_packet_count == 0) ||
396 (xfer->max_packet_count > 16))
397 xfer->max_packet_count = 16;
398
399 switch (type) {
400 case UE_CONTROL:
401 xfer->max_packet_count = 1;
402 break;
403 case UE_ISOCHRONOUS:
404 if (ecomp != NULL) {
405 uint8_t mult;
406
407 mult = UE_GET_SS_ISO_MULT(
408 ecomp->bmAttributes) + 1;
409 if (mult > 3)
410 mult = 3;
411
412 xfer->max_packet_count *= mult;
413 }
414 break;
415 default:
416 break;
417 }
418 xfer->max_packet_size &= 0x7FF;
419 break;
420 default:
421 break;
422 }
423 /* range check "max_packet_count" */
424
425 if (xfer->max_packet_count > parm->hc_max_packet_count) {
426 xfer->max_packet_count = parm->hc_max_packet_count;
427 }
428
429 /* store max packet size value before filtering */
430
431 maxp_old = xfer->max_packet_size;
432
433 /* filter "wMaxPacketSize" according to HC capabilities */
434
435 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
436 (xfer->max_packet_size == 0)) {
437 xfer->max_packet_size = parm->hc_max_packet_size;
438 }
439 /* filter "wMaxPacketSize" according to standard sizes */
440
441 usbd_get_std_packet_size(&std_size, type, parm->speed);
442
443 if (std_size.range.min || std_size.range.max) {
444
445 if (xfer->max_packet_size < std_size.range.min) {
446 xfer->max_packet_size = std_size.range.min;
447 }
448 if (xfer->max_packet_size > std_size.range.max) {
449 xfer->max_packet_size = std_size.range.max;
450 }
451 } else {
452
453 if (xfer->max_packet_size >= std_size.fixed[3]) {
454 xfer->max_packet_size = std_size.fixed[3];
455 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
456 xfer->max_packet_size = std_size.fixed[2];
457 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
458 xfer->max_packet_size = std_size.fixed[1];
459 } else {
460 /* only one possibility left */
461 xfer->max_packet_size = std_size.fixed[0];
462 }
463 }
464
465 /*
466 * Check if the max packet size was outside its allowed range
467 * and clamped to a valid value:
468 */
469 if (maxp_old != xfer->max_packet_size)
470 xfer->flags_int.maxp_was_clamped = 1;
471
472 /* compute "max_frame_size" */
473
474 usbd_update_max_frame_size(xfer);
475
476 /* check interrupt interval and transfer pre-delay */
477
478 if (type == UE_ISOCHRONOUS) {
479
480 uint16_t frame_limit;
481
482 xfer->interval = 0; /* not used, must be zero */
483 xfer->flags_int.isochronous_xfr = 1; /* set flag */
484
485 if (xfer->timeout == 0) {
486 /*
487 * set a default timeout in
488 * case something goes wrong!
489 */
490 xfer->timeout = 1000 / 4;
491 }
492 switch (parm->speed) {
493 case USB_SPEED_LOW:
494 case USB_SPEED_FULL:
495 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
496 xfer->fps_shift = 0;
497 break;
498 default:
499 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
500 xfer->fps_shift = edesc->bInterval;
501 if (xfer->fps_shift > 0)
502 xfer->fps_shift--;
503 if (xfer->fps_shift > 3)
504 xfer->fps_shift = 3;
505 if (xfer->flags.pre_scale_frames != 0)
506 xfer->nframes <<= (3 - xfer->fps_shift);
507 break;
508 }
509
510 if (xfer->nframes > frame_limit) {
511 /*
512 * this is not going to work
513 * cross hardware
514 */
515 parm->err = USB_ERR_INVAL;
516 goto done;
517 }
518 if (xfer->nframes == 0) {
519 /*
520 * this is not a valid value
521 */
522 parm->err = USB_ERR_ZERO_NFRAMES;
523 goto done;
524 }
525 } else {
526
527 /*
528 * If a value is specified use that else check the
529 * endpoint descriptor!
530 */
531 if (type == UE_INTERRUPT) {
532
533 uint32_t temp;
534
535 if (xfer->interval == 0) {
536
537 xfer->interval = edesc->bInterval;
538
539 switch (parm->speed) {
540 case USB_SPEED_LOW:
541 case USB_SPEED_FULL:
542 break;
543 default:
544 /* convert 125us-unit bInterval (2**(bInterval-1)) to milliseconds */
545 if (xfer->interval < 4)
546 xfer->interval = 1;
547 else if (xfer->interval > 16)
548 xfer->interval = (1 << (16 - 4));
549 else
550 xfer->interval =
551 (1 << (xfer->interval - 4));
552 break;
553 }
554 }
555
556 if (xfer->interval == 0) {
557 /*
558 * One millisecond is the smallest
559 * interval we support:
560 */
561 xfer->interval = 1;
562 }
563
564 xfer->fps_shift = 0;
565 temp = 1;
566
567 while ((temp != 0) && (temp < xfer->interval)) {
568 xfer->fps_shift++;
569 temp *= 2;
570 }
571
572 switch (parm->speed) {
573 case USB_SPEED_LOW:
574 case USB_SPEED_FULL:
575 break;
576 default:
577 xfer->fps_shift += 3;
578 break;
579 }
580 }
581 }
582
583 /*
584 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
585 * to be equal to zero when setting up USB transfers, because
586 * that would lead to a lot of extra code in the USB kernel.
587 */
588
589 if ((xfer->max_frame_size == 0) ||
590 (xfer->max_packet_size == 0)) {
591
592 zmps = 1;
593
594 if ((parm->bufsize <= MIN_PKT) &&
595 (type != UE_CONTROL) &&
596 (type != UE_BULK)) {
597
598 /* workaround */
599 xfer->max_packet_size = MIN_PKT;
600 xfer->max_packet_count = 1;
601 parm->bufsize = 0; /* automatic setup length */
602 usbd_update_max_frame_size(xfer);
603
604 } else {
605 parm->err = USB_ERR_ZERO_MAXP;
606 goto done;
607 }
608
609 } else {
610 zmps = 0;
611 }
612
613 /*
614 * check if we should setup a default
615 * length:
616 */
617
618 if (parm->bufsize == 0) {
619
620 parm->bufsize = xfer->max_frame_size;
621
622 if (type == UE_ISOCHRONOUS) {
623 parm->bufsize *= xfer->nframes;
624 }
625 }
626 /*
627 * check if we are about to setup a proxy
628 * type of buffer:
629 */
630
631 if (xfer->flags.proxy_buffer) {
632
633 /* round bufsize up to a whole number of maximum frame sizes */
634
635 parm->bufsize += (xfer->max_frame_size - 1);
636
637 if (parm->bufsize < xfer->max_frame_size) {
638 /* length wrapped around */
639 parm->err = USB_ERR_INVAL;
640 goto done;
641 }
642 /* subtract remainder */
643
644 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
645
646 /* add length of USB device request structure, if any */
647
648 if (type == UE_CONTROL) {
649 parm->bufsize += REQ_SIZE; /* SETUP message */
650 }
651 }
652 xfer->max_data_length = parm->bufsize;
653
654 /* Setup "n_frlengths" and "n_frbuffers" */
655
656 if (type == UE_ISOCHRONOUS) {
657 n_frlengths = xfer->nframes;
658 n_frbuffers = 1;
659 } else {
660
661 if (type == UE_CONTROL) {
662 xfer->flags_int.control_xfr = 1;
663 if (xfer->nframes == 0) {
664 if (parm->bufsize <= REQ_SIZE) {
665 /*
666 * there will never be any data
667 * stage
668 */
669 xfer->nframes = 1;
670 } else {
671 xfer->nframes = 2;
672 }
673 }
674 } else {
675 if (xfer->nframes == 0) {
676 xfer->nframes = 1;
677 }
678 }
679
680 n_frlengths = xfer->nframes;
681 n_frbuffers = xfer->nframes;
682 }
683
684 /*
685 * check if we have room for the
686 * USB device request structure:
687 */
688
689 if (type == UE_CONTROL) {
690
691 if (xfer->max_data_length < REQ_SIZE) {
692 /* length wrapped around or too small bufsize */
693 parm->err = USB_ERR_INVAL;
694 goto done;
695 }
696 xfer->max_data_length -= REQ_SIZE;
697 }
698 /*
699 * Setup "frlengths" and shadow "frlengths" for keeping the
700 * initial frame lengths when a USB transfer is complete. This
701 * information is useful when computing isochronous offsets.
702 */
703 xfer->frlengths = parm->xfer_length_ptr;
704 parm->xfer_length_ptr += 2 * n_frlengths;
705
706 /* setup "frbuffers" */
707 xfer->frbuffers = parm->xfer_page_cache_ptr;
708 parm->xfer_page_cache_ptr += n_frbuffers;
709
710 /* initialize max frame count */
711 xfer->max_frame_count = xfer->nframes;
712
713 /*
714 * check if we need to setup
715 * a local buffer:
716 */
717
718 if (!xfer->flags.ext_buffer) {
719 #if USB_HAVE_BUSDMA
720 struct usb_page_search page_info;
721 struct usb_page_cache *pc;
722
723 if (usbd_transfer_setup_sub_malloc(parm,
724 &pc, parm->bufsize, 1, 1)) {
725 parm->err = USB_ERR_NOMEM;
726 } else if (parm->buf != NULL) {
727
728 usbd_get_page(pc, 0, &page_info);
729
730 xfer->local_buffer = page_info.buffer;
731
732 usbd_xfer_set_frame_offset(xfer, 0, 0);
733
734 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
735 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
736 }
737 }
738 #else
739 /* align data */
740 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
741
742 if (parm->buf != NULL) {
743 xfer->local_buffer =
744 USB_ADD_BYTES(parm->buf, parm->size[0]);
745
746 usbd_xfer_set_frame_offset(xfer, 0, 0);
747
748 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
749 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
750 }
751 }
752 parm->size[0] += parm->bufsize;
753
754 /* align data again */
755 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
756 #endif
757 }
758 /*
759 * Compute maximum buffer size
760 */
761
762 if (parm->bufsize_max < parm->bufsize) {
763 parm->bufsize_max = parm->bufsize;
764 }
765 #if USB_HAVE_BUSDMA
766 if (xfer->flags_int.bdma_enable) {
767 /*
768 * Setup "dma_page_ptr".
769 *
770 * Proof for formula below:
771 *
772 * Assume there are three USB frames having length "a", "b" and
773 * "c". These USB frames will at maximum need "z"
774 * "usb_page" structures. "z" is given by:
775 *
776 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
777 * ((c / USB_PAGE_SIZE) + 2);
778 *
779 * Constraining "a", "b" and "c" like this:
780 *
781 * (a + b + c) <= parm->bufsize
782 *
783 * We know that:
784 *
785 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
786 *
787 * Here is the general formula:
788 */
789 xfer->dma_page_ptr = parm->dma_page_ptr;
790 parm->dma_page_ptr += (2 * n_frbuffers);
791 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
792 }
793 #endif
794 if (zmps) {
795 /* correct maximum data length */
796 xfer->max_data_length = 0;
797 }
798 /* subtract USB frame remainder from "hc_max_frame_size" */
799
800 xfer->max_hc_frame_size =
801 (parm->hc_max_frame_size -
802 (parm->hc_max_frame_size % xfer->max_frame_size));
803
804 if (xfer->max_hc_frame_size == 0) {
805 parm->err = USB_ERR_INVAL;
806 goto done;
807 }
808
809 /* initialize frame buffers */
810
811 if (parm->buf) {
812 for (x = 0; x != n_frbuffers; x++) {
813 xfer->frbuffers[x].tag_parent =
814 &xfer->xroot->dma_parent_tag;
815 #if USB_HAVE_BUSDMA
816 if (xfer->flags_int.bdma_enable &&
817 (parm->bufsize_max > 0)) {
818
819 if (usb_pc_dmamap_create(
820 xfer->frbuffers + x,
821 parm->bufsize_max)) {
822 parm->err = USB_ERR_NOMEM;
823 goto done;
824 }
825 }
826 #endif
827 }
828 }
829 done:
830 if (parm->err) {
831 /*
832 * Set some dummy values so that we avoid division by zero:
833 */
834 xfer->max_hc_frame_size = 1;
835 xfer->max_frame_size = 1;
836 xfer->max_packet_size = 1;
837 xfer->max_data_length = 0;
838 xfer->nframes = 0;
839 xfer->max_frame_count = 0;
840 }
841 }
842
843 static uint8_t
844 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
845 uint16_t n_setup)
846 {
847 uint8_t type;
848
849 while (n_setup--) {
850 type = setup_start[n_setup].type;
851 if ((type == UE_BULK) || (type == UE_BULK_INTR) ||
852 (type == UE_TYPE_ANY))
853 return (1);
854 }
855 return (0);
856 }
857
858 /*------------------------------------------------------------------------*
859 * usbd_transfer_setup - setup an array of USB transfers
860 *
861 * NOTE: You must always call "usbd_transfer_unsetup" after calling
862 * "usbd_transfer_setup" if success was returned.
863 *
864 * The idea is that the USB device driver should pre-allocate all its
865 * transfers by one call to this function.
866 *
867 * Return values:
868 * 0: Success
869 * Else: Failure
870 *------------------------------------------------------------------------*/
871 usb_error_t
872 usbd_transfer_setup(struct usb_device *udev,
873 const uint8_t *ifaces, struct usb_xfer **ppxfer,
874 const struct usb_config *setup_start, uint16_t n_setup,
875 void *priv_sc, struct mtx *xfer_mtx)
876 {
877 const struct usb_config *setup_end = setup_start + n_setup;
878 const struct usb_config *setup;
879 struct usb_setup_params *parm;
880 struct usb_endpoint *ep;
881 struct usb_xfer_root *info;
882 struct usb_xfer *xfer;
883 void *buf = NULL;
884 usb_error_t error = USB_ERR_NORMAL_COMPLETION;
885 uint16_t n;
886 uint16_t refcount;
887 uint8_t do_unlock;
888
889 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
890 "usbd_transfer_setup can sleep!");
891
892 /* do some checking first */
893
894 if (n_setup == 0) {
895 DPRINTFN(5, "setup array has zero length!\n");
896 return (USB_ERR_INVAL);
897 }
898 if (ifaces == NULL) {
899 DPRINTFN(5, "ifaces array is NULL!\n");
900 return (USB_ERR_INVAL);
901 }
902 if (xfer_mtx == NULL) {
903 DPRINTFN(5, "using global lock\n");
904 xfer_mtx = &Giant;
905 }
906
907 /* more sanity checks */
908
909 for (setup = setup_start, n = 0;
910 setup != setup_end; setup++, n++) {
911 if (setup->bufsize == (usb_frlength_t)-1) {
912 error = USB_ERR_BAD_BUFSIZE;
913 DPRINTF("invalid bufsize\n");
914 }
915 if (setup->callback == NULL) {
916 error = USB_ERR_NO_CALLBACK;
917 DPRINTF("no callback\n");
918 }
919 ppxfer[n] = NULL;
920 }
921
922 if (error)
923 return (error);
924
925 /* Protect scratch area */
926 do_unlock = usbd_ctrl_lock(udev);
927
928 refcount = 0;
929 info = NULL;
930
931 parm = &udev->scratch.xfer_setup[0].parm;
932 (void)memset_s(parm, sizeof(*parm), 0, sizeof(*parm));
933
934 parm->udev = udev;
935 parm->speed = usbd_get_speed(udev);
936 parm->hc_max_packet_count = 1;
937
938 if (parm->speed >= USB_SPEED_MAX) {
939 parm->err = USB_ERR_INVAL;
940 goto done;
941 }
942 /* setup all transfers */
943
944 while (1) {
945
946 if (buf) {
947 /*
948 * Initialize the "usb_xfer_root" structure,
949 * which is common for all our USB transfers.
950 */
951 info = USB_ADD_BYTES(buf, 0);
952
953 info->memory_base = buf;
954 info->memory_size = parm->size[0];
955
956 #if USB_HAVE_BUSDMA
957 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
958 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
959 #endif
960 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
961 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
962
963 cv_init(&info->cv_drain, "WDRAIN");
964
965 info->xfer_mtx = xfer_mtx;
966 #if USB_HAVE_BUSDMA
967 usb_dma_tag_setup(&info->dma_parent_tag,
968 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
969 xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits, parm->dma_tag_max);
970 #endif
971
972 info->bus = udev->bus;
973 info->udev = udev;
974
975 TAILQ_INIT(&info->done_q.head);
976 info->done_q.command = &usbd_callback_wrapper;
977 #if USB_HAVE_BUSDMA
978 TAILQ_INIT(&info->dma_q.head);
979 info->dma_q.command = &usb_bdma_work_loop;
980 #endif
981 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
982 info->done_m[0].xroot = info;
983 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
984 info->done_m[1].xroot = info;
985
986 /*
987 * In device side mode control endpoint
988 * requests need to run from a separate
989 * context, else there is a chance of
990 * deadlock!
991 */
992 if (setup_start == usb_control_ep_cfg ||
993 setup_start == usb_control_ep_quirk_cfg)
994 info->done_p =
995 USB_BUS_CONTROL_XFER_PROC(udev->bus);
996 else if (xfer_mtx == &Giant)
997 info->done_p =
998 USB_BUS_GIANT_PROC(udev->bus);
999 else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
1000 info->done_p =
1001 USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
1002 else
1003 info->done_p =
1004 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
1005 }
1006 /* reset sizes */
1007
1008 parm->size[0] = 0;
1009 parm->buf = buf;
1010 parm->size[0] += sizeof(info[0]);
1011
1012 for (setup = setup_start, n = 0;
1013 setup != setup_end; setup++, n++) {
1014
1015 /* skip USB transfers without callbacks: */
1016 if (setup->callback == NULL) {
1017 continue;
1018 }
1019 /* see if there is a matching endpoint */
1020 ep = usbd_get_endpoint(udev,
1021 ifaces[setup->if_index], setup);
1022
1023 /*
1024 * Check that the USB PIPE is valid and that
1025 * the endpoint mode is proper.
1026 *
1027 * Make sure we don't allocate a streams
1028 * transfer when such a combination is not
1029 * valid.
1030 */
1031 if ((ep == NULL) || (ep->methods == NULL) ||
1032 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1033 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1034 ((setup->stream_id != 0) &&
1035 ((setup->stream_id >= USB_MAX_EP_STREAMS) ||
1036 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1037 if (setup->flags.no_pipe_ok)
1038 continue;
1039 if ((setup->usb_mode != USB_MODE_DUAL) &&
1040 (setup->usb_mode != udev->flags.usb_mode))
1041 continue;
1042 parm->err = USB_ERR_NO_PIPE;
1043 goto done;
1044 }
1045
1046 /* align data properly */
1047 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1048
1049 /* store current setup pointer */
1050 parm->curr_setup = setup;
1051
1052 if (buf) {
1053 /*
1054 * Common initialization of the
1055 * "usb_xfer" structure.
1056 */
1057 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1058 xfer->address = udev->address;
1059 xfer->priv_sc = priv_sc;
1060 xfer->xroot = info;
1061
1062 callout_init_mtx(&xfer->timeout_handle,
1063 &udev->bus->bus_mtx, 0);
1064 } else {
1065 /*
1066 * Setup a dummy xfer, hence we are
1067 * writing to the "usb_xfer"
1068 * structure pointed to by "xfer"
1069 * before we have allocated any
1070 * memory:
1071 */
1072 xfer = &udev->scratch.xfer_setup[0].dummy;
1073 (void)memset_s(xfer, sizeof(*xfer), 0, sizeof(*xfer));
1074 refcount++;
1075 }
1076
1077 /* set transfer endpoint pointer */
1078 xfer->endpoint = ep;
1079
1080 /* set transfer stream ID */
1081 xfer->stream_id = setup->stream_id;
1082
1083 parm->size[0] += sizeof(xfer[0]);
1084 parm->methods = xfer->endpoint->methods;
1085 parm->curr_xfer = xfer;
1086
1087 /*
1088 * Call the Host or Device controller transfer
1089 * setup routine:
1090 */
1091 (udev->bus->methods->xfer_setup) (parm);
1092
1093 /* check for error */
1094 if (parm->err)
1095 goto done;
1096
1097 if (buf) {
1098 /*
1099 * Increment the endpoint refcount. This
1100 * basically prevents setting a new
1101 * configuration and alternate setting
1102 * when USB transfers are in use on
1103 * the given interface. Search the USB
1104 * code for "endpoint->refcount_alloc" if you
1105 * want more information.
1106 */
1107 USB_BUS_LOCK(info->bus);
1108 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1109 parm->err = USB_ERR_INVAL;
1110
1111 xfer->endpoint->refcount_alloc++;
1112
1113 if (xfer->endpoint->refcount_alloc == 0)
1114 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1115 USB_BUS_UNLOCK(info->bus);
1116
1117 /*
1118 * Whenever we set ppxfer[] then we
1119 * also need to increment the
1120 * "setup_refcount":
1121 */
1122 info->setup_refcount++;
1123
1124 /*
1125 * Transfer is successfully setup and
1126 * can be used:
1127 */
1128 ppxfer[n] = xfer;
1129 }
1130
1131 /* check for error */
1132 if (parm->err)
1133 goto done;
1134 }
1135
1136 if ((buf != NULL) || (parm->err != 0))
1137 goto done;
1138
1139 /* if no transfers, nothing to do */
1140 if (refcount == 0)
1141 goto done;
1142
1143 /* align data properly */
1144 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1145
1146 /* store offset temporarily */
1147 parm->size[1] = parm->size[0];
1148
1149 /*
1150 * The number of DMA tags required depends on
1151 * the number of endpoints. The current estimate
1152 * for maximum number of DMA tags per endpoint
1153 * is three:
1154 * 1) for loading memory
1155 * 2) for allocating memory
1156 * 3) for fixing memory [UHCI]
1157 */
1158 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1159
1160 /*
1161 * DMA tags for QH, TD, Data and more.
1162 */
1163 parm->dma_tag_max += 8;
1164
1165 parm->dma_tag_p += parm->dma_tag_max;
1166
1167 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1168 ((uint8_t *)0);
1169
1170 /* align data properly */
1171 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1172
1173 /* store offset temporarily */
1174 parm->size[3] = parm->size[0];
1175
1176 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1177 ((uint8_t *)0);
1178
1179 /* align data properly */
1180 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1181
1182 /* store offset temporarily */
1183 parm->size[4] = parm->size[0];
1184
1185 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1186 ((uint8_t *)0);
1187
1188 /* store end offset temporarily */
1189 parm->size[5] = parm->size[0];
1190
1191 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1192 ((uint8_t *)0);
1193
1194 /* store end offset temporarily */
1195
1196 parm->size[2] = parm->size[0];
1197
1198 /* align data properly */
1199 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1200
1201 parm->size[6] = parm->size[0];
1202
1203 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1204 ((uint8_t *)0);
1205
1206 /* align data properly */
1207 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1208
1209 /* allocate zeroed memory */
1210 buf = bsd_malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1211
1212 if (buf == NULL) {
1213 parm->err = USB_ERR_NOMEM;
1214 DPRINTFN(0, "cannot allocate memory block for "
1215 "configuration (%d bytes)\n",
1216 parm->size[0]);
1217 goto done;
1218 }
1219 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1220 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1221 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1222 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1223 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1224 }
1225
1226 done:
1227 if (buf) {
1228 if (info->setup_refcount == 0) {
1229 /*
1230 * "usbd_transfer_unsetup_sub" will unlock
1231 * the bus mutex before returning !
1232 */
1233 USB_BUS_LOCK(info->bus);
1234
1235 /* something went wrong */
1236 usbd_transfer_unsetup_sub(info, 0);
1237 }
1238 }
1239
1240 /* check if any errors happened */
1241 if (parm->err)
1242 usbd_transfer_unsetup(ppxfer, n_setup);
1243
1244 error = parm->err;
1245
1246 if (do_unlock)
1247 usbd_ctrl_unlock(udev);
1248
1249 return (error);
1250 }
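
/*
 * Illustrative usage (not part of this file): a minimal sketch of how a
 * client driver typically pre-allocates all of its transfers with one
 * call to usbd_transfer_setup() and later frees them again with
 * usbd_transfer_unsetup(). The "mydev" names, endpoint layout, flags
 * and buffer sizes below are hypothetical.
 */
#if 0
enum {
	MYDEV_BULK_RD,
	MYDEV_BULK_WR,
	MYDEV_N_TRANSFER,
};

static usb_callback_t mydev_bulk_read_callback;
static usb_callback_t mydev_bulk_write_callback;

struct mydev_softc {
	struct mtx sc_mtx;
	struct usb_xfer *sc_xfer[MYDEV_N_TRANSFER];
};

static const struct usb_config mydev_config[MYDEV_N_TRANSFER] = {
	[MYDEV_BULK_RD] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_IN,
		.bufsize = 512,		/* bytes */
		.flags = {.pipe_bof = 1, .short_xfer_ok = 1,},
		.callback = &mydev_bulk_read_callback,
	},
	[MYDEV_BULK_WR] = {
		.type = UE_BULK,
		.endpoint = UE_ADDR_ANY,
		.direction = UE_DIR_OUT,
		.bufsize = 512,		/* bytes */
		.flags = {.pipe_bof = 1,},
		.callback = &mydev_bulk_write_callback,
	},
};

static usb_error_t
mydev_setup_transfers(struct mydev_softc *sc, struct usb_device *udev,
    uint8_t iface_index)
{
	/* pre-allocate all transfers in one call */
	return (usbd_transfer_setup(udev, &iface_index, sc->sc_xfer,
	    mydev_config, MYDEV_N_TRANSFER, sc, &sc->sc_mtx));
}

static void
mydev_free_transfers(struct mydev_softc *sc)
{
	/* stops, drains and frees all transfers; clears sc_xfer[] entries */
	usbd_transfer_unsetup(sc->sc_xfer, MYDEV_N_TRANSFER);
}
#endif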
1251
1252 /*------------------------------------------------------------------------*
1253 * usbd_transfer_unsetup_sub - factored out code
1254 *------------------------------------------------------------------------*/
1255 static void
1256 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1257 {
1258 #if USB_HAVE_BUSDMA
1259 struct usb_page_cache *pc;
1260 #endif
1261
1262 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1263
1264 /* wait for any outstanding DMA operations */
1265
1266 if (needs_delay) {
1267 usb_timeout_t temp;
1268 temp = usbd_get_dma_delay(info->udev);
1269 if (temp != 0) {
1270 usb_pause_mtx(&info->bus->bus_mtx,
1271 USB_MS_TO_TICKS(temp));
1272 }
1273 }
1274
1275 /* make sure that our done messages are not queued anywhere */
1276 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1277
1278 USB_BUS_UNLOCK(info->bus);
1279
1280 #if USB_HAVE_BUSDMA
1281 /* free DMA'able memory, if any */
1282 pc = info->dma_page_cache_start;
1283 while (pc != info->dma_page_cache_end) {
1284 usb_pc_free_mem(pc);
1285 pc++;
1286 }
1287
1288 /* free DMA maps in all "xfer->frbuffers" */
1289 pc = info->xfer_page_cache_start;
1290 while (pc != info->xfer_page_cache_end) {
1291 usb_pc_dmamap_destroy(pc);
1292 pc++;
1293 }
1294
1295 /* free all DMA tags */
1296 usb_dma_tag_unsetup(&info->dma_parent_tag);
1297 #endif
1298
1299 cv_destroy(&info->cv_drain);
1300
1301 /*
1302 * free the "memory_base" last, because the "info" structure is
1303 * contained within the "memory_base"!
1304 */
1305 bsd_free(info->memory_base, M_USB);
1306 info->memory_base = NULL;
1307 }
1308
1309 /*------------------------------------------------------------------------*
1310 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1311 *
1312 * NOTE: All USB transfers in progress will get called back passing
1313 * the error code "USB_ERR_CANCELLED" before this function
1314 * returns.
1315 *------------------------------------------------------------------------*/
1316 void
1317 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1318 {
1319 struct usb_xfer *xfer;
1320 struct usb_xfer_root *info;
1321 uint8_t needs_delay = 0;
1322
1323 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1324 "usbd_transfer_unsetup can sleep!");
1325
1326 while (n_setup--) {
1327 xfer = pxfer[n_setup];
1328
1329 if (xfer == NULL)
1330 continue;
1331
1332 info = xfer->xroot;
1333
1334 USB_XFER_LOCK(xfer);
1335 USB_BUS_LOCK(info->bus);
1336
1337 /*
1338 * HINT: when you start/stop a transfer, it might be a
1339 * good idea to directly use the "pxfer[]" structure:
1340 *
1341 * usbd_transfer_start(sc->pxfer[0]);
1342 * usbd_transfer_stop(sc->pxfer[0]);
1343 *
1344 * That way, if your code has many parts that will not
1345 * stop running under the same lock, in other words
1346 * "xfer_mtx", the usbd_transfer_start and
1347 * usbd_transfer_stop functions will simply return
1348 * when they detect a NULL pointer argument.
1349 *
1350 * To avoid any races we clear the "pxfer[]" pointer
1351 * while holding the private mutex of the driver:
1352 */
1353 pxfer[n_setup] = NULL;
1354
1355 USB_BUS_UNLOCK(info->bus);
1356 USB_XFER_UNLOCK(xfer);
1357
1358 usbd_transfer_drain(xfer);
1359
1360 #if USB_HAVE_BUSDMA
1361 if (xfer->flags_int.bdma_enable)
1362 needs_delay = 1;
1363 #endif
1364 /*
1365 * NOTE: default endpoint does not have an
1366 * interface, even if endpoint->iface_index == 0
1367 */
1368 USB_BUS_LOCK(info->bus);
1369 xfer->endpoint->refcount_alloc--;
1370 USB_BUS_UNLOCK(info->bus);
1371
1372 callout_drain(&xfer->timeout_handle);
1373
1374 USB_BUS_LOCK(info->bus);
1375
1376 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1377 "reference count\n"));
1378
1379 info->setup_refcount--;
1380
1381 if (info->setup_refcount == 0) {
1382 usbd_transfer_unsetup_sub(info,
1383 needs_delay);
1384 } else {
1385 USB_BUS_UNLOCK(info->bus);
1386 }
1387 }
1388 }
1389
1390 /*------------------------------------------------------------------------*
1391 * usbd_control_transfer_init - factored out code
1392 *
1393 * In USB Device Mode we have to wait for the SETUP packet which
1394 * contains the "struct usb_device_request" structure, before we can
1395 * transfer any data. In USB Host Mode we already have the SETUP
1396 * packet at the moment the USB transfer is started. This leads us to
1397 * having to setup the USB transfer at two different places in
1398 * time. This function just contains factored out control transfer
1399 * initialisation code, so that we don't duplicate the code.
1400 *------------------------------------------------------------------------*/
1401 static void
1402 usbd_control_transfer_init(struct usb_xfer *xfer)
1403 {
1404 struct usb_device_request req;
1405
1406 /* copy out the USB request header */
1407
1408 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1409
1410 /* setup remainder */
1411
1412 xfer->flags_int.control_rem = UGETW(req.wLength);
1413
1414 /* copy direction to endpoint variable */
1415
1416 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1417 xfer->endpointno |=
1418 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1419 }
1420
1421 /*------------------------------------------------------------------------*
1422 * usbd_control_transfer_did_data
1423 *
1424 * This function returns non-zero if a control endpoint has
1425 * transferred the first DATA packet after the SETUP packet.
1426 * Else it returns zero.
1427 *------------------------------------------------------------------------*/
1428 static uint8_t
1429 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1430 {
1431 struct usb_device_request req;
1432
1433 /* SETUP packet is not yet sent */
1434 if (xfer->flags_int.control_hdr != 0)
1435 return (0);
1436
1437 /* copy out the USB request header */
1438 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1439
1440 /* compare remainder to the initial value */
1441 return (xfer->flags_int.control_rem != UGETW(req.wLength));
1442 }
1443
1444 /*------------------------------------------------------------------------*
1445 * usbd_setup_ctrl_transfer
1446 *
1447 * This function handles initialisation of control transfers. Control
1448 * transfers are special in that they can both transmit
1449 * and receive data.
1450 *
1451 * Return values:
1452 * 0: Success
1453 * Else: Failure
1454 *------------------------------------------------------------------------*/
1455 static int
1456 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1457 {
1458 usb_frlength_t len;
1459
1460 /* Check for control endpoint stall */
1461 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1462 /* the control transfer is no longer active */
1463 xfer->flags_int.control_stall = 1;
1464 xfer->flags_int.control_act = 0;
1465 } else {
1466 /* don't stall control transfer by default */
1467 xfer->flags_int.control_stall = 0;
1468 }
1469
1470 /* Check for invalid number of frames */
1471 if (xfer->nframes > 2) {
1472 /*
1473 * If you need to split a control transfer, you
1474 * have to do one part at a time. Only with
1475 * non-control transfers can you do multiple
1476 * parts at a time.
1477 */
1478 DPRINTFN(0, "Too many frames: %u\n",
1479 (unsigned int)xfer->nframes);
1480 goto error;
1481 }
1482
1483 /*
1484 * Check if there is a control
1485 * transfer in progress:
1486 */
1487 if (xfer->flags_int.control_act) {
1488
1489 if (xfer->flags_int.control_hdr) {
1490
1491 /* clear send header flag */
1492
1493 xfer->flags_int.control_hdr = 0;
1494
1495 /* setup control transfer */
1496 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1497 usbd_control_transfer_init(xfer);
1498 }
1499 }
1500 /* get data length */
1501
1502 len = xfer->sumlen;
1503
1504 } else {
1505
1506 /* the size of the SETUP structure is hardcoded ! */
1507
1508 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1509 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1510 xfer->frlengths[0], sizeof(struct
1511 usb_device_request));
1512 goto error;
1513 }
1514 /* check USB mode */
1515 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1516
1517 /* check number of frames */
1518 if (xfer->nframes != 1) {
1519 /*
1520 * We need to receive the setup
1521 * message first so that we know the
1522 * data direction!
1523 */
1524 DPRINTF("Misconfigured transfer\n");
1525 goto error;
1526 }
1527 /*
1528 * Set a dummy "control_rem" value. This
1529 * variable will be overwritten later by a
1530 * call to "usbd_control_transfer_init()" !
1531 */
1532 xfer->flags_int.control_rem = 0xFFFF;
1533 } else {
1534
1535 /* setup "endpoint" and "control_rem" */
1536
1537 usbd_control_transfer_init(xfer);
1538 }
1539
1540 /* set transfer-header flag */
1541
1542 xfer->flags_int.control_hdr = 1;
1543
1544 /* get data length */
1545
1546 len = (xfer->sumlen - sizeof(struct usb_device_request));
1547 }
1548
1549 /* update did data flag */
1550
1551 xfer->flags_int.control_did_data =
1552 usbd_control_transfer_did_data(xfer);
1553
1554 /* check if there is a length mismatch */
1555
1556 if (len > xfer->flags_int.control_rem) {
1557 DPRINTFN(0, "Length (%d) greater than "
1558 "remaining length (%d)\n", len,
1559 xfer->flags_int.control_rem);
1560 goto error;
1561 }
1562 /* check if we are doing a short transfer */
1563
1564 if (xfer->flags.force_short_xfer) {
1565 xfer->flags_int.control_rem = 0;
1566 } else {
1567 if ((len != xfer->max_data_length) &&
1568 (len != xfer->flags_int.control_rem) &&
1569 (xfer->nframes != 1)) {
1570 DPRINTFN(0, "Short control transfer without "
1571 "force_short_xfer set\n");
1572 goto error;
1573 }
1574 xfer->flags_int.control_rem -= len;
1575 }
1576
1577 /* the status part is executed when "control_act" is 0 */
1578
1579 if ((xfer->flags_int.control_rem > 0) ||
1580 (xfer->flags.manual_status)) {
1581 /* don't execute the STATUS stage yet */
1582 xfer->flags_int.control_act = 1;
1583
1584 /* sanity check */
1585 if ((!xfer->flags_int.control_hdr) &&
1586 (xfer->nframes == 1)) {
1587 /*
1588 * This is not a valid operation!
1589 */
1590 DPRINTFN(0, "Invalid parameter "
1591 "combination\n");
1592 goto error;
1593 }
1594 } else {
1595 /* time to execute the STATUS stage */
1596 xfer->flags_int.control_act = 0;
1597 }
1598 return (0); /* success */
1599
1600 error:
1601 return (1); /* failure */
1602 }
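
/*
 * Illustrative usage (not part of this file): from the USB_ST_SETUP
 * case of a driver's USB callback, a control transfer is typically
 * framed with the 8-byte SETUP packet in frame 0 and the optional data
 * stage in frame 1, which is the layout usbd_setup_ctrl_transfer()
 * expects. The vendor request and the 64-byte data stage below are
 * hypothetical; this sketch assumes a proxy-buffer control transfer.
 */
#if 0
	case USB_ST_SETUP: {
		struct usb_device_request req;

		req.bmRequestType = UT_READ_VENDOR_DEVICE;
		req.bRequest = 0x01;	/* hypothetical vendor request */
		USETW(req.wValue, 0);
		USETW(req.wIndex, 0);
		USETW(req.wLength, 64);

		/* frame 0 carries the SETUP packet */
		usbd_copy_in(xfer->frbuffers, 0, &req, sizeof(req));
		usbd_xfer_set_frame_len(xfer, 0, sizeof(req));

		/* frame 1 receives the data stage */
		usbd_xfer_set_frame_len(xfer, 1, 64);

		usbd_xfer_set_frames(xfer, 2);
		usbd_transfer_submit(xfer);
		break;
	}
#endif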
1603
1604 /*------------------------------------------------------------------------*
1605 * usbd_transfer_submit - start USB hardware for the given transfer
1606 *
1607 * This function should only be called from the USB callback.
1608 *------------------------------------------------------------------------*/
1609 void
1610 usbd_transfer_submit(struct usb_xfer *xfer)
1611 {
1612 struct usb_xfer_root *info;
1613 struct usb_bus *bus;
1614 usb_frcount_t x;
1615
1616 info = xfer->xroot;
1617 bus = info->bus;
1618
1619 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1620 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1621 "read" : "write");
1622
1623 #ifdef LOSCFG_USB_DEBUG
1624 if (USB_DEBUG_VAR > 0) {
1625 USB_BUS_LOCK(bus);
1626
1627 usb_dump_endpoint(xfer->endpoint);
1628
1629 USB_BUS_UNLOCK(bus);
1630 }
1631 #endif
1632
1633 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1634 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1635
1636 /* Only open the USB transfer once! */
1637 if (!xfer->flags_int.open) {
1638 xfer->flags_int.open = 1;
1639
1640 DPRINTF("open\n");
1641
1642 USB_BUS_LOCK(bus);
1643 (xfer->endpoint->methods->open) (xfer);
1644 USB_BUS_UNLOCK(bus);
1645 }
1646 /* set "transferring" flag */
1647 xfer->flags_int.transferring = 1;
1648
1649 #if USB_HAVE_POWERD
1650 /* increment power reference */
1651 usbd_transfer_power_ref(xfer, 1);
1652 #endif
1653 /*
1654 * Check if the transfer is waiting on a queue, most
1655 * frequently the "done_q":
1656 */
1657 if (xfer->wait_queue) {
1658 USB_BUS_LOCK(bus);
1659 usbd_transfer_dequeue(xfer);
1660 USB_BUS_UNLOCK(bus);
1661 }
1662 /* clear "did_dma_delay" flag */
1663 xfer->flags_int.did_dma_delay = 0;
1664
1665 /* clear "did_close" flag */
1666 xfer->flags_int.did_close = 0;
1667
1668 #if USB_HAVE_BUSDMA
1669 /* clear "bdma_setup" flag */
1670 xfer->flags_int.bdma_setup = 0;
1671 #endif
1672 /* by default we cannot cancel any USB transfer immediately */
1673 xfer->flags_int.can_cancel_immed = 0;
1674
1675 /* clear lengths and frame counts by default */
1676 xfer->sumlen = 0;
1677 xfer->actlen = 0;
1678 xfer->aframes = 0;
1679
1680 /* clear any previous errors */
1681 xfer->error = USB_ERR_NORMAL_COMPLETION;
1682
1683 /* Check if the device is still alive */
1684 if (info->udev->state < USB_STATE_POWERED) {
1685 USB_BUS_LOCK(bus);
1686 /*
1687 * Must return cancelled error code else
1688 * device drivers can hang.
1689 */
1690 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1691 USB_BUS_UNLOCK(bus);
1692 return;
1693 }
1694
1695 /* sanity check */
1696 if (xfer->nframes == 0) {
1697 if (xfer->flags.stall_pipe) {
1698 /*
1699 * Special case - want to stall without transferring
1700 * any data:
1701 */
1702 DPRINTF("xfer=%p nframes=0: stall "
1703 "or clear stall!\n", xfer);
1704 USB_BUS_LOCK(bus);
1705 xfer->flags_int.can_cancel_immed = 1;
1706 /* start the transfer */
1707 usb_command_wrapper(&xfer->endpoint->
1708 endpoint_q[xfer->stream_id], xfer);
1709 USB_BUS_UNLOCK(bus);
1710 return;
1711 }
1712 USB_BUS_LOCK(bus);
1713 usbd_transfer_done(xfer, USB_ERR_INVAL);
1714 USB_BUS_UNLOCK(bus);
1715 return;
1716 }
1717 /* compute some variables */
1718
1719 for (x = 0; x != xfer->nframes; x++) {
1720 /* make a copy of the frlengths[] */
1721 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1722 /* compute total transfer length */
1723 xfer->sumlen += xfer->frlengths[x];
1724 if (xfer->sumlen < xfer->frlengths[x]) {
1725 /* length wrapped around */
1726 USB_BUS_LOCK(bus);
1727 usbd_transfer_done(xfer, USB_ERR_INVAL);
1728 USB_BUS_UNLOCK(bus);
1729 return;
1730 }
1731 }
1732
1733 /* clear some internal flags */
1734
1735 xfer->flags_int.short_xfer_ok = 0;
1736 xfer->flags_int.short_frames_ok = 0;
1737
1738 /* check if this is a control transfer */
1739
1740 if (xfer->flags_int.control_xfr) {
1741
1742 if (usbd_setup_ctrl_transfer(xfer)) {
1743 USB_BUS_LOCK(bus);
1744 usbd_transfer_done(xfer, USB_ERR_STALLED);
1745 USB_BUS_UNLOCK(bus);
1746 return;
1747 }
1748 }
1749 /*
1750 * Setup filtered version of some transfer flags,
1751 * in case of data read direction
1752 */
1753 if (USB_GET_DATA_ISREAD(xfer)) {
1754
1755 if (xfer->flags.short_frames_ok) {
1756 xfer->flags_int.short_xfer_ok = 1;
1757 xfer->flags_int.short_frames_ok = 1;
1758 } else if (xfer->flags.short_xfer_ok) {
1759 xfer->flags_int.short_xfer_ok = 1;
1760
1761 /* check for control transfer */
1762 if (xfer->flags_int.control_xfr) {
1763 /*
1764 * 1) Control transfers do not support
1765 * reception of multiple short USB
1766 * frames in host mode and device side
1767 * mode, with exception of:
1768 *
1769 * 2) Due to sometimes buggy device
1770 * side firmware we need to do a
1771 * STATUS stage in case of short
1772 * control transfers in USB host mode.
1773 * The STATUS stage then becomes the
1774 * "alt_next" to the DATA stage.
1775 */
1776 xfer->flags_int.short_frames_ok = 1;
1777 }
1778 }
1779 }
1780 /*
1781 * Check if BUS-DMA support is enabled and try to load virtual
1782 * buffers into DMA, if any:
1783 */
1784 #if USB_HAVE_BUSDMA
1785 if (xfer->flags_int.bdma_enable) {
1786 /* insert the USB transfer last in the BUS-DMA queue */
1787 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1788 return;
1789 }
1790 #endif
1791 /*
1792 * Enter the USB transfer into the Host Controller or
1793 * Device Controller schedule:
1794 */
1795 usbd_pipe_enter(xfer);
1796 }
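
/*
 * Illustrative usage (not part of this file): usbd_transfer_submit() is
 * normally called from a driver's USB callback, which is driven through
 * the USB_ST_SETUP and USB_ST_TRANSFERRED states. A minimal bulk-read
 * callback sketch, reusing the hypothetical "mydev" names from the
 * usbd_transfer_setup() sketch earlier in this file, might look like
 * this:
 */
#if 0
static void
mydev_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct mydev_softc *sc = usbd_xfer_softc(xfer);
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* consume "actlen" bytes from frame 0 using "sc" here */
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);
		break;
	default:
		if (error != USB_ERR_CANCELLED) {
			/* try to clear the stall and restart */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
#endif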
1797
1798 /*------------------------------------------------------------------------*
1799 * usbd_pipe_enter - factored out code
1800 *------------------------------------------------------------------------*/
1801 void
1802 usbd_pipe_enter(struct usb_xfer *xfer)
1803 {
1804 struct usb_endpoint *ep;
1805
1806 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1807
1808 USB_BUS_LOCK(xfer->xroot->bus);
1809
1810 ep = xfer->endpoint;
1811
1812 DPRINTF("enter\n");
1813
1814 /* the transfer can now be cancelled */
1815 xfer->flags_int.can_cancel_immed = 1;
1816
1817 /* enter the transfer */
1818 (ep->methods->enter) (xfer);
1819
1820 /* check for transfer error */
1821 if (xfer->error) {
1822 /* some error has happened */
1823 usbd_transfer_done(xfer, (usb_error_t)0);
1824 USB_BUS_UNLOCK(xfer->xroot->bus);
1825 return;
1826 }
1827
1828 /* start the transfer */
1829 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1830 USB_BUS_UNLOCK(xfer->xroot->bus);
1831 }
1832
1833 /*------------------------------------------------------------------------*
1834 * usbd_transfer_start - start an USB transfer
1835 *
1836 * NOTE: Calling this function more than one time will only
1837 * result in a single transfer start, until the USB transfer
1838 * completes.
1839 *------------------------------------------------------------------------*/
1840 void
1841 usbd_transfer_start(struct usb_xfer *xfer)
1842 {
1843 if (xfer == NULL) {
1844 /* transfer is gone */
1845 return;
1846 }
1847 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1848
1849 /* mark the USB transfer started */
1850
1851 if (!xfer->flags_int.started) {
1852 /* lock the BUS lock to avoid races updating flags_int */
1853 USB_BUS_LOCK(xfer->xroot->bus);
1854 xfer->flags_int.started = 1;
1855 USB_BUS_UNLOCK(xfer->xroot->bus);
1856 }
1857 /* check if the USB transfer callback is already transferring */
1858
1859 if (xfer->flags_int.transferring) {
1860 return;
1861 }
1862 USB_BUS_LOCK(xfer->xroot->bus);
1863 /* call the USB transfer callback */
1864 usbd_callback_ss_done_defer(xfer);
1865 USB_BUS_UNLOCK(xfer->xroot->bus);
1866 }
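
/*
 * Illustrative usage (not part of this file): a transfer is started and
 * stopped while holding the mutex that was passed to
 * usbd_transfer_setup(); the "sc" softc below is hypothetical.
 */
#if 0
	mtx_lock(&sc->sc_mtx);
	usbd_transfer_start(sc->sc_xfer[MYDEV_BULK_RD]);
	mtx_unlock(&sc->sc_mtx);
#endif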
1867
1868 /*------------------------------------------------------------------------*
1869 * usbd_transfer_stop - stop an USB transfer
1870 *
1871 * NOTE: Calling this function more than one time will only
1872 * result in a single transfer stop.
1873 * NOTE: When this function returns it is not safe to free nor
1874 * reuse any DMA buffers. See "usbd_transfer_drain()".
1875 *------------------------------------------------------------------------*/
1876 void
1877 usbd_transfer_stop(struct usb_xfer *xfer)
1878 {
1879 struct usb_endpoint *ep;
1880
1881 if (xfer == NULL) {
1882 /* transfer is gone */
1883 return;
1884 }
1885 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1886
1887 /* check if the USB transfer was ever opened */
1888
1889 if (!xfer->flags_int.open) {
1890 if (xfer->flags_int.started) {
1891 /* nothing to do except clearing the "started" flag */
1892 /* lock the BUS lock to avoid races updating flags_int */
1893 USB_BUS_LOCK(xfer->xroot->bus);
1894 xfer->flags_int.started = 0;
1895 USB_BUS_UNLOCK(xfer->xroot->bus);
1896 }
1897 return;
1898 }
1899 /* try to stop the current USB transfer */
1900
1901 USB_BUS_LOCK(xfer->xroot->bus);
1902 /* override any previous error */
1903 xfer->error = USB_ERR_CANCELLED;
1904
1905 /*
1906 * Clear "open" and "started" when both private and USB lock
1907 * is locked so that we don't get a race updating "flags_int"
1908 */
1909 xfer->flags_int.open = 0;
1910 xfer->flags_int.started = 0;
1911
1912 /*
1913 * Check if we can cancel the USB transfer immediately.
1914 */
1915 if (xfer->flags_int.transferring) {
1916 if (xfer->flags_int.can_cancel_immed &&
1917 (!xfer->flags_int.did_close)) {
1918 DPRINTF("close\n");
1919 /*
1920 * The following will lead to an USB_ERR_CANCELLED
1921 * error code being passed to the USB callback.
1922 */
1923 (xfer->endpoint->methods->close) (xfer);
1924 /* only close once */
1925 xfer->flags_int.did_close = 1;
1926 } else {
1927 /* need to wait for the next done callback */
1928 }
1929 } else {
1930 DPRINTF("close\n");
1931
1932 /* close here and now */
1933 (xfer->endpoint->methods->close) (xfer);
1934
1935 /*
1936 * Any additional DMA delay is done by
1937 * "usbd_transfer_unsetup()".
1938 */
1939
1940 /*
1941 * Special case. Check if we need to restart a blocked
1942 * endpoint.
1943 */
1944 ep = xfer->endpoint;
1945
1946 /*
1947 * If the current USB transfer is completing we need
1948 * to start the next one:
1949 */
1950 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1951 usb_command_wrapper(
1952 &ep->endpoint_q[xfer->stream_id], NULL);
1953 }
1954 }
1955
1956 USB_BUS_UNLOCK(xfer->xroot->bus);
1957 }
1958
1959 /*------------------------------------------------------------------------*
1960 * usbd_transfer_pending
1961 *
1962 * This function will check if an USB transfer is pending, which is a
1963 * little bit complicated!
1964 * Return values:
1965 * 0: Not pending
1966 * 1: Pending: The USB transfer will receive a callback in the future.
1967 *------------------------------------------------------------------------*/
1968 uint8_t
1969 usbd_transfer_pending(struct usb_xfer *xfer)
1970 {
1971 struct usb_xfer_root *info;
1972 struct usb_xfer_queue *pq;
1973
1974 if (xfer == NULL) {
1975 /* transfer is gone */
1976 return (0);
1977 }
1978 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1979
1980 if (xfer->flags_int.transferring) {
1981 /* trivial case */
1982 return (1);
1983 }
1984 USB_BUS_LOCK(xfer->xroot->bus);
1985 if (xfer->wait_queue) {
1986 /* we are waiting on a queue somewhere */
1987 USB_BUS_UNLOCK(xfer->xroot->bus);
1988 return (1);
1989 }
1990 info = xfer->xroot;
1991 pq = &info->done_q;
1992
1993 if (pq->curr == xfer) {
1994 /* we are currently scheduled for callback */
1995 USB_BUS_UNLOCK(xfer->xroot->bus);
1996 return (1);
1997 }
1998 /* we are not pending */
1999 USB_BUS_UNLOCK(xfer->xroot->bus);
2000 return (0);
2001 }
2002
2003 /*------------------------------------------------------------------------*
2004 * usbd_transfer_drain
2005 *
2006 * This function will stop the USB transfer and wait for any
2007 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
2008 * are loaded into DMA can safely be freed or reused after this
2009 * function has returned.
2010 *------------------------------------------------------------------------*/
2011 void
2012 usbd_transfer_drain(struct usb_xfer *xfer)
2013 {
2014 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2015 "usbd_transfer_drain can sleep!");
2016
2017 if (xfer == NULL) {
2018 /* transfer is gone */
2019 return;
2020 }
2021 if (xfer->xroot->xfer_mtx != &Giant) {
2022 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
2023 }
2024 USB_XFER_LOCK(xfer);
2025
2026 usbd_transfer_stop(xfer);
2027
2028 while (usbd_transfer_pending(xfer) ||
2029 xfer->flags_int.doing_callback) {
2030
2031 /*
2032 * The callback is allowed to drop its transfer
2033 * mutex. In that case checking only
2034 * "usbd_transfer_pending()" is not enough to tell if
2035 * the USB transfer is fully drained. We also need to
2036 * check the internal "doing_callback" flag.
2037 */
2038 xfer->flags_int.draining = 1;
2039
2040 /*
2041 * Wait until the current outstanding USB
2042 * transfer is complete !
2043 */
2044 (void)cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2045 }
2046 USB_XFER_UNLOCK(xfer);
2047 }
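
/*
 * Illustrative teardown sketch (not part of this file; the driver,
 * its softc and its mutex names are hypothetical): the transfer is
 * first stopped under the transfer mutex and then drained with the
 * mutex released, after which its DMA buffers may be freed or reused.
 *
 * static void
 * my_driver_stop(struct my_softc *sc)
 * {
 *	mtx_lock(&sc->sc_mtx);
 *	usbd_transfer_stop(sc->sc_xfer);	// non-blocking cancel
 *	mtx_unlock(&sc->sc_mtx);
 *
 *	// may sleep; the transfer mutex must not be held here
 *	usbd_transfer_drain(sc->sc_xfer);
 * }
 */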
2048
2049 struct usb_page_cache *
2050 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2051 {
2052 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2053
2054 return (&xfer->frbuffers[frindex]);
2055 }
2056
2057 void *
2058 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2059 {
2060 struct usb_page_search page_info;
2061
2062 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2063
2064 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2065 return (page_info.buffer);
2066 }
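
/*
 * Illustrative fragment (hypothetical driver names): copy completed
 * data out of frame 0 of a transfer that uses an internal buffer,
 * typically from the USB_ST_TRANSFERRED case of the callback.
 *
 * len = usbd_xfer_frame_len(xfer, 0);
 * ptr = usbd_xfer_get_frame_buffer(xfer, 0);
 * (void)memcpy_s(sc->sc_rx_data, sizeof(sc->sc_rx_data), ptr, len);
 */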
2067
2068 /*------------------------------------------------------------------------*
2069 * usbd_xfer_get_fps_shift
2070 *
2071 * The following function is only useful for isochronous transfers. It
2072 * returns how many times the frame execution rate has been shifted
2073 * down.
2074 *
2075 * Return value:
2076 * Success: 0..3
2077 * Failure: 0
2078 *------------------------------------------------------------------------*/
2079 uint8_t
2080 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2081 {
2082 return (xfer->fps_shift);
2083 }
2084
2085 usb_frlength_t
2086 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2087 {
2088 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2089
2090 return (xfer->frlengths[frindex]);
2091 }
2092
2093 /*------------------------------------------------------------------------*
2094 * usbd_xfer_set_frame_data
2095 *
2096 * This function sets the pointer of the buffer that should be
2097 * loaded directly into DMA for the given USB frame. Passing "ptr"
2098 * equal to NULL while the corresponding "frlength" is greater
2099 * than zero gives undefined results!
2100 *------------------------------------------------------------------------*/
2101 void
2102 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2103 void *ptr, usb_frlength_t len)
2104 {
2105 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2106
2107 /* set virtual address to load and length */
2108 xfer->frbuffers[frindex].buffer = ptr;
2109 usbd_xfer_set_frame_len(xfer, frindex, len);
2110 }
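
/*
 * Illustrative fragment (hypothetical buffer names; assumes the
 * transfer was set up so that external buffers may be loaded, i.e.
 * the "ext_buffer" flag): point frame 0 at a driver buffer and
 * submit, typically from the USB_ST_SETUP case of the callback.
 *
 * case USB_ST_SETUP:
 *	usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf, sc->sc_rx_len);
 *	usbd_xfer_set_frames(xfer, 1);
 *	usbd_transfer_submit(xfer);
 *	break;
 */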
2111
2112 void
2113 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2114 void **ptr, int *len)
2115 {
2116 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2117
2118 if (ptr != NULL)
2119 *ptr = xfer->frbuffers[frindex].buffer;
2120 if (len != NULL)
2121 *len = xfer->frlengths[frindex];
2122 }
2123
2124 /*------------------------------------------------------------------------*
2125 * usbd_xfer_old_frame_length
2126 *
2127 * This function returns the framelength of the given frame at the
2128 * time the transfer was submitted. This function can be used to
2129 * compute the starting data pointer of the next isochronous frame
2130 * when an isochronous transfer has completed.
2131 *------------------------------------------------------------------------*/
2132 usb_frlength_t
2133 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2134 {
2135 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2136
2137 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2138 }
2139
2140 void
2141 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2142 int *nframes)
2143 {
2144 if (actlen != NULL)
2145 *actlen = xfer->actlen;
2146 if (sumlen != NULL)
2147 *sumlen = xfer->sumlen;
2148 if (aframes != NULL)
2149 *aframes = xfer->aframes;
2150 if (nframes != NULL)
2151 *nframes = xfer->nframes;
2152 }
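
/*
 * Illustrative fragment (hypothetical handler name): query how much
 * data was actually transferred before processing it in the
 * USB_ST_TRANSFERRED case of a callback.
 *
 * int actlen;
 *
 * usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 * my_process_rx_data(sc, actlen);	// driver-specific, assumed
 */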
2153
2154 /*------------------------------------------------------------------------*
2155 * usbd_xfer_set_frame_offset
2156 *
2157 * This function sets the frame data buffer offset relative to the beginning
2158 * of the USB DMA buffer allocated for this USB transfer.
2159 *------------------------------------------------------------------------*/
2160 void
2161 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2162 usb_frcount_t frindex)
2163 {
2164 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2165 "when the USB buffer is external\n"));
2166 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2167
2168 /* set virtual address to load */
2169 xfer->frbuffers[frindex].buffer =
2170 USB_ADD_BYTES(xfer->local_buffer, offset);
2171 }
2172
2173 void
2174 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2175 {
2176 xfer->interval = i;
2177 }
2178
2179 void
2180 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2181 {
2182 xfer->timeout = t;
2183 }
2184
2185 void
2186 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2187 {
2188 xfer->nframes = n;
2189 }
2190
2191 usb_frcount_t
2192 usbd_xfer_max_frames(struct usb_xfer *xfer)
2193 {
2194 return (xfer->max_frame_count);
2195 }
2196
2197 usb_frlength_t
2198 usbd_xfer_max_len(struct usb_xfer *xfer)
2199 {
2200 return (xfer->max_data_length);
2201 }
2202
2203 usb_frlength_t
2204 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2205 {
2206 return (xfer->max_frame_size);
2207 }
2208
2209 void
2210 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2211 usb_frlength_t len)
2212 {
2213 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2214
2215 xfer->frlengths[frindex] = len;
2216 }
2217
2218 /*------------------------------------------------------------------------*
2219 * usb_callback_proc - factored out code
2220 *
2221 * This function performs USB callbacks.
2222 *------------------------------------------------------------------------*/
2223 static void
2224 usb_callback_proc(struct usb_proc_msg *_pm)
2225 {
2226 struct usb_done_msg *pm = (void *)_pm;
2227 struct usb_xfer_root *info = pm->xroot;
2228
2229 /* Change locking order */
2230 USB_BUS_UNLOCK(info->bus);
2231
2232 /*
2233 * We exploit the fact that the mutex is the same for all
2234 * callbacks that will be called from this thread:
2235 */
2236 USB_MTX_LOCK(info->xfer_mtx);
2237 USB_BUS_LOCK(info->bus);
2238
2239 /* Continue where we lost track */
2240 usb_command_wrapper(&info->done_q,
2241 info->done_q.curr);
2242
2243 USB_MTX_UNLOCK(info->xfer_mtx);
2244 }
2245
2246 /*------------------------------------------------------------------------*
2247 * usbd_callback_ss_done_defer
2248 *
2249 * This function will defer the start, stop and done callback to the
2250 * correct thread.
2251 *------------------------------------------------------------------------*/
2252 static void
2253 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2254 {
2255 struct usb_xfer_root *info = xfer->xroot;
2256 struct usb_xfer_queue *pq = &info->done_q;
2257
2258 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2259
2260 if (pq->curr != xfer) {
2261 usbd_transfer_enqueue(pq, xfer);
2262 }
2263 if (!pq->recurse_1) {
2264
2265 /*
2266 * We have to postpone the callback due to the fact we
2267 * will have a Lock Order Reversal, LOR, if we try to
2268 * proceed !
2269 */
2270 if (usb_proc_msignal(info->done_p,
2271 &info->done_m[0], &info->done_m[1])) {
2272 /* ignore */
2273 }
2274 } else {
2275 /* clear second recurse flag */
2276 pq->recurse_2 = 0;
2277 }
2278 return;
2279
2280 }
2281
2282 /*------------------------------------------------------------------------*
2283 * usbd_callback_wrapper
2284 *
2285 * This is a wrapper for USB callbacks. This wrapper does some
2286 * auto-magic things like figuring out if we can call the callback
2287 * directly from the current context or if we need to wakeup the
2288 * interrupt process.
2289 *------------------------------------------------------------------------*/
2290 static void
2291 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2292 {
2293 struct usb_xfer *xfer = pq->curr;
2294 struct usb_xfer_root *info = xfer->xroot;
2295
2296 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2297 if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2298 /*
2299 * Cases that end up here:
2300 *
2301 * 5) HW interrupt done callback or other source.
2302 */
2303 DPRINTFN(3, "case 5\n");
2304
2305 /*
2306 * We have to postpone the callback due to the fact we
2307 * will have a Lock Order Reversal, LOR, if we try to
2308 * proceed!
2309 */
2310 if (usb_proc_msignal(info->done_p,
2311 &info->done_m[0], &info->done_m[1])) {
2312 /* ignore */
2313 }
2314 return;
2315 }
2316 /*
2317 * Cases that end up here:
2318 *
2319 * 1) We are starting a transfer
2320 * 2) We are prematurely calling back a transfer
2321 * 3) We are stopping a transfer
2322 * 4) We are doing an ordinary callback
2323 */
2324 DPRINTFN(3, "case 1-4\n");
2325 /* get next USB transfer in the queue */
2326 info->done_q.curr = NULL;
2327
2328 /* set flag in case of drain */
2329 xfer->flags_int.doing_callback = 1;
2330
2331 USB_BUS_UNLOCK(info->bus);
2332 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2333
2334 /* set correct USB state for callback */
2335 if (!xfer->flags_int.transferring) {
2336 xfer->usb_state = USB_ST_SETUP;
2337 if (!xfer->flags_int.started) {
2338 /* we got stopped before we even got started */
2339 USB_BUS_LOCK(info->bus);
2340 goto done;
2341 }
2342 } else {
2343
2344 if (usbd_callback_wrapper_sub(xfer)) {
2345 /* the callback has been deferred */
2346 USB_BUS_LOCK(info->bus);
2347 goto done;
2348 }
2349 #if USB_HAVE_POWERD
2350 /* decrement power reference */
2351 usbd_transfer_power_ref(xfer, -1);
2352 #endif
2353 xfer->flags_int.transferring = 0;
2354
2355 if (xfer->error) {
2356 xfer->usb_state = USB_ST_ERROR;
2357 } else {
2358 /* set transferred state */
2359 xfer->usb_state = USB_ST_TRANSFERRED;
2360 #if USB_HAVE_BUSDMA
2361 /* sync DMA memory, if any */
2362 if (xfer->flags_int.bdma_enable &&
2363 (!xfer->flags_int.bdma_no_post_sync)) {
2364 usb_bdma_post_sync(xfer);
2365 }
2366 #endif
2367 }
2368 }
2369
2370 #if USB_HAVE_PF
2371 if (xfer->usb_state != USB_ST_SETUP) {
2372 USB_BUS_LOCK(info->bus);
2373 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2374 USB_BUS_UNLOCK(info->bus);
2375 }
2376 #endif
2377 /* call processing routine */
2378 (xfer->callback) (xfer, xfer->error);
2379
2380 /* pickup the USB mutex again */
2381 USB_BUS_LOCK(info->bus);
2382
2383 /*
2384 * Check if we got started after we got cancelled, but
2385 * before we managed to do the callback.
2386 */
2387 if ((!xfer->flags_int.open) &&
2388 (xfer->flags_int.started) &&
2389 (xfer->usb_state == USB_ST_ERROR)) {
2390 /* clear flag in case of drain */
2391 xfer->flags_int.doing_callback = 0;
2392 /* try to loop, but not recursively */
2393 usb_command_wrapper(&info->done_q, xfer);
2394 return;
2395 }
2396
2397 done:
2398 /* clear flag in case of drain */
2399 xfer->flags_int.doing_callback = 0;
2400
2401 /*
2402 * Check if we are draining.
2403 */
2404 if (xfer->flags_int.draining &&
2405 (!xfer->flags_int.transferring)) {
2406 /* "usbd_transfer_drain()" is waiting for end of transfer */
2407 xfer->flags_int.draining = 0;
2408 (void)cv_broadcast(&info->cv_drain);
2409 }
2410
2411 /* do the next callback, if any */
2412 usb_command_wrapper(&info->done_q,
2413 info->done_q.curr);
2414 }
2415
2416 /*------------------------------------------------------------------------*
2417 * usb_dma_delay_done_cb
2418 *
2419 * This function is called when the DMA delay has been executed, and
2420 * will make sure that the callback is called to complete the USB
2421 * transfer. This code path is usually only used when there is an USB
2422 * error like USB_ERR_CANCELLED.
2423 *------------------------------------------------------------------------*/
2424 void
2425 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2426 {
2427 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2428
2429 DPRINTFN(3, "Completed %p\n", xfer);
2430
2431 /* queue callback for execution, again */
2432 usbd_transfer_done(xfer, (usb_error_t)0);
2433 }
2434
2435 /*------------------------------------------------------------------------*
2436 * usbd_transfer_dequeue
2437 *
2438 * - This function is used to remove an USB transfer from a USB
2439 * transfer queue.
2440 *
2441 * - This function can be called multiple times in a row.
2442 *------------------------------------------------------------------------*/
2443 void
2444 usbd_transfer_dequeue(struct usb_xfer *xfer)
2445 {
2446 struct usb_xfer_queue *pq;
2447 uint32_t int_save;
2448
2449 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
2450 pq = xfer->wait_queue;
2451 if (pq != NULL) {
2452 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2453 xfer->wait_queue = NULL;
2454 }
2455 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
2456 }
2457
2458 /*------------------------------------------------------------------------*
2459 * usbd_transfer_enqueue
2460 *
2461 * - This function is used to insert an USB transfer into a USB
2462 * transfer queue.
2463 *
2464 * - This function can be called multiple times in a row.
2465 *------------------------------------------------------------------------*/
2466 void
2467 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2468 {
2469 uint32_t int_save;
2470 /*
2471 * Insert the USB transfer into the queue, if it is not
2472 * already on a USB transfer queue:
2473 */
2474 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
2475 if (xfer->wait_queue == NULL) {
2476 xfer->wait_queue = pq;
2477 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2478 }
2479 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
2480 }
2481
2482 /*------------------------------------------------------------------------*
2483 * usbd_transfer_done
2484 *
2485 * - This function is used to remove an USB transfer from the busdma,
2486 * pipe or interrupt queue.
2487 *
2488 * - This function is used to queue the USB transfer on the done
2489 * queue.
2490 *
2491 * - This function is used to stop any USB transfer timeouts.
2492 *------------------------------------------------------------------------*/
2493 void
2494 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2495 {
2496 struct usb_xfer_root *info = xfer->xroot;
2497
2498 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2499
2500 DPRINTF("err=%s\n", usbd_errstr(error));
2501
2502 /*
2503 * If we are not transferring then just return.
2504 * This can happen during transfer cancel.
2505 */
2506 if (!xfer->flags_int.transferring) {
2507 DPRINTF("not transferring\n");
2508 /* end of control transfer, if any */
2509 xfer->flags_int.control_act = 0;
2510 return;
2511 }
2512 /* only set transfer error, if not already set */
2513 if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2514 xfer->error = error;
2515
2516 /* stop any callouts */
2517 callout_stop(&xfer->timeout_handle);
2518
2519 /*
2520 * If we are waiting on a queue, just remove the USB transfer
2521 * from the queue, if any. We should have the required locks
2522 * locked to do the remove when this function is called.
2523 */
2524 usbd_transfer_dequeue(xfer);
2525
2526 #if USB_HAVE_BUSDMA
2527 if (mtx_owned(info->xfer_mtx)) {
2528 struct usb_xfer_queue *pq;
2529
2530 /*
2531 * If the private USB lock is not locked, then we assume
2532 * that the BUS-DMA load stage has been passed:
2533 */
2534 pq = &info->dma_q;
2535
2536 if (pq->curr == xfer) {
2537 /* start the next BUS-DMA load, if any */
2538 usb_command_wrapper(pq, NULL);
2539 }
2540 }
2541 #endif
2542 /* keep some statistics */
2543 if (xfer->error == USB_ERR_CANCELLED) {
2544 info->udev->stats_cancelled.uds_requests
2545 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2546 } else if (xfer->error != USB_ERR_NORMAL_COMPLETION) {
2547 info->udev->stats_err.uds_requests
2548 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2549 } else {
2550 info->udev->stats_ok.uds_requests
2551 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2552 }
2553
2554 /* call the USB transfer callback */
2555 usbd_callback_ss_done_defer(xfer);
2556 }
2557
2558 /*------------------------------------------------------------------------*
2559 * usbd_transfer_start_cb
2560 *
2561 * This function is called to start the USB transfer when
2562 * "xfer->interval" is greater than zero, and and the endpoint type is
2563 * BULK or CONTROL.
2564 *------------------------------------------------------------------------*/
2565 static void
2566 usbd_transfer_start_cb(void *arg)
2567 {
2568 struct usb_xfer *xfer = arg;
2569 struct usb_endpoint *ep = xfer->endpoint;
2570
2571 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2572
2573 DPRINTF("start\n");
2574
2575 #if USB_HAVE_PF
2576 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2577 #endif
2578
2579 /* the transfer can now be cancelled */
2580 xfer->flags_int.can_cancel_immed = 1;
2581
2582 /* start USB transfer, if no error */
2583 if (xfer->error == 0)
2584 (ep->methods->start) (xfer);
2585
2586 /* check for transfer error */
2587 if (xfer->error) {
2588 /* some error has happened */
2589 usbd_transfer_done(xfer, (usb_error_t)0);
2590 }
2591 }
2592
2593 /*------------------------------------------------------------------------*
2594 * usbd_xfer_set_stall
2595 *
2596 * This function is used to set the stall flag outside the
2597 * callback. This function is NULL safe.
2598 *------------------------------------------------------------------------*/
2599 void
2600 usbd_xfer_set_stall(struct usb_xfer *xfer)
2601 {
2602 if (xfer == NULL) {
2603 /* tearing down */
2604 return;
2605 }
2606 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2607
2608 /* avoid any races by locking the USB mutex */
2609 USB_BUS_LOCK(xfer->xroot->bus);
2610 xfer->flags.stall_pipe = 1;
2611 USB_BUS_UNLOCK(xfer->xroot->bus);
2612 }
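
/*
 * Illustrative usage (hypothetical softc and mutex names): request
 * stall handling from outside the callback, then start the transfer
 * so the stall is processed on the next submit.
 *
 * mtx_lock(&sc->sc_mtx);
 * usbd_xfer_set_stall(sc->sc_xfer);
 * usbd_transfer_start(sc->sc_xfer);
 * mtx_unlock(&sc->sc_mtx);
 */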
2613
2614 int
2615 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2616 {
2617 return (xfer->endpoint->is_stalled);
2618 }
2619
2620 /*------------------------------------------------------------------------*
2621 * usbd_transfer_clear_stall
2622 *
2623 * This function is used to clear the stall flag outside the
2624 * callback. This function is NULL safe.
2625 *------------------------------------------------------------------------*/
2626 void
2627 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2628 {
2629 if (xfer == NULL) {
2630 /* tearing down */
2631 return;
2632 }
2633 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2634
2635 /* avoid any races by locking the USB mutex */
2636 USB_BUS_LOCK(xfer->xroot->bus);
2637
2638 xfer->flags.stall_pipe = 0;
2639
2640 USB_BUS_UNLOCK(xfer->xroot->bus);
2641 }
2642
2643 /*------------------------------------------------------------------------*
2644 * usbd_pipe_start
2645 *
2646 * This function is used to add an USB transfer to the pipe transfer list.
2647 *------------------------------------------------------------------------*/
2648 void
2649 usbd_pipe_start(struct usb_xfer_queue *pq)
2650 {
2651 struct usb_endpoint *ep;
2652 struct usb_xfer *xfer;
2653 uint8_t type;
2654
2655 xfer = pq->curr;
2656 ep = xfer->endpoint;
2657
2658 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2659
2660 /*
2661 * If the endpoint is already stalled we do nothing !
2662 */
2663 if (ep->is_stalled) {
2664 DPRINTFN(1, "is_stalled\n");
2665 return;
2666 }
2667 /*
2668 * Check if we are supposed to stall the endpoint:
2669 */
2670 if (xfer->flags.stall_pipe) {
2671 struct usb_device *udev;
2672 struct usb_xfer_root *info;
2673
2674 /* clear stall command */
2675 xfer->flags.stall_pipe = 0;
2676
2677 /* get pointer to USB device */
2678 info = xfer->xroot;
2679 udev = info->udev;
2680
2681 /*
2682 * Only stall BULK and INTERRUPT endpoints.
2683 */
2684 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2685 if ((type == UE_BULK) ||
2686 (type == UE_INTERRUPT)) {
2687 uint8_t did_stall;
2688
2689 did_stall = 1;
2690
2691 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2692 (udev->bus->methods->set_stall) (
2693 udev, ep, &did_stall);
2694 } else if (udev->ctrl_xfer[1]) {
2695 info = udev->ctrl_xfer[1]->xroot;
2696 (void)usb_proc_msignal(
2697 USB_BUS_CS_PROC(info->bus),
2698 &udev->cs_msg[0], &udev->cs_msg[1]);
2699 } else {
2700 /* should not happen */
2701 DPRINTFN(0, "No stall handler\n");
2702 }
2703 /*
2704 * Check if we should stall. Some USB hardware
2705 * handles set- and clear-stall in hardware.
2706 */
2707 if (did_stall) {
2708 /*
2709 * The transfer will be continued when
2710 * the clear-stall control endpoint
2711 * message is received.
2712 */
2713 ep->is_stalled = 1;
2714 DPRINTFN(1, "did_stall\n");
2715 return;
2716 }
2717 } else if (type == UE_ISOCHRONOUS) {
2718
2719 /*
2720 * Make sure any FIFO overflow or other FIFO
2721 * error conditions go away by resetting the
2722 * endpoint FIFO through the clear stall
2723 * method.
2724 */
2725 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2726 (udev->bus->methods->clear_stall) (udev, ep);
2727 }
2728 }
2729 }
2730 /* Set or clear stall complete - special case */
2731 if (xfer->nframes == 0) {
2732 /* we are complete */
2733 xfer->aframes = 0;
2734 usbd_transfer_done(xfer, (usb_error_t)0);
2735 DPRINTFN(1, "nframes == 0\n");
2736 return;
2737 }
2738 /*
2739 * Handled cases:
2740 *
2741 * 1) Start the first transfer queued.
2742 *
2743 * 2) Re-start the current USB transfer.
2744 */
2745 /*
2746 * Check if there should be any
2747 * pre transfer start delay:
2748 */
2749 if (xfer->interval > 0) {
2750 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2751 if ((type == UE_BULK) ||
2752 (type == UE_CONTROL)) {
2753 usbd_transfer_timeout_ms(xfer,
2754 &usbd_transfer_start_cb,
2755 xfer->interval);
2756 DPRINTFN(1, "usbd_transfer_timeout_ms \n");
2757 return;
2758 }
2759 }
2760
2761 usbd_transfer_start_cb((void *)xfer);
2762 }
2763
2764 /*------------------------------------------------------------------------*
2765 * usbd_transfer_timeout_ms
2766 *
2767 * This function is used to setup a timeout on the given USB
2768 * transfer. The callback given by "cb" will get called after "ms"
2769 * milliseconds.
2770 *------------------------------------------------------------------------*/
2771 void
2772 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2773 void (*cb) (void *arg), usb_timeout_t ms)
2774 {
2775 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2776
2777 /* defer delay */
2778 callout_reset(&xfer->timeout_handle,
2779 USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2780 }
2781
2782 /*------------------------------------------------------------------------*
2783 * usbd_callback_wrapper_sub
2784 *
2785 * - This function will update variables in an USB transfer after
2786 * the USB transfer is complete.
2787 *
2788 * - This function is used to start the next USB transfer on the
2789 * ep transfer queue, if any.
2790 *
2791 * NOTE: In some special cases the USB transfer will not be removed from
2792 * the pipe queue, but remain first. To enforce USB transfer removal call
2793 * this function passing the error code "USB_ERR_CANCELLED".
2794 *
2795 * Return values:
2796 * 0: Success.
2797 * Else: The callback has been deferred.
2798 *------------------------------------------------------------------------*/
2799 static uint8_t
2800 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2801 {
2802 struct usb_endpoint *ep;
2803 struct usb_bus *bus;
2804 usb_frcount_t x;
2805
2806 bus = xfer->xroot->bus;
2807
2808 if ((!xfer->flags_int.open) &&
2809 (!xfer->flags_int.did_close)) {
2810 DPRINTF("close\n");
2811 USB_BUS_LOCK(bus);
2812 (xfer->endpoint->methods->close) (xfer);
2813 USB_BUS_UNLOCK(bus);
2814 /* only close once */
2815 xfer->flags_int.did_close = 1;
2816 return (1); /* wait for new callback */
2817 }
2818 /*
2819 * If we have a non-hardware induced error we
2820 * need to do the DMA delay!
2821 */
2822 if ((xfer->error != 0) && (!xfer->flags_int.did_dma_delay) &&
2823 ((xfer->error == USB_ERR_CANCELLED) ||
2824 (xfer->error == USB_ERR_TIMEOUT) ||
2825 (bus->methods->start_dma_delay != NULL))) {
2826
2827 usb_timeout_t temp;
2828
2829 /* only delay once */
2830 xfer->flags_int.did_dma_delay = 1;
2831
2832 /* we can not cancel this delay */
2833 xfer->flags_int.can_cancel_immed = 0;
2834
2835 temp = usbd_get_dma_delay(xfer->xroot->udev);
2836
2837 DPRINTFN(3, "DMA delay, %u ms, "
2838 "on %p\n", temp, xfer);
2839
2840 if (temp != 0) {
2841 USB_BUS_LOCK(bus);
2842 /*
2843 * Some hardware solutions have dedicated
2844 * events when it is safe to free DMA'ed
2845 * memory. For the other hardware platforms we
2846 * use a static delay.
2847 */
2848 if (bus->methods->start_dma_delay != NULL) {
2849 (bus->methods->start_dma_delay) (xfer);
2850 } else {
2851 usbd_transfer_timeout_ms(xfer,
2852 (void (*)(void *))&usb_dma_delay_done_cb,
2853 temp);
2854 }
2855 USB_BUS_UNLOCK(bus);
2856 return (1); /* wait for new callback */
2857 }
2858 }
2859 /* check actual number of frames */
2860 if (xfer->aframes > xfer->nframes) {
2861 if (xfer->error == 0) {
2862 panic("%s: actual number of frames, %d, is "
2863 "greater than initial number of frames, %d\n",
2864 __FUNCTION__, xfer->aframes, xfer->nframes);
2865 } else {
2866 /* just set some valid value */
2867 xfer->aframes = xfer->nframes;
2868 }
2869 }
2870 /* compute actual length */
2871 xfer->actlen = 0;
2872
2873 for (x = 0; x != xfer->aframes; x++) {
2874 xfer->actlen += xfer->frlengths[x];
2875 }
2876
2877 /*
2878 * Frames that were not transferred get zero actual length in
2879 * case the USB device driver does not check the actual number
2880 * of frames transferred, "xfer->aframes":
2881 */
2882 for (; x < xfer->nframes; x++) {
2883 usbd_xfer_set_frame_len(xfer, x, 0);
2884 }
2885
2886 /* check actual length */
2887 if (xfer->actlen > xfer->sumlen) {
2888 if (xfer->error == 0) {
2889 panic("%s: actual length, %d, is greater than "
2890 "initial length, %d\n",
2891 __FUNCTION__, xfer->actlen, xfer->sumlen);
2892 } else {
2893 /* just set some valid value */
2894 xfer->actlen = xfer->sumlen;
2895 }
2896 }
2897 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2898 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2899 xfer->aframes, xfer->nframes);
2900
2901 if (xfer->error) {
2902 /* end of control transfer, if any */
2903 xfer->flags_int.control_act = 0;
2904
2905 #if USB_HAVE_TT_SUPPORT
2906 switch (xfer->error) {
2907 case USB_ERR_NORMAL_COMPLETION:
2908 case USB_ERR_SHORT_XFER:
2909 case USB_ERR_STALLED:
2910 case USB_ERR_CANCELLED:
2911 /* nothing to do */
2912 break;
2913 default:
2914 /* try to reset the TT, if any */
2915 USB_BUS_LOCK(bus);
2916 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2917 USB_BUS_UNLOCK(bus);
2918 break;
2919 }
2920 #endif
2921 /* check if we should block the execution queue */
2922 if ((xfer->error != USB_ERR_CANCELLED) &&
2923 (xfer->flags.pipe_bof)) {
2924 DPRINTFN(2, "xfer=%p: Block On Failure "
2925 "on endpoint=%p\n", xfer, xfer->endpoint);
2926 goto done;
2927 }
2928 } else {
2929 /* check for short transfers */
2930 if (xfer->actlen < xfer->sumlen) {
2931
2932 /* end of control transfer, if any */
2933 xfer->flags_int.control_act = 0;
2934
2935 if (!xfer->flags_int.short_xfer_ok) {
2936 xfer->error = USB_ERR_SHORT_XFER;
2937 if (xfer->flags.pipe_bof) {
2938 DPRINTFN(2, "xfer=%p: Block On Failure on "
2939 "Short Transfer on endpoint %p.\n",
2940 xfer, xfer->endpoint);
2941 goto done;
2942 }
2943 }
2944 } else {
2945 /*
2946 * Check if we are in the middle of a
2947 * control transfer:
2948 */
2949 if (xfer->flags_int.control_act) {
2950 DPRINTFN(5, "xfer=%p: Control transfer "
2951 "active on endpoint=%p\n", xfer, xfer->endpoint);
2952 goto done;
2953 }
2954 }
2955 }
2956
2957 ep = xfer->endpoint;
2958
2959 /*
2960 * If the current USB transfer is completing we need to start the
2961 * next one:
2962 */
2963 USB_BUS_LOCK(bus);
2964 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2965 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2966
2967 if ((ep->endpoint_q[xfer->stream_id].curr != NULL) ||
2968 (TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL)) {
2969 /* there is another USB transfer waiting */
2970 } else {
2971 /* this is the last USB transfer */
2972 /* clear isochronous sync flag */
2973 xfer->endpoint->is_synced = 0;
2974 }
2975 }
2976 USB_BUS_UNLOCK(bus);
2977 done:
2978 return (0);
2979 }
2980
2981 /*------------------------------------------------------------------------*
2982 * usb_command_wrapper
2983 *
2984 * This function is used to execute commands non-recursively on an USB
2985 * transfer.
2986 *------------------------------------------------------------------------*/
2987 void
2988 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2989 {
2990 uint32_t int_save;
2991
2992 if (xfer) {
2993 /*
2994 * If the transfer is not already processing,
2995 * queue it!
2996 */
2997 if (pq->curr != xfer) {
2998 usbd_transfer_enqueue(pq, xfer);
2999 if (pq->curr != NULL) {
3000 /* something is already processing */
3001 DPRINTFN(6, "busy %p\n", pq->curr);
3002 return;
3003 }
3004 }
3005 } else {
3006 /* Get next element in queue */
3007 pq->curr = NULL;
3008 }
3009
3010 if (!pq->recurse_1) {
3011
3012 do {
3013
3014 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
3015 /* set both recurse flags */
3016 pq->recurse_1 = 1;
3017 pq->recurse_2 = 1;
3018
3019 if (pq->curr == NULL) {
3020 xfer = TAILQ_FIRST(&pq->head);
3021 if (xfer) {
3022 TAILQ_REMOVE(&pq->head, xfer,
3023 wait_entry);
3024 xfer->wait_queue = NULL;
3025 pq->curr = xfer;
3026 } else {
3027 /* clear first recurse flag */
3028 pq->recurse_1 = 0;
3029 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3030 break;
3031 }
3032 }
3033 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3034
3035 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3036 (pq->command) (pq);
3037 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3038
3039 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
3040 if (pq->recurse_2) {
3041 /* clear first recurse flag */
3042 pq->recurse_1 = 0;
3043 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3044 break;
3045 }
3046 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3047 } while (1);
3048
3049 } else {
3050 /* clear second recurse flag */
3051 pq->recurse_2 = 0;
3052 }
3053 }
3054
3055 /*------------------------------------------------------------------------*
3056 * usbd_ctrl_transfer_setup
3057 *
3058 * This function is used to setup the default USB control endpoint
3059 * transfer.
3060 *------------------------------------------------------------------------*/
3061 void
3062 usbd_ctrl_transfer_setup(struct usb_device *udev)
3063 {
3064 struct usb_xfer *xfer;
3065 uint8_t no_resetup;
3066 uint8_t iface_index;
3067
3068 /* check for root HUB */
3069 if (udev->parent_hub == NULL)
3070 return;
3071 repeat:
3072
3073 xfer = udev->ctrl_xfer[0];
3074 if (xfer) {
3075 USB_XFER_LOCK(xfer);
3076 no_resetup =
3077 ((xfer->address == udev->address) &&
3078 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3079 udev->ddesc.bMaxPacketSize));
3080 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3081 if (no_resetup) {
3082 /*
3083 * NOTE: checking "xfer->address" and
3084 * starting the USB transfer must be
3085 * atomic!
3086 */
3087 usbd_transfer_start(xfer);
3088 }
3089 }
3090 USB_XFER_UNLOCK(xfer);
3091 } else {
3092 no_resetup = 0;
3093 }
3094
3095 if (no_resetup) {
3096 /*
3097 * All parameters are exactly the same as before.
3098 * Just return.
3099 */
3100 return;
3101 }
3102 /*
3103 * Update wMaxPacketSize for the default control endpoint:
3104 */
3105 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3106 udev->ddesc.bMaxPacketSize;
3107
3108 /*
3109 * Unsetup any existing USB transfer:
3110 */
3111 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3112
3113 /*
3114 * Reset clear stall error counter.
3115 */
3116 udev->clear_stall_errors = 0;
3117
3118 /*
3119 * Try to setup a new USB transfer for the
3120 * default control endpoint:
3121 */
3122 iface_index = 0;
3123 if (usbd_transfer_setup(udev, &iface_index,
3124 udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3125 usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3126 &udev->device_mtx)) {
3127 DPRINTFN(0, "could not setup default "
3128 "USB transfer\n");
3129 } else {
3130 goto repeat;
3131 }
3132 }
3133
3134 /*------------------------------------------------------------------------*
3135 * usbd_clear_stall_locked - factored out code
3136 *
3137 * NOTE: the intention of this function is not to reset the hardware
3138 * data toggle.
3139 *------------------------------------------------------------------------*/
3140 void
3141 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3142 {
3143 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3144
3145 /* check that we have a valid case */
3146 if ((udev->flags.usb_mode == USB_MODE_HOST) &&
3147 (udev->parent_hub != NULL) &&
3148 (udev->bus->methods->clear_stall != NULL) &&
3149 (ep->methods != NULL)) {
3150 (udev->bus->methods->clear_stall) (udev, ep);
3151 }
3152 }
3153
3154 /*------------------------------------------------------------------------*
3155 * usbd_clear_data_toggle - factored out code
3156 *
3157 * NOTE: the intention of this function is not to reset the hardware
3158 * data toggle on the USB device side.
3159 *------------------------------------------------------------------------*/
3160 void
3161 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3162 {
3163 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3164
3165 USB_BUS_LOCK(udev->bus);
3166 ep->toggle_next = 0;
3167 /* some hardware needs a callback to clear the data toggle */
3168 usbd_clear_stall_locked(udev, ep);
3169 USB_BUS_UNLOCK(udev->bus);
3170 }
3171
3172 /*------------------------------------------------------------------------*
3173 * usbd_clear_stall_callback - factored out clear stall callback
3174 *
3175 * Input parameters:
3176 * xfer1: Clear Stall Control Transfer
3177 * xfer2: Stalled USB Transfer
3178 *
3179 * This function is NULL safe.
3180 *
3181 * Return values:
3182 * 0: In progress
3183 * Else: Finished
3184 *
3185 * Clear stall config example:
3186 *
3187 * static const struct usb_config my_clearstall = {
3188 * .type = UE_CONTROL,
3189 * .endpoint = 0,
3190 * .direction = UE_DIR_ANY,
3191 * .interval = 50, //50 milliseconds
3192 * .bufsize = sizeof(struct usb_device_request),
3193 * .timeout = 1000, //1.000 seconds
3194 * .callback = &my_clear_stall_callback, // **
3195 * .usb_mode = USB_MODE_HOST,
3196 * };
3197 *
3198 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3199 * passing the correct parameters.
3200 *------------------------------------------------------------------------*/
3201 uint8_t
3202 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3203 struct usb_xfer *xfer2)
3204 {
3205 struct usb_device_request req;
3206
3207 if (xfer2 == NULL) {
3208 /* looks like we are tearing down */
3209 DPRINTF("NULL input parameter\n");
3210 return (0);
3211 }
3212 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3213 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3214
3215 switch (USB_GET_STATE(xfer1)) {
3216 case USB_ST_SETUP:
3217
3218 /*
3219 * pre-clear the data toggle to DATA0 ("umass.c" and
3220 * "ata-usb.c" depends on this)
3221 */
3222
3223 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3224
3225 /* setup a clear-stall packet */
3226
3227 req.bmRequestType = UT_WRITE_ENDPOINT;
3228 req.bRequest = UR_CLEAR_FEATURE;
3229 USETW(req.wValue, UF_ENDPOINT_HALT);
3230 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3231 req.wIndex[1] = 0;
3232 USETW(req.wLength, 0);
3233
3234 /*
3235 * "usbd_transfer_setup_sub()" will ensure that
3236 * we have sufficient room in the buffer for
3237 * the request structure!
3238 */
3239
3240 /* copy in the transfer */
3241
3242 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3243
3244 /* set length */
3245 xfer1->frlengths[0] = sizeof(req);
3246 xfer1->nframes = 1;
3247
3248 usbd_transfer_submit(xfer1);
3249 return (0);
3250
3251 case USB_ST_TRANSFERRED:
3252 break;
3253
3254 default: /* Error */
3255 if (xfer1->error == USB_ERR_CANCELLED) {
3256 return (0);
3257 }
3258 break;
3259 }
3260 return (1); /* Clear Stall Finished */
3261 }
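
/*
 * Example wrapper in the spirit of the configuration shown above (a
 * sketch only; "my_clear_stall_callback", "struct my_softc" and
 * "sc_bulk_xfer" are hypothetical names): when the clear-stall
 * request has finished, restart the transfer that was stalled.
 *
 * static void
 * my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 * {
 *	struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *	if (usbd_clear_stall_callback(xfer, sc->sc_bulk_xfer)) {
 *		// clear-stall finished - restart the stalled transfer
 *		usbd_transfer_start(sc->sc_bulk_xfer);
 *	}
 * }
 */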
3262
3263 /*------------------------------------------------------------------------*
3264 * usbd_transfer_poll
3265 *
3266 * The following function gets called from the USB keyboard driver and
3267 * UMASS when the system has panicked.
3268 *
3269 * NOTE: It is currently not possible to resume normal operation on
3270 * the USB controller which has been polled, due to clearing of the
3271 * "up_dsleep" and "up_msleep" flags.
3272 *------------------------------------------------------------------------*/
3273 void
3274 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3275 {
3276 struct usb_xfer *xfer;
3277 struct usb_xfer_root *xroot;
3278 struct usb_device *udev;
3279 struct usb_proc_msg *pm;
3280 uint16_t n;
3281 uint16_t drop_bus;
3282 uint16_t drop_xfer;
3283
3284 for (n = 0; n != max; n++) {
3285 /* Extra checks to avoid panic */
3286 xfer = ppxfer[n];
3287 if (xfer == NULL)
3288 continue; /* no USB transfer */
3289 xroot = xfer->xroot;
3290 if (xroot == NULL)
3291 continue; /* no USB root */
3292 udev = xroot->udev;
3293 if (udev == NULL)
3294 continue; /* no USB device */
3295 if (udev->bus == NULL)
3296 continue; /* no BUS structure */
3297 if (udev->bus->methods == NULL)
3298 continue; /* no BUS methods */
3299 if (udev->bus->methods->xfer_poll == NULL)
3300 continue; /* no poll method */
3301
3302 /* make sure that the BUS mutex is not locked */
3303 drop_bus = 0;
3304 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3305 mtx_unlock(&xroot->udev->bus->bus_mtx);
3306 drop_bus++;
3307 }
3308
3309 /* make sure that the transfer mutex is not locked */
3310 drop_xfer = 0;
3311 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3312 mtx_unlock(xroot->xfer_mtx);
3313 drop_xfer++;
3314 }
3315
3316 /* Make sure cv_signal() and cv_broadcast() is not called */
3317 USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3318 USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3319 USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3320 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus)->up_msleep = 0;
3321 USB_BUS_NON_GIANT_BULK_PROC(udev->bus)->up_msleep = 0;
3322
3323 /* poll USB hardware */
3324 (udev->bus->methods->xfer_poll) (udev->bus);
3325
3326 USB_BUS_LOCK(xroot->bus);
3327
3328 /* check for clear stall */
3329 if (udev->ctrl_xfer[1] != NULL) {
3330
3331 /* poll clear stall start */
3332 pm = &udev->cs_msg[0].hdr;
3333 (pm->pm_callback) (pm);
3334 /* poll clear stall done thread */
3335 pm = &udev->ctrl_xfer[1]->
3336 xroot->done_m[0].hdr;
3337 (pm->pm_callback) (pm);
3338 }
3339
3340 /* poll done thread */
3341 pm = &xroot->done_m[0].hdr;
3342 (pm->pm_callback) (pm);
3343
3344 USB_BUS_UNLOCK(xroot->bus);
3345
3346 /* restore transfer mutex */
3347 while (drop_xfer--)
3348 mtx_lock(xroot->xfer_mtx);
3349
3350 /* restore BUS mutex */
3351 while (drop_bus--)
3352 mtx_lock(&xroot->udev->bus->bus_mtx);
3353 }
3354 }
3355
3356 static void
3357 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3358 uint8_t type, enum usb_dev_speed speed)
3359 {
3360 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3361 [USB_SPEED_LOW] = 8,
3362 [USB_SPEED_FULL] = 64,
3363 [USB_SPEED_HIGH] = 1024,
3364 [USB_SPEED_VARIABLE] = 1024,
3365 [USB_SPEED_SUPER] = 1024,
3366 };
3367
3368 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3369 [USB_SPEED_LOW] = 0, /* invalid */
3370 [USB_SPEED_FULL] = 1023,
3371 [USB_SPEED_HIGH] = 1024,
3372 [USB_SPEED_VARIABLE] = 3584,
3373 [USB_SPEED_SUPER] = 1024,
3374 };
3375
3376 static const uint16_t control_min[USB_SPEED_MAX] = {
3377 [USB_SPEED_LOW] = 8,
3378 [USB_SPEED_FULL] = 8,
3379 [USB_SPEED_HIGH] = 64,
3380 [USB_SPEED_VARIABLE] = 512,
3381 [USB_SPEED_SUPER] = 512,
3382 };
3383
3384 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3385 [USB_SPEED_LOW] = 8,
3386 [USB_SPEED_FULL] = 8,
3387 [USB_SPEED_HIGH] = 512,
3388 [USB_SPEED_VARIABLE] = 512,
3389 [USB_SPEED_SUPER] = 1024,
3390 };
3391
3392 uint16_t temp;
3393
3394 (void)memset_s(ptr, sizeof(*ptr), 0, sizeof(*ptr));
3395
3396 switch (type) {
3397 case UE_INTERRUPT:
3398 ptr->range.max = intr_range_max[speed];
3399 break;
3400 case UE_ISOCHRONOUS:
3401 ptr->range.max = isoc_range_max[speed];
3402 break;
3403 default:
3404 if (type == UE_BULK)
3405 temp = bulk_min[speed];
3406 else /* UE_CONTROL */
3407 temp = control_min[speed];
3408
3409 /* default is fixed */
3410 ptr->fixed[0] = temp;
3411 ptr->fixed[1] = temp;
3412 ptr->fixed[2] = temp;
3413 ptr->fixed[3] = temp;
3414
3415 if (speed == USB_SPEED_FULL) {
3416 /* multiple sizes */
3417 ptr->fixed[1] = 16;
3418 ptr->fixed[2] = 32;
3419 ptr->fixed[3] = 64;
3420 }
3421 if ((speed == USB_SPEED_VARIABLE) &&
3422 (type == UE_BULK)) {
3423 /* multiple sizes */
3424 ptr->fixed[2] = 1024;
3425 ptr->fixed[3] = 1536;
3426 }
3427 break;
3428 }
3429 }
3430
3431 void *
3432 usbd_xfer_softc(struct usb_xfer *xfer)
3433 {
3434 return (xfer->priv_sc);
3435 }
3436
3437 void *
3438 usbd_xfer_get_priv(struct usb_xfer *xfer)
3439 {
3440 return (xfer->priv_fifo);
3441 }
3442
3443 void
3444 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3445 {
3446 xfer->priv_fifo = ptr;
3447 }
3448
3449 uint8_t
3450 usbd_xfer_state(struct usb_xfer *xfer)
3451 {
3452 return (xfer->usb_state);
3453 }
3454
3455 void
3456 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3457 {
3458 switch (flag) {
3459 case USB_FORCE_SHORT_XFER:
3460 xfer->flags.force_short_xfer = 1;
3461 break;
3462 case USB_SHORT_XFER_OK:
3463 xfer->flags.short_xfer_ok = 1;
3464 break;
3465 case USB_MULTI_SHORT_OK:
3466 xfer->flags.short_frames_ok = 1;
3467 break;
3468 case USB_MANUAL_STATUS:
3469 xfer->flags.manual_status = 1;
3470 break;
3471 }
3472 }
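
/*
 * Illustrative fragment (a sketch only): allow a short bulk IN
 * completion before submitting a full-length read, typically from
 * the USB_ST_SETUP case of the callback.
 *
 * usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 * usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 * usbd_transfer_submit(xfer);
 */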
3473
3474 void
3475 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3476 {
3477 switch (flag) {
3478 case USB_FORCE_SHORT_XFER:
3479 xfer->flags.force_short_xfer = 0;
3480 break;
3481 case USB_SHORT_XFER_OK:
3482 xfer->flags.short_xfer_ok = 0;
3483 break;
3484 case USB_MULTI_SHORT_OK:
3485 xfer->flags.short_frames_ok = 0;
3486 break;
3487 case USB_MANUAL_STATUS:
3488 xfer->flags.manual_status = 0;
3489 break;
3490 }
3491 }
3492
3493 /*
3494 * The following function returns in milliseconds when the isochronous
3495 * transfer was completed by the hardware. The returned value wraps
3496 * around 65536 milliseconds.
3497 */
3498 uint16_t
3499 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3500 {
3501 return (xfer->isoc_time_complete);
3502 }
3503
3504 /*
3505 * The following function returns non-zero if the max packet size
3506 * field was clamped to a valid value. Else it returns zero.
3507 */
3508 uint8_t
3509 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3510 {
3511 return (xfer->flags_int.maxp_was_clamped);
3512 }
3513
3514 #undef USB_DEBUG_VAR
3515