1 /* $FreeBSD$ */
2 /*-
3 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 *
5 * Copyright (c) 2008 Hans Petter Selasky. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include "implementation/global_implementation.h"
30
31 #undef USB_DEBUG_VAR
32 #define USB_DEBUG_VAR usb_debug
33
34 SPIN_LOCK_INIT(g_usb_wait_queue_spinlock);
35
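/*
 * Helper used when filtering "wMaxPacketSize": either the inclusive
 * "range" or the discrete "fixed[]" sizes apply, depending on the
 * endpoint type and bus speed. See usbd_transfer_setup_sub().
 */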
36 struct usb_std_packet_size {
37 struct {
38 uint16_t min; /* inclusive */
39 uint16_t max; /* inclusive */
40 } range;
41
42 uint16_t fixed[4];
43 };
44
45 static usb_callback_t usb_request_callback;
46
47 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
48 /* This transfer is used for generic control endpoint transfers */
49
50 [0] = {
51 .type = UE_CONTROL,
52 .endpoint = 0x00, /* Control endpoint */
53 .direction = UE_DIR_ANY,
54 .bufsize = USB_EP0_BUFSIZE, /* bytes */
55 .flags = {.proxy_buffer = 1,},
56 .callback = &usb_request_callback,
57 .usb_mode = USB_MODE_DUAL, /* both modes */
58 },
59
60 /* This transfer is used for generic clear stall only */
61
62 [1] = {
63 .type = UE_CONTROL,
64 .endpoint = 0x00, /* Control pipe */
65 .direction = UE_DIR_ANY,
66 .bufsize = sizeof(struct usb_device_request),
67 .callback = &usb_do_clear_stall_callback,
68 .timeout = 1000, /* 1 second */
69 .interval = 50, /* 50ms */
70 .usb_mode = USB_MODE_HOST,
71 },
72 };
73
74 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = {
75 /* This transfer is used for generic control endpoint transfers */
76
77 [0] = {
78 .type = UE_CONTROL,
79 .endpoint = 0x00, /* Control endpoint */
80 .direction = UE_DIR_ANY,
81 .bufsize = 65535, /* bytes */
82 .callback = &usb_request_callback,
83 .usb_mode = USB_MODE_DUAL, /* both modes */
84 },
85
86 /* This transfer is used for generic clear stall only */
87
88 [1] = {
89 .type = UE_CONTROL,
90 .endpoint = 0x00, /* Control pipe */
91 .direction = UE_DIR_ANY,
92 .bufsize = sizeof(struct usb_device_request),
93 .callback = &usb_do_clear_stall_callback,
94 .timeout = 1000, /* 1 second */
95 .interval = 50, /* 50ms */
96 .usb_mode = USB_MODE_HOST,
97 },
98 };
99
100 /* function prototypes */
101
102 static void usbd_update_max_frame_size(struct usb_xfer *);
103 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
104 static void usbd_control_transfer_init(struct usb_xfer *);
105 static int usbd_setup_ctrl_transfer(struct usb_xfer *);
106 static void usb_callback_proc(struct usb_proc_msg *);
107 static void usbd_callback_ss_done_defer(struct usb_xfer *);
108 static void usbd_callback_wrapper(struct usb_xfer_queue *);
109 static void usbd_transfer_start_cb(void *);
110 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
111 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
112 uint8_t type, enum usb_dev_speed speed);
113
114 /*------------------------------------------------------------------------*
115 * usb_request_callback
116 *------------------------------------------------------------------------*/
117 static void
118 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
119 {
120 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
121 usb_handle_request_callback(xfer, error);
122 else
123 usbd_do_request_callback(xfer, error);
124 }
125
126 /*------------------------------------------------------------------------*
127 * usbd_update_max_frame_size
128 *
129 * This function updates the maximum frame size, because high speed
130 * USB can transfer multiple consecutive packets per frame.
131 *------------------------------------------------------------------------*/
132 static void
133 usbd_update_max_frame_size(struct usb_xfer *xfer)
134 {
135 /* compute maximum frame size */
136 /* this computation should not overflow 16-bit */
137 /* max = 15 * 1024 */
138
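/*
 * Example: a high-bandwidth, high-speed interrupt or isochronous
 * endpoint with a 1024-byte max packet size and 3 transactions per
 * microframe ends up with a 3072-byte maximum frame size.
 */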
139 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
140 }
141
142 /*------------------------------------------------------------------------*
143 * usbd_get_dma_delay
144 *
145 * The following function is called when we need to
146 * synchronize with DMA hardware.
147 *
148 * Returns:
149 * 0: no DMA delay required
150 * Else: milliseconds of DMA delay
151 *------------------------------------------------------------------------*/
152 usb_timeout_t
153 usbd_get_dma_delay(struct usb_device *udev)
154 {
155 const struct usb_bus_methods *mtod;
156 uint32_t temp;
157
158 mtod = udev->bus->methods;
159 temp = 0;
160
161 if (mtod->get_dma_delay) {
162 (mtod->get_dma_delay) (udev, &temp);
163 /*
164 * Round up and convert to milliseconds. Note that we use
165 * 1024 milliseconds per second to save a division.
166 */
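/* e.g. a raw value of 1..1024 becomes 1 ms, 1025..2048 becomes 2 ms */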
167 temp += 0x3FF;
168 temp /= 0x400;
169 }
170 return (temp);
171 }
172
173 /*------------------------------------------------------------------------*
174 * usbd_transfer_setup_sub_malloc
175 *
176 * This function will allocate one or more DMA'able memory chunks
177 * according to the "size", "align" and "count" arguments. On return,
178 * "ppc" points to a linear array of USB page caches.
179 *
180 * If the "align" argument is equal to "1" a non-contiguous allocation
181 * can happen. Else if the "align" argument is greater than "1", the
182 * allocation will always be contiguous in memory.
183 *
184 * Returns:
185 * 0: Success
186 * Else: Failure
187 *------------------------------------------------------------------------*/
188 #if USB_HAVE_BUSDMA
189 uint8_t
190 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
191 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
192 usb_size_t count)
193 {
194 struct usb_page_cache *pc;
195 struct usb_page *pg;
196 void *buf;
197 usb_size_t n_dma_pc;
198 usb_size_t n_dma_pg;
199 usb_size_t n_obj;
200 usb_size_t x;
201 usb_size_t y;
202 usb_size_t r;
203 usb_size_t z;
204
205 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
206 align));
207 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
208
209 if (count == 0) {
210 return (0); /* nothing to allocate */
211 }
212 /*
213 * Make sure that the size is aligned properly.
214 */
215 size = -((-size) & (-align));
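/*
 * The expression above rounds "size" up to the next multiple of
 * "align" using two's complement arithmetic; it assumes "align" is
 * a power of two. For example, size = 5 and align = 4 gives 8.
 */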
216
217 /*
218 * Try multi-allocation chunks to reduce the number of DMA
219 * allocations, since DMA allocations are slow.
220 */
221 if (align == 1) {
222 /* special case - non-cached multi page DMA memory */
223 n_dma_pc = count;
224 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
225 n_obj = 1;
226 } else if (size >= USB_PAGE_SIZE) {
227 n_dma_pc = count;
228 n_dma_pg = 1;
229 n_obj = 1;
230 } else {
231 /* compute number of objects per page */
232 n_obj = (USB_PAGE_SIZE / size);
233 /*
234 * Compute number of DMA chunks, rounded up
235 * to nearest one:
236 */
237 n_dma_pc = ((count + n_obj - 1) / n_obj);
238 n_dma_pg = 1;
239 }
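/*
 * Example (illustrative numbers): with size = 64, count = 10 and a
 * 4096-byte USB_PAGE_SIZE, the last branch packs n_obj = 64 objects
 * per chunk and therefore needs only n_dma_pc = 1 DMA chunk.
 */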
240
241 /*
242 * DMA memory is allocated once, but mapped twice. That's why
243 * there is one list for auto-free and another list for
244 * non-auto-free which only holds the mapping and not the
245 * allocation.
246 */
247 if (parm->buf == NULL) {
248 /* reserve memory (auto-free) */
249 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
250 parm->dma_page_cache_ptr += n_dma_pc;
251
252 /* reserve memory (no-auto-free) */
253 parm->dma_page_ptr += count * n_dma_pg;
254 parm->xfer_page_cache_ptr += count;
255 return (0);
256 }
257 for (x = 0; x != n_dma_pc; x++) {
258 /* need to initialize the page cache */
259 parm->dma_page_cache_ptr[x].tag_parent =
260 &parm->curr_xfer->xroot->dma_parent_tag;
261 }
262 for (x = 0; x != count; x++) {
263 /* need to initialize the page cache */
264 parm->xfer_page_cache_ptr[x].tag_parent =
265 &parm->curr_xfer->xroot->dma_parent_tag;
266 }
267
268 if (ppc != NULL) {
269 *ppc = parm->xfer_page_cache_ptr;
270 }
271 r = count; /* set remainder count */
272 z = n_obj * size; /* set allocation size */
273 pc = parm->xfer_page_cache_ptr;
274 pg = parm->dma_page_ptr;
275
276 for (x = 0; x != n_dma_pc; x++) {
277 if (r < n_obj) {
278 /* compute last remainder */
279 z = r * size;
280 n_obj = r;
281 }
282 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
283 pg, z, align)) {
284 return (1); /* failure */
285 }
286 /* Set beginning of current buffer */
287 buf = parm->dma_page_cache_ptr->buffer;
288 /* Make room for one DMA page cache and one page */
289 parm->dma_page_cache_ptr++;
290 pg += n_dma_pg;
291
292 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
293 /* Load sub-chunk into DMA */
294 if (usb_pc_dmamap_create(pc, size)) {
295 return (1); /* failure */
296 }
297 pc->buffer = USB_ADD_BYTES(buf, y * size);
298 pc->page_start = pg;
299
300 USB_MTX_LOCK(pc->tag_parent->mtx);
301 (void)usb_pc_load_mem(pc, size, 1 /* synchronous */ );
302 USB_MTX_UNLOCK(pc->tag_parent->mtx);
303 }
304 }
305
306 parm->xfer_page_cache_ptr = pc;
307 parm->dma_page_ptr = pg;
308 return (0);
309 }
310 #endif
311
312 /*------------------------------------------------------------------------*
313 * usbd_transfer_setup_sub - transfer setup subroutine
314 *
315 * This function must be called from the "xfer_setup" callback of the
316 * USB Host or Device controller driver when setting up a USB
317 * transfer. This function will setup correct packet sizes, buffer
318 * sizes, flags and more, that are stored in the "usb_xfer"
319 * structure.
320 *------------------------------------------------------------------------*/
321 void
322 usbd_transfer_setup_sub(struct usb_setup_params *parm)
323 {
324 enum {
325 REQ_SIZE = 8,
326 MIN_PKT = 8,
327 };
328 struct usb_xfer *xfer = parm->curr_xfer;
329 const struct usb_config *setup = parm->curr_setup;
330 struct usb_endpoint_ss_comp_descriptor *ecomp;
331 struct usb_endpoint_descriptor *edesc;
332 struct usb_std_packet_size std_size;
333 usb_frcount_t n_frlengths;
334 usb_frcount_t n_frbuffers;
335 usb_frcount_t x;
336 uint16_t maxp_old;
337 uint8_t type;
338 uint8_t zmps;
339
340 /*
341 * Sanity check. The following parameters must be initialized before
342 * calling this function.
343 */
344 if ((parm->hc_max_packet_size == 0) ||
345 (parm->hc_max_packet_count == 0) ||
346 (parm->hc_max_frame_size == 0)) {
347 parm->err = USB_ERR_INVAL;
348 goto done;
349 }
350 edesc = xfer->endpoint->edesc;
351 ecomp = xfer->endpoint->ecomp;
352
353 type = (edesc->bmAttributes & UE_XFERTYPE);
354
355 xfer->flags = setup->flags;
356 xfer->nframes = setup->frames;
357 xfer->timeout = setup->timeout;
358 xfer->callback = setup->callback;
359 xfer->interval = setup->interval;
360 xfer->endpointno = edesc->bEndpointAddress;
361 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
362 xfer->max_packet_count = 1;
363 /* make a shadow copy: */
364 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
365
366 parm->bufsize = setup->bufsize;
367
368 switch (parm->speed) {
369 case USB_SPEED_HIGH:
370 switch (type) {
371 case UE_ISOCHRONOUS:
372 case UE_INTERRUPT:
373 xfer->max_packet_count +=
374 (xfer->max_packet_size >> 11) & 3;
375
376 /* check for invalid max packet count */
377 if (xfer->max_packet_count > 3)
378 xfer->max_packet_count = 3;
379 break;
380 default:
381 break;
382 }
383 xfer->max_packet_size &= 0x7FF;
384 break;
385 case USB_SPEED_SUPER:
386 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
387
388 if (ecomp != NULL)
389 xfer->max_packet_count += ecomp->bMaxBurst;
390
391 if ((xfer->max_packet_count == 0) ||
392 (xfer->max_packet_count > 16))
393 xfer->max_packet_count = 16;
394
395 switch (type) {
396 case UE_CONTROL:
397 xfer->max_packet_count = 1;
398 break;
399 case UE_ISOCHRONOUS:
400 if (ecomp != NULL) {
401 uint8_t mult;
402
403 mult = UE_GET_SS_ISO_MULT(
404 ecomp->bmAttributes) + 1;
405 if (mult > 3)
406 mult = 3;
407
408 xfer->max_packet_count *= mult;
409 }
410 break;
411 default:
412 break;
413 }
414 xfer->max_packet_size &= 0x7FF;
415 break;
416 default:
417 break;
418 }
419 /* range check "max_packet_count" */
420
421 if (xfer->max_packet_count > parm->hc_max_packet_count) {
422 xfer->max_packet_count = parm->hc_max_packet_count;
423 }
424
425 /* store max packet size value before filtering */
426
427 maxp_old = xfer->max_packet_size;
428
429 /* filter "wMaxPacketSize" according to HC capabilities */
430
431 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
432 (xfer->max_packet_size == 0)) {
433 xfer->max_packet_size = parm->hc_max_packet_size;
434 }
435 /* filter "wMaxPacketSize" according to standard sizes */
436
437 usbd_get_std_packet_size(&std_size, type, parm->speed);
438
439 if (std_size.range.min || std_size.range.max) {
440 if (xfer->max_packet_size < std_size.range.min) {
441 xfer->max_packet_size = std_size.range.min;
442 }
443 if (xfer->max_packet_size > std_size.range.max) {
444 xfer->max_packet_size = std_size.range.max;
445 }
446 } else {
447 if (xfer->max_packet_size >= std_size.fixed[3]) {
448 xfer->max_packet_size = std_size.fixed[3];
449 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
450 xfer->max_packet_size = std_size.fixed[2];
451 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
452 xfer->max_packet_size = std_size.fixed[1];
453 } else {
454 /* only one possibility left */
455 xfer->max_packet_size = std_size.fixed[0];
456 }
457 }
458
459 /*
460 * Check if the max packet size was outside its allowed range
461 * and clamped to a valid value:
462 */
463 if (maxp_old != xfer->max_packet_size)
464 xfer->flags_int.maxp_was_clamped = 1;
465
466 /* compute "max_frame_size" */
467
468 usbd_update_max_frame_size(xfer);
469
470 /* check interrupt interval and transfer pre-delay */
471
472 if (type == UE_ISOCHRONOUS) {
473 uint16_t frame_limit;
474
475 xfer->interval = 0; /* not used, must be zero */
476 xfer->flags_int.isochronous_xfr = 1; /* set flag */
477
478 if (xfer->timeout == 0) {
479 /*
480 * set a default timeout in
481 * case something goes wrong!
482 */
483 xfer->timeout = 1000 / 4;
484 }
485 switch (parm->speed) {
486 case USB_SPEED_LOW:
487 case USB_SPEED_FULL:
488 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
489 xfer->fps_shift = 0;
490 break;
491 default:
492 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
493 xfer->fps_shift = edesc->bInterval;
494 if (xfer->fps_shift > 0)
495 xfer->fps_shift--;
496 if (xfer->fps_shift > 3)
497 xfer->fps_shift = 3;
498 if (xfer->flags.pre_scale_frames != 0)
499 xfer->nframes <<= (3 - xfer->fps_shift);
500 break;
501 }
502
503 if (xfer->nframes > frame_limit) {
504 /*
505 * this is not going to work
506 * across all hardware
507 */
508 parm->err = USB_ERR_INVAL;
509 goto done;
510 }
511 if (xfer->nframes == 0) {
512 /*
513 * this is not a valid value
514 */
515 parm->err = USB_ERR_ZERO_NFRAMES;
516 goto done;
517 }
518 } else {
519 /*
520 * If a value is specified, use that; else check the
521 * endpoint descriptor!
522 */
523 if (type == UE_INTERRUPT) {
524 uint32_t temp;
525
526 if (xfer->interval == 0) {
527 xfer->interval = edesc->bInterval;
528
529 switch (parm->speed) {
530 case USB_SPEED_LOW:
531 case USB_SPEED_FULL:
532 break;
533 default:
534 /* 125us -> 1ms */
535 if (xfer->interval < 4)
536 xfer->interval = 1;
537 else if (xfer->interval > 16)
538 xfer->interval = (1 << (16 - 4));
539 else
540 xfer->interval =
541 (1 << (xfer->interval - 4));
542 break;
543 }
544 }
545
546 if (xfer->interval == 0) {
547 /*
548 * One millisecond is the smallest
549 * interval we support:
550 */
551 xfer->interval = 1;
552 }
553
554 xfer->fps_shift = 0;
555 temp = 1;
556
557 while ((temp != 0) && (temp < xfer->interval)) {
558 xfer->fps_shift++;
559 temp *= 2;
560 }
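/*
 * At this point "fps_shift" equals ceil(log2(interval)); e.g. an
 * interval of 8 ms gives a shift of 3. For high/super speed the
 * switch below adds 3 more, expressing the rate in 125 us
 * microframes instead of 1 ms frames.
 */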
561
562 switch (parm->speed) {
563 case USB_SPEED_LOW:
564 case USB_SPEED_FULL:
565 break;
566 default:
567 xfer->fps_shift += 3;
568 break;
569 }
570 }
571 }
572
573 /*
574 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
575 * to be equal to zero when setting up USB transfers, since that
576 * would lead to a lot of extra code in the USB kernel.
577 */
578
579 if ((xfer->max_frame_size == 0) ||
580 (xfer->max_packet_size == 0)) {
581 zmps = 1;
582
583 if ((parm->bufsize <= MIN_PKT) &&
584 (type != UE_CONTROL) &&
585 (type != UE_BULK)) {
586 /* workaround */
587 xfer->max_packet_size = MIN_PKT;
588 xfer->max_packet_count = 1;
589 parm->bufsize = 0; /* automatic setup length */
590 usbd_update_max_frame_size(xfer);
591
592 } else {
593 parm->err = USB_ERR_ZERO_MAXP;
594 goto done;
595 }
596
597 } else {
598 zmps = 0;
599 }
600
601 /*
602 * check if we should setup a default
603 * length:
604 */
605
606 if (parm->bufsize == 0) {
607 parm->bufsize = xfer->max_frame_size;
608
609 if (type == UE_ISOCHRONOUS) {
610 parm->bufsize *= xfer->nframes;
611 }
612 }
613 /*
614 * check if we are about to setup a proxy
615 * type of buffer:
616 */
617
618 if (xfer->flags.proxy_buffer) {
619 /* round bufsize up */
620
621 parm->bufsize += (xfer->max_frame_size - 1);
622
623 if (parm->bufsize < xfer->max_frame_size) {
624 /* length wrapped around */
625 parm->err = USB_ERR_INVAL;
626 goto done;
627 }
628 /* subtract remainder */
629
630 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
631
632 /* add length of USB device request structure, if any */
633
634 if (type == UE_CONTROL) {
635 parm->bufsize += REQ_SIZE; /* SETUP message */
636 }
637 }
638 xfer->max_data_length = parm->bufsize;
639
640 /* Setup "n_frlengths" and "n_frbuffers" */
641
642 if (type == UE_ISOCHRONOUS) {
643 n_frlengths = xfer->nframes;
644 n_frbuffers = 1;
645 } else {
646 if (type == UE_CONTROL) {
647 xfer->flags_int.control_xfr = 1;
648 if (xfer->nframes == 0) {
649 if (parm->bufsize <= REQ_SIZE) {
650 /*
651 * there will never be any data
652 * stage
653 */
654 xfer->nframes = 1;
655 } else {
656 xfer->nframes = 2;
657 }
658 }
659 } else {
660 if (xfer->nframes == 0) {
661 xfer->nframes = 1;
662 }
663 }
664
665 n_frlengths = xfer->nframes;
666 n_frbuffers = xfer->nframes;
667 }
668
669 /*
670 * check if we have room for the
671 * USB device request structure:
672 */
673
674 if (type == UE_CONTROL) {
675 if (xfer->max_data_length < REQ_SIZE) {
676 /* length wrapped around or too small bufsize */
677 parm->err = USB_ERR_INVAL;
678 goto done;
679 }
680 xfer->max_data_length -= REQ_SIZE;
681 }
682 /*
683 * Setup "frlengths" and shadow "frlengths" for keeping the
684 * initial frame lengths when a USB transfer is complete. This
685 * information is useful when computing isochronous offsets.
686 */
687 xfer->frlengths = parm->xfer_length_ptr;
688 parm->xfer_length_ptr += 2 * n_frlengths;
689
690 /* setup "frbuffers" */
691 xfer->frbuffers = parm->xfer_page_cache_ptr;
692 parm->xfer_page_cache_ptr += n_frbuffers;
693
694 /* initialize max frame count */
695 xfer->max_frame_count = xfer->nframes;
696
697 /*
698 * check if we need to setup
699 * a local buffer:
700 */
701
702 if (!xfer->flags.ext_buffer) {
703 #if USB_HAVE_BUSDMA
704 struct usb_page_search page_info;
705 struct usb_page_cache *pc;
706
707 if (usbd_transfer_setup_sub_malloc(parm,
708 &pc, parm->bufsize, 1, 1)) {
709 parm->err = USB_ERR_NOMEM;
710 } else if (parm->buf != NULL) {
711 usbd_get_page(pc, 0, &page_info);
712
713 xfer->local_buffer = page_info.buffer;
714
715 usbd_xfer_set_frame_offset(xfer, 0, 0);
716
717 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
718 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
719 }
720 }
721 #else
722 /* align data */
723 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
724
725 if (parm->buf != NULL) {
726 xfer->local_buffer =
727 USB_ADD_BYTES(parm->buf, parm->size[0]);
728
729 usbd_xfer_set_frame_offset(xfer, 0, 0);
730
731 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
732 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
733 }
734 }
735 parm->size[0] += parm->bufsize;
736
737 /* align data again */
738 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
739 #endif
740 }
741 /*
742 * Compute maximum buffer size
743 */
744
745 if (parm->bufsize_max < parm->bufsize) {
746 parm->bufsize_max = parm->bufsize;
747 }
748 #if USB_HAVE_BUSDMA
749 if (xfer->flags_int.bdma_enable) {
750 /*
751 * Setup "dma_page_ptr".
752 *
753 * Proof for formula below:
754 *
755 * Assume there are three USB frames having length "a", "b" and
756 * "c". These USB frames will at maximum need "z"
757 * "usb_page" structures. "z" is given by:
758 *
759 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
760 * ((c / USB_PAGE_SIZE) + 2);
761 *
762 * Constraining "a", "b" and "c" like this:
763 *
764 * (a + b + c) <= parm->bufsize
765 *
766 * We know that:
767 *
768 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
769 *
770 * Here is the general formula:
771 */
772 xfer->dma_page_ptr = parm->dma_page_ptr;
773 parm->dma_page_ptr += (2 * n_frbuffers);
774 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
775 }
776 #endif
777 if (zmps) {
778 /* correct maximum data length */
779 xfer->max_data_length = 0;
780 }
781 /* subtract USB frame remainder from "hc_max_frame_size" */
782
783 xfer->max_hc_frame_size =
784 (parm->hc_max_frame_size -
785 (parm->hc_max_frame_size % xfer->max_frame_size));
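/*
 * i.e. "hc_max_frame_size" rounded down to a whole number of USB
 * frames; e.g. hc_max_frame_size = 500 and max_frame_size = 64
 * gives max_hc_frame_size = 448.
 */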
786
787 if (xfer->max_hc_frame_size == 0) {
788 parm->err = USB_ERR_INVAL;
789 goto done;
790 }
791
792 /* initialize frame buffers */
793
794 if (parm->buf) {
795 for (x = 0; x != n_frbuffers; x++) {
796 xfer->frbuffers[x].tag_parent =
797 &xfer->xroot->dma_parent_tag;
798 #if USB_HAVE_BUSDMA
799 if (xfer->flags_int.bdma_enable &&
800 (parm->bufsize_max > 0)) {
801 if (usb_pc_dmamap_create(
802 xfer->frbuffers + x,
803 parm->bufsize_max)) {
804 parm->err = USB_ERR_NOMEM;
805 goto done;
806 }
807 }
808 #endif
809 }
810 }
811 done:
812 if (parm->err) {
813 /*
814 * Set some dummy values so that we avoid division by zero:
815 */
816 xfer->max_hc_frame_size = 1;
817 xfer->max_frame_size = 1;
818 xfer->max_packet_size = 1;
819 xfer->max_data_length = 0;
820 xfer->nframes = 0;
821 xfer->max_frame_count = 0;
822 }
823 }
824
825 static uint8_t
826 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
827 uint16_t n_setup)
828 {
829 uint8_t type;
830
831 while (n_setup--) {
832 type = setup_start[n_setup].type;
833 if ((type == UE_BULK) || (type == UE_BULK_INTR) ||
834 (type == UE_TYPE_ANY))
835 return (1);
836 }
837 return (0);
838 }
839
840 /*------------------------------------------------------------------------*
841 * usbd_transfer_setup - setup an array of USB transfers
842 *
843 * NOTE: You must always call "usbd_transfer_unsetup" after calling
844 * "usbd_transfer_setup" if success was returned.
845 *
846 * The idea is that the USB device driver should pre-allocate all its
847 * transfers by one call to this function.
848 *
849 * Return values:
850 * 0: Success
851 * Else: Failure
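 *
 * Typical driver usage (illustrative sketch only; the callback name
 * and the "sc_xfer"/"sc_mtx" softc fields are hypothetical):
 *
 *	static const struct usb_config xxx_config[1] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.flags = {.short_xfer_ok = 1,},
 *			.callback = &xxx_bulk_read_callback,
 *		},
 *	};
 *
 *	error = usbd_transfer_setup(udev, &iface_index, sc->sc_xfer,
 *	    xxx_config, 1, sc, &sc->sc_mtx);
 *	...
 *	usbd_transfer_unsetup(sc->sc_xfer, 1);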
852 *------------------------------------------------------------------------*/
853 usb_error_t
854 usbd_transfer_setup(struct usb_device *udev,
855 const uint8_t *ifaces, struct usb_xfer **ppxfer,
856 const struct usb_config *setup_start, uint16_t n_setup,
857 void *priv_sc, struct mtx *xfer_mtx)
858 {
859 const struct usb_config *setup_end = setup_start + n_setup;
860 const struct usb_config *setup;
861 struct usb_setup_params *parm;
862 struct usb_endpoint *ep;
863 struct usb_xfer_root *info;
864 struct usb_xfer *xfer;
865 void *buf = NULL;
866 usb_error_t error = USB_ERR_NORMAL_COMPLETION;
867 uint16_t n;
868 uint16_t refcount;
869 uint8_t do_unlock;
870
871 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
872 "usbd_transfer_setup can sleep!");
873
874 /* do some checking first */
875
876 if (n_setup == 0) {
877 DPRINTFN(5, "setup array has zero length!\n");
878 return (USB_ERR_INVAL);
879 }
880 if (ifaces == NULL) {
881 DPRINTFN(5, "ifaces array is NULL!\n");
882 return (USB_ERR_INVAL);
883 }
884 if (xfer_mtx == NULL) {
885 DPRINTFN(5, "using global lock\n");
886 xfer_mtx = &Giant;
887 }
888
889 /* more sanity checks */
890
891 for (setup = setup_start, n = 0;
892 setup != setup_end; setup++, n++) {
893 if (setup->bufsize == (usb_frlength_t)-1) {
894 error = USB_ERR_BAD_BUFSIZE;
895 DPRINTF("invalid bufsize\n");
896 }
897 if (setup->callback == NULL) {
898 error = USB_ERR_NO_CALLBACK;
899 DPRINTF("no callback\n");
900 }
901 ppxfer[n] = NULL;
902 }
903
904 if (error)
905 return (error);
906
907 /* Protect scratch area */
908 do_unlock = usbd_ctrl_lock(udev);
909
910 refcount = 0;
911 info = NULL;
912
913 parm = &udev->scratch.xfer_setup[0].parm;
914 (void)memset_s(parm, sizeof(*parm), 0, sizeof(*parm));
915
916 parm->udev = udev;
917 parm->speed = usbd_get_speed(udev);
918 parm->hc_max_packet_count = 1;
919
920 if (parm->speed >= USB_SPEED_MAX) {
921 parm->err = USB_ERR_INVAL;
922 goto done;
923 }
924 /* setup all transfers */
925
926 while (1) {
927 if (buf) {
928 /*
929 * Initialize the "usb_xfer_root" structure,
930 * which is common for all our USB transfers.
931 */
932 info = USB_ADD_BYTES(buf, 0);
933
934 info->memory_base = buf;
935 info->memory_size = parm->size[0];
936
937 #if USB_HAVE_BUSDMA
938 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
939 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
940 #endif
941 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
942 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
943
944 cv_init(&info->cv_drain, "WDRAIN");
945
946 info->xfer_mtx = xfer_mtx;
947 #if USB_HAVE_BUSDMA
948 usb_dma_tag_setup(&info->dma_parent_tag,
949 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
950 xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits, parm->dma_tag_max);
951 #endif
952
953 info->bus = udev->bus;
954 info->udev = udev;
955
956 TAILQ_INIT(&info->done_q.head);
957 info->done_q.command = &usbd_callback_wrapper;
958 #if USB_HAVE_BUSDMA
959 TAILQ_INIT(&info->dma_q.head);
960 info->dma_q.command = &usb_bdma_work_loop;
961 #endif
962 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
963 info->done_m[0].xroot = info;
964 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
965 info->done_m[1].xroot = info;
966
967 /*
968 * In device side mode control endpoint
969 * requests need to run from a separate
970 * context, else there is a chance of
971 * deadlock!
972 */
973 if (setup_start == usb_control_ep_cfg ||
974 setup_start == usb_control_ep_quirk_cfg)
975 info->done_p =
976 USB_BUS_CONTROL_XFER_PROC(udev->bus);
977 else if (xfer_mtx == &Giant)
978 info->done_p =
979 USB_BUS_GIANT_PROC(udev->bus);
980 else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
981 info->done_p =
982 USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
983 else
984 info->done_p =
985 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
986 }
987 /* reset sizes */
988
989 parm->size[0] = 0;
990 parm->buf = buf;
991 parm->size[0] += sizeof(info[0]);
992
993 for (setup = setup_start, n = 0;
994 setup != setup_end; setup++, n++) {
995 /* skip USB transfers without callbacks: */
996 if (setup->callback == NULL) {
997 continue;
998 }
999 /* see if there is a matching endpoint */
1000 ep = usbd_get_endpoint(udev,
1001 ifaces[setup->if_index], setup);
1002
1003 /*
1004 * Check that the USB PIPE is valid and that
1005 * the endpoint mode is proper.
1006 *
1007 * Make sure we don't allocate a streams
1008 * transfer when such a combination is not
1009 * valid.
1010 */
1011 if ((ep == NULL) || (ep->methods == NULL) ||
1012 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1013 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1014 ((setup->stream_id != 0) &&
1015 ((setup->stream_id >= USB_MAX_EP_STREAMS) ||
1016 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1017 if (setup->flags.no_pipe_ok)
1018 continue;
1019 if ((setup->usb_mode != USB_MODE_DUAL) &&
1020 (setup->usb_mode != udev->flags.usb_mode))
1021 continue;
1022 parm->err = USB_ERR_NO_PIPE;
1023 goto done;
1024 }
1025
1026 /* align data properly */
1027 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1028
1029 /* store current setup pointer */
1030 parm->curr_setup = setup;
1031
1032 if (buf) {
1033 /*
1034 * Common initialization of the
1035 * "usb_xfer" structure.
1036 */
1037 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1038 xfer->address = udev->address;
1039 xfer->priv_sc = priv_sc;
1040 xfer->xroot = info;
1041
1042 callout_init_mtx(&xfer->timeout_handle,
1043 &udev->bus->bus_mtx, 0);
1044 } else {
1045 /*
1046 * Setup a dummy xfer, hence we are
1047 * writing to the "usb_xfer"
1048 * structure pointed to by "xfer"
1049 * before we have allocated any
1050 * memory:
1051 */
1052 xfer = &udev->scratch.xfer_setup[0].dummy;
1053 (void)memset_s(xfer, sizeof(*xfer), 0, sizeof(*xfer));
1054 refcount++;
1055 }
1056
1057 /* set transfer endpoint pointer */
1058 xfer->endpoint = ep;
1059
1060 /* set transfer stream ID */
1061 xfer->stream_id = setup->stream_id;
1062
1063 parm->size[0] += sizeof(xfer[0]);
1064 parm->methods = xfer->endpoint->methods;
1065 parm->curr_xfer = xfer;
1066
1067 /*
1068 * Call the Host or Device controller transfer
1069 * setup routine:
1070 */
1071 (udev->bus->methods->xfer_setup) (parm);
1072
1073 /* check for error */
1074 if (parm->err)
1075 goto done;
1076
1077 if (buf) {
1078 /*
1079 * Increment the endpoint refcount. This
1080 * basically prevents setting a new
1081 * configuration and alternate setting
1082 * when USB transfers are in use on
1083 * the given interface. Search the USB
1084 * code for "endpoint->refcount_alloc" if you
1085 * want more information.
1086 */
1087 USB_BUS_LOCK(info->bus);
1088 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1089 parm->err = USB_ERR_INVAL;
1090
1091 xfer->endpoint->refcount_alloc++;
1092
1093 if (xfer->endpoint->refcount_alloc == 0)
1094 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1095 USB_BUS_UNLOCK(info->bus);
1096
1097 /*
1098 * Whenever we set ppxfer[] then we
1099 * also need to increment the
1100 * "setup_refcount":
1101 */
1102 info->setup_refcount++;
1103
1104 /*
1105 * Transfer is successfully setup and
1106 * can be used:
1107 */
1108 ppxfer[n] = xfer;
1109 }
1110
1111 /* check for error */
1112 if (parm->err)
1113 goto done;
1114 }
1115
1116 if ((buf != NULL) || (parm->err != 0))
1117 goto done;
1118
1119 /* if no transfers, nothing to do */
1120 if (refcount == 0)
1121 goto done;
1122
1123 /* align data properly */
1124 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1125
1126 /* store offset temporarily */
1127 parm->size[1] = parm->size[0];
1128
1129 /*
1130 * The number of DMA tags required depends on
1131 * the number of endpoints. The current estimate
1132 * for maximum number of DMA tags per endpoint
1133 * is three:
1134 * 1) for loading memory
1135 * 2) for allocating memory
1136 * 3) for fixing memory [UHCI]
1137 */
1138 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1139
1140 /*
1141 * DMA tags for QH, TD, Data and more.
1142 */
1143 parm->dma_tag_max += 8;
1144
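/*
 * Note: "parm" was zeroed above, so during this sizing pass the
 * pointers below start at NULL. Advancing them and subtracting a
 * NULL base yields the byte count each region needs; the real
 * addresses are assigned after the single allocation further down.
 */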
1145 parm->dma_tag_p += parm->dma_tag_max;
1146
1147 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1148 ((uint8_t *)0);
1149
1150 /* align data properly */
1151 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1152
1153 /* store offset temporarily */
1154 parm->size[3] = parm->size[0];
1155
1156 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1157 ((uint8_t *)0);
1158
1159 /* align data properly */
1160 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1161
1162 /* store offset temporarily */
1163 parm->size[4] = parm->size[0];
1164
1165 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1166 ((uint8_t *)0);
1167
1168 /* store end offset temporarily */
1169 parm->size[5] = parm->size[0];
1170
1171 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1172 ((uint8_t *)0);
1173
1174 /* store end offset temporarily */
1175
1176 parm->size[2] = parm->size[0];
1177
1178 /* align data properly */
1179 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1180
1181 parm->size[6] = parm->size[0];
1182
1183 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1184 ((uint8_t *)0);
1185
1186 /* align data properly */
1187 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1188
1189 /* allocate zeroed memory */
1190 buf = bsd_malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1191
1192 if (buf == NULL) {
1193 parm->err = USB_ERR_NOMEM;
1194 DPRINTFN(0, "cannot allocate memory block for "
1195 "configuration (%d bytes)\n",
1196 parm->size[0]);
1197 goto done;
1198 }
1199 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1200 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1201 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1202 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1203 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1204 }
1205
1206 done:
1207 if (buf) {
1208 if (info->setup_refcount == 0) {
1209 /*
1210 * "usbd_transfer_unsetup_sub" will unlock
1211 * the bus mutex before returning !
1212 */
1213 USB_BUS_LOCK(info->bus);
1214
1215 /* something went wrong */
1216 usbd_transfer_unsetup_sub(info, 0);
1217 }
1218 }
1219
1220 /* check if any errors happened */
1221 if (parm->err)
1222 usbd_transfer_unsetup(ppxfer, n_setup);
1223
1224 error = parm->err;
1225
1226 if (do_unlock)
1227 usbd_ctrl_unlock(udev);
1228
1229 return (error);
1230 }
1231
1232 /*------------------------------------------------------------------------*
1233 * usbd_transfer_unsetup_sub - factored out code
1234 *------------------------------------------------------------------------*/
1235 static void
1236 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1237 {
1238 #if USB_HAVE_BUSDMA
1239 struct usb_page_cache *pc;
1240 #endif
1241
1242 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1243
1244 /* wait for any outstanding DMA operations */
1245
1246 if (needs_delay) {
1247 usb_timeout_t temp;
1248 temp = usbd_get_dma_delay(info->udev);
1249 if (temp != 0) {
1250 usb_pause_mtx(&info->bus->bus_mtx,
1251 USB_MS_TO_TICKS(temp));
1252 }
1253 }
1254
1255 /* make sure that our done messages are not queued anywhere */
1256 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1257
1258 USB_BUS_UNLOCK(info->bus);
1259
1260 #if USB_HAVE_BUSDMA
1261 /* free DMA'able memory, if any */
1262 pc = info->dma_page_cache_start;
1263 while (pc != info->dma_page_cache_end) {
1264 usb_pc_free_mem(pc);
1265 pc++;
1266 }
1267
1268 /* free DMA maps in all "xfer->frbuffers" */
1269 pc = info->xfer_page_cache_start;
1270 while (pc != info->xfer_page_cache_end) {
1271 usb_pc_dmamap_destroy(pc);
1272 pc++;
1273 }
1274
1275 /* free all DMA tags */
1276 usb_dma_tag_unsetup(&info->dma_parent_tag);
1277 #endif
1278
1279 cv_destroy(&info->cv_drain);
1280
1281 /*
1282 * free the "memory_base" last, since the "info" structure is
1283 * contained within the "memory_base"!
1284 */
1285 bsd_free(info->memory_base, M_USB);
1286 info->memory_base = NULL;
1287 }
1288
1289 /*------------------------------------------------------------------------*
1290 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1291 *
1292 * NOTE: All USB transfers in progress will get called back passing
1293 * the error code "USB_ERR_CANCELLED" before this function
1294 * returns.
1295 *------------------------------------------------------------------------*/
1296 void
1297 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1298 {
1299 struct usb_xfer *xfer;
1300 struct usb_xfer_root *info;
1301 uint8_t needs_delay = 0;
1302
1303 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1304 "usbd_transfer_unsetup can sleep!");
1305
1306 while (n_setup--) {
1307 xfer = pxfer[n_setup];
1308
1309 if (xfer == NULL)
1310 continue;
1311
1312 info = xfer->xroot;
1313
1314 USB_XFER_LOCK(xfer);
1315 USB_BUS_LOCK(info->bus);
1316
1317 /*
1318 * HINT: when you start/stop a transfer, it might be a
1319 * good idea to directly use the "pxfer[]" structure:
1320 *
1321 * usbd_transfer_start(sc->pxfer[0]);
1322 * usbd_transfer_stop(sc->pxfer[0]);
1323 *
1324 * That way, if your code has many parts that will not
1325 * stop running under the same lock, in other words
1326 * "xfer_mtx", the usbd_transfer_start and
1327 * usbd_transfer_stop functions will simply return
1328 * when they detect a NULL pointer argument.
1329 *
1330 * To avoid any races we clear the "pxfer[]" pointer
1331 * while holding the private mutex of the driver:
1332 */
1333 pxfer[n_setup] = NULL;
1334
1335 USB_BUS_UNLOCK(info->bus);
1336 USB_XFER_UNLOCK(xfer);
1337
1338 usbd_transfer_drain(xfer);
1339
1340 #if USB_HAVE_BUSDMA
1341 if (xfer->flags_int.bdma_enable)
1342 needs_delay = 1;
1343 #endif
1344 /*
1345 * NOTE: default endpoint does not have an
1346 * interface, even if endpoint->iface_index == 0
1347 */
1348 USB_BUS_LOCK(info->bus);
1349 xfer->endpoint->refcount_alloc--;
1350 USB_BUS_UNLOCK(info->bus);
1351
1352 callout_drain(&xfer->timeout_handle);
1353
1354 USB_BUS_LOCK(info->bus);
1355
1356 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1357 "reference count\n"));
1358
1359 info->setup_refcount--;
1360
1361 if (info->setup_refcount == 0) {
1362 usbd_transfer_unsetup_sub(info,
1363 needs_delay);
1364 } else {
1365 USB_BUS_UNLOCK(info->bus);
1366 }
1367 }
1368 }
1369
1370 /*------------------------------------------------------------------------*
1371 * usbd_control_transfer_init - factored out code
1372 *
1373 * In USB Device Mode we have to wait for the SETUP packet which
1374 * contains the "struct usb_device_request" structure, before we can
1375 * transfer any data. In USB Host Mode we already have the SETUP
1376 * packet at the moment the USB transfer is started. This leads us to
1377 * having to setup the USB transfer at two different places in
1378 * time. This function just contains factored out control transfer
1379 * initialisation code, so that we don't duplicate the code.
1380 *------------------------------------------------------------------------*/
1381 static void
1382 usbd_control_transfer_init(struct usb_xfer *xfer)
1383 {
1384 struct usb_device_request req;
1385
1386 /* copy out the USB request header */
1387
1388 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1389
1390 /* setup remainder */
1391
1392 xfer->flags_int.control_rem = UGETW(req.wLength);
1393
1394 /* copy direction to endpoint variable */
1395
1396 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1397 xfer->endpointno |=
1398 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1399 }
1400
1401 /*------------------------------------------------------------------------*
1402 * usbd_control_transfer_did_data
1403 *
1404 * This function returns non-zero if a control endpoint has
1405 * transferred the first DATA packet after the SETUP packet.
1406 * Else it returns zero.
1407 *------------------------------------------------------------------------*/
1408 static uint8_t
1409 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1410 {
1411 struct usb_device_request req;
1412
1413 /* SETUP packet is not yet sent */
1414 if (xfer->flags_int.control_hdr != 0)
1415 return (0);
1416
1417 /* copy out the USB request header */
1418 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1419
1420 /* compare remainder to the initial value */
1421 return (xfer->flags_int.control_rem != UGETW(req.wLength));
1422 }
1423
1424 /*------------------------------------------------------------------------*
1425 * usbd_setup_ctrl_transfer
1426 *
1427 * This function handles initialisation of control transfers. Control
1428 * transfers are special in that they can both transmit
1429 * and receive data.
1430 *
1431 * Return values:
1432 * 0: Success
1433 * Else: Failure
1434 *------------------------------------------------------------------------*/
1435 static int
1436 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1437 {
1438 usb_frlength_t len;
1439
1440 /* Check for control endpoint stall */
1441 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1442 /* the control transfer is no longer active */
1443 xfer->flags_int.control_stall = 1;
1444 xfer->flags_int.control_act = 0;
1445 } else {
1446 /* don't stall control transfer by default */
1447 xfer->flags_int.control_stall = 0;
1448 }
1449
1450 /* Check for invalid number of frames */
1451 if (xfer->nframes > 2) {
1452 /*
1453 * If you need to split a control transfer, you
1454 * have to do one part at a time. Only with
1455 * non-control transfers can you do multiple
1456 * parts at a time.
1457 */
1458 DPRINTFN(0, "Too many frames: %u\n",
1459 (unsigned int)xfer->nframes);
1460 goto error;
1461 }
1462
1463 /*
1464 * Check if there is a control
1465 * transfer in progress:
1466 */
1467 if (xfer->flags_int.control_act) {
1468 if (xfer->flags_int.control_hdr) {
1469 /* clear send header flag */
1470
1471 xfer->flags_int.control_hdr = 0;
1472
1473 /* setup control transfer */
1474 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1475 usbd_control_transfer_init(xfer);
1476 }
1477 }
1478 /* get data length */
1479
1480 len = xfer->sumlen;
1481
1482 } else {
1483 /* the size of the SETUP structure is hardcoded ! */
1484
1485 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1486 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1487 xfer->frlengths[0], sizeof(struct
1488 usb_device_request));
1489 goto error;
1490 }
1491 /* check USB mode */
1492 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1493 /* check number of frames */
1494 if (xfer->nframes != 1) {
1495 /*
1496 * We need to receive the setup
1497 * message first so that we know the
1498 * data direction!
1499 */
1500 DPRINTF("Misconfigured transfer\n");
1501 goto error;
1502 }
1503 /*
1504 * Set a dummy "control_rem" value. This
1505 * variable will be overwritten later by a
1506 * call to "usbd_control_transfer_init()" !
1507 */
1508 xfer->flags_int.control_rem = 0xFFFF;
1509 } else {
1510 /* setup "endpoint" and "control_rem" */
1511
1512 usbd_control_transfer_init(xfer);
1513 }
1514
1515 /* set transfer-header flag */
1516
1517 xfer->flags_int.control_hdr = 1;
1518
1519 /* get data length */
1520
1521 len = (xfer->sumlen - sizeof(struct usb_device_request));
1522 }
1523
1524 /* update did data flag */
1525
1526 xfer->flags_int.control_did_data =
1527 usbd_control_transfer_did_data(xfer);
1528
1529 /* check if there is a length mismatch */
1530
1531 if (len > xfer->flags_int.control_rem) {
1532 DPRINTFN(0, "Length (%d) greater than "
1533 "remaining length (%d)\n", len,
1534 xfer->flags_int.control_rem);
1535 goto error;
1536 }
1537 /* check if we are doing a short transfer */
1538
1539 if (xfer->flags.force_short_xfer) {
1540 xfer->flags_int.control_rem = 0;
1541 } else {
1542 if ((len != xfer->max_data_length) &&
1543 (len != xfer->flags_int.control_rem) &&
1544 (xfer->nframes != 1)) {
1545 DPRINTFN(0, "Short control transfer without "
1546 "force_short_xfer set\n");
1547 goto error;
1548 }
1549 xfer->flags_int.control_rem -= len;
1550 }
1551
1552 /* the status part is executed when "control_act" is 0 */
1553
1554 if ((xfer->flags_int.control_rem > 0) ||
1555 (xfer->flags.manual_status)) {
1556 /* don't execute the STATUS stage yet */
1557 xfer->flags_int.control_act = 1;
1558
1559 /* sanity check */
1560 if ((!xfer->flags_int.control_hdr) &&
1561 (xfer->nframes == 1)) {
1562 /*
1563 * This is not a valid operation!
1564 */
1565 DPRINTFN(0, "Invalid parameter "
1566 "combination\n");
1567 goto error;
1568 }
1569 } else {
1570 /* time to execute the STATUS stage */
1571 xfer->flags_int.control_act = 0;
1572 }
1573 return (0); /* success */
1574
1575 error:
1576 return (1); /* failure */
1577 }
1578
1579 /*------------------------------------------------------------------------*
1580 * usbd_transfer_submit - start USB hardware for the given transfer
1581 *
1582 * This function should only be called from the USB callback.
1583 *------------------------------------------------------------------------*/
1584 void
1585 usbd_transfer_submit(struct usb_xfer *xfer)
1586 {
1587 struct usb_xfer_root *info;
1588 struct usb_bus *bus;
1589 usb_frcount_t x;
1590
1591 info = xfer->xroot;
1592 bus = info->bus;
1593
1594 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1595 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1596 "read" : "write");
1597
1598 #ifdef LOSCFG_USB_DEBUG
1599 if (USB_DEBUG_VAR > 0) {
1600 USB_BUS_LOCK(bus);
1601
1602 usb_dump_endpoint(xfer->endpoint);
1603
1604 USB_BUS_UNLOCK(bus);
1605 }
1606 #endif
1607
1608 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1609 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1610
1611 /* Only open the USB transfer once! */
1612 if (!xfer->flags_int.open) {
1613 xfer->flags_int.open = 1;
1614
1615 DPRINTF("open\n");
1616
1617 USB_BUS_LOCK(bus);
1618 (xfer->endpoint->methods->open) (xfer);
1619 USB_BUS_UNLOCK(bus);
1620 }
1621 /* set "transferring" flag */
1622 xfer->flags_int.transferring = 1;
1623
1624 #if USB_HAVE_POWERD
1625 /* increment power reference */
1626 usbd_transfer_power_ref(xfer, 1);
1627 #endif
1628 /*
1629 * Check if the transfer is waiting on a queue, most
1630 * frequently the "done_q":
1631 */
1632 if (xfer->wait_queue) {
1633 USB_BUS_LOCK(bus);
1634 usbd_transfer_dequeue(xfer);
1635 USB_BUS_UNLOCK(bus);
1636 }
1637 /* clear "did_dma_delay" flag */
1638 xfer->flags_int.did_dma_delay = 0;
1639
1640 /* clear "did_close" flag */
1641 xfer->flags_int.did_close = 0;
1642
1643 #if USB_HAVE_BUSDMA
1644 /* clear "bdma_setup" flag */
1645 xfer->flags_int.bdma_setup = 0;
1646 #endif
1647 /* by default we cannot cancel any USB transfer immediately */
1648 xfer->flags_int.can_cancel_immed = 0;
1649
1650 /* clear lengths and frame counts by default */
1651 xfer->sumlen = 0;
1652 xfer->actlen = 0;
1653 xfer->aframes = 0;
1654
1655 /* clear any previous errors */
1656 xfer->error = USB_ERR_NORMAL_COMPLETION;
1657
1658 /* Check if the device is still alive */
1659 if (info->udev->state < USB_STATE_POWERED) {
1660 USB_BUS_LOCK(bus);
1661 /*
1662 * Must return cancelled error code else
1663 * device drivers can hang.
1664 */
1665 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1666 USB_BUS_UNLOCK(bus);
1667 return;
1668 }
1669
1670 /* sanity check */
1671 if (xfer->nframes == 0) {
1672 if (xfer->flags.stall_pipe) {
1673 /*
1674 * Special case - want to stall without transferring
1675 * any data:
1676 */
1677 DPRINTF("xfer=%p nframes=0: stall "
1678 "or clear stall!\n", xfer);
1679 USB_BUS_LOCK(bus);
1680 xfer->flags_int.can_cancel_immed = 1;
1681 /* start the transfer */
1682 usb_command_wrapper(&xfer->endpoint->
1683 endpoint_q[xfer->stream_id], xfer);
1684 USB_BUS_UNLOCK(bus);
1685 return;
1686 }
1687 USB_BUS_LOCK(bus);
1688 usbd_transfer_done(xfer, USB_ERR_INVAL);
1689 USB_BUS_UNLOCK(bus);
1690 return;
1691 }
1692 /* compute some variables */
1693
1694 for (x = 0; x != xfer->nframes; x++) {
1695 /* make a copy of the frlenghts[] */
1696 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1697 /* compute total transfer length */
1698 xfer->sumlen += xfer->frlengths[x];
1699 if (xfer->sumlen < xfer->frlengths[x]) {
1700 /* length wrapped around */
1701 USB_BUS_LOCK(bus);
1702 usbd_transfer_done(xfer, USB_ERR_INVAL);
1703 USB_BUS_UNLOCK(bus);
1704 return;
1705 }
1706 }
1707
1708 /* clear some internal flags */
1709
1710 xfer->flags_int.short_xfer_ok = 0;
1711 xfer->flags_int.short_frames_ok = 0;
1712
1713 /* check if this is a control transfer */
1714
1715 if (xfer->flags_int.control_xfr) {
1716 if (usbd_setup_ctrl_transfer(xfer)) {
1717 USB_BUS_LOCK(bus);
1718 usbd_transfer_done(xfer, USB_ERR_STALLED);
1719 USB_BUS_UNLOCK(bus);
1720 return;
1721 }
1722 }
1723 /*
1724 * Setup filtered version of some transfer flags,
1725 * in case of data read direction
1726 */
1727 if (USB_GET_DATA_ISREAD(xfer)) {
1728 if (xfer->flags.short_frames_ok) {
1729 xfer->flags_int.short_xfer_ok = 1;
1730 xfer->flags_int.short_frames_ok = 1;
1731 } else if (xfer->flags.short_xfer_ok) {
1732 xfer->flags_int.short_xfer_ok = 1;
1733
1734 /* check for control transfer */
1735 if (xfer->flags_int.control_xfr) {
1736 /*
1737 * 1) Control transfers do not support
1738 * reception of multiple short USB
1739 * frames in host mode and device side
1740 * mode, with exception of:
1741 *
1742 * 2) Due to sometimes buggy device
1743 * side firmware we need to do a
1744 * STATUS stage in case of short
1745 * control transfers in USB host mode.
1746 * The STATUS stage then becomes the
1747 * "alt_next" to the DATA stage.
1748 */
1749 xfer->flags_int.short_frames_ok = 1;
1750 }
1751 }
1752 }
1753 /*
1754 * Check if BUS-DMA support is enabled and try to load virtual
1755 * buffers into DMA, if any:
1756 */
1757 #if USB_HAVE_BUSDMA
1758 if (xfer->flags_int.bdma_enable) {
1759 /* insert the USB transfer last in the BUS-DMA queue */
1760 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1761 return;
1762 }
1763 #endif
1764 /*
1765 * Enter the USB transfer into the Host Controller or
1766 * Device Controller schedule:
1767 */
1768 usbd_pipe_enter(xfer);
1769 }
1770
1771 /*------------------------------------------------------------------------*
1772 * usbd_pipe_enter - factored out code
1773 *------------------------------------------------------------------------*/
1774 void
1775 usbd_pipe_enter(struct usb_xfer *xfer)
1776 {
1777 struct usb_endpoint *ep;
1778
1779 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1780
1781 USB_BUS_LOCK(xfer->xroot->bus);
1782
1783 ep = xfer->endpoint;
1784
1785 DPRINTF("enter\n");
1786
1787 /* the transfer can now be cancelled */
1788 xfer->flags_int.can_cancel_immed = 1;
1789
1790 /* enter the transfer */
1791 (ep->methods->enter) (xfer);
1792
1793 /* check for transfer error */
1794 if (xfer->error) {
1795 /* some error has happened */
1796 usbd_transfer_done(xfer, (usb_error_t)0);
1797 USB_BUS_UNLOCK(xfer->xroot->bus);
1798 return;
1799 }
1800
1801 /* start the transfer */
1802 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1803 USB_BUS_UNLOCK(xfer->xroot->bus);
1804 }
1805
1806 /*------------------------------------------------------------------------*
1807 * usbd_transfer_start - start a USB transfer
1808 *
1809 * NOTE: Calling this function more than one time will only
1810 * result in a single transfer start, until the USB transfer
1811 * completes.
1812 *------------------------------------------------------------------------*/
1813 void
1814 usbd_transfer_start(struct usb_xfer *xfer)
1815 {
1816 if (xfer == NULL) {
1817 /* transfer is gone */
1818 return;
1819 }
1820 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1821
1822 /* mark the USB transfer started */
1823
1824 if (!xfer->flags_int.started) {
1825 /* lock the BUS lock to avoid races updating flags_int */
1826 USB_BUS_LOCK(xfer->xroot->bus);
1827 xfer->flags_int.started = 1;
1828 USB_BUS_UNLOCK(xfer->xroot->bus);
1829 }
1830 /* check if the USB transfer callback is already transferring */
1831
1832 if (xfer->flags_int.transferring) {
1833 return;
1834 }
1835 USB_BUS_LOCK(xfer->xroot->bus);
1836 /* call the USB transfer callback */
1837 usbd_callback_ss_done_defer(xfer);
1838 USB_BUS_UNLOCK(xfer->xroot->bus);
1839 }
1840
1841 /*------------------------------------------------------------------------*
1842 * usbd_transfer_stop - stop a USB transfer
1843 *
1844 * NOTE: Calling this function more than one time will only
1845 * result in a single transfer stop.
1846 * NOTE: When this function returns it is not safe to free nor
1847 * reuse any DMA buffers. See "usbd_transfer_drain()".
1848 *------------------------------------------------------------------------*/
1849 void
1850 usbd_transfer_stop(struct usb_xfer *xfer)
1851 {
1852 struct usb_endpoint *ep;
1853
1854 if (xfer == NULL) {
1855 /* transfer is gone */
1856 return;
1857 }
1858 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1859
1860 /* check if the USB transfer was ever opened */
1861
1862 if (!xfer->flags_int.open) {
1863 if (xfer->flags_int.started) {
1864 /* nothing to do except clearing the "started" flag */
1865 /* lock the BUS lock to avoid races updating flags_int */
1866 USB_BUS_LOCK(xfer->xroot->bus);
1867 xfer->flags_int.started = 0;
1868 USB_BUS_UNLOCK(xfer->xroot->bus);
1869 }
1870 return;
1871 }
1872 /* try to stop the current USB transfer */
1873
1874 USB_BUS_LOCK(xfer->xroot->bus);
1875 /* override any previous error */
1876 xfer->error = USB_ERR_CANCELLED;
1877
1878 /*
1879 * Clear "open" and "started" when both private and USB lock
1880 * is locked so that we don't get a race updating "flags_int"
1881 */
1882 xfer->flags_int.open = 0;
1883 xfer->flags_int.started = 0;
1884
1885 /*
1886 * Check if we can cancel the USB transfer immediately.
1887 */
1888 if (xfer->flags_int.transferring) {
1889 if (xfer->flags_int.can_cancel_immed &&
1890 (!xfer->flags_int.did_close)) {
1891 DPRINTF("close\n");
1892 /*
1893 * The following will lead to an USB_ERR_CANCELLED
1894 * error code being passed to the USB callback.
1895 */
1896 (xfer->endpoint->methods->close) (xfer);
1897 /* only close once */
1898 xfer->flags_int.did_close = 1;
1899 } else {
1900 /* need to wait for the next done callback */
1901 }
1902 } else {
1903 DPRINTF("close\n");
1904
1905 /* close here and now */
1906 (xfer->endpoint->methods->close) (xfer);
1907
1908 /*
1909 * Any additional DMA delay is done by
1910 * "usbd_transfer_unsetup()".
1911 */
1912
1913 /*
1914 * Special case. Check if we need to restart a blocked
1915 * endpoint.
1916 */
1917 ep = xfer->endpoint;
1918
1919 /*
1920 * If the current USB transfer is completing we need
1921 * to start the next one:
1922 */
1923 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1924 usb_command_wrapper(
1925 &ep->endpoint_q[xfer->stream_id], NULL);
1926 }
1927 }
1928
1929 USB_BUS_UNLOCK(xfer->xroot->bus);
1930 }
1931
1932 /*------------------------------------------------------------------------*
1933 * usbd_transfer_pending
1934 *
1935 * This function will check if a USB transfer is pending, which is a
1936 * little bit complicated!
1937 * Return values:
1938 * 0: Not pending
1939 * 1: Pending: The USB transfer will receive a callback in the future.
1940 *------------------------------------------------------------------------*/
1941 uint8_t
1942 usbd_transfer_pending(struct usb_xfer *xfer)
1943 {
1944 struct usb_xfer_root *info;
1945 struct usb_xfer_queue *pq;
1946
1947 if (xfer == NULL) {
1948 /* transfer is gone */
1949 return (0);
1950 }
1951 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1952
1953 if (xfer->flags_int.transferring) {
1954 /* trivial case */
1955 return (1);
1956 }
1957 USB_BUS_LOCK(xfer->xroot->bus);
1958 if (xfer->wait_queue) {
1959 /* we are waiting on a queue somewhere */
1960 USB_BUS_UNLOCK(xfer->xroot->bus);
1961 return (1);
1962 }
1963 info = xfer->xroot;
1964 pq = &info->done_q;
1965
1966 if (pq->curr == xfer) {
1967 /* we are currently scheduled for callback */
1968 USB_BUS_UNLOCK(xfer->xroot->bus);
1969 return (1);
1970 }
1971 /* we are not pending */
1972 USB_BUS_UNLOCK(xfer->xroot->bus);
1973 return (0);
1974 }
1975
1976 /*------------------------------------------------------------------------*
1977 * usbd_transfer_drain
1978 *
1979 * This function will stop the USB transfer and wait for any
1980 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1981 are loaded into DMA can safely be freed or reused after this
1982 * function has returned.
1983 *------------------------------------------------------------------------*/
1984 void
1985 usbd_transfer_drain(struct usb_xfer *xfer)
1986 {
1987 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1988 "usbd_transfer_drain can sleep!");
1989
1990 if (xfer == NULL) {
1991 /* transfer is gone */
1992 return;
1993 }
1994 if (xfer->xroot->xfer_mtx != &Giant) {
1995 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1996 }
1997 USB_XFER_LOCK(xfer);
1998
1999 usbd_transfer_stop(xfer);
2000
2001 while (usbd_transfer_pending(xfer) ||
2002 xfer->flags_int.doing_callback) {
2003 /*
2004 * The callback is allowed to drop its transfer
2005 * mutex. In that case checking only
2006 * "usbd_transfer_pending()" is not enough to tell if
2007 * the USB transfer is fully drained. We also need to
2008 * check the internal "doing_callback" flag.
2009 */
2010 xfer->flags_int.draining = 1;
2011
2012 /*
2013 * Wait until the current outstanding USB
2014 * transfer is complete!
2015 */
2016 (void)cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2017 }
2018 USB_XFER_UNLOCK(xfer);
2019 }
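
/*------------------------------------------------------------------------*
 * Example: draining a transfer before freeing its buffers
 *
 * A minimal usage sketch only, assuming a hypothetical driver softc
 * "my_softc" with an "sc_xfer" field; it is not part of the original
 * driver code.
 *
 * static void
 * my_detach(struct my_softc *sc)
 * {
 *	// must be called without the transfer mutex held; this stops
 *	// the transfer and waits for BUS-DMA and HW-DMA to finish
 *	usbd_transfer_drain(sc->sc_xfer);
 *
 *	// buffers previously loaded into DMA may now be freed or reused
 * }
 *------------------------------------------------------------------------*/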
2020
2021 struct usb_page_cache *
2022 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2023 {
2024 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2025
2026 return (&xfer->frbuffers[frindex]);
2027 }
2028
2029 void *
2030 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2031 {
2032 struct usb_page_search page_info;
2033
2034 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2035
2036 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2037 return (page_info.buffer);
2038 }
2039
2040 /*------------------------------------------------------------------------*
2041 * usbd_xfer_get_fps_shift
2042 *
2043 * The following function is only useful for isochronous transfers. It
2044 * returns how many times the frame execution rate has been shifted
2045 * down.
2046 *
2047 * Return value:
2048 * Success: 0..3
2049 * Failure: 0
2050 *------------------------------------------------------------------------*/
2051 uint8_t
2052 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2053 {
2054 return (xfer->fps_shift);
2055 }
2056
2057 usb_frlength_t
2058 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2059 {
2060 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2061
2062 return (xfer->frlengths[frindex]);
2063 }
2064
2065 /*------------------------------------------------------------------------*
2066 * usbd_xfer_set_frame_data
2067 *
2068 * This function sets the pointer of the buffer that should be
2069 * loaded directly into DMA for the given USB frame. Passing "ptr"
2070 * equal to NULL while the corresponding "frlength" is greater
2071 * than zero gives undefined results!
2072 *------------------------------------------------------------------------*/
2073 void
2074 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2075 void *ptr, usb_frlength_t len)
2076 {
2077 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2078
2079 /* set virtual address to load and length */
2080 xfer->frbuffers[frindex].buffer = ptr;
2081 usbd_xfer_set_frame_len(xfer, frindex, len);
2082 }
2083
2084 void
2085 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2086 void **ptr, int *len)
2087 {
2088 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2089
2090 if (ptr != NULL)
2091 *ptr = xfer->frbuffers[frindex].buffer;
2092 if (len != NULL)
2093 *len = xfer->frlengths[frindex];
2094 }
2095
2096 /*------------------------------------------------------------------------*
2097 * usbd_xfer_old_frame_length
2098 *
2099 * This function returns the framelength of the given frame at the
2100 * time the transfer was submitted. This function can be used to
2101 * compute the starting data pointer of the next isochronous frame
2102 * when an isochronous transfer has completed.
2103 *------------------------------------------------------------------------*/
2104 usb_frlength_t
2105 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2106 {
2107 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2108
2109 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2110 }
2111
2112 void
2113 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2114 int *nframes)
2115 {
2116 if (actlen != NULL)
2117 *actlen = xfer->actlen;
2118 if (sumlen != NULL)
2119 *sumlen = xfer->sumlen;
2120 if (aframes != NULL)
2121 *aframes = xfer->aframes;
2122 if (nframes != NULL)
2123 *nframes = xfer->nframes;
2124 }
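
/*------------------------------------------------------------------------*
 * Example: reading status and frame data from a callback
 *
 * A minimal usage sketch only, assuming a hypothetical bulk-read
 * callback; it shows how the accessors above are typically combined
 * and is not part of the original driver code.
 *
 * static void
 * my_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
 * {
 *	void *buf;
 *	int actlen;
 *	int len;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *
 *	switch (USB_GET_STATE(xfer)) {
 *	case USB_ST_TRANSFERRED:
 *		// frame 0 holds the received data
 *		usbd_xfer_frame_data(xfer, 0, &buf, &len);
 *		// ... consume "actlen" bytes starting at "buf" ...
 *		// FALLTHROUGH
 *	case USB_ST_SETUP:
 *		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
 *		usbd_transfer_submit(xfer);
 *		break;
 *	default:		// error
 *		break;
 *	}
 * }
 *------------------------------------------------------------------------*/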
2125
2126 /*------------------------------------------------------------------------*
2127 * usbd_xfer_set_frame_offset
2128 *
2129 * This function sets the frame data buffer offset relative to the beginning
2130 * of the USB DMA buffer allocated for this USB transfer.
2131 *------------------------------------------------------------------------*/
2132 void
2133 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2134 usb_frcount_t frindex)
2135 {
2136 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2137 "when the USB buffer is external\n"));
2138 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2139
2140 /* set virtual address to load */
2141 xfer->frbuffers[frindex].buffer =
2142 USB_ADD_BYTES(xfer->local_buffer, offset);
2143 }
2144
2145 void
2146 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2147 {
2148 xfer->interval = i;
2149 }
2150
2151 void
2152 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2153 {
2154 xfer->timeout = t;
2155 }
2156
2157 void
2158 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2159 {
2160 xfer->nframes = n;
2161 }
2162
2163 usb_frcount_t
2164 usbd_xfer_max_frames(struct usb_xfer *xfer)
2165 {
2166 return (xfer->max_frame_count);
2167 }
2168
2169 usb_frlength_t
2170 usbd_xfer_max_len(struct usb_xfer *xfer)
2171 {
2172 return (xfer->max_data_length);
2173 }
2174
2175 usb_frlength_t
2176 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2177 {
2178 return (xfer->max_frame_size);
2179 }
2180
2181 void
2182 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2183 usb_frlength_t len)
2184 {
2185 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2186
2187 xfer->frlengths[frindex] = len;
2188 }
2189
2190 /*------------------------------------------------------------------------*
2191 * usb_callback_proc - factored out code
2192 *
2193 * This function performs USB callbacks.
2194 *------------------------------------------------------------------------*/
2195 static void
2196 usb_callback_proc(struct usb_proc_msg *_pm)
2197 {
2198 struct usb_done_msg *pm = (void *)_pm;
2199 struct usb_xfer_root *info = pm->xroot;
2200
2201 /* Change locking order */
2202 USB_BUS_UNLOCK(info->bus);
2203
2204 /*
2205 * We exploit the fact that the mutex is the same for all
2206 * callbacks that will be called from this thread:
2207 */
2208 USB_MTX_LOCK(info->xfer_mtx);
2209 USB_BUS_LOCK(info->bus);
2210
2211 /* Continue where we lost track */
2212 usb_command_wrapper(&info->done_q,
2213 info->done_q.curr);
2214
2215 USB_MTX_UNLOCK(info->xfer_mtx);
2216 }
2217
2218 /*------------------------------------------------------------------------*
2219 * usbd_callback_ss_done_defer
2220 *
2221 * This function will defer the start, stop and done callback to the
2222 * correct thread.
2223 *------------------------------------------------------------------------*/
2224 static void
2225 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2226 {
2227 struct usb_xfer_root *info = xfer->xroot;
2228 struct usb_xfer_queue *pq = &info->done_q;
2229
2230 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2231
2232 if (pq->curr != xfer) {
2233 usbd_transfer_enqueue(pq, xfer);
2234 }
2235 if (!pq->recurse_1) {
2236 /*
2237 * We have to postpone the callback due to the fact we
2238 * will have a Lock Order Reversal, LOR, if we try to
2239 * proceed!
2240 */
2241 if (usb_proc_msignal(info->done_p,
2242 &info->done_m[0], &info->done_m[1])) {
2243 /* ignore */
2244 }
2245 } else {
2246 /* clear second recurse flag */
2247 pq->recurse_2 = 0;
2248 }
2249 return;
2250
2251 }
2252
2253 /*------------------------------------------------------------------------*
2254 * usbd_callback_wrapper
2255 *
2256 * This is a wrapper for USB callbacks. This wrapper does some
2257 * auto-magic things like figuring out if we can call the callback
2258 * directly from the current context or if we need to wakeup the
2259 * interrupt process.
2260 *------------------------------------------------------------------------*/
2261 static void
2262 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2263 {
2264 struct usb_xfer *xfer = pq->curr;
2265 struct usb_xfer_root *info = xfer->xroot;
2266
2267 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2268 if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2269 /*
2270 * Cases that end up here:
2271 *
2272 * 5) HW interrupt done callback or other source.
2273 */
2274 DPRINTFN(3, "case 5\n");
2275
2276 /*
2277 * We have to postpone the callback due to the fact we
2278 * will have a Lock Order Reversal, LOR, if we try to
2279 * proceed!
2280 */
2281 if (usb_proc_msignal(info->done_p,
2282 &info->done_m[0], &info->done_m[1])) {
2283 /* ignore */
2284 }
2285 return;
2286 }
2287 /*
2288 * Cases that end up here:
2289 *
2290 * 1) We are starting a transfer
2291 * 2) We are prematurely calling back a transfer
2292 * 3) We are stopping a transfer
2293 * 4) We are doing an ordinary callback
2294 */
2295 DPRINTFN(3, "case 1-4\n");
2296 /* get next USB transfer in the queue */
2297 info->done_q.curr = NULL;
2298
2299 /* set flag in case of drain */
2300 xfer->flags_int.doing_callback = 1;
2301
2302 USB_BUS_UNLOCK(info->bus);
2303 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2304
2305 /* set correct USB state for callback */
2306 if (!xfer->flags_int.transferring) {
2307 xfer->usb_state = USB_ST_SETUP;
2308 if (!xfer->flags_int.started) {
2309 /* we got stopped before we even got started */
2310 USB_BUS_LOCK(info->bus);
2311 goto done;
2312 }
2313 } else {
2314 if (usbd_callback_wrapper_sub(xfer)) {
2315 /* the callback has been deferred */
2316 USB_BUS_LOCK(info->bus);
2317 goto done;
2318 }
2319 #if USB_HAVE_POWERD
2320 /* decrement power reference */
2321 usbd_transfer_power_ref(xfer, -1);
2322 #endif
2323 xfer->flags_int.transferring = 0;
2324
2325 if (xfer->error) {
2326 xfer->usb_state = USB_ST_ERROR;
2327 } else {
2328 /* set transferred state */
2329 xfer->usb_state = USB_ST_TRANSFERRED;
2330 #if USB_HAVE_BUSDMA
2331 /* sync DMA memory, if any */
2332 if (xfer->flags_int.bdma_enable &&
2333 (!xfer->flags_int.bdma_no_post_sync)) {
2334 usb_bdma_post_sync(xfer);
2335 }
2336 #endif
2337 }
2338 }
2339
2340 #if USB_HAVE_PF
2341 if (xfer->usb_state != USB_ST_SETUP) {
2342 USB_BUS_LOCK(info->bus);
2343 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2344 USB_BUS_UNLOCK(info->bus);
2345 }
2346 #endif
2347 /* call processing routine */
2348 (xfer->callback) (xfer, xfer->error);
2349
2350 /* pickup the USB mutex again */
2351 USB_BUS_LOCK(info->bus);
2352
2353 /*
2354 * Check if we got started after we got cancelled, but
2355 * before we managed to do the callback.
2356 */
2357 if ((!xfer->flags_int.open) &&
2358 (xfer->flags_int.started) &&
2359 (xfer->usb_state == USB_ST_ERROR)) {
2360 /* clear flag in case of drain */
2361 xfer->flags_int.doing_callback = 0;
2362 /* try to loop, but not recursively */
2363 usb_command_wrapper(&info->done_q, xfer);
2364 return;
2365 }
2366
2367 done:
2368 /* clear flag in case of drain */
2369 xfer->flags_int.doing_callback = 0;
2370
2371 /*
2372 * Check if we are draining.
2373 */
2374 if (xfer->flags_int.draining &&
2375 (!xfer->flags_int.transferring)) {
2376 /* "usbd_transfer_drain()" is waiting for end of transfer */
2377 xfer->flags_int.draining = 0;
2378 (void)cv_broadcast(&info->cv_drain);
2379 }
2380
2381 /* do the next callback, if any */
2382 usb_command_wrapper(&info->done_q,
2383 info->done_q.curr);
2384 }
2385
2386 /*------------------------------------------------------------------------*
2387 * usb_dma_delay_done_cb
2388 *
2389 * This function is called when the DMA delay has been executed, and
2390 * will make sure that the callback is called to complete the USB
2391 * transfer. This code path is usually only used when there is an USB
2392 * error like USB_ERR_CANCELLED.
2393 *------------------------------------------------------------------------*/
2394 void
2395 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2396 {
2397 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2398
2399 DPRINTFN(3, "Completed %p\n", xfer);
2400
2401 /* queue callback for execution, again */
2402 usbd_transfer_done(xfer, (usb_error_t)0);
2403 }
2404
2405 /*------------------------------------------------------------------------*
2406 * usbd_transfer_dequeue
2407 *
2408 * - This function is used to remove an USB transfer from a USB
2409 * transfer queue.
2410 *
2411 * - This function can be called multiple times in a row.
2412 *------------------------------------------------------------------------*/
2413 void
2414 usbd_transfer_dequeue(struct usb_xfer *xfer)
2415 {
2416 struct usb_xfer_queue *pq;
2417 uint32_t int_save;
2418
2419 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
2420 pq = xfer->wait_queue;
2421 if (pq != NULL) {
2422 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2423 xfer->wait_queue = NULL;
2424 }
2425 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
2426 }
2427
2428 /*------------------------------------------------------------------------*
2429 * usbd_transfer_enqueue
2430 *
2431 * - This function is used to insert an USB transfer into a USB
2432 * transfer queue.
2433 *
2434 * - This function can be called multiple times in a row.
2435 *------------------------------------------------------------------------*/
2436 void
2437 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2438 {
2439 uint32_t int_save;
2440 /*
2441 * Insert the USB transfer into the queue, if it is not
2442 * already on a USB transfer queue:
2443 */
2444 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
2445 if (xfer->wait_queue == NULL) {
2446 xfer->wait_queue = pq;
2447 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2448 }
2449 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
2450 }
2451
2452 /*------------------------------------------------------------------------*
2453 * usbd_transfer_done
2454 *
2455 * - This function is used to remove an USB transfer from the busdma,
2456 * pipe or interrupt queue.
2457 *
2458 * - This function is used to queue the USB transfer on the done
2459 * queue.
2460 *
2461 * - This function is used to stop any USB transfer timeouts.
2462 *------------------------------------------------------------------------*/
2463 void
2464 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2465 {
2466 struct usb_xfer_root *info = xfer->xroot;
2467
2468 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2469
2470 DPRINTF("err=%s\n", usbd_errstr(error));
2471
2472 /*
2473 * If we are not transferring then just return.
2474 * This can happen during transfer cancel.
2475 */
2476 if (!xfer->flags_int.transferring) {
2477 DPRINTF("not transferring\n");
2478 /* end of control transfer, if any */
2479 xfer->flags_int.control_act = 0;
2480 return;
2481 }
2482 /* only set transfer error, if not already set */
2483 if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2484 xfer->error = error;
2485
2486 /* stop any callouts */
2487 callout_stop(&xfer->timeout_handle);
2488
2489 /*
2490 * If we are waiting on a queue, just remove the USB transfer
2491 * from the queue, if any. We should have the required locks
2492 * locked to do the remove when this function is called.
2493 */
2494 usbd_transfer_dequeue(xfer);
2495
2496 #if USB_HAVE_BUSDMA
2497 if (mtx_owned(info->xfer_mtx)) {
2498 struct usb_xfer_queue *pq;
2499
2500 /*
2501 * If the private USB lock is not locked, then we assume
2502 * that the BUS-DMA load stage has been passed:
2503 */
2504 pq = &info->dma_q;
2505
2506 if (pq->curr == xfer) {
2507 /* start the next BUS-DMA load, if any */
2508 usb_command_wrapper(pq, NULL);
2509 }
2510 }
2511 #endif
2512 /* keep some statistics */
2513 if (xfer->error == USB_ERR_CANCELLED) {
2514 info->udev->stats_cancelled.uds_requests
2515 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2516 } else if (xfer->error != USB_ERR_NORMAL_COMPLETION) {
2517 info->udev->stats_err.uds_requests
2518 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2519 } else {
2520 info->udev->stats_ok.uds_requests
2521 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2522 }
2523
2524 /* call the USB transfer callback */
2525 usbd_callback_ss_done_defer(xfer);
2526 }
2527
2528 /*------------------------------------------------------------------------*
2529 * usbd_transfer_start_cb
2530 *
2531 * This function is called to start the USB transfer when
2532 * "xfer->interval" is greater than zero, and the endpoint type is
2533 * BULK or CONTROL.
2534 *------------------------------------------------------------------------*/
2535 static void
2536 usbd_transfer_start_cb(void *arg)
2537 {
2538 struct usb_xfer *xfer = arg;
2539 struct usb_endpoint *ep = xfer->endpoint;
2540
2541 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2542
2543 DPRINTF("start\n");
2544
2545 #if USB_HAVE_PF
2546 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2547 #endif
2548
2549 /* the transfer can now be cancelled */
2550 xfer->flags_int.can_cancel_immed = 1;
2551
2552 /* start USB transfer, if no error */
2553 if (xfer->error == 0)
2554 (ep->methods->start) (xfer);
2555
2556 /* check for transfer error */
2557 if (xfer->error) {
2558 /* some error has happened */
2559 usbd_transfer_done(xfer, (usb_error_t)0);
2560 }
2561 }
2562
2563 /*------------------------------------------------------------------------*
2564 * usbd_xfer_set_stall
2565 *
2566 * This function is used to set the stall flag outside the
2567 * callback. This function is NULL safe.
2568 *------------------------------------------------------------------------*/
2569 void
2570 usbd_xfer_set_stall(struct usb_xfer *xfer)
2571 {
2572 if (xfer == NULL) {
2573 /* tearing down */
2574 return;
2575 }
2576 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2577
2578 /* avoid any races by locking the USB mutex */
2579 USB_BUS_LOCK(xfer->xroot->bus);
2580 xfer->flags.stall_pipe = 1;
2581 USB_BUS_UNLOCK(xfer->xroot->bus);
2582 }
2583
2584 int
2585 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2586 {
2587 return (xfer->endpoint->is_stalled);
2588 }
2589
2590 /*------------------------------------------------------------------------*
2591 * usbd_transfer_clear_stall
2592 *
2593 * This function is used to clear the stall flag outside the
2594 * callback. This function is NULL safe.
2595 *------------------------------------------------------------------------*/
2596 void
2597 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2598 {
2599 if (xfer == NULL) {
2600 /* tearing down */
2601 return;
2602 }
2603 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2604
2605 /* avoid any races by locking the USB mutex */
2606 USB_BUS_LOCK(xfer->xroot->bus);
2607
2608 xfer->flags.stall_pipe = 0;
2609
2610 USB_BUS_UNLOCK(xfer->xroot->bus);
2611 }
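
/*------------------------------------------------------------------------*
 * Example: forcing a stall recovery cycle
 *
 * A minimal usage sketch only, assuming a hypothetical driver mutex
 * "sc_mtx" and transfer "sc_xfer"; it is not part of the original
 * driver code. Both helpers above require the transfer mutex.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	// request that the pipe be stalled and recovered before the
 *	// next transfer is started
 *	usbd_xfer_set_stall(sc->sc_xfer);
 *	usbd_transfer_start(sc->sc_xfer);
 *	mtx_unlock(&sc->sc_mtx);
 *------------------------------------------------------------------------*/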
2612
2613 /*------------------------------------------------------------------------*
2614 * usbd_pipe_start
2615 *
2616 * This function is used to add an USB transfer to the pipe transfer list.
2617 *------------------------------------------------------------------------*/
2618 void
2619 usbd_pipe_start(struct usb_xfer_queue *pq)
2620 {
2621 struct usb_endpoint *ep;
2622 struct usb_xfer *xfer;
2623 uint8_t type;
2624
2625 xfer = pq->curr;
2626 ep = xfer->endpoint;
2627
2628 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2629
2630 /*
2631 * If the endpoint is already stalled we do nothing!
2632 */
2633 if (ep->is_stalled) {
2634 DPRINTFN(1, "is_stalled\n");
2635 return;
2636 }
2637 /*
2638 * Check if we are supposed to stall the endpoint:
2639 */
2640 if (xfer->flags.stall_pipe) {
2641 struct usb_device *udev;
2642 struct usb_xfer_root *info;
2643
2644 /* clear stall command */
2645 xfer->flags.stall_pipe = 0;
2646
2647 /* get pointer to USB device */
2648 info = xfer->xroot;
2649 udev = info->udev;
2650
2651 /*
2652 * Only stall BULK and INTERRUPT endpoints.
2653 */
2654 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2655 if ((type == UE_BULK) ||
2656 (type == UE_INTERRUPT)) {
2657 uint8_t did_stall;
2658
2659 did_stall = 1;
2660
2661 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2662 (udev->bus->methods->set_stall) (
2663 udev, ep, &did_stall);
2664 } else if (udev->ctrl_xfer[1]) {
2665 info = udev->ctrl_xfer[1]->xroot;
2666 (void)usb_proc_msignal(
2667 USB_BUS_CS_PROC(info->bus),
2668 &udev->cs_msg[0], &udev->cs_msg[1]);
2669 } else {
2670 /* should not happen */
2671 DPRINTFN(0, "No stall handler\n");
2672 }
2673 /*
2674 * Check if we should stall. Some USB hardware
2675 * handles set- and clear-stall in hardware.
2676 */
2677 if (did_stall) {
2678 /*
2679 * The transfer will be continued when
2680 * the clear-stall control endpoint
2681 * message is received.
2682 */
2683 ep->is_stalled = 1;
2684 DPRINTFN(1, "did_stall\n");
2685 return;
2686 }
2687 } else if (type == UE_ISOCHRONOUS) {
2688 /*
2689 * Make sure any FIFO overflow or other FIFO
2690 * error conditions go away by resetting the
2691 * endpoint FIFO through the clear stall
2692 * method.
2693 */
2694 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2695 (udev->bus->methods->clear_stall) (udev, ep);
2696 }
2697 }
2698 }
2699 /* Set or clear stall complete - special case */
2700 if (xfer->nframes == 0) {
2701 /* we are complete */
2702 xfer->aframes = 0;
2703 usbd_transfer_done(xfer, (usb_error_t)0);
2704 DPRINTFN(1, "nframes == 0\n");
2705 return;
2706 }
2707 /*
2708 * Handled cases:
2709 *
2710 * 1) Start the first transfer queued.
2711 *
2712 * 2) Re-start the current USB transfer.
2713 */
2714 /*
2715 * Check if there should be any
2716 * pre-transfer start delay:
2717 */
2718 if (xfer->interval > 0) {
2719 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2720 if ((type == UE_BULK) ||
2721 (type == UE_CONTROL)) {
2722 usbd_transfer_timeout_ms(xfer,
2723 &usbd_transfer_start_cb,
2724 xfer->interval);
2725 DPRINTFN(1, "usbd_transfer_timeout_ms \n");
2726 return;
2727 }
2728 }
2729
2730 usbd_transfer_start_cb((void *)xfer);
2731 }
2732
2733 /*------------------------------------------------------------------------*
2734 * usbd_transfer_timeout_ms
2735 *
2736 * This function is used to setup a timeout on the given USB
2737 * transfer. If the timeout has been deferred the callback given by
2738 * "cb" will get called after "ms" milliseconds.
2739 *------------------------------------------------------------------------*/
2740 void
2741 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2742 void (*cb) (void *arg), usb_timeout_t ms)
2743 {
2744 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2745
2746 /* defer delay */
2747 callout_reset(&xfer->timeout_handle,
2748 USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2749 }
2750
2751 /*------------------------------------------------------------------------*
2752 * usbd_callback_wrapper_sub
2753 *
2754 * - This function will update variables in an USB transfer after
2755 * the USB transfer is complete.
2756 *
2757 * - This function is used to start the next USB transfer on the
2758 * ep transfer queue, if any.
2759 *
2760 * NOTE: In some special cases the USB transfer will not be removed from
2761 * the pipe queue, but remain first. To enforce USB transfer removal call
2762 * this function passing the error code "USB_ERR_CANCELLED".
2763 *
2764 * Return values:
2765 * 0: Success.
2766 * Else: The callback has been deferred.
2767 *------------------------------------------------------------------------*/
2768 static uint8_t
2769 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2770 {
2771 struct usb_endpoint *ep;
2772 struct usb_bus *bus;
2773 usb_frcount_t x;
2774
2775 bus = xfer->xroot->bus;
2776
2777 if ((!xfer->flags_int.open) &&
2778 (!xfer->flags_int.did_close)) {
2779 DPRINTF("close\n");
2780 USB_BUS_LOCK(bus);
2781 (xfer->endpoint->methods->close) (xfer);
2782 USB_BUS_UNLOCK(bus);
2783 /* only close once */
2784 xfer->flags_int.did_close = 1;
2785 return (1); /* wait for new callback */
2786 }
2787 /*
2788 * If we have a non-hardware induced error we
2789 * need to do the DMA delay!
2790 */
2791 if ((xfer->error != 0) && (!xfer->flags_int.did_dma_delay) &&
2792 ((xfer->error == USB_ERR_CANCELLED) ||
2793 (xfer->error == USB_ERR_TIMEOUT) ||
2794 (bus->methods->start_dma_delay != NULL))) {
2795 usb_timeout_t temp;
2796
2797 /* only delay once */
2798 xfer->flags_int.did_dma_delay = 1;
2799
2800 /* we can not cancel this delay */
2801 xfer->flags_int.can_cancel_immed = 0;
2802
2803 temp = usbd_get_dma_delay(xfer->xroot->udev);
2804
2805 DPRINTFN(3, "DMA delay, %u ms, "
2806 "on %p\n", temp, xfer);
2807
2808 if (temp != 0) {
2809 USB_BUS_LOCK(bus);
2810 /*
2811 * Some hardware solutions have dedicated
2812 * events when it is safe to free DMA'ed
2813 * memory. For the other hardware platforms we
2814 * use a static delay.
2815 */
2816 if (bus->methods->start_dma_delay != NULL) {
2817 (bus->methods->start_dma_delay) (xfer);
2818 } else {
2819 usbd_transfer_timeout_ms(xfer,
2820 (void (*)(void *))&usb_dma_delay_done_cb,
2821 temp);
2822 }
2823 USB_BUS_UNLOCK(bus);
2824 return (1); /* wait for new callback */
2825 }
2826 }
2827 /* check actual number of frames */
2828 if (xfer->aframes > xfer->nframes) {
2829 if (xfer->error == 0) {
2830 panic("%s: actual number of frames, %d, is "
2831 "greater than initial number of frames, %d\n",
2832 __FUNCTION__, xfer->aframes, xfer->nframes);
2833 } else {
2834 /* just set some valid value */
2835 xfer->aframes = xfer->nframes;
2836 }
2837 }
2838 /* compute actual length */
2839 xfer->actlen = 0;
2840
2841 for (x = 0; x != xfer->aframes; x++) {
2842 xfer->actlen += xfer->frlengths[x];
2843 }
2844
2845 /*
2846 * Frames that were not transferred get zero actual length in
2847 * case the USB device driver does not check the actual number
2848 * of frames transferred, "xfer->aframes":
2849 */
2850 for (; x < xfer->nframes; x++) {
2851 usbd_xfer_set_frame_len(xfer, x, 0);
2852 }
2853
2854 /* check actual length */
2855 if (xfer->actlen > xfer->sumlen) {
2856 if (xfer->error == 0) {
2857 panic("%s: actual length, %d, is greater than "
2858 "initial length, %d\n",
2859 __FUNCTION__, xfer->actlen, xfer->sumlen);
2860 } else {
2861 /* just set some valid value */
2862 xfer->actlen = xfer->sumlen;
2863 }
2864 }
2865 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2866 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2867 xfer->aframes, xfer->nframes);
2868
2869 if (xfer->error) {
2870 /* end of control transfer, if any */
2871 xfer->flags_int.control_act = 0;
2872
2873 #if USB_HAVE_TT_SUPPORT
2874 switch (xfer->error) {
2875 case USB_ERR_NORMAL_COMPLETION:
2876 case USB_ERR_SHORT_XFER:
2877 case USB_ERR_STALLED:
2878 case USB_ERR_CANCELLED:
2879 /* nothing to do */
2880 break;
2881 default:
2882 /* try to reset the TT, if any */
2883 USB_BUS_LOCK(bus);
2884 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2885 USB_BUS_UNLOCK(bus);
2886 break;
2887 }
2888 #endif
2889 /* check if we should block the execution queue */
2890 if ((xfer->error != USB_ERR_CANCELLED) &&
2891 (xfer->flags.pipe_bof)) {
2892 DPRINTFN(2, "xfer=%p: Block On Failure "
2893 "on endpoint=%p\n", xfer, xfer->endpoint);
2894 goto done;
2895 }
2896 } else {
2897 /* check for short transfers */
2898 if (xfer->actlen < xfer->sumlen) {
2899 /* end of control transfer, if any */
2900 xfer->flags_int.control_act = 0;
2901
2902 if (!xfer->flags_int.short_xfer_ok) {
2903 xfer->error = USB_ERR_SHORT_XFER;
2904 if (xfer->flags.pipe_bof) {
2905 DPRINTFN(2, "xfer=%p: Block On Failure on "
2906 "Short Transfer on endpoint %p.\n",
2907 xfer, xfer->endpoint);
2908 goto done;
2909 }
2910 }
2911 } else {
2912 /*
2913 * Check if we are in the middle of a
2914 * control transfer:
2915 */
2916 if (xfer->flags_int.control_act) {
2917 DPRINTFN(5, "xfer=%p: Control transfer "
2918 "active on endpoint=%p\n", xfer, xfer->endpoint);
2919 goto done;
2920 }
2921 }
2922 }
2923
2924 ep = xfer->endpoint;
2925
2926 /*
2927 * If the current USB transfer is completing we need to start the
2928 * next one:
2929 */
2930 USB_BUS_LOCK(bus);
2931 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2932 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2933
2934 if ((ep->endpoint_q[xfer->stream_id].curr != NULL) ||
2935 (TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL)) {
2936 /* there is another USB transfer waiting */
2937 } else {
2938 /* this is the last USB transfer */
2939 /* clear isochronous sync flag */
2940 xfer->endpoint->is_synced = 0;
2941 }
2942 }
2943 USB_BUS_UNLOCK(bus);
2944 done:
2945 return (0);
2946 }
2947
2948 /*------------------------------------------------------------------------*
2949 * usb_command_wrapper
2950 *
2951 * This function is used to execute commands non-recursively on an USB
2952 * transfer.
2953 *------------------------------------------------------------------------*/
2954 void
2955 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2956 {
2957 uint32_t int_save;
2958
2959 if (xfer) {
2960 /*
2961 * If the transfer is not already processing,
2962 * queue it!
2963 */
2964 if (pq->curr != xfer) {
2965 usbd_transfer_enqueue(pq, xfer);
2966 if (pq->curr != NULL) {
2967 /* something is already processing */
2968 DPRINTFN(6, "busy %p\n", pq->curr);
2969 return;
2970 }
2971 }
2972 } else {
2973 /* Get next element in queue */
2974 pq->curr = NULL;
2975 }
2976
2977 if (!pq->recurse_1) {
2978 do {
2979
2980 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
2981 /* set both recurse flags */
2982 pq->recurse_1 = 1;
2983 pq->recurse_2 = 1;
2984
2985 if (pq->curr == NULL) {
2986 xfer = TAILQ_FIRST(&pq->head);
2987 if (xfer) {
2988 TAILQ_REMOVE(&pq->head, xfer,
2989 wait_entry);
2990 xfer->wait_queue = NULL;
2991 pq->curr = xfer;
2992 } else {
2993 /* clear first recurse flag */
2994 pq->recurse_1 = 0;
2995 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
2996 break;
2997 }
2998 }
2999 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3000
3001 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3002 (pq->command) (pq);
3003 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3004
3005 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
3006 if (pq->recurse_2) {
3007 /* clear first recurse flag */
3008 pq->recurse_1 = 0;
3009 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3010 break;
3011 }
3012 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3013 } while (1);
3014
3015 } else {
3016 /* clear second recurse flag */
3017 pq->recurse_2 = 0;
3018 }
3019 }
3020
3021 /*------------------------------------------------------------------------*
3022 * usbd_ctrl_transfer_setup
3023 *
3024 * This function is used to setup the default USB control endpoint
3025 * transfer.
3026 *------------------------------------------------------------------------*/
3027 void
3028 usbd_ctrl_transfer_setup(struct usb_device *udev)
3029 {
3030 struct usb_xfer *xfer;
3031 uint8_t no_resetup;
3032 uint8_t iface_index;
3033
3034 /* check for root HUB */
3035 if (udev->parent_hub == NULL)
3036 return;
3037 repeat:
3038
3039 xfer = udev->ctrl_xfer[0];
3040 if (xfer) {
3041 USB_XFER_LOCK(xfer);
3042 no_resetup =
3043 ((xfer->address == udev->address) &&
3044 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3045 udev->ddesc.bMaxPacketSize));
3046 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3047 if (no_resetup) {
3048 /*
3049 * NOTE: checking "xfer->address" and
3050 * starting the USB transfer must be
3051 * atomic!
3052 */
3053 usbd_transfer_start(xfer);
3054 }
3055 }
3056 USB_XFER_UNLOCK(xfer);
3057 } else {
3058 no_resetup = 0;
3059 }
3060
3061 if (no_resetup) {
3062 /*
3063 * All parameters are exactly the same as before.
3064 * Just return.
3065 */
3066 return;
3067 }
3068 /*
3069 * Update wMaxPacketSize for the default control endpoint:
3070 */
3071 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3072 udev->ddesc.bMaxPacketSize;
3073
3074 /*
3075 * Unsetup any existing USB transfer:
3076 */
3077 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3078
3079 /*
3080 * Reset clear stall error counter.
3081 */
3082 udev->clear_stall_errors = 0;
3083
3084 /*
3085 * Try to setup a new USB transfer for the
3086 * default control endpoint:
3087 */
3088 iface_index = 0;
3089 if (usbd_transfer_setup(udev, &iface_index,
3090 udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3091 usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3092 &udev->device_mtx)) {
3093 DPRINTFN(0, "could not setup default "
3094 "USB transfer\n");
3095 } else {
3096 goto repeat;
3097 }
3098 }
3099
3100 /*------------------------------------------------------------------------*
3101 * usbd_clear_stall_locked - factored out code
3102 *
3103 * NOTE: the intention of this function is not to reset the hardware
3104 * data toggle.
3105 *------------------------------------------------------------------------*/
3106 void
3107 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3108 {
3109 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3110
3111 /* check that we have a valid case */
3112 if ((udev->flags.usb_mode == USB_MODE_HOST) &&
3113 (udev->parent_hub != NULL) &&
3114 (udev->bus->methods->clear_stall != NULL) &&
3115 (ep->methods != NULL)) {
3116 (udev->bus->methods->clear_stall) (udev, ep);
3117 }
3118 }
3119
3120 /*------------------------------------------------------------------------*
3121 * usbd_clear_data_toggle - factored out code
3122 *
3123 * NOTE: the intention of this function is not to reset the hardware
3124 * data toggle on the USB device side.
3125 *------------------------------------------------------------------------*/
3126 void
3127 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3128 {
3129 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3130
3131 USB_BUS_LOCK(udev->bus);
3132 ep->toggle_next = 0;
3133 /* some hardware needs a callback to clear the data toggle */
3134 usbd_clear_stall_locked(udev, ep);
3135 USB_BUS_UNLOCK(udev->bus);
3136 }
3137
3138 /*------------------------------------------------------------------------*
3139 * usbd_clear_stall_callback - factored out clear stall callback
3140 *
3141 * Input parameters:
3142 * xfer1: Clear Stall Control Transfer
3143 * xfer2: Stalled USB Transfer
3144 *
3145 * This function is NULL safe.
3146 *
3147 * Return values:
3148 * 0: In progress
3149 * Else: Finished
3150 *
3151 * Clear stall config example:
3152 *
3153 * static const struct usb_config my_clearstall = {
3154 * .type = UE_CONTROL,
3155 * .endpoint = 0,
3156 * .direction = UE_DIR_ANY,
3157 * .interval = 50, //50 milliseconds
3158 * .bufsize = sizeof(struct usb_device_request),
3159 * .timeout = 1000, //1.000 seconds
3160 * .callback = &my_clear_stall_callback, // **
3161 * .usb_mode = USB_MODE_HOST,
3162 * };
3163 *
3164 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3165 * passing the correct parameters.
3166 *------------------------------------------------------------------------*/
3167 uint8_t
3168 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3169 struct usb_xfer *xfer2)
3170 {
3171 struct usb_device_request req;
3172
3173 if (xfer2 == NULL) {
3174 /* looks like we are tearing down */
3175 DPRINTF("NULL input parameter\n");
3176 return (0);
3177 }
3178 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3179 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3180
3181 switch (USB_GET_STATE(xfer1)) {
3182 case USB_ST_SETUP:
3183
3184 /*
3185 * pre-clear the data toggle to DATA0 ("umass.c" and
3186 * "ata-usb.c" depend on this)
3187 */
3188
3189 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3190
3191 /* setup a clear-stall packet */
3192
3193 req.bmRequestType = UT_WRITE_ENDPOINT;
3194 req.bRequest = UR_CLEAR_FEATURE;
3195 USETW(req.wValue, UF_ENDPOINT_HALT);
3196 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3197 req.wIndex[1] = 0;
3198 USETW(req.wLength, 0);
3199
3200 /*
3201 * "usbd_transfer_setup_sub()" will ensure that
3202 * we have sufficient room in the buffer for
3203 * the request structure!
3204 */
3205
3206 /* copy in the transfer */
3207
3208 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3209
3210 /* set length */
3211 xfer1->frlengths[0] = sizeof(req);
3212 xfer1->nframes = 1;
3213
3214 usbd_transfer_submit(xfer1);
3215 return (0);
3216
3217 case USB_ST_TRANSFERRED:
3218 break;
3219
3220 default: /* Error */
3221 if (xfer1->error == USB_ERR_CANCELLED) {
3222 return (0);
3223 }
3224 break;
3225 }
3226 return (1); /* Clear Stall Finished */
3227 }
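
/*------------------------------------------------------------------------*
 * Example: wrapping usbd_clear_stall_callback
 *
 * A minimal sketch of the "my_clear_stall_callback" mentioned in the
 * config example above. The softc layout and the "sc_stalled_xfer"
 * field are hypothetical; it is not part of the original driver code.
 *
 * static void
 * my_clear_stall_callback(struct usb_xfer *xfer, usb_error_t error)
 * {
 *	struct my_softc *sc = usbd_xfer_softc(xfer);
 *
 *	if (usbd_clear_stall_callback(xfer, sc->sc_stalled_xfer)) {
 *		// clear-stall finished - restart the stalled transfer
 *		usbd_transfer_start(sc->sc_stalled_xfer);
 *	}
 * }
 *------------------------------------------------------------------------*/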
3228
3229 /*------------------------------------------------------------------------*
3230 * usbd_transfer_poll
3231 *
3232 * The following function gets called from the USB keyboard driver and
3233 * UMASS when the system has panicked.
3234 *
3235 * NOTE: It is currently not possible to resume normal operation on
3236 * the USB controller which has been polled, due to clearing of the
3237 * "up_dsleep" and "up_msleep" flags.
3238 *------------------------------------------------------------------------*/
3239 void
3240 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3241 {
3242 struct usb_xfer *xfer;
3243 struct usb_xfer_root *xroot;
3244 struct usb_device *udev;
3245 struct usb_proc_msg *pm;
3246 uint16_t n;
3247 uint16_t drop_bus;
3248 uint16_t drop_xfer;
3249
3250 for (n = 0; n != max; n++) {
3251 /* Extra checks to avoid panic */
3252 xfer = ppxfer[n];
3253 if (xfer == NULL)
3254 continue; /* no USB transfer */
3255 xroot = xfer->xroot;
3256 if (xroot == NULL)
3257 continue; /* no USB root */
3258 udev = xroot->udev;
3259 if (udev == NULL)
3260 continue; /* no USB device */
3261 if (udev->bus == NULL)
3262 continue; /* no BUS structure */
3263 if (udev->bus->methods == NULL)
3264 continue; /* no BUS methods */
3265 if (udev->bus->methods->xfer_poll == NULL)
3266 continue; /* no poll method */
3267
3268 /* make sure that the BUS mutex is not locked */
3269 drop_bus = 0;
3270 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3271 mtx_unlock(&xroot->udev->bus->bus_mtx);
3272 drop_bus++;
3273 }
3274
3275 /* make sure that the transfer mutex is not locked */
3276 drop_xfer = 0;
3277 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3278 mtx_unlock(xroot->xfer_mtx);
3279 drop_xfer++;
3280 }
3281
3282 /* Make sure cv_signal() and cv_broadcast() are not called */
3283 USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3284 USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3285 USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3286 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus)->up_msleep = 0;
3287 USB_BUS_NON_GIANT_BULK_PROC(udev->bus)->up_msleep = 0;
3288
3289 /* poll USB hardware */
3290 (udev->bus->methods->xfer_poll) (udev->bus);
3291
3292 USB_BUS_LOCK(xroot->bus);
3293
3294 /* check for clear stall */
3295 if (udev->ctrl_xfer[1] != NULL) {
3296 /* poll clear stall start */
3297 pm = &udev->cs_msg[0].hdr;
3298 (pm->pm_callback) (pm);
3299 /* poll clear stall done thread */
3300 pm = &udev->ctrl_xfer[1]->
3301 xroot->done_m[0].hdr;
3302 (pm->pm_callback) (pm);
3303 }
3304
3305 /* poll done thread */
3306 pm = &xroot->done_m[0].hdr;
3307 (pm->pm_callback) (pm);
3308
3309 USB_BUS_UNLOCK(xroot->bus);
3310
3311 /* restore transfer mutex */
3312 while (drop_xfer--)
3313 mtx_lock(xroot->xfer_mtx);
3314
3315 /* restore BUS mutex */
3316 while (drop_bus--)
3317 mtx_lock(&xroot->udev->bus->bus_mtx);
3318 }
3319 }
3320
3321 static void
3322 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3323 uint8_t type, enum usb_dev_speed speed)
3324 {
3325 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3326 [USB_SPEED_LOW] = 8,
3327 [USB_SPEED_FULL] = 64,
3328 [USB_SPEED_HIGH] = 1024,
3329 [USB_SPEED_VARIABLE] = 1024,
3330 [USB_SPEED_SUPER] = 1024,
3331 };
3332
3333 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3334 [USB_SPEED_LOW] = 0, /* invalid */
3335 [USB_SPEED_FULL] = 1023,
3336 [USB_SPEED_HIGH] = 1024,
3337 [USB_SPEED_VARIABLE] = 3584,
3338 [USB_SPEED_SUPER] = 1024,
3339 };
3340
3341 static const uint16_t control_min[USB_SPEED_MAX] = {
3342 [USB_SPEED_LOW] = 8,
3343 [USB_SPEED_FULL] = 8,
3344 [USB_SPEED_HIGH] = 64,
3345 [USB_SPEED_VARIABLE] = 512,
3346 [USB_SPEED_SUPER] = 512,
3347 };
3348
3349 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3350 [USB_SPEED_LOW] = 8,
3351 [USB_SPEED_FULL] = 8,
3352 [USB_SPEED_HIGH] = 512,
3353 [USB_SPEED_VARIABLE] = 512,
3354 [USB_SPEED_SUPER] = 1024,
3355 };
3356
3357 uint16_t temp;
3358
3359 (void)memset_s(ptr, sizeof(*ptr), 0, sizeof(*ptr));
3360
3361 switch (type) {
3362 case UE_INTERRUPT:
3363 ptr->range.max = intr_range_max[speed];
3364 break;
3365 case UE_ISOCHRONOUS:
3366 ptr->range.max = isoc_range_max[speed];
3367 break;
3368 default:
3369 if (type == UE_BULK)
3370 temp = bulk_min[speed];
3371 else /* UE_CONTROL */
3372 temp = control_min[speed];
3373
3374 /* default is fixed */
3375 ptr->fixed[0] = temp;
3376 ptr->fixed[1] = temp;
3377 ptr->fixed[2] = temp;
3378 ptr->fixed[3] = temp;
3379
3380 if (speed == USB_SPEED_FULL) {
3381 /* multiple sizes */
3382 ptr->fixed[1] = 16;
3383 ptr->fixed[2] = 32;
3384 ptr->fixed[3] = 64;
3385 }
3386 if ((speed == USB_SPEED_VARIABLE) &&
3387 (type == UE_BULK)) {
3388 /* multiple sizes */
3389 ptr->fixed[2] = 1024;
3390 ptr->fixed[3] = 1536;
3391 }
3392 break;
3393 }
3394 }
3395
3396 void *
3397 usbd_xfer_softc(struct usb_xfer *xfer)
3398 {
3399 return (xfer->priv_sc);
3400 }
3401
3402 void *
3403 usbd_xfer_get_priv(struct usb_xfer *xfer)
3404 {
3405 return (xfer->priv_fifo);
3406 }
3407
3408 void
3409 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3410 {
3411 xfer->priv_fifo = ptr;
3412 }
3413
3414 uint8_t
3415 usbd_xfer_state(struct usb_xfer *xfer)
3416 {
3417 return (xfer->usb_state);
3418 }
3419
3420 void
3421 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3422 {
3423 switch (flag) {
3424 case USB_FORCE_SHORT_XFER:
3425 xfer->flags.force_short_xfer = 1;
3426 break;
3427 case USB_SHORT_XFER_OK:
3428 xfer->flags.short_xfer_ok = 1;
3429 break;
3430 case USB_MULTI_SHORT_OK:
3431 xfer->flags.short_frames_ok = 1;
3432 break;
3433 case USB_MANUAL_STATUS:
3434 xfer->flags.manual_status = 1;
3435 break;
3436 }
3437 }
3438
3439 void
3440 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3441 {
3442 switch (flag) {
3443 case USB_FORCE_SHORT_XFER:
3444 xfer->flags.force_short_xfer = 0;
3445 break;
3446 case USB_SHORT_XFER_OK:
3447 xfer->flags.short_xfer_ok = 0;
3448 break;
3449 case USB_MULTI_SHORT_OK:
3450 xfer->flags.short_frames_ok = 0;
3451 break;
3452 case USB_MANUAL_STATUS:
3453 xfer->flags.manual_status = 0;
3454 break;
3455 }
3456 }
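
/*------------------------------------------------------------------------*
 * Example: toggling transfer flags at runtime
 *
 * A minimal usage sketch only, assuming a hypothetical transfer array
 * index; flags are typically changed while holding the transfer mutex,
 * for example right before re-submitting from the callback.
 *
 *	// tolerate short reads instead of raising USB_ERR_SHORT_XFER
 *	usbd_xfer_set_flag(sc->sc_xfer[0], USB_SHORT_XFER_OK);
 *------------------------------------------------------------------------*/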
3457
3458 /*
3459 * The following function returns the time, in milliseconds, at which the
3460 * isochronous transfer was completed by the hardware. The returned value wraps
3461 * around 65536 milliseconds.
3462 */
3463 uint16_t
3464 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3465 {
3466 return (xfer->isoc_time_complete);
3467 }
3468
3469 /*
3470 * The following function returns non-zero if the max packet size
3471 * field was clamped to a valid value. Else it returns zero.
3472 */
3473 uint8_t
3474 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3475 {
3476 return (xfer->flags_int.maxp_was_clamped);
3477 }
3478
3479 /*
3480 * The following function computes the next isochronous frame number
3481 * where the first isochronous packet should be queued.
3482 *
3483 * The function returns non-zero if there was a discontinuity.
3484 * Else zero is returned for normal operation.
3485 */
3486 uint8_t
3487 usbd_xfer_get_isochronous_start_frame(struct usb_xfer *xfer, uint32_t frame_curr,
3488 uint32_t frame_min, uint32_t frame_ms, uint32_t frame_mask, uint32_t *p_frame_start)
3489 {
3490 uint32_t duration;
3491 uint32_t delta;
3492 uint8_t retval;
3493 uint8_t shift;
3494
3495 /* Compute time ahead of current schedule. */
3496 delta = (xfer->endpoint->isoc_next - frame_curr) & frame_mask;
3497
3498 /*
3499 * Check if it is the first transfer or if the future frame
3500 * delta is less than one millisecond or if the frame delta is
3501 * negative:
3502 */
3503 if (xfer->endpoint->is_synced == 0 ||
3504 delta < (frame_ms + frame_min) ||
3505 delta > (frame_mask / 2)) {
3506 /* Schedule transfer 2 milliseconds into the future. */
3507 xfer->endpoint->isoc_next = (frame_curr + 2 * frame_ms + frame_min) & frame_mask;
3508 xfer->endpoint->is_synced = 1;
3509
3510 retval = 1;
3511 } else {
3512 retval = 0;
3513 }
3514
3515 /* Store start time, if any. */
3516 if (p_frame_start != NULL)
3517 *p_frame_start = xfer->endpoint->isoc_next & frame_mask;
3518
3519 /* Get relative completion time, in milliseconds. */
3520 delta = xfer->endpoint->isoc_next - frame_curr + (frame_curr % frame_ms);
3521 delta &= frame_mask;
3522 delta /= frame_ms;
3523
3524 switch (usbd_get_speed(xfer->xroot->udev)) {
3525 case USB_SPEED_FULL:
3526 shift = 3;
3527 break;
3528 default:
3529 shift = usbd_xfer_get_fps_shift(xfer);
3530 break;
3531 }
3532
3533 /* Get duration in milliseconds, rounded up. */
3534 duration = ((xfer->nframes << shift) + 7) / 8;
3535
3536 /* Compute full 32-bit completion time, in milliseconds. */
3537 xfer->isoc_time_complete =
3538 usb_isoc_time_expand(xfer->xroot->bus, frame_curr / frame_ms) +
3539 delta + duration;
3540
3541 /* Compute next isochronous frame. */
3542 xfer->endpoint->isoc_next += duration * frame_ms;
3543 xfer->endpoint->isoc_next &= frame_mask;
3544
3545 return (retval);
3546 }
3547
3548 #undef USB_DEBUG_VAR
3549