1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2008-2021 Hans Petter Selasky. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include "implementation/global_implementation.h"
29
30 #undef USB_DEBUG_VAR
31 #define USB_DEBUG_VAR usb_debug
32
33 SPIN_LOCK_INIT(g_usb_wait_queue_spinlock);
34
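/*
 * Describes the packet sizes allowed for a given transfer type and bus
 * speed: either an inclusive "range", or up to four discrete "fixed"
 * sizes. usbd_transfer_setup_sub() clamps a descriptor's
 * wMaxPacketSize into the range, or snaps it down to the largest fixed
 * size that does not exceed it.
 */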
35 struct usb_std_packet_size {
36 struct {
37 uint16_t min; /* inclusive */
38 uint16_t max; /* inclusive */
39 } range;
40
41 uint16_t fixed[4];
42 };
43
44 static usb_callback_t usb_request_callback;
45
46 static const struct usb_config usb_control_ep_cfg[USB_CTRL_XFER_MAX] = {
47 /* This transfer is used for generic control endpoint transfers */
48
49 [0] = {
50 .type = UE_CONTROL,
51 .endpoint = 0x00, /* Control endpoint */
52 .direction = UE_DIR_ANY,
53 .bufsize = USB_EP0_BUFSIZE, /* bytes */
54 .flags = {.proxy_buffer = 1,},
55 .callback = &usb_request_callback,
56 .usb_mode = USB_MODE_DUAL, /* both modes */
57 },
58
59 /* This transfer is used for generic clear stall only */
60
61 [1] = {
62 .type = UE_CONTROL,
63 .endpoint = 0x00, /* Control pipe */
64 .direction = UE_DIR_ANY,
65 .bufsize = sizeof(struct usb_device_request),
66 .callback = &usb_do_clear_stall_callback,
67 .timeout = 1000, /* 1 second */
68 .interval = 50, /* 50ms */
69 .usb_mode = USB_MODE_HOST,
70 },
71 };
72
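/*
 * Same layout as "usb_control_ep_cfg" above, except for a 64 kilobyte
 * buffer and no proxy buffer flag; presumably selected by the setup
 * code for devices whose quirks require larger control transfers than
 * USB_EP0_BUFSIZE allows.
 */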
73 static const struct usb_config usb_control_ep_quirk_cfg[USB_CTRL_XFER_MAX] = {
74 /* This transfer is used for generic control endpoint transfers */
75
76 [0] = {
77 .type = UE_CONTROL,
78 .endpoint = 0x00, /* Control endpoint */
79 .direction = UE_DIR_ANY,
80 .bufsize = 65535, /* bytes */
81 .callback = &usb_request_callback,
82 .usb_mode = USB_MODE_DUAL, /* both modes */
83 },
84
85 /* This transfer is used for generic clear stall only */
86
87 [1] = {
88 .type = UE_CONTROL,
89 .endpoint = 0x00, /* Control pipe */
90 .direction = UE_DIR_ANY,
91 .bufsize = sizeof(struct usb_device_request),
92 .callback = &usb_do_clear_stall_callback,
93 .timeout = 1000, /* 1 second */
94 .interval = 50, /* 50ms */
95 .usb_mode = USB_MODE_HOST,
96 },
97 };
98
99 /* function prototypes */
100
101 static void usbd_update_max_frame_size(struct usb_xfer *);
102 static void usbd_transfer_unsetup_sub(struct usb_xfer_root *, uint8_t);
103 static void usbd_control_transfer_init(struct usb_xfer *);
104 static int usbd_setup_ctrl_transfer(struct usb_xfer *);
105 static void usb_callback_proc(struct usb_proc_msg *);
106 static void usbd_callback_ss_done_defer(struct usb_xfer *);
107 static void usbd_callback_wrapper(struct usb_xfer_queue *);
108 static void usbd_transfer_start_cb(void *);
109 static uint8_t usbd_callback_wrapper_sub(struct usb_xfer *);
110 static void usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
111 uint8_t type, enum usb_dev_speed speed);
112
113 /*------------------------------------------------------------------------*
114 * usb_request_callback
115 *------------------------------------------------------------------------*/
116 static void
117 usb_request_callback(struct usb_xfer *xfer, usb_error_t error)
118 {
119 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE)
120 usb_handle_request_callback(xfer, error);
121 else
122 usbd_do_request_callback(xfer, error);
123 }
124
125 /*------------------------------------------------------------------------*
126 * usbd_update_max_frame_size
127 *
128  * This function updates the maximum frame size, since high speed USB
129  * can transfer multiple consecutive packets.
130 *------------------------------------------------------------------------*/
131 static void
132 usbd_update_max_frame_size(struct usb_xfer *xfer)
133 {
134 /* compute maximum frame size */
135 /* this computation should not overflow 16-bit */
136 /* max = 15 * 1024 */
137
138 xfer->max_frame_size = xfer->max_packet_size * xfer->max_packet_count;
139 }
140
141 /*------------------------------------------------------------------------*
142 * usbd_get_dma_delay
143 *
144 * The following function is called when we need to
145 * synchronize with DMA hardware.
146 *
147 * Returns:
148 * 0: no DMA delay required
149 * Else: milliseconds of DMA delay
150 *------------------------------------------------------------------------*/
151 usb_timeout_t
152 usbd_get_dma_delay(struct usb_device *udev)
153 {
154 const struct usb_bus_methods *mtod;
155 uint32_t temp;
156
157 mtod = udev->bus->methods;
158 temp = 0;
159
160 if (mtod->get_dma_delay) {
161 (mtod->get_dma_delay) (udev, &temp);
162 /*
163 * Round up and convert to milliseconds. Note that we use
164 		 * 1024 milliseconds per second to save a division.
165 */
166 temp += 0x3FF;
167 temp /= 0x400;
168 }
169 return (temp);
170 }
171
172 /*------------------------------------------------------------------------*
173 * usbd_transfer_setup_sub_malloc
174 *
175 * This function will allocate one or more DMA'able memory chunks
176  * according to the "size", "align" and "count" arguments. "ppc" is
177  * set to point to a linear array of USB page caches afterwards.
178 *
179 * If the "align" argument is equal to "1" a non-contiguous allocation
180 * can happen. Else if the "align" argument is greater than "1", the
181 * allocation will always be contiguous in memory.
182 *
183 * Returns:
184 * 0: Success
185 * Else: Failure
186 *------------------------------------------------------------------------*/
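/*
 * Note: when "parm->buf" is still NULL this function only counts how
 * many "usb_page" and "usb_page_cache" structures will be needed and
 * advances the corresponding pointers. The actual DMA allocation and
 * mapping happens on the second pass, once the backing memory exists.
 */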
187 #if USB_HAVE_BUSDMA
188 uint8_t
189 usbd_transfer_setup_sub_malloc(struct usb_setup_params *parm,
190 struct usb_page_cache **ppc, usb_size_t size, usb_size_t align,
191 usb_size_t count)
192 {
193 struct usb_page_cache *pc;
194 struct usb_page *pg;
195 void *buf;
196 usb_size_t n_dma_pc;
197 usb_size_t n_dma_pg;
198 usb_size_t n_obj;
199 usb_size_t x;
200 usb_size_t y;
201 usb_size_t r;
202 usb_size_t z;
203
204 USB_ASSERT(align > 0, ("Invalid alignment, 0x%08x\n",
205 align));
206 USB_ASSERT(size > 0, ("Invalid size = 0\n"));
207
208 if (count == 0) {
209 return (0); /* nothing to allocate */
210 }
211 /*
212 * Make sure that the size is aligned properly.
213 */
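	/*
	 * Note: "-((-size) & (-align))" rounds "size" up to a multiple
	 * of "align", assuming "align" is a power of two.
	 */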
214 size = -((-size) & (-align));
215
216 /*
217 * Try multi-allocation chunks to reduce the number of DMA
218 	 * allocations, since DMA allocations are slow.
219 */
220 if (align == 1) {
221 /* special case - non-cached multi page DMA memory */
222 n_dma_pc = count;
223 n_dma_pg = (2 + (size / USB_PAGE_SIZE));
224 n_obj = 1;
225 } else if (size >= USB_PAGE_SIZE) {
226 n_dma_pc = count;
227 n_dma_pg = 1;
228 n_obj = 1;
229 } else {
230 /* compute number of objects per page */
231 n_obj = (USB_PAGE_SIZE / size);
232 /*
233 * Compute number of DMA chunks, rounded up
234 * to nearest one:
235 */
236 n_dma_pc = ((count + n_obj - 1) / n_obj);
237 n_dma_pg = 1;
238 }
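	/*
	 * Example, assuming a 4096 byte USB_PAGE_SIZE: allocating 100
	 * buffers of 64 bytes each (align > 1) gives n_obj = 64 objects
	 * per page and n_dma_pc = 2 single-page DMA chunks.
	 */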
239
240 /*
241 * DMA memory is allocated once, but mapped twice. That's why
242 * there is one list for auto-free and another list for
243 * non-auto-free which only holds the mapping and not the
244 * allocation.
245 */
246 if (parm->buf == NULL) {
247 /* reserve memory (auto-free) */
248 parm->dma_page_ptr += n_dma_pc * n_dma_pg;
249 parm->dma_page_cache_ptr += n_dma_pc;
250
251 /* reserve memory (no-auto-free) */
252 parm->dma_page_ptr += count * n_dma_pg;
253 parm->xfer_page_cache_ptr += count;
254 return (0);
255 }
256 for (x = 0; x != n_dma_pc; x++) {
257 /* need to initialize the page cache */
258 parm->dma_page_cache_ptr[x].tag_parent =
259 &parm->curr_xfer->xroot->dma_parent_tag;
260 }
261 for (x = 0; x != count; x++) {
262 /* need to initialize the page cache */
263 parm->xfer_page_cache_ptr[x].tag_parent =
264 &parm->curr_xfer->xroot->dma_parent_tag;
265 }
266
267 if (ppc != NULL) {
268 *ppc = parm->xfer_page_cache_ptr;
269 }
270 r = count; /* set remainder count */
271 z = n_obj * size; /* set allocation size */
272 pc = parm->xfer_page_cache_ptr;
273 pg = parm->dma_page_ptr;
274
275 for (x = 0; x != n_dma_pc; x++) {
276 if (r < n_obj) {
277 /* compute last remainder */
278 z = r * size;
279 n_obj = r;
280 }
281 if (usb_pc_alloc_mem(parm->dma_page_cache_ptr,
282 pg, z, align)) {
283 return (1); /* failure */
284 }
285 /* Set beginning of current buffer */
286 buf = parm->dma_page_cache_ptr->buffer;
287 /* Make room for one DMA page cache and one page */
288 parm->dma_page_cache_ptr++;
289 pg += n_dma_pg;
290
291 for (y = 0; (y != n_obj); y++, r--, pc++, pg += n_dma_pg) {
292 /* Load sub-chunk into DMA */
293 if (usb_pc_dmamap_create(pc, size)) {
294 return (1); /* failure */
295 }
296 pc->buffer = USB_ADD_BYTES(buf, y * size);
297 pc->page_start = pg;
298
299 USB_MTX_LOCK(pc->tag_parent->mtx);
300 if (usb_pc_load_mem(pc, size, 1 /* synchronous */ )) {
301 USB_MTX_UNLOCK(pc->tag_parent->mtx);
302 return (1); /* failure */
303 }
304 USB_MTX_UNLOCK(pc->tag_parent->mtx);
305 }
306 }
307
308 parm->xfer_page_cache_ptr = pc;
309 parm->dma_page_ptr = pg;
310 return (0);
311 }
312 #endif
313
314 /*------------------------------------------------------------------------*
315 * usbd_transfer_setup_sub - transfer setup subroutine
316 *
317 * This function must be called from the "xfer_setup" callback of the
318  * USB Host or Device controller driver when setting up a USB
319 * transfer. This function will setup correct packet sizes, buffer
320 * sizes, flags and more, that are stored in the "usb_xfer"
321 * structure.
322 *------------------------------------------------------------------------*/
323 void
324 usbd_transfer_setup_sub(struct usb_setup_params *parm)
325 {
326 enum {
327 REQ_SIZE = 8,
328 MIN_PKT = 8,
329 };
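	/*
	 * REQ_SIZE matches the 8 byte SETUP packet, i.e.
	 * sizeof(struct usb_device_request). MIN_PKT is the smallest
	 * packet size, used below as a fallback when the endpoint's
	 * maximum packet size would otherwise end up being zero.
	 */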
330 struct usb_xfer *xfer = parm->curr_xfer;
331 const struct usb_config *setup = parm->curr_setup;
332 struct usb_endpoint_ss_comp_descriptor *ecomp;
333 struct usb_endpoint_descriptor *edesc;
334 struct usb_std_packet_size std_size;
335 usb_frcount_t n_frlengths;
336 usb_frcount_t n_frbuffers;
337 usb_frcount_t x;
338 uint16_t maxp_old;
339 uint8_t type;
340 uint8_t zmps;
341
342 /*
343 * Sanity check. The following parameters must be initialized before
344 * calling this function.
345 */
346 if ((parm->hc_max_packet_size == 0) ||
347 (parm->hc_max_packet_count == 0) ||
348 (parm->hc_max_frame_size == 0)) {
349 parm->err = USB_ERR_INVAL;
350 goto done;
351 }
352 edesc = xfer->endpoint->edesc;
353 ecomp = xfer->endpoint->ecomp;
354
355 type = (edesc->bmAttributes & UE_XFERTYPE);
356
357 xfer->flags = setup->flags;
358 xfer->nframes = setup->frames;
359 xfer->timeout = setup->timeout;
360 xfer->callback = setup->callback;
361 xfer->interval = setup->interval;
362 xfer->endpointno = edesc->bEndpointAddress;
363 xfer->max_packet_size = UGETW(edesc->wMaxPacketSize);
364 xfer->max_packet_count = 1;
365 /* make a shadow copy: */
366 xfer->flags_int.usb_mode = parm->udev->flags.usb_mode;
367
368 parm->bufsize = setup->bufsize;
369
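	/*
	 * For high bandwidth high-speed endpoints bits 12..11 of
	 * "wMaxPacketSize" encode up to two additional transactions per
	 * microframe, while bits 10..0 hold the packet size itself,
	 * hence the "& 0x7FF" masking below. For SuperSpeed the burst
	 * count comes from the endpoint companion descriptor instead
	 * ("bMaxBurst" and, for isochronous endpoints, the Mult field).
	 */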
370 switch (parm->speed) {
371 case USB_SPEED_HIGH:
372 switch (type) {
373 case UE_ISOCHRONOUS:
374 case UE_INTERRUPT:
375 xfer->max_packet_count +=
376 (xfer->max_packet_size >> 11) & 3;
377
378 /* check for invalid max packet count */
379 if (xfer->max_packet_count > 3)
380 xfer->max_packet_count = 3;
381 break;
382 default:
383 break;
384 }
385 xfer->max_packet_size &= 0x7FF;
386 break;
387 case USB_SPEED_SUPER:
388 xfer->max_packet_count += (xfer->max_packet_size >> 11) & 3;
389
390 if (ecomp != NULL)
391 xfer->max_packet_count += ecomp->bMaxBurst;
392
393 if ((xfer->max_packet_count == 0) ||
394 (xfer->max_packet_count > 16))
395 xfer->max_packet_count = 16;
396
397 switch (type) {
398 case UE_CONTROL:
399 xfer->max_packet_count = 1;
400 break;
401 case UE_ISOCHRONOUS:
402 if (ecomp != NULL) {
403 uint8_t mult;
404
405 mult = UE_GET_SS_ISO_MULT(
406 ecomp->bmAttributes) + 1;
407 if (mult > 3)
408 mult = 3;
409
410 xfer->max_packet_count *= mult;
411 }
412 break;
413 default:
414 break;
415 }
416 xfer->max_packet_size &= 0x7FF;
417 break;
418 default:
419 break;
420 }
421 /* range check "max_packet_count" */
422
423 if (xfer->max_packet_count > parm->hc_max_packet_count) {
424 xfer->max_packet_count = parm->hc_max_packet_count;
425 }
426
427 /* store max packet size value before filtering */
428
429 maxp_old = xfer->max_packet_size;
430
431 /* filter "wMaxPacketSize" according to HC capabilities */
432
433 if ((xfer->max_packet_size > parm->hc_max_packet_size) ||
434 (xfer->max_packet_size == 0)) {
435 xfer->max_packet_size = parm->hc_max_packet_size;
436 }
437 /* filter "wMaxPacketSize" according to standard sizes */
438
439 usbd_get_std_packet_size(&std_size, type, parm->speed);
440
441 if (std_size.range.min || std_size.range.max) {
442 if (xfer->max_packet_size < std_size.range.min) {
443 xfer->max_packet_size = std_size.range.min;
444 }
445 if (xfer->max_packet_size > std_size.range.max) {
446 xfer->max_packet_size = std_size.range.max;
447 }
448 } else {
449 if (xfer->max_packet_size >= std_size.fixed[3]) {
450 xfer->max_packet_size = std_size.fixed[3];
451 } else if (xfer->max_packet_size >= std_size.fixed[2]) {
452 xfer->max_packet_size = std_size.fixed[2];
453 } else if (xfer->max_packet_size >= std_size.fixed[1]) {
454 xfer->max_packet_size = std_size.fixed[1];
455 } else {
456 /* only one possibility left */
457 xfer->max_packet_size = std_size.fixed[0];
458 }
459 }
460
461 /*
462 * Check if the max packet size was outside its allowed range
463 * and clamped to a valid value:
464 */
465 if (maxp_old != xfer->max_packet_size)
466 xfer->flags_int.maxp_was_clamped = 1;
467
468 /* compute "max_frame_size" */
469
470 usbd_update_max_frame_size(xfer);
471
472 /* check interrupt interval and transfer pre-delay */
473
474 if (type == UE_ISOCHRONOUS) {
475 uint16_t frame_limit;
476
477 xfer->interval = 0; /* not used, must be zero */
478 xfer->flags_int.isochronous_xfr = 1; /* set flag */
479
480 if (xfer->timeout == 0) {
481 /*
482 * set a default timeout in
483 * case something goes wrong!
484 */
485 xfer->timeout = 1000 / 4;
486 }
487 switch (parm->speed) {
488 case USB_SPEED_LOW:
489 case USB_SPEED_FULL:
490 frame_limit = USB_MAX_FS_ISOC_FRAMES_PER_XFER;
491 xfer->fps_shift = 0;
492 break;
493 default:
494 frame_limit = USB_MAX_HS_ISOC_FRAMES_PER_XFER;
495 xfer->fps_shift = edesc->bInterval;
496 if (xfer->fps_shift > 0)
497 xfer->fps_shift--;
498 if (xfer->fps_shift > 3)
499 xfer->fps_shift = 3;
500 if (xfer->flags.pre_scale_frames != 0)
501 xfer->nframes <<= (3 - xfer->fps_shift);
502 break;
503 }
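		/*
		 * Summary of the above: full and low speed isochronous
		 * frames are always 1ms, so no shifting is needed. At
		 * high and super speed "bInterval" is an exponent, and
		 * "fps_shift" becomes bInterval - 1 clamped to 0..3.
		 * With "pre_scale_frames" set, "nframes" is multiplied
		 * by 2^(3 - fps_shift) so that a frame count given per
		 * millisecond covers the same time at the faster
		 * microframe rate.
		 */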
504
505 if (xfer->nframes > frame_limit) {
506 /*
507 * this is not going to work
508 * cross hardware
509 */
510 parm->err = USB_ERR_INVAL;
511 goto done;
512 }
513 if (xfer->nframes == 0) {
514 /*
515 * this is not a valid value
516 */
517 parm->err = USB_ERR_ZERO_NFRAMES;
518 goto done;
519 }
520 } else {
521 /*
522 * If a value is specified use that else check the
523 * endpoint descriptor!
524 */
525 if (type == UE_INTERRUPT) {
526 uint32_t temp;
527
528 if (xfer->interval == 0) {
529 xfer->interval = edesc->bInterval;
530
531 switch (parm->speed) {
532 case USB_SPEED_LOW:
533 case USB_SPEED_FULL:
534 break;
535 default:
536 /* 125us -> 1ms */
537 if (xfer->interval < 4)
538 xfer->interval = 1;
539 else if (xfer->interval > 16)
540 xfer->interval = (1 << (16 - 4));
541 else
542 xfer->interval =
543 (1 << (xfer->interval - 4));
544 break;
545 }
546 }
547
548 if (xfer->interval == 0) {
549 /*
550 * One millisecond is the smallest
551 * interval we support:
552 */
553 xfer->interval = 1;
554 }
555
556 xfer->fps_shift = 0;
557 temp = 1;
558
559 while ((temp != 0) && (temp < xfer->interval)) {
560 xfer->fps_shift++;
561 temp *= 2;
562 }
563
564 switch (parm->speed) {
565 case USB_SPEED_LOW:
566 case USB_SPEED_FULL:
567 break;
568 default:
569 xfer->fps_shift += 3;
570 break;
571 }
572 }
573 }
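	/*
	 * Note on the interrupt interval math above: at high and super
	 * speed "bInterval" is an exponent in units of 125us
	 * microframes, so it is first converted to milliseconds
	 * (2^(bInterval - 4), at least 1ms). "fps_shift" then becomes
	 * the smallest power of two exponent covering the interval,
	 * plus 3 at high/super speed to express it in microframes
	 * again.
	 */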
574
575 /*
576 * NOTE: we do not allow "max_packet_size" or "max_frame_size"
577 	 * to be equal to zero when setting up USB transfers, since
578 	 * that would lead to a lot of extra code in the USB kernel.
579 */
580
581 if ((xfer->max_frame_size == 0) ||
582 (xfer->max_packet_size == 0)) {
583 zmps = 1;
584
585 if ((parm->bufsize <= MIN_PKT) &&
586 (type != UE_CONTROL) &&
587 (type != UE_BULK)) {
588 /* workaround */
589 xfer->max_packet_size = MIN_PKT;
590 xfer->max_packet_count = 1;
591 parm->bufsize = 0; /* automatic setup length */
592 usbd_update_max_frame_size(xfer);
593
594 } else {
595 parm->err = USB_ERR_ZERO_MAXP;
596 goto done;
597 }
598
599 } else {
600 zmps = 0;
601 }
602
603 /*
604 * check if we should setup a default
605 * length:
606 */
607
608 if (parm->bufsize == 0) {
609 parm->bufsize = xfer->max_frame_size;
610
611 if (type == UE_ISOCHRONOUS) {
612 parm->bufsize *= xfer->nframes;
613 }
614 }
615 /*
616 * check if we are about to setup a proxy
617 * type of buffer:
618 */
619
620 if (xfer->flags.proxy_buffer) {
621 /* round bufsize up */
622
623 parm->bufsize += (xfer->max_frame_size - 1);
624
625 if (parm->bufsize < xfer->max_frame_size) {
626 /* length wrapped around */
627 parm->err = USB_ERR_INVAL;
628 goto done;
629 }
630 /* subtract remainder */
631
632 parm->bufsize -= (parm->bufsize % xfer->max_frame_size);
633
634 /* add length of USB device request structure, if any */
635
636 if (type == UE_CONTROL) {
637 parm->bufsize += REQ_SIZE; /* SETUP message */
638 }
639 }
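	/*
	 * In other words, a proxy buffer is always a whole number of
	 * maximum sized frames: e.g. a request of 700 bytes with a 512
	 * byte maximum frame size becomes 1024 bytes, plus 8 bytes for
	 * the SETUP header on control endpoints.
	 */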
640 xfer->max_data_length = parm->bufsize;
641
642 /* Setup "n_frlengths" and "n_frbuffers" */
643
644 if (type == UE_ISOCHRONOUS) {
645 n_frlengths = xfer->nframes;
646 n_frbuffers = 1;
647 } else {
648 if (type == UE_CONTROL) {
649 xfer->flags_int.control_xfr = 1;
650 if (xfer->nframes == 0) {
651 if (parm->bufsize <= REQ_SIZE) {
652 /*
653 * there will never be any data
654 * stage
655 */
656 xfer->nframes = 1;
657 } else {
658 xfer->nframes = 2;
659 }
660 }
661 } else {
662 if (xfer->nframes == 0) {
663 xfer->nframes = 1;
664 }
665 }
666
667 n_frlengths = xfer->nframes;
668 n_frbuffers = xfer->nframes;
669 }
670
671 /*
672 * check if we have room for the
673 * USB device request structure:
674 */
675
676 if (type == UE_CONTROL) {
677 if (xfer->max_data_length < REQ_SIZE) {
678 /* length wrapped around or too small bufsize */
679 parm->err = USB_ERR_INVAL;
680 goto done;
681 }
682 xfer->max_data_length -= REQ_SIZE;
683 }
684 /*
685 * Setup "frlengths" and shadow "frlengths" for keeping the
686 * initial frame lengths when a USB transfer is complete. This
687 * information is useful when computing isochronous offsets.
688 */
689 xfer->frlengths = parm->xfer_length_ptr;
690 parm->xfer_length_ptr += 2 * n_frlengths;
691
692 /* setup "frbuffers" */
693 xfer->frbuffers = parm->xfer_page_cache_ptr;
694 parm->xfer_page_cache_ptr += n_frbuffers;
695
696 /* initialize max frame count */
697 xfer->max_frame_count = xfer->nframes;
698
699 /*
700 * check if we need to setup
701 * a local buffer:
702 */
703
704 if (!xfer->flags.ext_buffer) {
705 #if USB_HAVE_BUSDMA
706 struct usb_page_search page_info;
707 struct usb_page_cache *pc;
708
709 if (usbd_transfer_setup_sub_malloc(parm,
710 &pc, parm->bufsize, 1, 1)) {
711 parm->err = USB_ERR_NOMEM;
712 } else if (parm->buf != NULL) {
713 usbd_get_page(pc, 0, &page_info);
714
715 xfer->local_buffer = page_info.buffer;
716
717 usbd_xfer_set_frame_offset(xfer, 0, 0);
718
719 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
720 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
721 }
722 }
723 #else
724 /* align data */
725 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
726
727 if (parm->buf != NULL) {
728 xfer->local_buffer =
729 USB_ADD_BYTES(parm->buf, parm->size[0]);
730
731 usbd_xfer_set_frame_offset(xfer, 0, 0);
732
733 if ((type == UE_CONTROL) && (n_frbuffers > 1)) {
734 usbd_xfer_set_frame_offset(xfer, REQ_SIZE, 1);
735 }
736 }
737 parm->size[0] += parm->bufsize;
738
739 /* align data again */
740 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
741 #endif
742 }
743 /*
744 * Compute maximum buffer size
745 */
746
747 if (parm->bufsize_max < parm->bufsize) {
748 parm->bufsize_max = parm->bufsize;
749 }
750 #if USB_HAVE_BUSDMA
751 if (xfer->flags_int.bdma_enable) {
752 /*
753 * Setup "dma_page_ptr".
754 *
755 * Proof for formula below:
756 *
757 * Assume there are three USB frames having length "a", "b" and
758 * "c". These USB frames will at maximum need "z"
759 * "usb_page" structures. "z" is given by:
760 *
761 * z = ((a / USB_PAGE_SIZE) + 2) + ((b / USB_PAGE_SIZE) + 2) +
762 * ((c / USB_PAGE_SIZE) + 2);
763 *
764 * Constraining "a", "b" and "c" like this:
765 *
766 * (a + b + c) <= parm->bufsize
767 *
768 * We know that:
769 *
770 * z <= ((parm->bufsize / USB_PAGE_SIZE) + (3*2));
771 *
772 * Here is the general formula:
773 */
774 xfer->dma_page_ptr = parm->dma_page_ptr;
775 parm->dma_page_ptr += (2 * n_frbuffers);
776 parm->dma_page_ptr += (parm->bufsize / USB_PAGE_SIZE);
777 }
778 #endif
779 if (zmps) {
780 /* correct maximum data length */
781 xfer->max_data_length = 0;
782 }
783 /* subtract USB frame remainder from "hc_max_frame_size" */
784
785 xfer->max_hc_frame_size =
786 (parm->hc_max_frame_size -
787 (parm->hc_max_frame_size % xfer->max_frame_size));
788
789 if (xfer->max_hc_frame_size == 0) {
790 parm->err = USB_ERR_INVAL;
791 goto done;
792 }
793
794 /* initialize frame buffers */
795
796 if (parm->buf) {
797 for (x = 0; x != n_frbuffers; x++) {
798 xfer->frbuffers[x].tag_parent =
799 &xfer->xroot->dma_parent_tag;
800 #if USB_HAVE_BUSDMA
801 if (xfer->flags_int.bdma_enable &&
802 (parm->bufsize_max > 0)) {
803 if (usb_pc_dmamap_create(
804 xfer->frbuffers + x,
805 parm->bufsize_max)) {
806 parm->err = USB_ERR_NOMEM;
807 goto done;
808 }
809 }
810 #endif
811 }
812 }
813 done:
814 if (parm->err) {
815 /*
816 * Set some dummy values so that we avoid division by zero:
817 */
818 xfer->max_hc_frame_size = 1;
819 xfer->max_frame_size = 1;
820 xfer->max_packet_size = 1;
821 xfer->max_data_length = 0;
822 xfer->nframes = 0;
823 xfer->max_frame_count = 0;
824 }
825 }
826
827 static uint8_t
828 usbd_transfer_setup_has_bulk(const struct usb_config *setup_start,
829 uint16_t n_setup)
830 {
831 uint8_t type;
832
833 while (n_setup--) {
834 type = setup_start[n_setup].type;
835 if ((type == UE_BULK) || (type == UE_BULK_INTR) ||
836 (type == UE_TYPE_ANY))
837 return (1);
838 }
839 return (0);
840 }
841
842 /*------------------------------------------------------------------------*
843 * usbd_transfer_setup - setup an array of USB transfers
844 *
845 * NOTE: You must always call "usbd_transfer_unsetup" after calling
846 * "usbd_transfer_setup" if success was returned.
847 *
848 * The idea is that the USB device driver should pre-allocate all its
849 * transfers by one call to this function.
850 *
851 * Return values:
852 * 0: Success
853 * Else: Failure
854 *------------------------------------------------------------------------*/
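/*
 * A minimal usage sketch follows; the driver softc "sc", its fields
 * and "my_bulk_callback" are hypothetical names, not part of this
 * file:
 *
 *	static const struct usb_config my_config[1] = {
 *		[0] = {
 *			.type = UE_BULK,
 *			.endpoint = UE_ADDR_ANY,
 *			.direction = UE_DIR_IN,
 *			.bufsize = 512,
 *			.callback = &my_bulk_callback,
 *		},
 *	};
 *	uint8_t iface_index = 0;
 *
 *	if (usbd_transfer_setup(udev, &iface_index, sc->sc_xfer,
 *	    my_config, 1, sc, &sc->sc_mtx) != 0)
 *		return (ENXIO);
 *	...
 *	usbd_transfer_unsetup(sc->sc_xfer, 1);
 */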
855 usb_error_t
856 usbd_transfer_setup(struct usb_device *udev,
857 const uint8_t *ifaces, struct usb_xfer **ppxfer,
858 const struct usb_config *setup_start, uint16_t n_setup,
859 void *priv_sc, struct mtx *xfer_mtx)
860 {
861 const struct usb_config *setup_end = setup_start + n_setup;
862 const struct usb_config *setup;
863 struct usb_setup_params *parm;
864 struct usb_endpoint *ep;
865 struct usb_xfer_root *info;
866 struct usb_xfer *xfer;
867 void *buf = NULL;
868 usb_error_t error = USB_ERR_NORMAL_COMPLETION;
869 uint16_t n;
870 uint16_t refcount;
871 uint8_t do_unlock;
872
873 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
874 "usbd_transfer_setup can sleep!");
875
876 /* do some checking first */
877
878 if (n_setup == 0) {
879 DPRINTFN(5, "setup array has zero length!\n");
880 return (USB_ERR_INVAL);
881 }
882 	if (ifaces == NULL) {
883 DPRINTFN(5, "ifaces array is NULL!\n");
884 return (USB_ERR_INVAL);
885 }
886 if (xfer_mtx == NULL) {
887 DPRINTFN(5, "using global lock\n");
888 xfer_mtx = &Giant;
889 }
890
891 /* more sanity checks */
892
893 for (setup = setup_start, n = 0;
894 setup != setup_end; setup++, n++) {
895 if (setup->bufsize == (usb_frlength_t)-1) {
896 error = USB_ERR_BAD_BUFSIZE;
897 DPRINTF("invalid bufsize\n");
898 }
899 if (setup->callback == NULL) {
900 error = USB_ERR_NO_CALLBACK;
901 DPRINTF("no callback\n");
902 }
903 ppxfer[n] = NULL;
904 }
905
906 if (error)
907 return (error);
908
909 /* Protect scratch area */
910 do_unlock = usbd_ctrl_lock(udev);
911
912 refcount = 0;
913 info = NULL;
914
915 parm = &udev->scratch.xfer_setup[0].parm;
916 (void)memset_s(parm, sizeof(*parm), 0, sizeof(*parm));
917
918 parm->udev = udev;
919 parm->speed = usbd_get_speed(udev);
920 parm->hc_max_packet_count = 1;
921
922 if (parm->speed >= USB_SPEED_MAX) {
923 parm->err = USB_ERR_INVAL;
924 goto done;
925 }
926 /* setup all transfers */
927
928 while (1) {
929 if (buf) {
930 /*
931 * Initialize the "usb_xfer_root" structure,
932 * which is common for all our USB transfers.
933 */
934 info = USB_ADD_BYTES(buf, 0);
935
936 info->memory_base = buf;
937 info->memory_size = parm->size[0];
938
939 #if USB_HAVE_BUSDMA
940 info->dma_page_cache_start = USB_ADD_BYTES(buf, parm->size[4]);
941 info->dma_page_cache_end = USB_ADD_BYTES(buf, parm->size[5]);
942 #endif
943 info->xfer_page_cache_start = USB_ADD_BYTES(buf, parm->size[5]);
944 info->xfer_page_cache_end = USB_ADD_BYTES(buf, parm->size[2]);
945
946 cv_init(&info->cv_drain, "WDRAIN");
947
948 info->xfer_mtx = xfer_mtx;
949 #if USB_HAVE_BUSDMA
950 usb_dma_tag_setup(&info->dma_parent_tag,
951 parm->dma_tag_p, udev->bus->dma_parent_tag[0].tag,
952 xfer_mtx, &usb_bdma_done_event, udev->bus->dma_bits, parm->dma_tag_max);
953 #endif
954
955 info->bus = udev->bus;
956 info->udev = udev;
957
958 TAILQ_INIT(&info->done_q.head);
959 info->done_q.command = &usbd_callback_wrapper;
960 #if USB_HAVE_BUSDMA
961 TAILQ_INIT(&info->dma_q.head);
962 info->dma_q.command = &usb_bdma_work_loop;
963 #endif
964 info->done_m[0].hdr.pm_callback = &usb_callback_proc;
965 info->done_m[0].xroot = info;
966 info->done_m[1].hdr.pm_callback = &usb_callback_proc;
967 info->done_m[1].xroot = info;
968
969 /*
970 * In device side mode control endpoint
971 * requests need to run from a separate
972 * context, else there is a chance of
973 * deadlock!
974 */
975 if (setup_start == usb_control_ep_cfg ||
976 setup_start == usb_control_ep_quirk_cfg)
977 info->done_p =
978 USB_BUS_CONTROL_XFER_PROC(udev->bus);
979 else if (xfer_mtx == &Giant)
980 info->done_p =
981 USB_BUS_GIANT_PROC(udev->bus);
982 else if (usbd_transfer_setup_has_bulk(setup_start, n_setup))
983 info->done_p =
984 USB_BUS_NON_GIANT_BULK_PROC(udev->bus);
985 else
986 info->done_p =
987 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus);
988 }
989 /* reset sizes */
990
991 parm->size[0] = 0;
992 parm->buf = buf;
993 parm->size[0] += sizeof(info[0]);
994
995 for (setup = setup_start, n = 0;
996 setup != setup_end; setup++, n++) {
997 /* skip USB transfers without callbacks: */
998 if (setup->callback == NULL) {
999 continue;
1000 }
1001 /* see if there is a matching endpoint */
1002 ep = usbd_get_endpoint(udev,
1003 ifaces[setup->if_index], setup);
1004
1005 /*
1006 * Check that the USB PIPE is valid and that
1007 * the endpoint mode is proper.
1008 *
1009 * Make sure we don't allocate a streams
1010 * transfer when such a combination is not
1011 * valid.
1012 */
1013 if ((ep == NULL) || (ep->methods == NULL) ||
1014 ((ep->ep_mode != USB_EP_MODE_STREAMS) &&
1015 (ep->ep_mode != USB_EP_MODE_DEFAULT)) ||
1016 ((setup->stream_id != 0) &&
1017 ((setup->stream_id >= USB_MAX_EP_STREAMS) ||
1018 (ep->ep_mode != USB_EP_MODE_STREAMS)))) {
1019 if (setup->flags.no_pipe_ok)
1020 continue;
1021 if ((setup->usb_mode != USB_MODE_DUAL) &&
1022 (setup->usb_mode != udev->flags.usb_mode))
1023 continue;
1024 parm->err = USB_ERR_NO_PIPE;
1025 goto done;
1026 }
1027
1028 /* align data properly */
1029 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1030
1031 /* store current setup pointer */
1032 parm->curr_setup = setup;
1033
1034 if (buf) {
1035 /*
1036 * Common initialization of the
1037 * "usb_xfer" structure.
1038 */
1039 xfer = USB_ADD_BYTES(buf, parm->size[0]);
1040 xfer->address = udev->address;
1041 xfer->priv_sc = priv_sc;
1042 xfer->xroot = info;
1043
1044 usb_callout_init_mtx(&xfer->timeout_handle,
1045 &udev->bus->bus_mtx, 0);
1046 } else {
1047 /*
1048 * Setup a dummy xfer, hence we are
1049 * writing to the "usb_xfer"
1050 * structure pointed to by "xfer"
1051 * before we have allocated any
1052 * memory:
1053 */
1054 xfer = &udev->scratch.xfer_setup[0].dummy;
1055 (void)memset_s(xfer, sizeof(*xfer), 0, sizeof(*xfer));
1056 refcount++;
1057 }
1058
1059 /* set transfer endpoint pointer */
1060 xfer->endpoint = ep;
1061
1062 /* set transfer stream ID */
1063 xfer->stream_id = setup->stream_id;
1064
1065 parm->size[0] += sizeof(xfer[0]);
1066 parm->methods = xfer->endpoint->methods;
1067 parm->curr_xfer = xfer;
1068
1069 /*
1070 * Call the Host or Device controller transfer
1071 * setup routine:
1072 */
1073 (udev->bus->methods->xfer_setup) (parm);
1074
1075 /* check for error */
1076 if (parm->err)
1077 goto done;
1078
1079 if (buf) {
1080 /*
1081 * Increment the endpoint refcount. This
1082 * basically prevents setting a new
1083 * configuration and alternate setting
1084 * when USB transfers are in use on
1085 * the given interface. Search the USB
1086 * code for "endpoint->refcount_alloc" if you
1087 * want more information.
1088 */
1089 USB_BUS_LOCK(info->bus);
1090 if (xfer->endpoint->refcount_alloc >= USB_EP_REF_MAX)
1091 parm->err = USB_ERR_INVAL;
1092
1093 xfer->endpoint->refcount_alloc++;
1094
1095 if (xfer->endpoint->refcount_alloc == 0)
1096 panic("usbd_transfer_setup(): Refcount wrapped to zero\n");
1097 USB_BUS_UNLOCK(info->bus);
1098
1099 /*
1100 * Whenever we set ppxfer[] then we
1101 * also need to increment the
1102 * "setup_refcount":
1103 */
1104 info->setup_refcount++;
1105
1106 /*
1107 * Transfer is successfully setup and
1108 * can be used:
1109 */
1110 ppxfer[n] = xfer;
1111 }
1112
1113 /* check for error */
1114 if (parm->err)
1115 goto done;
1116 }
1117
1118 if ((buf != NULL) || (parm->err != 0))
1119 goto done;
1120
1121 /* if no transfers, nothing to do */
1122 if (refcount == 0)
1123 goto done;
1124
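		/*
		 * The offsets recorded in "parm->size[]" below describe
		 * the layout of the single memory block allocated at
		 * the end of this pass: [1] DMA tags, [3] DMA pages,
		 * [4] DMA page caches, [5] transfer page caches (ending
		 * at [2]) and [6] frame lengths, with [0] holding the
		 * running total. On this first pass the parm pointers
		 * merely count elements (offsets from NULL); after the
		 * allocation they are rebased into "buf" and the loop
		 * runs again to do the real setup.
		 */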
1125 /* align data properly */
1126 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1127
1128 /* store offset temporarily */
1129 parm->size[1] = parm->size[0];
1130
1131 /*
1132 * The number of DMA tags required depends on
1133 * the number of endpoints. The current estimate
1134 * for maximum number of DMA tags per endpoint
1135 * is three:
1136 * 1) for loading memory
1137 * 2) for allocating memory
1138 * 3) for fixing memory [UHCI]
1139 */
1140 parm->dma_tag_max += 3 * MIN(n_setup, USB_EP_MAX);
1141
1142 /*
1143 * DMA tags for QH, TD, Data and more.
1144 */
1145 parm->dma_tag_max += 8;
1146
1147 parm->dma_tag_p += parm->dma_tag_max;
1148
1149 parm->size[0] += ((uint8_t *)parm->dma_tag_p) -
1150 ((uint8_t *)0);
1151
1152 /* align data properly */
1153 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1154
1155 /* store offset temporarily */
1156 parm->size[3] = parm->size[0];
1157
1158 parm->size[0] += ((uint8_t *)parm->dma_page_ptr) -
1159 ((uint8_t *)0);
1160
1161 /* align data properly */
1162 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1163
1164 /* store offset temporarily */
1165 parm->size[4] = parm->size[0];
1166
1167 parm->size[0] += ((uint8_t *)parm->dma_page_cache_ptr) -
1168 ((uint8_t *)0);
1169
1170 /* store end offset temporarily */
1171 parm->size[5] = parm->size[0];
1172
1173 parm->size[0] += ((uint8_t *)parm->xfer_page_cache_ptr) -
1174 ((uint8_t *)0);
1175
1176 /* store end offset temporarily */
1177
1178 parm->size[2] = parm->size[0];
1179
1180 /* align data properly */
1181 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1182
1183 parm->size[6] = parm->size[0];
1184
1185 parm->size[0] += ((uint8_t *)parm->xfer_length_ptr) -
1186 ((uint8_t *)0);
1187
1188 /* align data properly */
1189 parm->size[0] += ((-parm->size[0]) & (USB_HOST_ALIGN - 1));
1190
1191 /* allocate zeroed memory */
1192 buf = bsd_malloc(parm->size[0], M_USB, M_WAITOK | M_ZERO);
1193
1194 if (buf == NULL) {
1195 parm->err = USB_ERR_NOMEM;
1196 DPRINTFN(0, "cannot allocate memory block for "
1197 "configuration (%d bytes)\n",
1198 parm->size[0]);
1199 goto done;
1200 }
1201 parm->dma_tag_p = USB_ADD_BYTES(buf, parm->size[1]);
1202 parm->dma_page_ptr = USB_ADD_BYTES(buf, parm->size[3]);
1203 parm->dma_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[4]);
1204 parm->xfer_page_cache_ptr = USB_ADD_BYTES(buf, parm->size[5]);
1205 parm->xfer_length_ptr = USB_ADD_BYTES(buf, parm->size[6]);
1206 }
1207
1208 done:
1209 if (buf) {
1210 if (info->setup_refcount == 0) {
1211 /*
1212 * "usbd_transfer_unsetup_sub" will unlock
1213 * the bus mutex before returning !
1214 */
1215 USB_BUS_LOCK(info->bus);
1216
1217 /* something went wrong */
1218 usbd_transfer_unsetup_sub(info, 0);
1219 }
1220 }
1221
1222 /* check if any errors happened */
1223 if (parm->err)
1224 usbd_transfer_unsetup(ppxfer, n_setup);
1225
1226 error = parm->err;
1227
1228 if (do_unlock)
1229 usbd_ctrl_unlock(udev);
1230
1231 return (error);
1232 }
1233
1234 /*------------------------------------------------------------------------*
1235 * usbd_transfer_unsetup_sub - factored out code
1236 *------------------------------------------------------------------------*/
1237 static void
1238 usbd_transfer_unsetup_sub(struct usb_xfer_root *info, uint8_t needs_delay)
1239 {
1240 #if USB_HAVE_BUSDMA
1241 struct usb_page_cache *pc;
1242 #endif
1243
1244 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
1245
1246 /* wait for any outstanding DMA operations */
1247
1248 if (needs_delay) {
1249 usb_timeout_t temp;
1250 temp = usbd_get_dma_delay(info->udev);
1251 if (temp != 0) {
1252 usb_pause_mtx(&info->bus->bus_mtx,
1253 USB_MS_TO_TICKS(temp));
1254 }
1255 }
1256
1257 /* make sure that our done messages are not queued anywhere */
1258 usb_proc_mwait(info->done_p, &info->done_m[0], &info->done_m[1]);
1259
1260 USB_BUS_UNLOCK(info->bus);
1261
1262 #if USB_HAVE_BUSDMA
1263 /* free DMA'able memory, if any */
1264 pc = info->dma_page_cache_start;
1265 while (pc != info->dma_page_cache_end) {
1266 usb_pc_free_mem(pc);
1267 pc++;
1268 }
1269
1270 /* free DMA maps in all "xfer->frbuffers" */
1271 pc = info->xfer_page_cache_start;
1272 while (pc != info->xfer_page_cache_end) {
1273 usb_pc_dmamap_destroy(pc);
1274 pc++;
1275 }
1276
1277 /* free all DMA tags */
1278 usb_dma_tag_unsetup(&info->dma_parent_tag);
1279 #endif
1280
1281 cv_destroy(&info->cv_drain);
1282
1283 /*
1284 * free the "memory_base" last, hence the "info" structure is
1285 * contained within the "memory_base"!
1286 */
1287 bsd_free(info->memory_base, M_USB);
1288 info->memory_base = NULL;
1289 }
1290
1291 /*------------------------------------------------------------------------*
1292 * usbd_transfer_unsetup - unsetup/free an array of USB transfers
1293 *
1294 * NOTE: All USB transfers in progress will get called back passing
1295 * the error code "USB_ERR_CANCELLED" before this function
1296 * returns.
1297 *------------------------------------------------------------------------*/
1298 void
1299 usbd_transfer_unsetup(struct usb_xfer **pxfer, uint16_t n_setup)
1300 {
1301 struct usb_xfer *xfer;
1302 struct usb_xfer_root *info;
1303 uint8_t needs_delay = 0;
1304
1305 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1306 "usbd_transfer_unsetup can sleep!");
1307
1308 while (n_setup--) {
1309 xfer = pxfer[n_setup];
1310
1311 if (xfer == NULL)
1312 continue;
1313
1314 info = xfer->xroot;
1315
1316 USB_XFER_LOCK(xfer);
1317 USB_BUS_LOCK(info->bus);
1318
1319 /*
1320 * HINT: when you start/stop a transfer, it might be a
1321 * good idea to directly use the "pxfer[]" structure:
1322 *
1323 * usbd_transfer_start(sc->pxfer[0]);
1324 * usbd_transfer_stop(sc->pxfer[0]);
1325 *
1326 		 * That way, if your code has many parts that do not
1327 		 * all run under the same lock, in other words
1328 * "xfer_mtx", the usbd_transfer_start and
1329 * usbd_transfer_stop functions will simply return
1330 * when they detect a NULL pointer argument.
1331 *
1332 * To avoid any races we clear the "pxfer[]" pointer
1333 * while holding the private mutex of the driver:
1334 */
1335 pxfer[n_setup] = NULL;
1336
1337 USB_BUS_UNLOCK(info->bus);
1338 USB_XFER_UNLOCK(xfer);
1339
1340 usbd_transfer_drain(xfer);
1341
1342 #if USB_HAVE_BUSDMA
1343 if (xfer->flags_int.bdma_enable)
1344 needs_delay = 1;
1345 #endif
1346 /*
1347 * NOTE: default endpoint does not have an
1348 * interface, even if endpoint->iface_index == 0
1349 */
1350 USB_BUS_LOCK(info->bus);
1351 xfer->endpoint->refcount_alloc--;
1352 USB_BUS_UNLOCK(info->bus);
1353
1354 usb_callout_drain(&xfer->timeout_handle);
1355
1356 USB_BUS_LOCK(info->bus);
1357
1358 USB_ASSERT(info->setup_refcount != 0, ("Invalid setup "
1359 "reference count\n"));
1360
1361 info->setup_refcount--;
1362
1363 if (info->setup_refcount == 0) {
1364 usbd_transfer_unsetup_sub(info,
1365 needs_delay);
1366 } else {
1367 USB_BUS_UNLOCK(info->bus);
1368 }
1369 }
1370 }
1371
1372 /*------------------------------------------------------------------------*
1373 * usbd_control_transfer_init - factored out code
1374 *
1375 * In USB Device Mode we have to wait for the SETUP packet which
1376  * contains the "struct usb_device_request" structure, before we can
1377 * transfer any data. In USB Host Mode we already have the SETUP
1378 * packet at the moment the USB transfer is started. This leads us to
1379 * having to setup the USB transfer at two different places in
1380 * time. This function just contains factored out control transfer
1381 * initialisation code, so that we don't duplicate the code.
1382 *------------------------------------------------------------------------*/
1383 static void
1384 usbd_control_transfer_init(struct usb_xfer *xfer)
1385 {
1386 struct usb_device_request req;
1387
1388 /* copy out the USB request header */
1389
1390 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1391
1392 /* setup remainder */
1393
1394 xfer->flags_int.control_rem = UGETW(req.wLength);
1395
1396 /* copy direction to endpoint variable */
1397
1398 xfer->endpointno &= ~(UE_DIR_IN | UE_DIR_OUT);
1399 xfer->endpointno |=
1400 (req.bmRequestType & UT_READ) ? UE_DIR_IN : UE_DIR_OUT;
1401 }
1402
1403 /*------------------------------------------------------------------------*
1404 * usbd_control_transfer_did_data
1405 *
1406 * This function returns non-zero if a control endpoint has
1407 * transferred the first DATA packet after the SETUP packet.
1408 * Else it returns zero.
1409 *------------------------------------------------------------------------*/
1410 static uint8_t
1411 usbd_control_transfer_did_data(struct usb_xfer *xfer)
1412 {
1413 struct usb_device_request req;
1414
1415 /* SETUP packet is not yet sent */
1416 if (xfer->flags_int.control_hdr != 0)
1417 return (0);
1418
1419 /* copy out the USB request header */
1420 usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));
1421
1422 /* compare remainder to the initial value */
1423 return (xfer->flags_int.control_rem != UGETW(req.wLength));
1424 }
1425
1426 /*------------------------------------------------------------------------*
1427 * usbd_setup_ctrl_transfer
1428 *
1429 * This function handles initialisation of control transfers. Control
1430  * transfers are special in that they can both transmit
1431 * and receive data.
1432 *
1433 * Return values:
1434 * 0: Success
1435 * Else: Failure
1436 *------------------------------------------------------------------------*/
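/*
 * In short: "control_hdr" is set while the SETUP packet still has to
 * be transferred, "control_rem" tracks how many bytes of the DATA
 * stage remain (initialised from wLength), and "control_act" stays set
 * until it is time to run the STATUS stage.
 */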
1437 static int
1438 usbd_setup_ctrl_transfer(struct usb_xfer *xfer)
1439 {
1440 usb_frlength_t len;
1441
1442 /* Check for control endpoint stall */
1443 if (xfer->flags.stall_pipe && xfer->flags_int.control_act) {
1444 /* the control transfer is no longer active */
1445 xfer->flags_int.control_stall = 1;
1446 xfer->flags_int.control_act = 0;
1447 } else {
1448 /* don't stall control transfer by default */
1449 xfer->flags_int.control_stall = 0;
1450 }
1451
1452 /* Check for invalid number of frames */
1453 if (xfer->nframes > 2) {
1454 /*
1455 * If you need to split a control transfer, you
1456 * have to do one part at a time. Only with
1457 		 * non-control transfers can you do multiple
1458 		 * parts at a time.
1459 */
1460 DPRINTFN(0, "Too many frames: %u\n",
1461 (unsigned int)xfer->nframes);
1462 goto error;
1463 }
1464
1465 /*
1466 * Check if there is a control
1467 * transfer in progress:
1468 */
1469 if (xfer->flags_int.control_act) {
1470 if (xfer->flags_int.control_hdr) {
1471 /* clear send header flag */
1472
1473 xfer->flags_int.control_hdr = 0;
1474
1475 /* setup control transfer */
1476 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1477 usbd_control_transfer_init(xfer);
1478 }
1479 }
1480 /* get data length */
1481
1482 len = xfer->sumlen;
1483
1484 } else {
1485 /* the size of the SETUP structure is hardcoded ! */
1486
1487 if (xfer->frlengths[0] != sizeof(struct usb_device_request)) {
1488 DPRINTFN(0, "Wrong framelength %u != %zu\n",
1489 xfer->frlengths[0], sizeof(struct
1490 usb_device_request));
1491 goto error;
1492 }
1493 /* check USB mode */
1494 if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
1495 /* check number of frames */
1496 if (xfer->nframes != 1) {
1497 /*
1498 * We need to receive the setup
1499 * message first so that we know the
1500 * data direction!
1501 */
1502 DPRINTF("Misconfigured transfer\n");
1503 goto error;
1504 }
1505 /*
1506 * Set a dummy "control_rem" value. This
1507 * variable will be overwritten later by a
1508 * call to "usbd_control_transfer_init()" !
1509 */
1510 xfer->flags_int.control_rem = 0xFFFF;
1511 } else {
1512 /* setup "endpoint" and "control_rem" */
1513
1514 usbd_control_transfer_init(xfer);
1515 }
1516
1517 /* set transfer-header flag */
1518
1519 xfer->flags_int.control_hdr = 1;
1520
1521 /* get data length */
1522
1523 len = (xfer->sumlen - sizeof(struct usb_device_request));
1524 }
1525
1526 /* update did data flag */
1527
1528 xfer->flags_int.control_did_data =
1529 usbd_control_transfer_did_data(xfer);
1530
1531 /* check if there is a length mismatch */
1532
1533 if (len > xfer->flags_int.control_rem) {
1534 DPRINTFN(0, "Length (%d) greater than "
1535 "remaining length (%d)\n", len,
1536 xfer->flags_int.control_rem);
1537 goto error;
1538 }
1539 /* check if we are doing a short transfer */
1540
1541 if (xfer->flags.force_short_xfer) {
1542 xfer->flags_int.control_rem = 0;
1543 } else {
1544 if ((len != xfer->max_data_length) &&
1545 (len != xfer->flags_int.control_rem) &&
1546 (xfer->nframes != 1)) {
1547 DPRINTFN(0, "Short control transfer without "
1548 "force_short_xfer set\n");
1549 goto error;
1550 }
1551 xfer->flags_int.control_rem -= len;
1552 }
1553
1554 /* the status part is executed when "control_act" is 0 */
1555
1556 if ((xfer->flags_int.control_rem > 0) ||
1557 (xfer->flags.manual_status)) {
1558 /* don't execute the STATUS stage yet */
1559 xfer->flags_int.control_act = 1;
1560
1561 /* sanity check */
1562 if ((!xfer->flags_int.control_hdr) &&
1563 (xfer->nframes == 1)) {
1564 /*
1565 * This is not a valid operation!
1566 */
1567 DPRINTFN(0, "Invalid parameter "
1568 "combination\n");
1569 goto error;
1570 }
1571 } else {
1572 /* time to execute the STATUS stage */
1573 xfer->flags_int.control_act = 0;
1574 }
1575 return (0); /* success */
1576
1577 error:
1578 return (1); /* failure */
1579 }
1580
1581 /*------------------------------------------------------------------------*
1582 * usbd_transfer_submit - start USB hardware for the given transfer
1583 *
1584 * This function should only be called from the USB callback.
1585 *------------------------------------------------------------------------*/
1586 void
1587 usbd_transfer_submit(struct usb_xfer *xfer)
1588 {
1589 struct usb_xfer_root *info;
1590 struct usb_bus *bus;
1591 usb_frcount_t x;
1592
1593 info = xfer->xroot;
1594 bus = info->bus;
1595
1596 DPRINTF("xfer=%p, endpoint=%p, nframes=%d, dir=%s\n",
1597 xfer, xfer->endpoint, xfer->nframes, USB_GET_DATA_ISREAD(xfer) ?
1598 "read" : "write");
1599
1600 #ifdef LOSCFG_USB_DEBUG
1601 if (USB_DEBUG_VAR > 0) {
1602 USB_BUS_LOCK(bus);
1603
1604 usb_dump_endpoint(xfer->endpoint);
1605
1606 USB_BUS_UNLOCK(bus);
1607 }
1608 #endif
1609
1610 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1611 USB_BUS_LOCK_ASSERT(bus, MA_NOTOWNED);
1612
1613 /* Only open the USB transfer once! */
1614 if (!xfer->flags_int.open) {
1615 xfer->flags_int.open = 1;
1616
1617 DPRINTF("open\n");
1618
1619 USB_BUS_LOCK(bus);
1620 (xfer->endpoint->methods->open) (xfer);
1621 USB_BUS_UNLOCK(bus);
1622 }
1623 /* set "transferring" flag */
1624 xfer->flags_int.transferring = 1;
1625
1626 #if USB_HAVE_POWERD
1627 /* increment power reference */
1628 usbd_transfer_power_ref(xfer, 1);
1629 #endif
1630 /*
1631 * Check if the transfer is waiting on a queue, most
1632 * frequently the "done_q":
1633 */
1634 if (xfer->wait_queue) {
1635 USB_BUS_LOCK(bus);
1636 usbd_transfer_dequeue(xfer);
1637 USB_BUS_UNLOCK(bus);
1638 }
1639 /* clear "did_dma_delay" flag */
1640 xfer->flags_int.did_dma_delay = 0;
1641
1642 /* clear "did_close" flag */
1643 xfer->flags_int.did_close = 0;
1644
1645 #if USB_HAVE_BUSDMA
1646 /* clear "bdma_setup" flag */
1647 xfer->flags_int.bdma_setup = 0;
1648 #endif
1649 /* by default we cannot cancel any USB transfer immediately */
1650 xfer->flags_int.can_cancel_immed = 0;
1651
1652 /* clear lengths and frame counts by default */
1653 xfer->sumlen = 0;
1654 xfer->actlen = 0;
1655 xfer->aframes = 0;
1656
1657 /* clear any previous errors */
1658 xfer->error = USB_ERR_NORMAL_COMPLETION;
1659
1660 /* Check if the device is still alive */
1661 if (info->udev->state < USB_STATE_POWERED) {
1662 USB_BUS_LOCK(bus);
1663 /*
1664 * Must return cancelled error code else
1665 * device drivers can hang.
1666 */
1667 usbd_transfer_done(xfer, USB_ERR_CANCELLED);
1668 USB_BUS_UNLOCK(bus);
1669 return;
1670 }
1671
1672 /* sanity check */
1673 if (xfer->nframes == 0) {
1674 if (xfer->flags.stall_pipe) {
1675 /*
1676 * Special case - want to stall without transferring
1677 * any data:
1678 */
1679 DPRINTF("xfer=%p nframes=0: stall "
1680 "or clear stall!\n", xfer);
1681 USB_BUS_LOCK(bus);
1682 xfer->flags_int.can_cancel_immed = 1;
1683 /* start the transfer */
1684 usb_command_wrapper(&xfer->endpoint->
1685 endpoint_q[xfer->stream_id], xfer);
1686 USB_BUS_UNLOCK(bus);
1687 return;
1688 }
1689 USB_BUS_LOCK(bus);
1690 usbd_transfer_done(xfer, USB_ERR_INVAL);
1691 USB_BUS_UNLOCK(bus);
1692 return;
1693 }
1694 /* compute some variables */
1695
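	/*
	 * The loop below keeps a shadow copy of the requested frame
	 * lengths in the upper half of "frlengths[]" (see the shadow
	 * setup in usbd_transfer_setup_sub()), while the lower half may
	 * be overwritten with actual lengths as the transfer completes.
	 * It also sums up and overflow-checks the total length.
	 */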
1696 for (x = 0; x != xfer->nframes; x++) {
1697 		/* make a copy of the frlengths[] */
1698 xfer->frlengths[x + xfer->max_frame_count] = xfer->frlengths[x];
1699 /* compute total transfer length */
1700 xfer->sumlen += xfer->frlengths[x];
1701 if (xfer->sumlen < xfer->frlengths[x]) {
1702 /* length wrapped around */
1703 USB_BUS_LOCK(bus);
1704 usbd_transfer_done(xfer, USB_ERR_INVAL);
1705 USB_BUS_UNLOCK(bus);
1706 return;
1707 }
1708 }
1709
1710 /* clear some internal flags */
1711
1712 xfer->flags_int.short_xfer_ok = 0;
1713 xfer->flags_int.short_frames_ok = 0;
1714
1715 /* check if this is a control transfer */
1716
1717 if (xfer->flags_int.control_xfr) {
1718 if (usbd_setup_ctrl_transfer(xfer)) {
1719 USB_BUS_LOCK(bus);
1720 usbd_transfer_done(xfer, USB_ERR_STALLED);
1721 USB_BUS_UNLOCK(bus);
1722 return;
1723 }
1724 }
1725 /*
1726 * Setup filtered version of some transfer flags,
1727 * in case of data read direction
1728 */
1729 if (USB_GET_DATA_ISREAD(xfer)) {
1730 if (xfer->flags.short_frames_ok) {
1731 xfer->flags_int.short_xfer_ok = 1;
1732 xfer->flags_int.short_frames_ok = 1;
1733 } else if (xfer->flags.short_xfer_ok) {
1734 xfer->flags_int.short_xfer_ok = 1;
1735
1736 /* check for control transfer */
1737 if (xfer->flags_int.control_xfr) {
1738 /*
1739 * 1) Control transfers do not support
1740 * reception of multiple short USB
1741 * frames in host mode and device side
1742 * mode, with exception of:
1743 *
1744 * 2) Due to sometimes buggy device
1745 * side firmware we need to do a
1746 * STATUS stage in case of short
1747 * control transfers in USB host mode.
1748 * The STATUS stage then becomes the
1749 * "alt_next" to the DATA stage.
1750 */
1751 xfer->flags_int.short_frames_ok = 1;
1752 }
1753 }
1754 }
1755 /*
1756 * Check if BUS-DMA support is enabled and try to load virtual
1757 * buffers into DMA, if any:
1758 */
1759 #if USB_HAVE_BUSDMA
1760 if (xfer->flags_int.bdma_enable) {
1761 /* insert the USB transfer last in the BUS-DMA queue */
1762 usb_command_wrapper(&xfer->xroot->dma_q, xfer);
1763 return;
1764 }
1765 #endif
1766 /*
1767 * Enter the USB transfer into the Host Controller or
1768 * Device Controller schedule:
1769 */
1770 usbd_pipe_enter(xfer);
1771 }
1772
1773 /*------------------------------------------------------------------------*
1774 * usbd_pipe_enter - factored out code
1775 *------------------------------------------------------------------------*/
1776 void
1777 usbd_pipe_enter(struct usb_xfer *xfer)
1778 {
1779 struct usb_endpoint *ep;
1780
1781 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1782
1783 USB_BUS_LOCK(xfer->xroot->bus);
1784
1785 ep = xfer->endpoint;
1786
1787 DPRINTF("enter\n");
1788
1789 /* the transfer can now be cancelled */
1790 xfer->flags_int.can_cancel_immed = 1;
1791
1792 /* enter the transfer */
1793 (ep->methods->enter) (xfer);
1794
1795 /* check for transfer error */
1796 if (xfer->error) {
1797 /* some error has happened */
1798 usbd_transfer_done(xfer, (usb_error_t)0);
1799 USB_BUS_UNLOCK(xfer->xroot->bus);
1800 return;
1801 }
1802
1803 /* start the transfer */
1804 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], xfer);
1805 USB_BUS_UNLOCK(xfer->xroot->bus);
1806 }
1807
1808 /*------------------------------------------------------------------------*
1809  *	usbd_transfer_start - start a USB transfer
1810 *
1811 * NOTE: Calling this function more than one time will only
1812 * result in a single transfer start, until the USB transfer
1813 * completes.
1814 *------------------------------------------------------------------------*/
1815 void
1816 usbd_transfer_start(struct usb_xfer *xfer)
1817 {
1818 if (xfer == NULL) {
1819 /* transfer is gone */
1820 return;
1821 }
1822 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1823
1824 /* mark the USB transfer started */
1825
1826 if (!xfer->flags_int.started) {
1827 /* lock the BUS lock to avoid races updating flags_int */
1828 USB_BUS_LOCK(xfer->xroot->bus);
1829 xfer->flags_int.started = 1;
1830 USB_BUS_UNLOCK(xfer->xroot->bus);
1831 }
1832 /* check if the USB transfer callback is already transferring */
1833
1834 if (xfer->flags_int.transferring) {
1835 return;
1836 }
1837 USB_BUS_LOCK(xfer->xroot->bus);
1838 /* call the USB transfer callback */
1839 usbd_callback_ss_done_defer(xfer);
1840 USB_BUS_UNLOCK(xfer->xroot->bus);
1841 }
1842
1843 /*------------------------------------------------------------------------*
1844  *	usbd_transfer_stop - stop a USB transfer
1845 *
1846 * NOTE: Calling this function more than one time will only
1847 * result in a single transfer stop.
1848  * NOTE: When this function returns it is not safe to free or
1849 * reuse any DMA buffers. See "usbd_transfer_drain()".
1850 *------------------------------------------------------------------------*/
1851 void
1852 usbd_transfer_stop(struct usb_xfer *xfer)
1853 {
1854 struct usb_endpoint *ep;
1855
1856 if (xfer == NULL) {
1857 /* transfer is gone */
1858 return;
1859 }
1860 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1861
1862 /* check if the USB transfer was ever opened */
1863
1864 if (!xfer->flags_int.open) {
1865 if (xfer->flags_int.started) {
1866 /* nothing to do except clearing the "started" flag */
1867 /* lock the BUS lock to avoid races updating flags_int */
1868 USB_BUS_LOCK(xfer->xroot->bus);
1869 xfer->flags_int.started = 0;
1870 USB_BUS_UNLOCK(xfer->xroot->bus);
1871 }
1872 return;
1873 }
1874 /* try to stop the current USB transfer */
1875
1876 USB_BUS_LOCK(xfer->xroot->bus);
1877 /* override any previous error */
1878 xfer->error = USB_ERR_CANCELLED;
1879
1880 /*
1881 * Clear "open" and "started" when both private and USB lock
1882 * is locked so that we don't get a race updating "flags_int"
1883 */
1884 xfer->flags_int.open = 0;
1885 xfer->flags_int.started = 0;
1886
1887 /*
1888 * Check if we can cancel the USB transfer immediately.
1889 */
1890 if (xfer->flags_int.transferring) {
1891 if (xfer->flags_int.can_cancel_immed &&
1892 (!xfer->flags_int.did_close)) {
1893 DPRINTF("close\n");
1894 /*
1895 			 * The following will lead to a USB_ERR_CANCELLED
1896 * error code being passed to the USB callback.
1897 */
1898 (xfer->endpoint->methods->close) (xfer);
1899 /* only close once */
1900 xfer->flags_int.did_close = 1;
1901 } else {
1902 /* need to wait for the next done callback */
1903 }
1904 } else {
1905 DPRINTF("close\n");
1906
1907 /* close here and now */
1908 (xfer->endpoint->methods->close) (xfer);
1909
1910 /*
1911 * Any additional DMA delay is done by
1912 * "usbd_transfer_unsetup()".
1913 */
1914
1915 /*
1916 * Special case. Check if we need to restart a blocked
1917 * endpoint.
1918 */
1919 ep = xfer->endpoint;
1920
1921 /*
1922 * If the current USB transfer is completing we need
1923 * to start the next one:
1924 */
1925 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
1926 usb_command_wrapper(
1927 &ep->endpoint_q[xfer->stream_id], NULL);
1928 }
1929 }
1930
1931 USB_BUS_UNLOCK(xfer->xroot->bus);
1932 }
1933
1934 /*------------------------------------------------------------------------*
1935 * usbd_transfer_pending
1936 *
1937  * This function will check whether a USB transfer is pending, which is a
1938 * little bit complicated!
1939 * Return values:
1940 * 0: Not pending
1941 * 1: Pending: The USB transfer will receive a callback in the future.
1942 *------------------------------------------------------------------------*/
1943 uint8_t
1944 usbd_transfer_pending(struct usb_xfer *xfer)
1945 {
1946 struct usb_xfer_root *info;
1947 struct usb_xfer_queue *pq;
1948
1949 if (xfer == NULL) {
1950 /* transfer is gone */
1951 return (0);
1952 }
1953 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
1954
1955 if (xfer->flags_int.transferring) {
1956 /* trivial case */
1957 return (1);
1958 }
1959 USB_BUS_LOCK(xfer->xroot->bus);
1960 if (xfer->wait_queue) {
1961 /* we are waiting on a queue somewhere */
1962 USB_BUS_UNLOCK(xfer->xroot->bus);
1963 return (1);
1964 }
1965 info = xfer->xroot;
1966 pq = &info->done_q;
1967
1968 if (pq->curr == xfer) {
1969 /* we are currently scheduled for callback */
1970 USB_BUS_UNLOCK(xfer->xroot->bus);
1971 return (1);
1972 }
1973 /* we are not pending */
1974 USB_BUS_UNLOCK(xfer->xroot->bus);
1975 return (0);
1976 }
1977
1978 /*------------------------------------------------------------------------*
1979 * usbd_transfer_drain
1980 *
1981 * This function will stop the USB transfer and wait for any
1982 * additional BUS-DMA and HW-DMA operations to complete. Buffers that
1983 * are loaded into DMA can safely be freed or reused after this
1984 * function has returned.
1985 *------------------------------------------------------------------------*/
1986 void
1987 usbd_transfer_drain(struct usb_xfer *xfer)
1988 {
1989 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1990 "usbd_transfer_drain can sleep!");
1991
1992 if (xfer == NULL) {
1993 /* transfer is gone */
1994 return;
1995 }
1996 if (xfer->xroot->xfer_mtx != &Giant) {
1997 USB_XFER_LOCK_ASSERT(xfer, MA_NOTOWNED);
1998 }
1999 USB_XFER_LOCK(xfer);
2000
2001 usbd_transfer_stop(xfer);
2002
2003 while (usbd_transfer_pending(xfer) ||
2004 xfer->flags_int.doing_callback) {
2005 /*
2006 * It is allowed that the callback can drop its
2007 * transfer mutex. In that case checking only
2008 * "usbd_transfer_pending()" is not enough to tell if
2009 * the USB transfer is fully drained. We also need to
2010 * check the internal "doing_callback" flag.
2011 */
2012 xfer->flags_int.draining = 1;
2013
2014 /*
2015 * Wait until the current outstanding USB
2016 * transfer is complete !
2017 */
2018 (void)cv_wait(&xfer->xroot->cv_drain, xfer->xroot->xfer_mtx);
2019 }
2020 USB_XFER_UNLOCK(xfer);
2021 }
2022
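/*
 * Illustrative usage sketch (hypothetical driver names): before a
 * buffer that has been loaded into DMA is freed, the transfer should
 * be drained with the transfer mutex not held:
 *
 *	usbd_transfer_drain(sc->sc_xfer[MY_XFER_INDEX]);
 *	// sc->sc_buffer may now be freed or reused safely
 *
 * "sc", "sc_xfer", "sc_buffer" and "MY_XFER_INDEX" are assumed names
 * belonging to the calling driver.
 */
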
2023 struct usb_page_cache *
2024 usbd_xfer_get_frame(struct usb_xfer *xfer, usb_frcount_t frindex)
2025 {
2026 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2027
2028 return (&xfer->frbuffers[frindex]);
2029 }
2030
2031 void *
2032 usbd_xfer_get_frame_buffer(struct usb_xfer *xfer, usb_frcount_t frindex)
2033 {
2034 struct usb_page_search page_info;
2035
2036 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2037
2038 usbd_get_page(&xfer->frbuffers[frindex], 0, &page_info);
2039 return (page_info.buffer);
2040 }
2041
2042 /*------------------------------------------------------------------------*
2043 * usbd_xfer_get_fps_shift
2044 *
2045 * The following function is only useful for isochronous transfers. It
2046 * returns how many times the frame execution rate has been shifted
2047 * down.
2048 *
2049 * Return value:
2050 * Success: 0..3
2051 * Failure: 0
2052 *------------------------------------------------------------------------*/
2053 uint8_t
2054 usbd_xfer_get_fps_shift(struct usb_xfer *xfer)
2055 {
2056 return (xfer->fps_shift);
2057 }
2058
2059 usb_frlength_t
2060 usbd_xfer_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex)
2061 {
2062 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2063
2064 return (xfer->frlengths[frindex]);
2065 }
2066
2067 /*------------------------------------------------------------------------*
2068 * usbd_xfer_set_frame_data
2069 *
2070 * This function sets the pointer of the buffer that should be
2071 * loaded directly into DMA for the given USB frame. Passing "ptr"
2072 * equal to NULL while the corresponding "frlength" is greater
2073 * than zero gives undefined results!
2074 *------------------------------------------------------------------------*/
2075 void
2076 usbd_xfer_set_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2077 void *ptr, usb_frlength_t len)
2078 {
2079 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2080
2081 /* set virtual address to load and length */
2082 xfer->frbuffers[frindex].buffer = ptr;
2083 usbd_xfer_set_frame_len(xfer, frindex, len);
2084 }
2085
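/*
 * Illustrative usage sketch: in the USB_ST_SETUP case of a callback, a
 * transfer configured with the "ext_buffer" flag can point frame zero
 * directly at a driver buffer before submitting. "my_buf" and "my_len"
 * are assumed driver variables:
 *
 *	usbd_xfer_set_frame_data(xfer, 0, my_buf, my_len);
 *	usbd_xfer_set_frames(xfer, 1);
 *	usbd_transfer_submit(xfer);
 */
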
2086 void
2087 usbd_xfer_frame_data(struct usb_xfer *xfer, usb_frcount_t frindex,
2088 void **ptr, int *len)
2089 {
2090 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2091
2092 if (ptr != NULL)
2093 *ptr = xfer->frbuffers[frindex].buffer;
2094 if (len != NULL)
2095 *len = xfer->frlengths[frindex];
2096 }
2097
2098 /*------------------------------------------------------------------------*
2099 * usbd_xfer_old_frame_length
2100 *
2101 * This function returns the framelength of the given frame at the
2102 * time the transfer was submitted. This function can be used to
2103 * compute the starting data pointer of the next isochronous frame
2104 * when an isochronous transfer has completed.
2105 *------------------------------------------------------------------------*/
2106 usb_frlength_t
2107 usbd_xfer_old_frame_length(struct usb_xfer *xfer, usb_frcount_t frindex)
2108 {
2109 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2110
2111 return (xfer->frlengths[frindex + xfer->max_frame_count]);
2112 }
2113
2114 void
2115 usbd_xfer_status(struct usb_xfer *xfer, int *actlen, int *sumlen, int *aframes,
2116 int *nframes)
2117 {
2118 if (actlen != NULL)
2119 *actlen = xfer->actlen;
2120 if (sumlen != NULL)
2121 *sumlen = xfer->sumlen;
2122 if (aframes != NULL)
2123 *aframes = xfer->aframes;
2124 if (nframes != NULL)
2125 *nframes = xfer->nframes;
2126 }
2127
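/*
 * Illustrative usage sketch: reading the completed byte count in the
 * USB_ST_TRANSFERRED case of a callback:
 *
 *	int actlen;
 *
 *	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
 *	// "actlen" now holds the number of bytes actually transferred
 */
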
2128 /*------------------------------------------------------------------------*
2129 * usbd_xfer_set_frame_offset
2130 *
2131 * This function sets the frame data buffer offset relative to the beginning
2132 * of the USB DMA buffer allocated for this USB transfer.
2133 *------------------------------------------------------------------------*/
2134 void
2135 usbd_xfer_set_frame_offset(struct usb_xfer *xfer, usb_frlength_t offset,
2136 usb_frcount_t frindex)
2137 {
2138 KASSERT(!xfer->flags.ext_buffer, ("Cannot offset data frame "
2139 "when the USB buffer is external\n"));
2140 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2141
2142 /* set virtual address to load */
2143 xfer->frbuffers[frindex].buffer =
2144 USB_ADD_BYTES(xfer->local_buffer, offset);
2145 }
2146
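/*
 * Illustrative usage sketch: a control transfer typically keeps the
 * 8-byte request structure in frame zero and places the data stage
 * right after it in the same local buffer:
 *
 *	usbd_xfer_set_frame_offset(xfer, sizeof(struct usb_device_request), 1);
 */
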
2147 void
2148 usbd_xfer_set_interval(struct usb_xfer *xfer, int i)
2149 {
2150 xfer->interval = i;
2151 }
2152
2153 void
2154 usbd_xfer_set_timeout(struct usb_xfer *xfer, int t)
2155 {
2156 xfer->timeout = t;
2157 }
2158
2159 void
2160 usbd_xfer_set_frames(struct usb_xfer *xfer, usb_frcount_t n)
2161 {
2162 xfer->nframes = n;
2163 }
2164
2165 usb_frcount_t
2166 usbd_xfer_max_frames(struct usb_xfer *xfer)
2167 {
2168 return (xfer->max_frame_count);
2169 }
2170
2171 usb_frlength_t
2172 usbd_xfer_max_len(struct usb_xfer *xfer)
2173 {
2174 return (xfer->max_data_length);
2175 }
2176
2177 usb_frlength_t
2178 usbd_xfer_max_framelen(struct usb_xfer *xfer)
2179 {
2180 return (xfer->max_frame_size);
2181 }
2182
2183 void
2184 usbd_xfer_set_frame_len(struct usb_xfer *xfer, usb_frcount_t frindex,
2185 usb_frlength_t len)
2186 {
2187 KASSERT(frindex < xfer->max_frame_count, ("frame index overflow"));
2188
2189 xfer->frlengths[frindex] = len;
2190 }
2191
2192 /*------------------------------------------------------------------------*
2193 * usb_callback_proc - factored out code
2194 *
2195 * This function performs USB callbacks.
2196 *------------------------------------------------------------------------*/
2197 static void
2198 usb_callback_proc(struct usb_proc_msg *_pm)
2199 {
2200 struct usb_done_msg *pm = (void *)_pm;
2201 struct usb_xfer_root *info = pm->xroot;
2202
2203 /* Change locking order */
2204 USB_BUS_UNLOCK(info->bus);
2205
2206 /*
2207 * We exploit the fact that the mutex is the same for all
2208 * callbacks that will be called from this thread:
2209 */
2210 USB_MTX_LOCK(info->xfer_mtx);
2211 USB_BUS_LOCK(info->bus);
2212
2213 /* Continue where we lost track */
2214 usb_command_wrapper(&info->done_q,
2215 info->done_q.curr);
2216
2217 USB_MTX_UNLOCK(info->xfer_mtx);
2218 }
2219
2220 /*------------------------------------------------------------------------*
2221 * usbd_callback_ss_done_defer
2222 *
2223 * This function will defer the start, stop and done callback to the
2224 * correct thread.
2225 *------------------------------------------------------------------------*/
2226 static void
2227 usbd_callback_ss_done_defer(struct usb_xfer *xfer)
2228 {
2229 struct usb_xfer_root *info = xfer->xroot;
2230 struct usb_xfer_queue *pq = &info->done_q;
2231
2232 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2233
2234 if (pq->curr != xfer) {
2235 usbd_transfer_enqueue(pq, xfer);
2236 }
2237 if (!pq->recurse_1) {
2238 /*
2239 * We have to postpone the callback due to the fact we
2240 * will have a Lock Order Reversal, LOR, if we try to
2241 * proceed !
2242 */
2243 if (usb_proc_msignal(info->done_p,
2244 &info->done_m[0], &info->done_m[1])) {
2245 /* ignore */
2246 }
2247 } else {
2248 /* clear second recurse flag */
2249 pq->recurse_2 = 0;
2250 }
2251 return;
2252
2253 }
2254
2255 /*------------------------------------------------------------------------*
2256 * usbd_callback_wrapper
2257 *
2258 * This is a wrapper for USB callbacks. This wrapper does some
2259 * auto-magic things like figuring out if we can call the callback
2260 * directly from the current context or if we need to wakeup the
2261 * interrupt process.
2262 *------------------------------------------------------------------------*/
2263 static void
2264 usbd_callback_wrapper(struct usb_xfer_queue *pq)
2265 {
2266 struct usb_xfer *xfer = pq->curr;
2267 struct usb_xfer_root *info = xfer->xroot;
2268
2269 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2270 if (!mtx_owned(info->xfer_mtx) && !SCHEDULER_STOPPED()) {
2271 /*
2272 * Cases that end up here:
2273 *
2274 * 5) HW interrupt done callback or other source.
2275 */
2276 DPRINTFN(3, "case 5\n");
2277
2278 /*
2279 * We have to postpone the callback due to the fact we
2280 * will have a Lock Order Reversal, LOR, if we try to
2281 * proceed!
2282 */
2283 if (usb_proc_msignal(info->done_p,
2284 &info->done_m[0], &info->done_m[1])) {
2285 /* ignore */
2286 }
2287 return;
2288 }
2289 /*
2290 * Cases that end up here:
2291 *
2292 * 1) We are starting a transfer
2293 * 2) We are prematurely calling back a transfer
2294 * 3) We are stopping a transfer
2295 * 4) We are doing an ordinary callback
2296 */
2297 DPRINTFN(3, "case 1-4\n");
2298 /* get next USB transfer in the queue */
2299 info->done_q.curr = NULL;
2300
2301 /* set flag in case of drain */
2302 xfer->flags_int.doing_callback = 1;
2303
2304 USB_BUS_UNLOCK(info->bus);
2305 USB_BUS_LOCK_ASSERT(info->bus, MA_NOTOWNED);
2306
2307 /* set correct USB state for callback */
2308 if (!xfer->flags_int.transferring) {
2309 xfer->usb_state = USB_ST_SETUP;
2310 if (!xfer->flags_int.started) {
2311 /* we got stopped before we even got started */
2312 USB_BUS_LOCK(info->bus);
2313 goto done;
2314 }
2315 } else {
2316 if (usbd_callback_wrapper_sub(xfer)) {
2317 /* the callback has been deferred */
2318 USB_BUS_LOCK(info->bus);
2319 goto done;
2320 }
2321 #if USB_HAVE_POWERD
2322 /* decrement power reference */
2323 usbd_transfer_power_ref(xfer, -1);
2324 #endif
2325 xfer->flags_int.transferring = 0;
2326
2327 if (xfer->error) {
2328 xfer->usb_state = USB_ST_ERROR;
2329 } else {
2330 /* set transferred state */
2331 xfer->usb_state = USB_ST_TRANSFERRED;
2332 #if USB_HAVE_BUSDMA
2333 /* sync DMA memory, if any */
2334 if (xfer->flags_int.bdma_enable &&
2335 (!xfer->flags_int.bdma_no_post_sync)) {
2336 usb_bdma_post_sync(xfer);
2337 }
2338 #endif
2339 }
2340 }
2341
2342 #if USB_HAVE_PF
2343 if (xfer->usb_state != USB_ST_SETUP) {
2344 USB_BUS_LOCK(info->bus);
2345 usbpf_xfertap(xfer, USBPF_XFERTAP_DONE);
2346 USB_BUS_UNLOCK(info->bus);
2347 }
2348 #endif
2349 /* call processing routine */
2350 (xfer->callback) (xfer, xfer->error);
2351
2352 /* pickup the USB mutex again */
2353 USB_BUS_LOCK(info->bus);
2354
2355 /*
2356 * Check if we got started after we got cancelled, but
2357 * before we managed to do the callback.
2358 */
2359 if ((!xfer->flags_int.open) &&
2360 (xfer->flags_int.started) &&
2361 (xfer->usb_state == USB_ST_ERROR)) {
2362 /* clear flag in case of drain */
2363 xfer->flags_int.doing_callback = 0;
2364 /* try to loop, but not recursively */
2365 usb_command_wrapper(&info->done_q, xfer);
2366 return;
2367 }
2368
2369 done:
2370 /* clear flag in case of drain */
2371 xfer->flags_int.doing_callback = 0;
2372
2373 /*
2374 * Check if we are draining.
2375 */
2376 if (xfer->flags_int.draining &&
2377 (!xfer->flags_int.transferring)) {
2378 /* "usbd_transfer_drain()" is waiting for end of transfer */
2379 xfer->flags_int.draining = 0;
2380 (void)cv_broadcast(&info->cv_drain);
2381 }
2382
2383 /* do the next callback, if any */
2384 usb_command_wrapper(&info->done_q,
2385 info->done_q.curr);
2386 }
2387
2388 /*------------------------------------------------------------------------*
2389 * usb_dma_delay_done_cb
2390 *
2391 * This function is called when the DMA delay has been executed, and
2392 * will make sure that the callback is called to complete the USB
2393 * transfer. This code path is usually only used when there is an USB
2394 * error like USB_ERR_CANCELLED.
2395 *------------------------------------------------------------------------*/
2396 void
2397 usb_dma_delay_done_cb(struct usb_xfer *xfer)
2398 {
2399 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2400
2401 DPRINTFN(3, "Completed %p\n", xfer);
2402
2403 /* queue callback for execution, again */
2404 usbd_transfer_done(xfer, (usb_error_t)0);
2405 }
2406
2407 /*------------------------------------------------------------------------*
2408 * usbd_transfer_dequeue
2409 *
2410 * - This function is used to remove an USB transfer from a USB
2411 * transfer queue.
2412 *
2413 * - This function can be called multiple times in a row.
2414 *------------------------------------------------------------------------*/
2415 void
2416 usbd_transfer_dequeue(struct usb_xfer *xfer)
2417 {
2418 struct usb_xfer_queue *pq;
2419 uint32_t int_save;
2420
2421 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
2422 pq = xfer->wait_queue;
2423 if (pq != NULL) {
2424 TAILQ_REMOVE(&pq->head, xfer, wait_entry);
2425 xfer->wait_queue = NULL;
2426 }
2427 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
2428 }
2429
2430 /*------------------------------------------------------------------------*
2431 * usbd_transfer_enqueue
2432 *
2433 * - This function is used to insert an USB transfer into a USB
2434 * transfer queue.
2435 *
2436 * - This function can be called multiple times in a row.
2437 *------------------------------------------------------------------------*/
2438 void
2439 usbd_transfer_enqueue(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2440 {
2441 uint32_t int_save;
2442 /*
2443 * Insert the USB transfer into the queue, if it is not
2444 * already on a USB transfer queue:
2445 */
2446 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
2447 if (xfer->wait_queue == NULL) {
2448 xfer->wait_queue = pq;
2449 TAILQ_INSERT_TAIL(&pq->head, xfer, wait_entry);
2450 }
2451 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
2452 }
2453
2454 /*------------------------------------------------------------------------*
2455 * usbd_transfer_done
2456 *
2457 * - This function is used to remove an USB transfer from the busdma,
2458 * pipe or interrupt queue.
2459 *
2460 * - This function is used to queue the USB transfer on the done
2461 * queue.
2462 *
2463 * - This function is used to stop any USB transfer timeouts.
2464 *------------------------------------------------------------------------*/
2465 void
2466 usbd_transfer_done(struct usb_xfer *xfer, usb_error_t error)
2467 {
2468 struct usb_xfer_root *info = xfer->xroot;
2469
2470 USB_BUS_LOCK_ASSERT(info->bus, MA_OWNED);
2471
2472 DPRINTF("err=%s\n", usbd_errstr(error));
2473
2474 /*
2475 * If we are not transferring then just return.
2476 * This can happen during transfer cancel.
2477 */
2478 if (!xfer->flags_int.transferring) {
2479 DPRINTF("not transferring\n");
2480 /* end of control transfer, if any */
2481 xfer->flags_int.control_act = 0;
2482 return;
2483 }
2484 /* only set transfer error, if not already set */
2485 if (xfer->error == USB_ERR_NORMAL_COMPLETION)
2486 xfer->error = error;
2487
2488 /* stop any callouts */
2489 usb_callout_stop(&xfer->timeout_handle);
2490
2491 /*
2492 * If we are waiting on a queue, just remove the USB transfer
2493 * from the queue, if any. We should have the required locks
2494 * locked to do the remove when this function is called.
2495 */
2496 usbd_transfer_dequeue(xfer);
2497
2498 #if USB_HAVE_BUSDMA
2499 if (mtx_owned(info->xfer_mtx)) {
2500 struct usb_xfer_queue *pq;
2501
2502 /*
2503 * If the private USB lock is not locked, then we assume
2504 * that the BUS-DMA load stage has been passed:
2505 */
2506 pq = &info->dma_q;
2507
2508 if (pq->curr == xfer) {
2509 /* start the next BUS-DMA load, if any */
2510 usb_command_wrapper(pq, NULL);
2511 }
2512 }
2513 #endif
2514 /* keep some statistics */
2515 if (xfer->error == USB_ERR_CANCELLED) {
2516 info->udev->stats_cancelled.uds_requests
2517 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2518 } else if (xfer->error != USB_ERR_NORMAL_COMPLETION) {
2519 info->udev->stats_err.uds_requests
2520 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2521 } else {
2522 info->udev->stats_ok.uds_requests
2523 [xfer->endpoint->edesc->bmAttributes & UE_XFERTYPE]++;
2524 }
2525
2526 /* call the USB transfer callback */
2527 usbd_callback_ss_done_defer(xfer);
2528 }
2529
2530 /*------------------------------------------------------------------------*
2531 * usbd_transfer_start_cb
2532 *
2533 * This function is called to start the USB transfer when
2534 * "xfer->interval" is greater than zero, and and the endpoint type is
2535 * BULK or CONTROL.
2536 *------------------------------------------------------------------------*/
2537 static void
2538 usbd_transfer_start_cb(void *arg)
2539 {
2540 struct usb_xfer *xfer = arg;
2541 struct usb_endpoint *ep = xfer->endpoint;
2542
2543 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2544
2545 DPRINTF("start\n");
2546
2547 #if USB_HAVE_PF
2548 usbpf_xfertap(xfer, USBPF_XFERTAP_SUBMIT);
2549 #endif
2550
2551 /* the transfer can now be cancelled */
2552 xfer->flags_int.can_cancel_immed = 1;
2553
2554 /* start USB transfer, if no error */
2555 if (xfer->error == 0)
2556 (ep->methods->start) (xfer);
2557
2558 /* check for transfer error */
2559 if (xfer->error) {
2560 /* some error has happened */
2561 usbd_transfer_done(xfer, (usb_error_t)0);
2562 }
2563 }
2564
2565 /*------------------------------------------------------------------------*
2566 * usbd_xfer_set_stall
2567 *
2568 * This function is used to set the stall flag outside the
2569 * callback. This function is NULL safe.
2570 *------------------------------------------------------------------------*/
2571 void
2572 usbd_xfer_set_stall(struct usb_xfer *xfer)
2573 {
2574 if (xfer == NULL) {
2575 /* tearing down */
2576 return;
2577 }
2578 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2579
2580 /* avoid any races by locking the USB mutex */
2581 USB_BUS_LOCK(xfer->xroot->bus);
2582 xfer->flags.stall_pipe = 1;
2583 USB_BUS_UNLOCK(xfer->xroot->bus);
2584 }
2585
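/*
 * Illustrative usage sketch: a common pattern in driver code holding
 * the transfer mutex, for example the error case of a transfer
 * callback, is to request a stall clear and retry from the setup
 * state, unless the transfer was cancelled:
 *
 *	default:		// Error
 *		if (error != USB_ERR_CANCELLED) {
 *			// try to clear stall first
 *			usbd_xfer_set_stall(xfer);
 *			goto tr_setup;
 *		}
 *		break;
 */
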
2586 int
2587 usbd_xfer_is_stalled(struct usb_xfer *xfer)
2588 {
2589 return (xfer->endpoint->is_stalled);
2590 }
2591
2592 /*------------------------------------------------------------------------*
2593 * usbd_transfer_clear_stall
2594 *
2595 * This function is used to clear the stall flag outside the
2596 * callback. This function is NULL safe.
2597 *------------------------------------------------------------------------*/
2598 void
2599 usbd_transfer_clear_stall(struct usb_xfer *xfer)
2600 {
2601 if (xfer == NULL) {
2602 /* tearing down */
2603 return;
2604 }
2605 USB_XFER_LOCK_ASSERT(xfer, MA_OWNED);
2606
2607 /* avoid any races by locking the USB mutex */
2608 USB_BUS_LOCK(xfer->xroot->bus);
2609
2610 xfer->flags.stall_pipe = 0;
2611
2612 USB_BUS_UNLOCK(xfer->xroot->bus);
2613 }
2614
2615 /*------------------------------------------------------------------------*
2616 * usbd_pipe_start
2617 *
2618 * This function is used to add an USB transfer to the pipe transfer list.
2619 *------------------------------------------------------------------------*/
2620 void
2621 usbd_pipe_start(struct usb_xfer_queue *pq)
2622 {
2623 struct usb_endpoint *ep;
2624 struct usb_xfer *xfer;
2625 uint8_t type;
2626
2627 xfer = pq->curr;
2628 ep = xfer->endpoint;
2629
2630 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2631
2632 /*
2633 * If the endpoint is already stalled we do nothing !
2634 */
2635 if (ep->is_stalled) {
2636 DPRINTFN(1, "is_stalled\n");
2637 return;
2638 }
2639 /*
2640 * Check if we are supposed to stall the endpoint:
2641 */
2642 if (xfer->flags.stall_pipe) {
2643 struct usb_device *udev;
2644 struct usb_xfer_root *info;
2645
2646 /* clear stall command */
2647 xfer->flags.stall_pipe = 0;
2648
2649 /* get pointer to USB device */
2650 info = xfer->xroot;
2651 udev = info->udev;
2652
2653 /*
2654 * Only stall BULK and INTERRUPT endpoints.
2655 */
2656 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2657 if ((type == UE_BULK) ||
2658 (type == UE_INTERRUPT)) {
2659 uint8_t did_stall;
2660
2661 did_stall = 1;
2662
2663 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2664 (udev->bus->methods->set_stall) (
2665 udev, ep, &did_stall);
2666 } else if (udev->ctrl_xfer[1]) {
2667 info = udev->ctrl_xfer[1]->xroot;
2668 (void)usb_proc_msignal(
2669 USB_BUS_CS_PROC(info->bus),
2670 &udev->cs_msg[0], &udev->cs_msg[1]);
2671 } else {
2672 /* should not happen */
2673 DPRINTFN(0, "No stall handler\n");
2674 }
2675 /*
2676 * Check if we should stall. Some USB hardware
2677 * handles set- and clear-stall in hardware.
2678 */
2679 if (did_stall) {
2680 /*
2681 * The transfer will be continued when
2682 * the clear-stall control endpoint
2683 * message is received.
2684 */
2685 ep->is_stalled = 1;
2686 DPRINTFN(1, "did_stall\n");
2687 return;
2688 }
2689 } else if (type == UE_ISOCHRONOUS) {
2690 /*
2691 * Make sure any FIFO overflow or other FIFO
2692 * error conditions go away by resetting the
2693 * endpoint FIFO through the clear stall
2694 * method.
2695 */
2696 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
2697 (udev->bus->methods->clear_stall) (udev, ep);
2698 }
2699 }
2700 }
2701 /* Set or clear stall complete - special case */
2702 if (xfer->nframes == 0) {
2703 /* we are complete */
2704 xfer->aframes = 0;
2705 usbd_transfer_done(xfer, (usb_error_t)0);
2706 DPRINTFN(1, "nframes == 0\n");
2707 return;
2708 }
2709 /*
2710 * Handled cases:
2711 *
2712 * 1) Start the first transfer queued.
2713 *
2714 * 2) Re-start the current USB transfer.
2715 */
2716 /*
2717 * Check if there should be any
2718 * pre transfer start delay:
2719 */
2720 if (xfer->interval > 0) {
2721 type = (ep->edesc->bmAttributes & UE_XFERTYPE);
2722 if ((type == UE_BULK) ||
2723 (type == UE_CONTROL)) {
2724 usbd_transfer_timeout_ms(xfer,
2725 &usbd_transfer_start_cb,
2726 xfer->interval);
2727 DPRINTFN(1, "usbd_transfer_timeout_ms \n");
2728 return;
2729 }
2730 }
2731
2732 usbd_transfer_start_cb((void *)xfer);
2733 }
2734
2735 /*------------------------------------------------------------------------*
2736 * usbd_transfer_timeout_ms
2737 *
2738 * This function is used to setup a timeout on the given USB
2739 * transfer. If the timeout has been deferred the callback given by
2740 * "cb" will get called after "ms" milliseconds.
2741 *------------------------------------------------------------------------*/
2742 void
2743 usbd_transfer_timeout_ms(struct usb_xfer *xfer,
2744 void (*cb) (void *arg), usb_timeout_t ms)
2745 {
2746 USB_BUS_LOCK_ASSERT(xfer->xroot->bus, MA_OWNED);
2747
2748 /* defer delay */
2749 usb_callout_reset(&xfer->timeout_handle,
2750 USB_MS_TO_TICKS(ms) + USB_CALLOUT_ZERO_TICKS, cb, xfer);
2751 }
2752
2753 /*------------------------------------------------------------------------*
2754 * usbd_callback_wrapper_sub
2755 *
2756 * - This function will update variables in an USB transfer after
2757 * the USB transfer is complete.
2758 *
2759 * - This function is used to start the next USB transfer on the
2760 * ep transfer queue, if any.
2761 *
2762 * NOTE: In some special cases the USB transfer will not be removed from
2763 * the pipe queue, but remain first. To enforce USB transfer removal call
2764 * this function passing the error code "USB_ERR_CANCELLED".
2765 *
2766 * Return values:
2767 * 0: Success.
2768 * Else: The callback has been deferred.
2769 *------------------------------------------------------------------------*/
2770 static uint8_t
2771 usbd_callback_wrapper_sub(struct usb_xfer *xfer)
2772 {
2773 struct usb_endpoint *ep;
2774 struct usb_bus *bus;
2775 usb_frcount_t x;
2776
2777 bus = xfer->xroot->bus;
2778
2779 if ((!xfer->flags_int.open) &&
2780 (!xfer->flags_int.did_close)) {
2781 DPRINTF("close\n");
2782 USB_BUS_LOCK(bus);
2783 (xfer->endpoint->methods->close) (xfer);
2784 USB_BUS_UNLOCK(bus);
2785 /* only close once */
2786 xfer->flags_int.did_close = 1;
2787 return (1); /* wait for new callback */
2788 }
2789 /*
2790 * If we have a non-hardware induced error we
2791 * need to do the DMA delay!
2792 */
2793 if ((xfer->error != 0) && (!xfer->flags_int.did_dma_delay) &&
2794 ((xfer->error == USB_ERR_CANCELLED) ||
2795 (xfer->error == USB_ERR_TIMEOUT) ||
2796 (bus->methods->start_dma_delay != NULL))) {
2797 usb_timeout_t temp;
2798
2799 /* only delay once */
2800 xfer->flags_int.did_dma_delay = 1;
2801
2802 /* we can not cancel this delay */
2803 xfer->flags_int.can_cancel_immed = 0;
2804
2805 temp = usbd_get_dma_delay(xfer->xroot->udev);
2806
2807 DPRINTFN(3, "DMA delay, %u ms, "
2808 "on %p\n", temp, xfer);
2809
2810 if (temp != 0) {
2811 USB_BUS_LOCK(bus);
2812 /*
2813 * Some hardware solutions have dedicated
2814 * events when it is safe to free DMA'ed
2815 * memory. For the other hardware platforms we
2816 * use a static delay.
2817 */
2818 if (bus->methods->start_dma_delay != NULL) {
2819 (bus->methods->start_dma_delay) (xfer);
2820 } else {
2821 usbd_transfer_timeout_ms(xfer,
2822 (void (*)(void *))&usb_dma_delay_done_cb,
2823 temp);
2824 }
2825 USB_BUS_UNLOCK(bus);
2826 return (1); /* wait for new callback */
2827 }
2828 }
2829 /* check actual number of frames */
2830 if (xfer->aframes > xfer->nframes) {
2831 if (xfer->error == 0) {
2832 panic("%s: actual number of frames, %d, is "
2833 "greater than initial number of frames, %d\n",
2834 __FUNCTION__, xfer->aframes, xfer->nframes);
2835 } else {
2836 /* just set some valid value */
2837 xfer->aframes = xfer->nframes;
2838 }
2839 }
2840 /* compute actual length */
2841 xfer->actlen = 0;
2842
2843 for (x = 0; x != xfer->aframes; x++) {
2844 xfer->actlen += xfer->frlengths[x];
2845 }
2846
2847 /*
2848 * Frames that were not transferred get zero actual length in
2849 * case the USB device driver does not check the actual number
2850 * of frames transferred, "xfer->aframes":
2851 */
2852 for (; x < xfer->nframes; x++) {
2853 usbd_xfer_set_frame_len(xfer, x, 0);
2854 }
2855
2856 /* check actual length */
2857 if (xfer->actlen > xfer->sumlen) {
2858 if (xfer->error == 0) {
2859 panic("%s: actual length, %d, is greater than "
2860 "initial length, %d\n",
2861 __FUNCTION__, xfer->actlen, xfer->sumlen);
2862 } else {
2863 /* just set some valid value */
2864 xfer->actlen = xfer->sumlen;
2865 }
2866 }
2867 DPRINTFN(1, "xfer=%p endpoint=%p sts=%d alen=%d, slen=%d, afrm=%d, nfrm=%d\n",
2868 xfer, xfer->endpoint, xfer->error, xfer->actlen, xfer->sumlen,
2869 xfer->aframes, xfer->nframes);
2870
2871 if (xfer->error) {
2872 /* end of control transfer, if any */
2873 xfer->flags_int.control_act = 0;
2874
2875 #if USB_HAVE_TT_SUPPORT
2876 switch (xfer->error) {
2877 case USB_ERR_NORMAL_COMPLETION:
2878 case USB_ERR_SHORT_XFER:
2879 case USB_ERR_STALLED:
2880 case USB_ERR_CANCELLED:
2881 /* nothing to do */
2882 break;
2883 default:
2884 /* try to reset the TT, if any */
2885 USB_BUS_LOCK(bus);
2886 uhub_tt_buffer_reset_async_locked(xfer->xroot->udev, xfer->endpoint);
2887 USB_BUS_UNLOCK(bus);
2888 break;
2889 }
2890 #endif
2891 /* check if we should block the execution queue */
2892 if ((xfer->error != USB_ERR_CANCELLED) &&
2893 (xfer->flags.pipe_bof)) {
2894 DPRINTFN(2, "xfer=%p: Block On Failure "
2895 "on endpoint=%p\n", xfer, xfer->endpoint);
2896 goto done;
2897 }
2898 } else {
2899 /* check for short transfers */
2900 if (xfer->actlen < xfer->sumlen) {
2901 /* end of control transfer, if any */
2902 xfer->flags_int.control_act = 0;
2903
2904 if (!xfer->flags_int.short_xfer_ok) {
2905 xfer->error = USB_ERR_SHORT_XFER;
2906 if (xfer->flags.pipe_bof) {
2907 DPRINTFN(2, "xfer=%p: Block On Failure on "
2908 "Short Transfer on endpoint %p.\n",
2909 xfer, xfer->endpoint);
2910 goto done;
2911 }
2912 }
2913 } else {
2914 /*
2915 * Check if we are in the middle of a
2916 * control transfer:
2917 */
2918 if (xfer->flags_int.control_act) {
2919 DPRINTFN(5, "xfer=%p: Control transfer "
2920 "active on endpoint=%p\n", xfer, xfer->endpoint);
2921 goto done;
2922 }
2923 }
2924 }
2925
2926 ep = xfer->endpoint;
2927
2928 /*
2929 * If the current USB transfer is completing we need to start the
2930 * next one:
2931 */
2932 USB_BUS_LOCK(bus);
2933 if (ep->endpoint_q[xfer->stream_id].curr == xfer) {
2934 usb_command_wrapper(&ep->endpoint_q[xfer->stream_id], NULL);
2935
2936 if ((ep->endpoint_q[xfer->stream_id].curr != NULL) ||
2937 (TAILQ_FIRST(&ep->endpoint_q[xfer->stream_id].head) != NULL)) {
2938 /* there is another USB transfer waiting */
2939 } else {
2940 /* this is the last USB transfer */
2941 /* clear isochronous sync flag */
2942 xfer->endpoint->is_synced = 0;
2943 }
2944 }
2945 USB_BUS_UNLOCK(bus);
2946 done:
2947 return (0);
2948 }
2949
2950 /*------------------------------------------------------------------------*
2951 * usb_command_wrapper
2952 *
2953 * This function is used to execute commands non-recursively on an USB
2954 * transfer.
2955 *------------------------------------------------------------------------*/
2956 void
2957 usb_command_wrapper(struct usb_xfer_queue *pq, struct usb_xfer *xfer)
2958 {
2959 uint32_t int_save;
2960
2961 if (xfer) {
2962 /*
2963 * If the transfer is not already processing,
2964 * queue it!
2965 */
2966 if (pq->curr != xfer) {
2967 usbd_transfer_enqueue(pq, xfer);
2968 if (pq->curr != NULL) {
2969 /* something is already processing */
2970 DPRINTFN(6, "busy %p\n", pq->curr);
2971 return;
2972 }
2973 }
2974 } else {
2975 /* Get next element in queue */
2976 pq->curr = NULL;
2977 }
2978
2979 if (!pq->recurse_1) {
2980 do {
2981
2982 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
2983 /* set both recurse flags */
2984 pq->recurse_1 = 1;
2985 pq->recurse_2 = 1;
2986
2987 if (pq->curr == NULL) {
2988 xfer = TAILQ_FIRST(&pq->head);
2989 if (xfer) {
2990 TAILQ_REMOVE(&pq->head, xfer,
2991 wait_entry);
2992 xfer->wait_queue = NULL;
2993 pq->curr = xfer;
2994 } else {
2995 /* clear first recurse flag */
2996 pq->recurse_1 = 0;
2997 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
2998 break;
2999 }
3000 }
3001 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3002
3003 DPRINTFN(6, "cb %p (enter)\n", pq->curr);
3004 (pq->command) (pq);
3005 DPRINTFN(6, "cb %p (leave)\n", pq->curr);
3006
3007 LOS_SpinLockSave(&g_usb_wait_queue_spinlock, &int_save);
3008 if (pq->recurse_2) {
3009 /* clear first recurse flag */
3010 pq->recurse_1 = 0;
3011 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3012 break;
3013 }
3014 LOS_SpinUnlockRestore(&g_usb_wait_queue_spinlock, int_save);
3015 } while (1);
3016
3017 } else {
3018 /* clear second recurse flag */
3019 pq->recurse_2 = 0;
3020 }
3021 }
3022
3023 /*------------------------------------------------------------------------*
3024 * usbd_ctrl_transfer_setup
3025 *
3026 * This function is used to setup the default USB control endpoint
3027 * transfer.
3028 *------------------------------------------------------------------------*/
3029 void
3030 usbd_ctrl_transfer_setup(struct usb_device *udev)
3031 {
3032 struct usb_xfer *xfer;
3033 uint8_t no_resetup;
3034 uint8_t iface_index;
3035
3036 /* check for root HUB */
3037 if (udev->parent_hub == NULL)
3038 return;
3039 repeat:
3040
3041 xfer = udev->ctrl_xfer[0];
3042 if (xfer) {
3043 USB_XFER_LOCK(xfer);
3044 no_resetup =
3045 ((xfer->address == udev->address) &&
3046 (udev->ctrl_ep_desc.wMaxPacketSize[0] ==
3047 udev->ddesc.bMaxPacketSize));
3048 if (udev->flags.usb_mode == USB_MODE_DEVICE) {
3049 if (no_resetup) {
3050 /*
3051 * NOTE: checking "xfer->address" and
3052 * starting the USB transfer must be
3053 * atomic!
3054 */
3055 usbd_transfer_start(xfer);
3056 }
3057 }
3058 USB_XFER_UNLOCK(xfer);
3059 } else {
3060 no_resetup = 0;
3061 }
3062
3063 if (no_resetup) {
3064 /*
3065 * All parameters are exactly the same as before.
3066 * Just return.
3067 */
3068 return;
3069 }
3070 /*
3071 * Update wMaxPacketSize for the default control endpoint:
3072 */
3073 udev->ctrl_ep_desc.wMaxPacketSize[0] =
3074 udev->ddesc.bMaxPacketSize;
3075
3076 /*
3077 * Unsetup any existing USB transfer:
3078 */
3079 usbd_transfer_unsetup(udev->ctrl_xfer, USB_CTRL_XFER_MAX);
3080
3081 /*
3082 * Reset clear stall error counter.
3083 */
3084 udev->clear_stall_errors = 0;
3085
3086 /*
3087 * Try to setup a new USB transfer for the
3088 * default control endpoint:
3089 */
3090 iface_index = 0;
3091 if (usbd_transfer_setup(udev, &iface_index,
3092 udev->ctrl_xfer, udev->bus->control_ep_quirk ?
3093 usb_control_ep_quirk_cfg : usb_control_ep_cfg, USB_CTRL_XFER_MAX, NULL,
3094 &udev->device_mtx)) {
3095 DPRINTFN(0, "could not setup default "
3096 "USB transfer\n");
3097 } else {
3098 goto repeat;
3099 }
3100 }
3101
3102 /*------------------------------------------------------------------------*
3103 * usbd_clear_stall_locked - factored out code
3104 *
3105 * NOTE: the intention of this function is not to reset the hardware
3106 * data toggle.
3107 *------------------------------------------------------------------------*/
3108 void
3109 usbd_clear_stall_locked(struct usb_device *udev, struct usb_endpoint *ep)
3110 {
3111 USB_BUS_LOCK_ASSERT(udev->bus, MA_OWNED);
3112
3113 /* check that we have a valid case */
3114 if ((udev->flags.usb_mode == USB_MODE_HOST) &&
3115 (udev->parent_hub != NULL) &&
3116 (udev->bus->methods->clear_stall != NULL) &&
3117 (ep->methods != NULL)) {
3118 (udev->bus->methods->clear_stall) (udev, ep);
3119 }
3120 }
3121
3122 /*------------------------------------------------------------------------*
3123 * usbd_clear_data_toggle - factored out code
3124 *
3125 * NOTE: the intention of this function is not to reset the hardware
3126 * data toggle on the USB device side.
3127 *------------------------------------------------------------------------*/
3128 void
3129 usbd_clear_data_toggle(struct usb_device *udev, struct usb_endpoint *ep)
3130 {
3131 DPRINTFN(5, "udev=%p endpoint=%p\n", udev, ep);
3132
3133 USB_BUS_LOCK(udev->bus);
3134 ep->toggle_next = 0;
3135 /* some hardware needs a callback to clear the data toggle */
3136 usbd_clear_stall_locked(udev, ep);
3137 USB_BUS_UNLOCK(udev->bus);
3138 }
3139
3140 /*------------------------------------------------------------------------*
3141 * usbd_clear_stall_callback - factored out clear stall callback
3142 *
3143 * Input parameters:
3144 * xfer1: Clear Stall Control Transfer
3145 * xfer2: Stalled USB Transfer
3146 *
3147 * This function is NULL safe.
3148 *
3149 * Return values:
3150 * 0: In progress
3151 * Else: Finished
3152 *
3153 * Clear stall config example:
3154 *
3155 * static const struct usb_config my_clearstall = {
3156 * .type = UE_CONTROL,
3157 * .endpoint = 0,
3158 * .direction = UE_DIR_ANY,
3159 * .interval = 50, //50 milliseconds
3160 * .bufsize = sizeof(struct usb_device_request),
3161 * .timeout = 1000, //1.000 seconds
3162 * .callback = &my_clear_stall_callback, // **
3163 * .usb_mode = USB_MODE_HOST,
3164 * };
3165 *
3166 * ** "my_clear_stall_callback" calls "usbd_clear_stall_callback"
3167 * passing the correct parameters.
3168 *------------------------------------------------------------------------*/
3169 uint8_t
3170 usbd_clear_stall_callback(struct usb_xfer *xfer1,
3171 struct usb_xfer *xfer2)
3172 {
3173 struct usb_device_request req;
3174
3175 if (xfer2 == NULL) {
3176 /* looks like we are tearing down */
3177 DPRINTF("NULL input parameter\n");
3178 return (0);
3179 }
3180 USB_XFER_LOCK_ASSERT(xfer1, MA_OWNED);
3181 USB_XFER_LOCK_ASSERT(xfer2, MA_OWNED);
3182
3183 switch (USB_GET_STATE(xfer1)) {
3184 case USB_ST_SETUP:
3185
3186 /*
3187 * pre-clear the data toggle to DATA0 ("umass.c" and
3188 * "ata-usb.c" depends on this)
3189 */
3190
3191 usbd_clear_data_toggle(xfer2->xroot->udev, xfer2->endpoint);
3192
3193 /* setup a clear-stall packet */
3194
3195 req.bmRequestType = UT_WRITE_ENDPOINT;
3196 req.bRequest = UR_CLEAR_FEATURE;
3197 USETW(req.wValue, UF_ENDPOINT_HALT);
3198 req.wIndex[0] = xfer2->endpoint->edesc->bEndpointAddress;
3199 req.wIndex[1] = 0;
3200 USETW(req.wLength, 0);
3201
3202 /*
3203 * "usbd_transfer_setup_sub()" will ensure that
3204 * we have sufficient room in the buffer for
3205 * the request structure!
3206 */
3207
3208 /* copy in the transfer */
3209
3210 usbd_copy_in(xfer1->frbuffers, 0, &req, sizeof(req));
3211
3212 /* set length */
3213 xfer1->frlengths[0] = sizeof(req);
3214 xfer1->nframes = 1;
3215
3216 usbd_transfer_submit(xfer1);
3217 return (0);
3218
3219 case USB_ST_TRANSFERRED:
3220 break;
3221
3222 default: /* Error */
3223 if (xfer1->error == USB_ERR_CANCELLED) {
3224 return (0);
3225 }
3226 break;
3227 }
3228 return (1); /* Clear Stall Finished */
3229 }
3230
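/*
 * Illustrative usage sketch (hypothetical driver names): the
 * "my_clear_stall_callback" referenced in the example above would
 * typically forward to "usbd_clear_stall_callback()" and restart the
 * stalled transfer once the clear-stall request has finished:
 *
 *	static void
 *	my_clear_stall_callback(struct usb_xfer *xfer1, usb_error_t error)
 *	{
 *		struct my_softc *sc = usbd_xfer_softc(xfer1);
 *
 *		if (usbd_clear_stall_callback(xfer1, sc->sc_stalled_xfer)) {
 *			// finished - restart the previously stalled transfer
 *			usbd_transfer_start(sc->sc_stalled_xfer);
 *		}
 *	}
 *
 * "struct my_softc" and "sc_stalled_xfer" are assumed driver fields.
 */
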
3231 /*------------------------------------------------------------------------*
3232 * usbd_transfer_poll
3233 *
3234 * The following function gets called from the USB keyboard driver and
3235 * UMASS when the system has panicked.
3236 *
3237 * NOTE: It is currently not possible to resume normal operation on
3238 * the USB controller which has been polled, due to clearing of the
3239 * "up_dsleep" and "up_msleep" flags.
3240 *------------------------------------------------------------------------*/
3241 void
3242 usbd_transfer_poll(struct usb_xfer **ppxfer, uint16_t max)
3243 {
3244 struct usb_xfer *xfer;
3245 struct usb_xfer_root *xroot;
3246 struct usb_device *udev;
3247 struct usb_proc_msg *pm;
3248 uint16_t n;
3249 uint16_t drop_bus;
3250 uint16_t drop_xfer;
3251
3252 for (n = 0; n != max; n++) {
3253 /* Extra checks to avoid panic */
3254 xfer = ppxfer[n];
3255 if (xfer == NULL)
3256 continue; /* no USB transfer */
3257 xroot = xfer->xroot;
3258 if (xroot == NULL)
3259 continue; /* no USB root */
3260 udev = xroot->udev;
3261 if (udev == NULL)
3262 continue; /* no USB device */
3263 if (udev->bus == NULL)
3264 continue; /* no BUS structure */
3265 if (udev->bus->methods == NULL)
3266 continue; /* no BUS methods */
3267 if (udev->bus->methods->xfer_poll == NULL)
3268 continue; /* no poll method */
3269
3270 /* make sure that the BUS mutex is not locked */
3271 drop_bus = 0;
3272 while (mtx_owned(&xroot->udev->bus->bus_mtx) && !SCHEDULER_STOPPED()) {
3273 mtx_unlock(&xroot->udev->bus->bus_mtx);
3274 drop_bus++;
3275 }
3276
3277 /* make sure that the transfer mutex is not locked */
3278 drop_xfer = 0;
3279 while (mtx_owned(xroot->xfer_mtx) && !SCHEDULER_STOPPED()) {
3280 mtx_unlock(xroot->xfer_mtx);
3281 drop_xfer++;
3282 }
3283
3284 /* Make sure cv_signal() and cv_broadcast() is not called */
3285 USB_BUS_CONTROL_XFER_PROC(udev->bus)->up_msleep = 0;
3286 USB_BUS_EXPLORE_PROC(udev->bus)->up_msleep = 0;
3287 USB_BUS_GIANT_PROC(udev->bus)->up_msleep = 0;
3288 USB_BUS_NON_GIANT_ISOC_PROC(udev->bus)->up_msleep = 0;
3289 USB_BUS_NON_GIANT_BULK_PROC(udev->bus)->up_msleep = 0;
3290
3291 /* poll USB hardware */
3292 (udev->bus->methods->xfer_poll) (udev->bus);
3293
3294 USB_BUS_LOCK(xroot->bus);
3295
3296 /* check for clear stall */
3297 if (udev->ctrl_xfer[1] != NULL) {
3298 /* poll clear stall start */
3299 pm = &udev->cs_msg[0].hdr;
3300 (pm->pm_callback) (pm);
3301 /* poll clear stall done thread */
3302 pm = &udev->ctrl_xfer[1]->
3303 xroot->done_m[0].hdr;
3304 (pm->pm_callback) (pm);
3305 }
3306
3307 /* poll done thread */
3308 pm = &xroot->done_m[0].hdr;
3309 (pm->pm_callback) (pm);
3310
3311 USB_BUS_UNLOCK(xroot->bus);
3312
3313 /* restore transfer mutex */
3314 while (drop_xfer--)
3315 mtx_lock(xroot->xfer_mtx);
3316
3317 /* restore BUS mutex */
3318 while (drop_bus--)
3319 mtx_lock(&xroot->udev->bus->bus_mtx);
3320 }
3321 }
3322
3323 static void
3324 usbd_get_std_packet_size(struct usb_std_packet_size *ptr,
3325 uint8_t type, enum usb_dev_speed speed)
3326 {
3327 static const uint16_t intr_range_max[USB_SPEED_MAX] = {
3328 [USB_SPEED_LOW] = 8,
3329 [USB_SPEED_FULL] = 64,
3330 [USB_SPEED_HIGH] = 1024,
3331 [USB_SPEED_VARIABLE] = 1024,
3332 [USB_SPEED_SUPER] = 1024,
3333 };
3334
3335 static const uint16_t isoc_range_max[USB_SPEED_MAX] = {
3336 [USB_SPEED_LOW] = 0, /* invalid */
3337 [USB_SPEED_FULL] = 1023,
3338 [USB_SPEED_HIGH] = 1024,
3339 [USB_SPEED_VARIABLE] = 3584,
3340 [USB_SPEED_SUPER] = 1024,
3341 };
3342
3343 static const uint16_t control_min[USB_SPEED_MAX] = {
3344 [USB_SPEED_LOW] = 8,
3345 [USB_SPEED_FULL] = 8,
3346 [USB_SPEED_HIGH] = 64,
3347 [USB_SPEED_VARIABLE] = 512,
3348 [USB_SPEED_SUPER] = 512,
3349 };
3350
3351 static const uint16_t bulk_min[USB_SPEED_MAX] = {
3352 [USB_SPEED_LOW] = 8,
3353 [USB_SPEED_FULL] = 8,
3354 [USB_SPEED_HIGH] = 512,
3355 [USB_SPEED_VARIABLE] = 512,
3356 [USB_SPEED_SUPER] = 1024,
3357 };
3358
3359 uint16_t temp;
3360
3361 (void)memset_s(ptr, sizeof(*ptr), 0, sizeof(*ptr));
3362
3363 switch (type) {
3364 case UE_INTERRUPT:
3365 ptr->range.max = intr_range_max[speed];
3366 break;
3367 case UE_ISOCHRONOUS:
3368 ptr->range.max = isoc_range_max[speed];
3369 break;
3370 default:
3371 if (type == UE_BULK)
3372 temp = bulk_min[speed];
3373 else /* UE_CONTROL */
3374 temp = control_min[speed];
3375
3376 /* default is fixed */
3377 ptr->fixed[0] = temp;
3378 ptr->fixed[1] = temp;
3379 ptr->fixed[2] = temp;
3380 ptr->fixed[3] = temp;
3381
3382 if (speed == USB_SPEED_FULL) {
3383 /* multiple sizes */
3384 ptr->fixed[1] = 16;
3385 ptr->fixed[2] = 32;
3386 ptr->fixed[3] = 64;
3387 }
3388 if ((speed == USB_SPEED_VARIABLE) &&
3389 (type == UE_BULK)) {
3390 /* multiple sizes */
3391 ptr->fixed[2] = 1024;
3392 ptr->fixed[3] = 1536;
3393 }
3394 break;
3395 }
3396 }
3397
3398 void *
3399 usbd_xfer_softc(struct usb_xfer *xfer)
3400 {
3401 return (xfer->priv_sc);
3402 }
3403
3404 void *
3405 usbd_xfer_get_priv(struct usb_xfer *xfer)
3406 {
3407 return (xfer->priv_fifo);
3408 }
3409
3410 void
3411 usbd_xfer_set_priv(struct usb_xfer *xfer, void *ptr)
3412 {
3413 xfer->priv_fifo = ptr;
3414 }
3415
3416 uint8_t
3417 usbd_xfer_state(struct usb_xfer *xfer)
3418 {
3419 return (xfer->usb_state);
3420 }
3421
3422 void
3423 usbd_xfer_set_flag(struct usb_xfer *xfer, int flag)
3424 {
3425 switch (flag) {
3426 case USB_FORCE_SHORT_XFER:
3427 xfer->flags.force_short_xfer = 1;
3428 break;
3429 case USB_SHORT_XFER_OK:
3430 xfer->flags.short_xfer_ok = 1;
3431 break;
3432 case USB_MULTI_SHORT_OK:
3433 xfer->flags.short_frames_ok = 1;
3434 break;
3435 case USB_MANUAL_STATUS:
3436 xfer->flags.manual_status = 1;
3437 break;
3438 }
3439 }
3440
3441 void
3442 usbd_xfer_clr_flag(struct usb_xfer *xfer, int flag)
3443 {
3444 switch (flag) {
3445 case USB_FORCE_SHORT_XFER:
3446 xfer->flags.force_short_xfer = 0;
3447 break;
3448 case USB_SHORT_XFER_OK:
3449 xfer->flags.short_xfer_ok = 0;
3450 break;
3451 case USB_MULTI_SHORT_OK:
3452 xfer->flags.short_frames_ok = 0;
3453 break;
3454 case USB_MANUAL_STATUS:
3455 xfer->flags.manual_status = 0;
3456 break;
3457 }
3458 }
3459
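/*
 * Illustrative usage sketch: allowing short reads on a bulk IN
 * transfer at runtime, instead of setting "short_xfer_ok" in the
 * static configuration:
 *
 *	usbd_xfer_set_flag(xfer, USB_SHORT_XFER_OK);
 */
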
3460 /*
3461 * The following function returns the time, in milliseconds, at which
3462 * the isochronous transfer was completed by the hardware. The
3463 * returned value wraps around 65536 milliseconds.
3464 */
3465 uint16_t
3466 usbd_xfer_get_timestamp(struct usb_xfer *xfer)
3467 {
3468 return (xfer->isoc_time_complete);
3469 }
3470
3471 /*
3472 * The following function returns non-zero if the max packet size
3473 * field was clamped to a valid value. Else it returns zero.
3474 */
3475 uint8_t
3476 usbd_xfer_maxp_was_clamped(struct usb_xfer *xfer)
3477 {
3478 return (xfer->flags_int.maxp_was_clamped);
3479 }
3480
3481 /*
3482 * The following function computes the next isochronous frame number
3483 * where the first isochronous packet should be queued.
3484 *
3485 * The function returns non-zero if there was a discontinuity.
3486 * Else zero is returned for normal operation.
3487 */
3488 uint8_t
3489 usbd_xfer_get_isochronous_start_frame(struct usb_xfer *xfer, uint32_t frame_curr,
3490 uint32_t frame_min, uint32_t frame_ms, uint32_t frame_mask, uint32_t *p_frame_start)
3491 {
3492 uint32_t duration;
3493 uint32_t delta;
3494 uint8_t retval;
3495 uint8_t shift;
3496
3497 /* Compute time ahead of current schedule. */
3498 delta = (xfer->endpoint->isoc_next - frame_curr) & frame_mask;
3499
3500 /*
3501 * Check if it is the first transfer or if the future frame
3502 * delta is less than one millisecond or if the frame delta is
3503 * negative:
3504 */
3505 if (xfer->endpoint->is_synced == 0 ||
3506 delta < (frame_ms + frame_min) ||
3507 delta > (frame_mask / 2)) {
3508 /* Schedule transfer 2 milliseconds into the future. */
3509 xfer->endpoint->isoc_next = (frame_curr + 2 * frame_ms + frame_min) & frame_mask;
3510 xfer->endpoint->is_synced = 1;
3511
3512 retval = 1;
3513 } else {
3514 retval = 0;
3515 }
3516
3517 /* Store start time, if any. */
3518 if (p_frame_start != NULL)
3519 *p_frame_start = xfer->endpoint->isoc_next & frame_mask;
3520
3521 /* Get relative completion time, in milliseconds. */
3522 delta = xfer->endpoint->isoc_next - frame_curr + (frame_curr % frame_ms);
3523 delta &= frame_mask;
3524 delta /= frame_ms;
3525
3526 switch (usbd_get_speed(xfer->xroot->udev)) {
3527 case USB_SPEED_FULL:
3528 shift = 3;
3529 break;
3530 default:
3531 shift = usbd_xfer_get_fps_shift(xfer);
3532 break;
3533 }
3534
3535 /* Get duration in milliseconds, rounded up. */
3536 duration = ((xfer->nframes << shift) + 7) / 8;
3537
3538 /* Compute full 32-bit completion time, in milliseconds. */
3539 xfer->isoc_time_complete =
3540 usb_isoc_time_expand(xfer->xroot->bus, frame_curr / frame_ms) +
3541 delta + duration;
3542
3543 /* Compute next isochronous frame. */
3544 xfer->endpoint->isoc_next += duration * frame_ms;
3545 xfer->endpoint->isoc_next &= frame_mask;
3546
3547 return (retval);
3548 }
3549
3550 #undef USB_DEBUG_VAR
3551