1 #include <linux/kernel.h>
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <linux/slab.h>
5 #include <linux/mm.h>
6 #include <linux/module.h>
7 #include <linux/moduleparam.h>
8 #include <linux/scatterlist.h>
9 #include <linux/mutex.h>
10 #include <linux/timer.h>
11 #include <linux/usb.h>
12
13 #define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
14
15 /*-------------------------------------------------------------------------*/
16
17 static int override_alt = -1;
18 module_param_named(alt, override_alt, int, 0644);
19 MODULE_PARM_DESC(alt, ">= 0 to override altsetting selection");
20
21 /*-------------------------------------------------------------------------*/
22
23 /* FIXME make these public somewhere; usbdevfs.h? */
24 struct usbtest_param {
25 /* inputs */
26 unsigned test_num; /* 0..(TEST_CASES-1) */
27 unsigned iterations;
28 unsigned length;
29 unsigned vary;
30 unsigned sglen;
31
32 /* outputs */
33 struct timeval duration;
34 };
35 #define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
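/* For illustration only: a user-space caller reaches this ioctl through
 * usbfs, wrapping USBTEST_REQUEST in USBDEVFS_IOCTL (roughly what the
 * companion testusb tool does).  Sketch, assuming an already-opened
 * usbfs device node and interface 0 as placeholders:
 *
 *	struct usbtest_param param = {
 *		.test_num = 0, .iterations = 1000,
 *		.length = 1024, .vary = 0, .sglen = 32,
 *	};
 *	struct usbdevfs_ioctl wrapper = {
 *		.ifno = 0,
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data = &param,
 *	};
 *	if (ioctl(usbfs_fd, USBDEVFS_IOCTL, &wrapper) < 0)
 *		perror("USBTEST_REQUEST");
 */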
36
37 /*-------------------------------------------------------------------------*/
38
39 #define GENERIC /* let probe() bind using module params */
40
41 /* Some devices that can be used for testing will have "real" drivers.
42 * Entries for those need to be enabled here by hand, after disabling
43 * that "real" driver.
44 */
45 //#define IBOT2 /* grab iBOT2 webcams */
46 //#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
47
48 /*-------------------------------------------------------------------------*/
49
50 struct usbtest_info {
51 const char *name;
52 u8 ep_in; /* bulk/intr source */
53 u8 ep_out; /* bulk/intr sink */
54 unsigned autoconf:1;
55 unsigned ctrl_out:1;
56 unsigned iso:1; /* try iso in/out */
57 unsigned intr:1; /* try interrupt in/out */
58 int alt;
59 };
60
61 /* this is accessed only through usbfs ioctl calls.
62 * one ioctl to issue a test ... one lock per device.
63 * tests create other threads if they need them.
64 * urbs and buffers are allocated dynamically,
65 * and data generated deterministically.
66 */
67 struct usbtest_dev {
68 struct usb_interface *intf;
69 struct usbtest_info *info;
70 int in_pipe;
71 int out_pipe;
72 int in_iso_pipe;
73 int out_iso_pipe;
74 int in_int_pipe;
75 int out_int_pipe;
76 struct usb_endpoint_descriptor *iso_in, *iso_out;
77 struct usb_endpoint_descriptor *int_in, *int_out;
78 struct mutex lock;
79
80 #define TBUF_SIZE 256
81 u8 *buf;
82 };
83
84 static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
85 {
86 return interface_to_usbdev(test->intf);
87 }
88
89 /* set up all urbs so they can be used with either bulk or interrupt */
90 #define INTERRUPT_RATE 1 /* msec/transfer */
91
92 #define ERROR(tdev, fmt, args...) \
93 dev_err(&(tdev)->intf->dev , fmt , ## args)
94 #define WARNING(tdev, fmt, args...) \
95 dev_warn(&(tdev)->intf->dev , fmt , ## args)
96
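/* pad and pre-fill value: bytes the hardware should not have touched
 * (alignment padding, or IN buffers before data arrives) are set to
 * GUARD_BYTE so missing or corrupted copies are easy to detect
 */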
97 #define GUARD_BYTE 0xA5
98
99 /*-------------------------------------------------------------------------*/
100
101 static int
102 get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
103 {
104 int tmp;
105 struct usb_host_interface *alt;
106 struct usb_host_endpoint *in, *out;
107 struct usb_host_endpoint *iso_in, *iso_out;
108 struct usb_host_endpoint *int_in, *int_out;
109 struct usb_device *udev;
110
111 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
112 unsigned ep;
113
114 in = out = NULL;
115 iso_in = iso_out = NULL;
116 int_in = int_out = NULL;
117 alt = intf->altsetting + tmp;
118
119 if (override_alt >= 0 &&
120 override_alt != alt->desc.bAlternateSetting)
121 continue;
122
123 /* take the first altsetting with in-bulk + out-bulk;
124 * ignore other endpoints and altsettings.
125 */
126 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
127 struct usb_host_endpoint *e;
128
129 e = alt->endpoint + ep;
130 switch (usb_endpoint_type(&e->desc)) {
131 case USB_ENDPOINT_XFER_BULK:
132 break;
133 case USB_ENDPOINT_XFER_INT:
134 if (dev->info->intr)
135 goto try_intr;
136 continue;
137 case USB_ENDPOINT_XFER_ISOC:
138 if (dev->info->iso)
139 goto try_iso;
140 /* FALLTHROUGH */
141 default:
142 continue;
143 }
144 if (usb_endpoint_dir_in(&e->desc)) {
145 if (!in)
146 in = e;
147 } else {
148 if (!out)
149 out = e;
150 }
151 continue;
152 try_intr:
153 if (usb_endpoint_dir_in(&e->desc)) {
154 if (!int_in)
155 int_in = e;
156 } else {
157 if (!int_out)
158 int_out = e;
159 }
160 continue;
161 try_iso:
162 if (usb_endpoint_dir_in(&e->desc)) {
163 if (!iso_in)
164 iso_in = e;
165 } else {
166 if (!iso_out)
167 iso_out = e;
168 }
169 }
170 if ((in && out) || iso_in || iso_out || int_in || int_out)
171 goto found;
172 }
173 return -EINVAL;
174
175 found:
176 udev = testdev_to_usbdev(dev);
177 dev->info->alt = alt->desc.bAlternateSetting;
178 if (alt->desc.bAlternateSetting != 0) {
179 tmp = usb_set_interface(udev,
180 alt->desc.bInterfaceNumber,
181 alt->desc.bAlternateSetting);
182 if (tmp < 0)
183 return tmp;
184 }
185
186 if (in)
187 dev->in_pipe = usb_rcvbulkpipe(udev,
188 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
189 if (out)
190 dev->out_pipe = usb_sndbulkpipe(udev,
191 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
192
193 if (iso_in) {
194 dev->iso_in = &iso_in->desc;
195 dev->in_iso_pipe = usb_rcvisocpipe(udev,
196 iso_in->desc.bEndpointAddress
197 & USB_ENDPOINT_NUMBER_MASK);
198 }
199
200 if (iso_out) {
201 dev->iso_out = &iso_out->desc;
202 dev->out_iso_pipe = usb_sndisocpipe(udev,
203 iso_out->desc.bEndpointAddress
204 & USB_ENDPOINT_NUMBER_MASK);
205 }
206
207 if (int_in) {
208 dev->int_in = &int_in->desc;
209 dev->in_int_pipe = usb_rcvintpipe(udev,
210 int_in->desc.bEndpointAddress
211 & USB_ENDPOINT_NUMBER_MASK);
212 }
213
214 if (int_out) {
215 dev->int_out = &int_out->desc;
216 dev->out_int_pipe = usb_sndintpipe(udev,
217 int_out->desc.bEndpointAddress
218 & USB_ENDPOINT_NUMBER_MASK);
219 }
220 return 0;
221 }
222
223 /*-------------------------------------------------------------------------*/
224
225 /* Support for testing basic non-queued I/O streams.
226 *
227 * These just package urbs as requests that can be easily canceled.
228 * Each urb's data buffer is dynamically allocated; callers can fill
229 * them with non-zero test data (or test for it) when appropriate.
230 */
231
232 static void simple_callback(struct urb *urb)
233 {
234 complete(urb->context);
235 }
236
237 static struct urb *usbtest_alloc_urb(
238 struct usb_device *udev,
239 int pipe,
240 unsigned long bytes,
241 unsigned transfer_flags,
242 unsigned offset,
243 u8 bInterval)
244 {
245 struct urb *urb;
246
247 urb = usb_alloc_urb(0, GFP_KERNEL);
248 if (!urb)
249 return urb;
250
251 if (bInterval)
252 usb_fill_int_urb(urb, udev, pipe, NULL, bytes, simple_callback,
253 NULL, bInterval);
254 else
255 usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback,
256 NULL);
257
258 urb->interval = (udev->speed == USB_SPEED_HIGH)
259 ? (INTERRUPT_RATE << 3)
260 : INTERRUPT_RATE;
261 urb->transfer_flags = transfer_flags;
262 if (usb_pipein(pipe))
263 urb->transfer_flags |= URB_SHORT_NOT_OK;
264
265 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
266 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
267 GFP_KERNEL, &urb->transfer_dma);
268 else
269 urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
270
271 if (!urb->transfer_buffer) {
272 usb_free_urb(urb);
273 return NULL;
274 }
275
276 /* To test unaligned transfers add an offset and fill the
277 unused memory with a guard value */
278 if (offset) {
279 memset(urb->transfer_buffer, GUARD_BYTE, offset);
280 urb->transfer_buffer += offset;
281 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
282 urb->transfer_dma += offset;
283 }
284
285 /* For inbound transfers use guard byte so that test fails if
286 data not correctly copied */
287 memset(urb->transfer_buffer,
288 usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
289 bytes);
290 return urb;
291 }
292
293 static struct urb *simple_alloc_urb(
294 struct usb_device *udev,
295 int pipe,
296 unsigned long bytes,
297 u8 bInterval)
298 {
299 return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0,
300 bInterval);
301 }
302
303 static unsigned pattern;
304 static unsigned mod_pattern;
305 module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
306 MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
307
308 static unsigned get_maxpacket(struct usb_device *udev, int pipe)
309 {
310 struct usb_host_endpoint *ep;
311
312 ep = usb_pipe_endpoint(udev, pipe);
313 return le16_to_cpup(&ep->desc.wMaxPacketSize);
314 }
315
316 static void simple_fill_buf(struct urb *urb)
317 {
318 unsigned i;
319 u8 *buf = urb->transfer_buffer;
320 unsigned len = urb->transfer_buffer_length;
321 unsigned maxpacket;
322
323 switch (pattern) {
324 default:
325 /* FALLTHROUGH */
326 case 0:
327 memset(buf, 0, len);
328 break;
329 case 1: /* mod63 */
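/* within each maxpacket-sized chunk the bytes cycle 0..62; the pattern
 * restarts at packet boundaries, so host and device stay in sync as
 * long as they agree on transfer sizes (see simple_check_buf() and the
 * mod63 note there)
 */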
330 maxpacket = get_maxpacket(urb->dev, urb->pipe);
331 for (i = 0; i < len; i++)
332 *buf++ = (u8) ((i % maxpacket) % 63);
333 break;
334 }
335 }
336
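/* the allocators used here return buffers aligned to at least
 * ARCH_KMALLOC_MINALIGN, so masking the pointer with
 * (ARCH_KMALLOC_MINALIGN - 1) recovers any offset that
 * usbtest_alloc_urb() added for unaligned-transfer tests
 */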
337 static inline unsigned long buffer_offset(void *buf)
338 {
339 return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
340 }
341
342 static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
343 {
344 u8 *buf = urb->transfer_buffer;
345 u8 *guard = buf - buffer_offset(buf);
346 unsigned i;
347
348 for (i = 0; guard < buf; i++, guard++) {
349 if (*guard != GUARD_BYTE) {
350 ERROR(tdev, "guard byte[%d] %d (not %d)\n",
351 i, *guard, GUARD_BYTE);
352 return -EINVAL;
353 }
354 }
355 return 0;
356 }
357
358 static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
359 {
360 unsigned i;
361 u8 expected;
362 u8 *buf = urb->transfer_buffer;
363 unsigned len = urb->actual_length;
364 unsigned maxpacket = get_maxpacket(urb->dev, urb->pipe);
365
366 int ret = check_guard_bytes(tdev, urb);
367 if (ret)
368 return ret;
369
370 for (i = 0; i < len; i++, buf++) {
371 switch (pattern) {
372 /* all-zeroes has no synchronization issues */
373 case 0:
374 expected = 0;
375 break;
376 /* mod63 stays in sync with short-terminated transfers,
377 * or otherwise when host and gadget agree on how large
378 * each usb transfer request should be. resync is done
379 * with set_interface or set_config.
380 */
381 case 1: /* mod63 */
382 expected = (i % maxpacket) % 63;
383 break;
384 /* always fail unsupported patterns */
385 default:
386 expected = !*buf;
387 break;
388 }
389 if (*buf == expected)
390 continue;
391 ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
392 return -EINVAL;
393 }
394 return 0;
395 }
396
397 static void simple_free_urb(struct urb *urb)
398 {
399 unsigned long offset = buffer_offset(urb->transfer_buffer);
400
401 if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
402 usb_free_coherent(
403 urb->dev,
404 urb->transfer_buffer_length + offset,
405 urb->transfer_buffer - offset,
406 urb->transfer_dma - offset);
407 else
408 kfree(urb->transfer_buffer - offset);
409 usb_free_urb(urb);
410 }
411
412 static int simple_io(
413 struct usbtest_dev *tdev,
414 struct urb *urb,
415 int iterations,
416 int vary,
417 int expected,
418 const char *label
419 )
420 {
421 struct usb_device *udev = urb->dev;
422 int max = urb->transfer_buffer_length;
423 struct completion completion;
424 int retval = 0;
425 unsigned long expire;
426
427 urb->context = &completion;
428 while (retval == 0 && iterations-- > 0) {
429 init_completion(&completion);
430 if (usb_pipeout(urb->pipe)) {
431 simple_fill_buf(urb);
432 urb->transfer_flags |= URB_ZERO_PACKET;
433 }
434 retval = usb_submit_urb(urb, GFP_KERNEL);
435 if (retval != 0)
436 break;
437
438 expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
439 if (!wait_for_completion_timeout(&completion, expire)) {
440 usb_kill_urb(urb);
441 retval = (urb->status == -ENOENT ?
442 -ETIMEDOUT : urb->status);
443 } else {
444 retval = urb->status;
445 }
446
447 urb->dev = udev;
448 if (retval == 0 && usb_pipein(urb->pipe))
449 retval = simple_check_buf(tdev, urb);
450
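/* when 'vary' is nonzero, step the transfer length for the next
 * iteration (wrapping modulo the original length) so successive
 * submissions exercise different packet counts and short packets
 */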
451 if (vary) {
452 int len = urb->transfer_buffer_length;
453
454 len += vary;
455 len %= max;
456 if (len == 0)
457 len = (vary < max) ? vary : max;
458 urb->transfer_buffer_length = len;
459 }
460
461 /* FIXME if endpoint halted, clear halt (and log) */
462 }
463 urb->transfer_buffer_length = max;
464
465 if (expected != retval)
466 dev_err(&udev->dev,
467 "%s failed, iterations left %d, status %d (not %d)\n",
468 label, iterations, retval, expected);
469 return retval;
470 }
471
472
473 /*-------------------------------------------------------------------------*/
474
475 /* We use scatterlist primitives to test queued I/O.
476 * Yes, this also tests the scatterlist primitives.
477 */
478
479 static void free_sglist(struct scatterlist *sg, int nents)
480 {
481 unsigned i;
482
483 if (!sg)
484 return;
485 for (i = 0; i < nents; i++) {
486 if (!sg_page(&sg[i]))
487 continue;
488 kfree(sg_virt(&sg[i]));
489 }
490 kfree(sg);
491 }
492
493 static struct scatterlist *
494 alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
495 {
496 struct scatterlist *sg;
497 unsigned int n_size = 0;
498 unsigned i;
499 unsigned size = max;
500 unsigned maxpacket =
501 get_maxpacket(interface_to_usbdev(dev->intf), pipe);
502
503 if (max == 0)
504 return NULL;
505
506 sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
507 if (!sg)
508 return NULL;
509 sg_init_table(sg, nents);
510
511 for (i = 0; i < nents; i++) {
512 char *buf;
513 unsigned j;
514
515 buf = kzalloc(size, GFP_KERNEL);
516 if (!buf) {
517 free_sglist(sg, i);
518 return NULL;
519 }
520
521 /* kmalloc pages are always physically contiguous! */
522 sg_set_buf(&sg[i], buf, size);
523
524 switch (pattern) {
525 case 0:
526 /* already zeroed */
527 break;
528 case 1:
529 for (j = 0; j < size; j++)
530 *buf++ = (u8) (((j + n_size) % maxpacket) % 63);
531 n_size += size;
532 break;
533 }
534
535 if (vary) {
536 size += vary;
537 size %= max;
538 if (size == 0)
539 size = (vary < max) ? vary : max;
540 }
541 }
542
543 return sg;
544 }
545
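/* timer callback: a scatterlist request that has run past
 * SIMPLE_IO_TIMEOUT is cancelled here; perform_sglist() then reports
 * -ETIMEDOUT when del_timer_sync() shows the timer already fired
 */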
546 static void sg_timeout(unsigned long _req)
547 {
548 struct usb_sg_request *req = (struct usb_sg_request *) _req;
549
550 usb_sg_cancel(req);
551 }
552
553 static int perform_sglist(
554 struct usbtest_dev *tdev,
555 unsigned iterations,
556 int pipe,
557 struct usb_sg_request *req,
558 struct scatterlist *sg,
559 int nents
560 )
561 {
562 struct usb_device *udev = testdev_to_usbdev(tdev);
563 int retval = 0;
564 struct timer_list sg_timer;
565
566 setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
567
568 while (retval == 0 && iterations-- > 0) {
569 retval = usb_sg_init(req, udev, pipe,
570 (udev->speed == USB_SPEED_HIGH)
571 ? (INTERRUPT_RATE << 3)
572 : INTERRUPT_RATE,
573 sg, nents, 0, GFP_KERNEL);
574
575 if (retval)
576 break;
577 mod_timer(&sg_timer, jiffies +
578 msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
579 usb_sg_wait(req);
580 if (!del_timer_sync(&sg_timer))
581 retval = -ETIMEDOUT;
582 else
583 retval = req->status;
584
585 /* FIXME check resulting data pattern */
586
587 /* FIXME if endpoint halted, clear halt (and log) */
588 }
589
590 /* FIXME for unlink or fault handling tests, don't report
591 * failure if retval is as we expected ...
592 */
593 if (retval)
594 ERROR(tdev, "perform_sglist failed, "
595 "iterations left %d, status %d\n",
596 iterations, retval);
597 return retval;
598 }
599
600
601 /*-------------------------------------------------------------------------*/
602
603 /* unqueued control message testing
604 *
605 * there's a nice set of device functional requirements in chapter 9 of the
606 * usb 2.0 spec, which we can apply to ANY device, even ones that don't use
607 * special test firmware.
608 *
609 * we know the device is configured (or suspended) by the time it's visible
610 * through usbfs. we can't change that, so we won't test enumeration (which
611 * worked 'well enough' to get here, this time), power management (ditto),
612 * or remote wakeup (which needs human interaction).
613 */
614
615 static unsigned realworld = 1;
616 module_param(realworld, uint, 0);
617 MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
618
619 static int get_altsetting(struct usbtest_dev *dev)
620 {
621 struct usb_interface *iface = dev->intf;
622 struct usb_device *udev = interface_to_usbdev(iface);
623 int retval;
624
625 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
626 USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
627 0, iface->altsetting[0].desc.bInterfaceNumber,
628 dev->buf, 1, USB_CTRL_GET_TIMEOUT);
629 switch (retval) {
630 case 1:
631 return dev->buf[0];
632 case 0:
633 retval = -ERANGE;
634 /* FALLTHROUGH */
635 default:
636 return retval;
637 }
638 }
639
640 static int set_altsetting(struct usbtest_dev *dev, int alternate)
641 {
642 struct usb_interface *iface = dev->intf;
643 struct usb_device *udev;
644
645 if (alternate < 0 || alternate >= 256)
646 return -EINVAL;
647
648 udev = interface_to_usbdev(iface);
649 return usb_set_interface(udev,
650 iface->altsetting[0].desc.bInterfaceNumber,
651 alternate);
652 }
653
654 static int is_good_config(struct usbtest_dev *tdev, int len)
655 {
656 struct usb_config_descriptor *config;
657
658 if (len < sizeof(*config))
659 return 0;
660 config = (struct usb_config_descriptor *) tdev->buf;
661
662 switch (config->bDescriptorType) {
663 case USB_DT_CONFIG:
664 case USB_DT_OTHER_SPEED_CONFIG:
665 if (config->bLength != 9) {
666 ERROR(tdev, "bogus config descriptor length\n");
667 return 0;
668 }
669 /* this bit 'must be 1' but often isn't */
670 if (!realworld && !(config->bmAttributes & 0x80)) {
671 ERROR(tdev, "high bit of config attributes not set\n");
672 return 0;
673 }
674 if (config->bmAttributes & 0x1f) { /* reserved == 0 */
675 ERROR(tdev, "reserved config bits set\n");
676 return 0;
677 }
678 break;
679 default:
680 return 0;
681 }
682
683 if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
684 return 1;
685 if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
686 return 1;
687 ERROR(tdev, "bogus config descriptor read size\n");
688 return 0;
689 }
690
691 static int is_good_ext(struct usbtest_dev *tdev, u8 *buf)
692 {
693 struct usb_ext_cap_descriptor *ext;
694 u32 attr;
695
696 ext = (struct usb_ext_cap_descriptor *) buf;
697
698 if (ext->bLength != USB_DT_USB_EXT_CAP_SIZE) {
699 ERROR(tdev, "bogus usb 2.0 extension descriptor length\n");
700 return 0;
701 }
702
703 attr = le32_to_cpu(ext->bmAttributes);
704 /* bits[1:15] are used and the others are reserved */
705 if (attr & ~0xfffe) { /* reserved == 0 */
706 ERROR(tdev, "reserved bits set\n");
707 return 0;
708 }
709
710 return 1;
711 }
712
713 static int is_good_ss_cap(struct usbtest_dev *tdev, u8 *buf)
714 {
715 struct usb_ss_cap_descriptor *ss;
716
717 ss = (struct usb_ss_cap_descriptor *) buf;
718
719 if (ss->bLength != USB_DT_USB_SS_CAP_SIZE) {
720 ERROR(tdev, "bogus superspeed device capability descriptor length\n");
721 return 0;
722 }
723
724 /*
725 * only bit[1] of bmAttributes is used for LTM and others are
726 * reserved
727 */
728 if (ss->bmAttributes & ~0x02) { /* reserved == 0 */
729 ERROR(tdev, "reserved bits set in bmAttributes\n");
730 return 0;
731 }
732
733 /* bits[0:3] of wSpeedSupported are used and the others are reserved */
734 if (le16_to_cpu(ss->wSpeedSupported) & ~0x0f) { /* reserved == 0 */
735 ERROR(tdev, "reserved bits set in wSpeedSupported\n");
736 return 0;
737 }
738
739 return 1;
740 }
741
742 static int is_good_con_id(struct usbtest_dev *tdev, u8 *buf)
743 {
744 struct usb_ss_container_id_descriptor *con_id;
745
746 con_id = (struct usb_ss_container_id_descriptor *) buf;
747
748 if (con_id->bLength != USB_DT_USB_SS_CONTN_ID_SIZE) {
749 ERROR(tdev, "bogus container id descriptor length\n");
750 return 0;
751 }
752
753 if (con_id->bReserved) { /* reserved == 0 */
754 ERROR(tdev, "reserved bits set\n");
755 return 0;
756 }
757
758 return 1;
759 }
760
761 /* sanity test for standard requests working with usb_control_msg() and some
762 * of the utility functions which use it.
763 *
764 * this doesn't test how endpoint halts behave or data toggles get set, since
765 * we won't do I/O to bulk/interrupt endpoints here (which is how to change
766 * halt or toggle). toggle testing is impractical without support from hcds.
767 *
768 * this avoids failing devices linux would normally work with, by not testing
769 * config/altsetting operations for devices that only support their defaults.
770 * such devices rarely support those needless operations.
771 *
772 * NOTE that since this is a sanity test, it's not examining boundary cases
773 * to see if usbcore, hcd, and device all behave right. such testing would
774 * involve varied read sizes and other operation sequences.
775 */
776 static int ch9_postconfig(struct usbtest_dev *dev)
777 {
778 struct usb_interface *iface = dev->intf;
779 struct usb_device *udev = interface_to_usbdev(iface);
780 int i, alt, retval;
781
782 /* [9.2.3] if there's more than one altsetting, we need to be able to
783 * set and get each one. mostly trusts the descriptors from usbcore.
784 */
785 for (i = 0; i < iface->num_altsetting; i++) {
786
787 /* 9.2.3 constrains the range here */
788 alt = iface->altsetting[i].desc.bAlternateSetting;
789 if (alt < 0 || alt >= iface->num_altsetting) {
790 dev_err(&iface->dev,
791 "invalid alt [%d].bAltSetting = %d\n",
792 i, alt);
793 }
794
795 /* [real world] get/set unimplemented if there's only one */
796 if (realworld && iface->num_altsetting == 1)
797 continue;
798
799 /* [9.4.10] set_interface */
800 retval = set_altsetting(dev, alt);
801 if (retval) {
802 dev_err(&iface->dev, "can't set_interface = %d, %d\n",
803 alt, retval);
804 return retval;
805 }
806
807 /* [9.4.4] get_interface always works */
808 retval = get_altsetting(dev);
809 if (retval != alt) {
810 dev_err(&iface->dev, "get alt should be %d, was %d\n",
811 alt, retval);
812 return (retval < 0) ? retval : -EDOM;
813 }
814
815 }
816
817 /* [real world] get_config unimplemented if there's only one */
818 if (!realworld || udev->descriptor.bNumConfigurations != 1) {
819 int expected = udev->actconfig->desc.bConfigurationValue;
820
821 /* [9.4.2] get_configuration always works
822 * ... although some cheap devices (like one TI Hub I've got)
823 * won't return config descriptors except before set_config.
824 */
825 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
826 USB_REQ_GET_CONFIGURATION,
827 USB_DIR_IN | USB_RECIP_DEVICE,
828 0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
829 if (retval != 1 || dev->buf[0] != expected) {
830 dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
831 retval, dev->buf[0], expected);
832 return (retval < 0) ? retval : -EDOM;
833 }
834 }
835
836 /* there's always [9.4.3] a device descriptor [9.6.1] */
837 retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
838 dev->buf, sizeof(udev->descriptor));
839 if (retval != sizeof(udev->descriptor)) {
840 dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
841 return (retval < 0) ? retval : -EDOM;
842 }
843
844 /*
845 * there's always [9.4.3] a bos device descriptor [9.6.2] in USB
846 * 3.0 spec
847 */
848 if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0210) {
849 struct usb_bos_descriptor *bos = NULL;
850 struct usb_dev_cap_header *header = NULL;
851 unsigned total, num, length;
852 u8 *buf;
853
854 retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
855 sizeof(*udev->bos->desc));
856 if (retval != sizeof(*udev->bos->desc)) {
857 dev_err(&iface->dev, "bos descriptor --> %d\n", retval);
858 return (retval < 0) ? retval : -EDOM;
859 }
860
861 bos = (struct usb_bos_descriptor *)dev->buf;
862 total = le16_to_cpu(bos->wTotalLength);
863 num = bos->bNumDeviceCaps;
864
865 if (total > TBUF_SIZE)
866 total = TBUF_SIZE;
867
868 /*
869 * get generic device-level capability descriptors [9.6.2]
870 * in USB 3.0 spec
871 */
872 retval = usb_get_descriptor(udev, USB_DT_BOS, 0, dev->buf,
873 total);
874 if (retval != total) {
875 dev_err(&iface->dev, "bos descriptor set --> %d\n",
876 retval);
877 return (retval < 0) ? retval : -EDOM;
878 }
879
880 length = sizeof(*udev->bos->desc);
881 buf = dev->buf;
882 for (i = 0; i < num; i++) {
883 buf += length;
884 if (buf + sizeof(struct usb_dev_cap_header) >
885 dev->buf + total)
886 break;
887
888 header = (struct usb_dev_cap_header *)buf;
889 length = header->bLength;
890
891 if (header->bDescriptorType !=
892 USB_DT_DEVICE_CAPABILITY) {
893 dev_warn(&udev->dev, "not device capability descriptor, skip\n");
894 continue;
895 }
896
897 switch (header->bDevCapabilityType) {
898 case USB_CAP_TYPE_EXT:
899 if (buf + USB_DT_USB_EXT_CAP_SIZE >
900 dev->buf + total ||
901 !is_good_ext(dev, buf)) {
902 dev_err(&iface->dev, "bogus usb 2.0 extension descriptor\n");
903 return -EDOM;
904 }
905 break;
906 case USB_SS_CAP_TYPE:
907 if (buf + USB_DT_USB_SS_CAP_SIZE >
908 dev->buf + total ||
909 !is_good_ss_cap(dev, buf)) {
910 dev_err(&iface->dev, "bogus superspeed device capability descriptor\n");
911 return -EDOM;
912 }
913 break;
914 case CONTAINER_ID_TYPE:
915 if (buf + USB_DT_USB_SS_CONTN_ID_SIZE >
916 dev->buf + total ||
917 !is_good_con_id(dev, buf)) {
918 dev_err(&iface->dev, "bogus container id descriptor\n");
919 return -EDOM;
920 }
921 break;
922 default:
923 break;
924 }
925 }
926 }
927
928 /* there's always [9.4.3] at least one config descriptor [9.6.3] */
929 for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
930 retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
931 dev->buf, TBUF_SIZE);
932 if (!is_good_config(dev, retval)) {
933 dev_err(&iface->dev,
934 "config [%d] descriptor --> %d\n",
935 i, retval);
936 return (retval < 0) ? retval : -EDOM;
937 }
938
939 /* FIXME cross-checking udev->config[i] to make sure usbcore
940 * parsed it right (etc) would be good testing paranoia
941 */
942 }
943
944 /* and sometimes [9.2.6.6] speed dependent descriptors */
945 if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
946 struct usb_qualifier_descriptor *d = NULL;
947
948 /* device qualifier [9.6.2] */
949 retval = usb_get_descriptor(udev,
950 USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
951 sizeof(struct usb_qualifier_descriptor));
952 if (retval == -EPIPE) {
953 if (udev->speed == USB_SPEED_HIGH) {
954 dev_err(&iface->dev,
955 "hs dev qualifier --> %d\n",
956 retval);
957 return (retval < 0) ? retval : -EDOM;
958 }
959 /* usb2.0 but not high-speed capable; fine */
960 } else if (retval != sizeof(struct usb_qualifier_descriptor)) {
961 dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
962 return (retval < 0) ? retval : -EDOM;
963 } else
964 d = (struct usb_qualifier_descriptor *) dev->buf;
965
966 /* might not have [9.6.2] any other-speed configs [9.6.4] */
967 if (d) {
968 unsigned max = d->bNumConfigurations;
969 for (i = 0; i < max; i++) {
970 retval = usb_get_descriptor(udev,
971 USB_DT_OTHER_SPEED_CONFIG, i,
972 dev->buf, TBUF_SIZE);
973 if (!is_good_config(dev, retval)) {
974 dev_err(&iface->dev,
975 "other speed config --> %d\n",
976 retval);
977 return (retval < 0) ? retval : -EDOM;
978 }
979 }
980 }
981 }
982 /* FIXME fetch strings from at least the device descriptor */
983
984 /* [9.4.5] get_status always works */
985 retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
986 if (retval) {
987 dev_err(&iface->dev, "get dev status --> %d\n", retval);
988 return retval;
989 }
990
991 /* FIXME configuration.bmAttributes says if we could try to set/clear
992 * the device's remote wakeup feature ... if we can, test that here
993 */
994
995 retval = usb_get_status(udev, USB_RECIP_INTERFACE,
996 iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
997 if (retval) {
998 dev_err(&iface->dev, "get interface status --> %d\n", retval);
999 return retval;
1000 }
1001 /* FIXME get status for each endpoint in the interface */
1002
1003 return 0;
1004 }
1005
1006 /*-------------------------------------------------------------------------*/
1007
1008 /* use ch9 requests to test whether:
1009 * (a) queues work for control, keeping N subtests queued and
1010 * active (auto-resubmit) for M loops through the queue.
1011 * (b) protocol stalls (control-only) will autorecover.
1012 * it's not like bulk/intr; no halt clearing.
1013 * (c) short control reads are reported and handled.
1014 * (d) queues are always processed in-order
1015 */
1016
1017 struct ctrl_ctx {
1018 spinlock_t lock;
1019 struct usbtest_dev *dev;
1020 struct completion complete;
1021 unsigned count;
1022 unsigned pending;
1023 int status;
1024 struct urb **urb;
1025 struct usbtest_param *param;
1026 int last;
1027 };
1028
1029 #define NUM_SUBCASES 16 /* how many test subcases here? */
1030
1031 struct subcase {
1032 struct usb_ctrlrequest setup;
1033 int number;
1034 int expected;
1035 };
1036
1037 static void ctrl_complete(struct urb *urb)
1038 {
1039 struct ctrl_ctx *ctx = urb->context;
1040 struct usb_ctrlrequest *reqp;
1041 struct subcase *subcase;
1042 int status = urb->status;
1043
1044 reqp = (struct usb_ctrlrequest *)urb->setup_packet;
1045 subcase = container_of(reqp, struct subcase, setup);
1046
1047 spin_lock(&ctx->lock);
1048 ctx->count--;
1049 ctx->pending--;
1050
1051 /* queue must transfer and complete in fifo order, unless
1052 * usb_unlink_urb() is used to unlink something not at the
1053 * physical queue head (not tested).
1054 */
1055 if (subcase->number > 0) {
1056 if ((subcase->number - ctx->last) != 1) {
1057 ERROR(ctx->dev,
1058 "subcase %d completed out of order, last %d\n",
1059 subcase->number, ctx->last);
1060 status = -EDOM;
1061 ctx->last = subcase->number;
1062 goto error;
1063 }
1064 }
1065 ctx->last = subcase->number;
1066
1067 /* succeed or fault in only one way? */
1068 if (status == subcase->expected)
1069 status = 0;
1070
1071 /* async unlink for cleanup? */
1072 else if (status != -ECONNRESET) {
1073
1074 /* some faults are allowed, not required */
1075 if (subcase->expected > 0 && (
1076 ((status == -subcase->expected /* happened */
1077 || status == 0)))) /* didn't */
1078 status = 0;
1079 /* sometimes more than one fault is allowed */
1080 else if (subcase->number == 12 && status == -EPIPE)
1081 status = 0;
1082 else
1083 ERROR(ctx->dev, "subtest %d error, status %d\n",
1084 subcase->number, status);
1085 }
1086
1087 /* unexpected status codes mean errors; ideally, in hardware */
1088 if (status) {
1089 error:
1090 if (ctx->status == 0) {
1091 int i;
1092
1093 ctx->status = status;
1094 ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
1095 "%d left, subcase %d, len %d/%d\n",
1096 reqp->bRequestType, reqp->bRequest,
1097 status, ctx->count, subcase->number,
1098 urb->actual_length,
1099 urb->transfer_buffer_length);
1100
1101 /* FIXME this "unlink everything" exit route should
1102 * be a separate test case.
1103 */
1104
1105 /* unlink whatever's still pending */
1106 for (i = 1; i < ctx->param->sglen; i++) {
1107 struct urb *u = ctx->urb[
1108 (i + subcase->number)
1109 % ctx->param->sglen];
1110
1111 if (u == urb || !u->dev)
1112 continue;
1113 spin_unlock(&ctx->lock);
1114 status = usb_unlink_urb(u);
1115 spin_lock(&ctx->lock);
1116 switch (status) {
1117 case -EINPROGRESS:
1118 case -EBUSY:
1119 case -EIDRM:
1120 continue;
1121 default:
1122 ERROR(ctx->dev, "urb unlink --> %d\n",
1123 status);
1124 }
1125 }
1126 status = ctx->status;
1127 }
1128 }
1129
1130 /* resubmit if we need to, else mark this as done */
1131 if ((status == 0) && (ctx->pending < ctx->count)) {
1132 status = usb_submit_urb(urb, GFP_ATOMIC);
1133 if (status != 0) {
1134 ERROR(ctx->dev,
1135 "can't resubmit ctrl %02x.%02x, err %d\n",
1136 reqp->bRequestType, reqp->bRequest, status);
1137 urb->dev = NULL;
1138 } else
1139 ctx->pending++;
1140 } else
1141 urb->dev = NULL;
1142
1143 /* signal completion when nothing's queued */
1144 if (ctx->pending == 0)
1145 complete(&ctx->complete);
1146 spin_unlock(&ctx->lock);
1147 }
1148
1149 static int
1150 test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
1151 {
1152 struct usb_device *udev = testdev_to_usbdev(dev);
1153 struct urb **urb;
1154 struct ctrl_ctx context;
1155 int i;
1156
1157 if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
1158 return -EOPNOTSUPP;
1159
1160 spin_lock_init(&context.lock);
1161 context.dev = dev;
1162 init_completion(&context.complete);
1163 context.count = param->sglen * param->iterations;
1164 context.pending = 0;
1165 context.status = -ENOMEM;
1166 context.param = param;
1167 context.last = -1;
1168
1169 /* allocate and init the urbs we'll queue.
1170 * as with bulk/intr sglists, sglen is the queue depth; it also
1171 * controls which subtests run (more tests than sglen) or rerun.
1172 */
1173 urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
1174 if (!urb)
1175 return -ENOMEM;
1176 for (i = 0; i < param->sglen; i++) {
1177 int pipe = usb_rcvctrlpipe(udev, 0);
1178 unsigned len;
1179 struct urb *u;
1180 struct usb_ctrlrequest req;
1181 struct subcase *reqp;
1182
1183 /* sign of this variable means:
1184 * -: tested code must return this (negative) error code
1185 * +: tested code may return this (negative too) error code
1186 */
1187 int expected = 0;
1188
1189 /* requests here are mostly expected to succeed on any
1190 * device, but some are chosen to trigger protocol stalls
1191 * or short reads.
1192 */
1193 memset(&req, 0, sizeof(req));
1194 req.bRequest = USB_REQ_GET_DESCRIPTOR;
1195 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1196
1197 switch (i % NUM_SUBCASES) {
1198 case 0: /* get device descriptor */
1199 req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
1200 len = sizeof(struct usb_device_descriptor);
1201 break;
1202 case 1: /* get first config descriptor (only) */
1203 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1204 len = sizeof(struct usb_config_descriptor);
1205 break;
1206 case 2: /* get altsetting (OFTEN STALLS) */
1207 req.bRequest = USB_REQ_GET_INTERFACE;
1208 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1209 /* index = 0 means first interface */
1210 len = 1;
1211 expected = EPIPE;
1212 break;
1213 case 3: /* get interface status */
1214 req.bRequest = USB_REQ_GET_STATUS;
1215 req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
1216 /* interface 0 */
1217 len = 2;
1218 break;
1219 case 4: /* get device status */
1220 req.bRequest = USB_REQ_GET_STATUS;
1221 req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
1222 len = 2;
1223 break;
1224 case 5: /* get device qualifier (MAY STALL) */
1225 req.wValue = cpu_to_le16 (USB_DT_DEVICE_QUALIFIER << 8);
1226 len = sizeof(struct usb_qualifier_descriptor);
1227 if (udev->speed != USB_SPEED_HIGH)
1228 expected = EPIPE;
1229 break;
1230 case 6: /* get first config descriptor, plus interface */
1231 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1232 len = sizeof(struct usb_config_descriptor);
1233 len += sizeof(struct usb_interface_descriptor);
1234 break;
1235 case 7: /* get interface descriptor (ALWAYS STALLS) */
1236 req.wValue = cpu_to_le16 (USB_DT_INTERFACE << 8);
1237 /* interface == 0 */
1238 len = sizeof(struct usb_interface_descriptor);
1239 expected = -EPIPE;
1240 break;
1241 /* NOTE: two consecutive stalls in the queue here.
1242 * that tests fault recovery a bit more aggressively. */
1243 case 8: /* clear endpoint halt (MAY STALL) */
1244 req.bRequest = USB_REQ_CLEAR_FEATURE;
1245 req.bRequestType = USB_RECIP_ENDPOINT;
1246 /* wValue 0 == ep halt */
1247 /* wIndex 0 == ep0 (shouldn't halt!) */
1248 len = 0;
1249 pipe = usb_sndctrlpipe(udev, 0);
1250 expected = EPIPE;
1251 break;
1252 case 9: /* get endpoint status */
1253 req.bRequest = USB_REQ_GET_STATUS;
1254 req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
1255 /* endpoint 0 */
1256 len = 2;
1257 break;
1258 case 10: /* trigger short read (EREMOTEIO) */
1259 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1260 len = 1024;
1261 expected = -EREMOTEIO;
1262 break;
1263 /* NOTE: two consecutive _different_ faults in the queue. */
1264 case 11: /* get endpoint descriptor (ALWAYS STALLS) */
1265 req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
1266 /* endpoint == 0 */
1267 len = sizeof(struct usb_interface_descriptor);
1268 expected = EPIPE;
1269 break;
1270 /* NOTE: sometimes even a third fault in the queue! */
1271 case 12: /* get string 0 descriptor (MAY STALL) */
1272 req.wValue = cpu_to_le16(USB_DT_STRING << 8);
1273 /* string == 0, for language IDs */
1274 len = sizeof(struct usb_interface_descriptor);
1275 /* may succeed when > 4 languages */
1276 expected = EREMOTEIO; /* or EPIPE, if no strings */
1277 break;
1278 case 13: /* short read, resembling case 10 */
1279 req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
1280 /* last data packet "should" be DATA1, not DATA0 */
1281 if (udev->speed == USB_SPEED_SUPER)
1282 len = 1024 - 512;
1283 else
1284 len = 1024 - udev->descriptor.bMaxPacketSize0;
1285 expected = -EREMOTEIO;
1286 break;
1287 case 14: /* short read; try to fill the last packet */
1288 req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
1289 /* device descriptor size == 18 bytes */
1290 len = udev->descriptor.bMaxPacketSize0;
1291 if (udev->speed == USB_SPEED_SUPER)
1292 len = 512;
1293 switch (len) {
1294 case 8:
1295 len = 24;
1296 break;
1297 case 16:
1298 len = 32;
1299 break;
1300 }
1301 expected = -EREMOTEIO;
1302 break;
1303 case 15:
1304 req.wValue = cpu_to_le16(USB_DT_BOS << 8);
1305 if (udev->bos)
1306 len = le16_to_cpu(udev->bos->desc->wTotalLength);
1307 else
1308 len = sizeof(struct usb_bos_descriptor);
1309 if (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0201)
1310 expected = -EPIPE;
1311 break;
1312 default:
1313 ERROR(dev, "bogus number of ctrl queue testcases!\n");
1314 context.status = -EINVAL;
1315 goto cleanup;
1316 }
1317 req.wLength = cpu_to_le16(len);
1318 urb[i] = u = simple_alloc_urb(udev, pipe, len, 0);
1319 if (!u)
1320 goto cleanup;
1321
1322 reqp = kmalloc(sizeof(*reqp), GFP_KERNEL);
1323 if (!reqp)
1324 goto cleanup;
1325 reqp->setup = req;
1326 reqp->number = i % NUM_SUBCASES;
1327 reqp->expected = expected;
1328 u->setup_packet = (char *) &reqp->setup;
1329
1330 u->context = &context;
1331 u->complete = ctrl_complete;
1332 }
1333
1334 /* queue the urbs */
1335 context.urb = urb;
1336 spin_lock_irq(&context.lock);
1337 for (i = 0; i < param->sglen; i++) {
1338 context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
1339 if (context.status != 0) {
1340 ERROR(dev, "can't submit urb[%d], status %d\n",
1341 i, context.status);
1342 context.count = context.pending;
1343 break;
1344 }
1345 context.pending++;
1346 }
1347 spin_unlock_irq(&context.lock);
1348
1349 /* FIXME set timer and time out; provide a disconnect hook */
1350
1351 /* wait for the last one to complete */
1352 if (context.pending > 0)
1353 wait_for_completion(&context.complete);
1354
1355 cleanup:
1356 for (i = 0; i < param->sglen; i++) {
1357 if (!urb[i])
1358 continue;
1359 urb[i]->dev = udev;
1360 kfree(urb[i]->setup_packet);
1361 simple_free_urb(urb[i]);
1362 }
1363 kfree(urb);
1364 return context.status;
1365 }
1366 #undef NUM_SUBCASES
1367
1368
1369 /*-------------------------------------------------------------------------*/
1370
1371 static void unlink1_callback(struct urb *urb)
1372 {
1373 int status = urb->status;
1374
1375 /* we "know" -EPIPE (stall) never happens */
1376 if (!status)
1377 status = usb_submit_urb(urb, GFP_ATOMIC);
1378 if (status) {
1379 urb->status = status;
1380 complete(urb->context);
1381 }
1382 }
1383
1384 static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1385 {
1386 struct urb *urb;
1387 struct completion completion;
1388 int retval = 0;
1389
1390 init_completion(&completion);
1391 urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size, 0);
1392 if (!urb)
1393 return -ENOMEM;
1394 urb->context = &completion;
1395 urb->complete = unlink1_callback;
1396
1397 if (usb_pipeout(urb->pipe)) {
1398 simple_fill_buf(urb);
1399 urb->transfer_flags |= URB_ZERO_PACKET;
1400 }
1401
1402 /* keep the endpoint busy. there are lots of hc/hcd-internal
1403 * states, and testing should get to all of them over time.
1404 *
1405 * FIXME want additional tests for when endpoint is STALLing
1406 * due to errors, or is just NAKing requests.
1407 */
1408 retval = usb_submit_urb(urb, GFP_KERNEL);
1409 if (retval != 0) {
1410 dev_err(&dev->intf->dev, "submit fail %d\n", retval);
1411 return retval;
1412 }
1413
1414 /* unlinking that should always work. variable delay tests more
1415 * hcd states and code paths, even with little other system load.
1416 */
1417 msleep(jiffies % (2 * INTERRUPT_RATE));
1418 if (async) {
1419 while (!completion_done(&completion)) {
1420 retval = usb_unlink_urb(urb);
1421
1422 if (retval == 0 && usb_pipein(urb->pipe))
1423 retval = simple_check_buf(dev, urb);
1424
1425 switch (retval) {
1426 case -EBUSY:
1427 case -EIDRM:
1428 /* we can't unlink urbs while they're completing
1429 * or if they've completed, and we haven't
1430 * resubmitted. "normal" drivers would prevent
1431 * resubmission, but since we're testing unlink
1432 * paths, we can't.
1433 */
1434 ERROR(dev, "unlink retry\n");
1435 continue;
1436 case 0:
1437 case -EINPROGRESS:
1438 break;
1439
1440 default:
1441 dev_err(&dev->intf->dev,
1442 "unlink fail %d\n", retval);
1443 return retval;
1444 }
1445
1446 break;
1447 }
1448 } else
1449 usb_kill_urb(urb);
1450
1451 wait_for_completion(&completion);
1452 retval = urb->status;
1453 simple_free_urb(urb);
1454
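/* a successful async unlink completes with -ECONNRESET, while
 * usb_kill_urb() ends with -ENOENT or -EPERM; any other status is a
 * failure, offset by -1000/-2000 so logs show which path was taken
 */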
1455 if (async)
1456 return (retval == -ECONNRESET) ? 0 : retval - 1000;
1457 else
1458 return (retval == -ENOENT || retval == -EPERM) ?
1459 0 : retval - 2000;
1460 }
1461
1462 static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
1463 {
1464 int retval = 0;
1465
1466 /* test sync and async paths */
1467 retval = unlink1(dev, pipe, len, 1);
1468 if (!retval)
1469 retval = unlink1(dev, pipe, len, 0);
1470 return retval;
1471 }
1472
1473 /*-------------------------------------------------------------------------*/
1474
1475 struct queued_ctx {
1476 struct completion complete;
1477 atomic_t pending;
1478 unsigned num;
1479 int status;
1480 struct urb **urbs;
1481 };
1482
1483 static void unlink_queued_callback(struct urb *urb)
1484 {
1485 int status = urb->status;
1486 struct queued_ctx *ctx = urb->context;
1487
1488 if (ctx->status)
1489 goto done;
1490 if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
1491 if (status == -ECONNRESET)
1492 goto done;
1493 /* What error should we report if the URB completed normally? */
1494 }
1495 if (status != 0)
1496 ctx->status = status;
1497
1498 done:
1499 if (atomic_dec_and_test(&ctx->pending))
1500 complete(&ctx->complete);
1501 }
1502
1503 static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1504 unsigned size)
1505 {
1506 struct queued_ctx ctx;
1507 struct usb_device *udev = testdev_to_usbdev(dev);
1508 void *buf;
1509 dma_addr_t buf_dma;
1510 int i;
1511 int retval = -ENOMEM;
1512
1513 init_completion(&ctx.complete);
1514 atomic_set(&ctx.pending, 1); /* One more than the actual value */
1515 ctx.num = num;
1516 ctx.status = 0;
1517
1518 buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
1519 if (!buf)
1520 return retval;
1521 memset(buf, 0, size);
1522
1523 /* Allocate and init the urbs we'll queue */
1524 ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
1525 if (!ctx.urbs)
1526 goto free_buf;
1527 for (i = 0; i < num; i++) {
1528 ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
1529 if (!ctx.urbs[i])
1530 goto free_urbs;
1531 usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
1532 unlink_queued_callback, &ctx);
1533 ctx.urbs[i]->transfer_dma = buf_dma;
1534 ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1535
1536 if (usb_pipeout(ctx.urbs[i]->pipe)) {
1537 simple_fill_buf(ctx.urbs[i]);
1538 ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
1539 }
1540 }
1541
1542 /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
1543 for (i = 0; i < num; i++) {
1544 atomic_inc(&ctx.pending);
1545 retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
1546 if (retval != 0) {
1547 dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
1548 i, retval);
1549 atomic_dec(&ctx.pending);
1550 ctx.status = retval;
1551 break;
1552 }
1553 }
1554 if (i == num) {
1555 usb_unlink_urb(ctx.urbs[num - 4]);
1556 usb_unlink_urb(ctx.urbs[num - 2]);
1557 } else {
1558 while (--i >= 0)
1559 usb_unlink_urb(ctx.urbs[i]);
1560 }
1561
1562 if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
1563 complete(&ctx.complete);
1564 wait_for_completion(&ctx.complete);
1565 retval = ctx.status;
1566
1567 free_urbs:
1568 for (i = 0; i < num; i++)
1569 usb_free_urb(ctx.urbs[i]);
1570 kfree(ctx.urbs);
1571 free_buf:
1572 usb_free_coherent(udev, size, buf, buf_dma);
1573 return retval;
1574 }
1575
1576 /*-------------------------------------------------------------------------*/
1577
1578 static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1579 {
1580 int retval;
1581 u16 status;
1582
1583 /* shouldn't look or act halted */
1584 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1585 if (retval < 0) {
1586 ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
1587 ep, retval);
1588 return retval;
1589 }
1590 if (status != 0) {
1591 ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
1592 return -EINVAL;
1593 }
1594 retval = simple_io(tdev, urb, 1, 0, 0, __func__);
1595 if (retval != 0)
1596 return -EINVAL;
1597 return 0;
1598 }
1599
1600 static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
1601 {
1602 int retval;
1603 u16 status;
1604
1605 /* should look and act halted */
1606 retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
1607 if (retval < 0) {
1608 ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
1609 ep, retval);
1610 return retval;
1611 }
1612 if (status != 1) {
1613 ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
1614 return -EINVAL;
1615 }
1616 retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
1617 if (retval != -EPIPE)
1618 return -EINVAL;
1619 retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
1620 if (retval != -EPIPE)
1621 return -EINVAL;
1622 return 0;
1623 }
1624
1625 static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
1626 {
1627 int retval;
1628
1629 /* shouldn't look or act halted now */
1630 retval = verify_not_halted(tdev, ep, urb);
1631 if (retval < 0)
1632 return retval;
1633
1634 /* set halt (protocol test only), verify it worked */
1635 retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
1636 USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
1637 USB_ENDPOINT_HALT, ep,
1638 NULL, 0, USB_CTRL_SET_TIMEOUT);
1639 if (retval < 0) {
1640 ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
1641 return retval;
1642 }
1643 retval = verify_halted(tdev, ep, urb);
1644 if (retval < 0) {
1645 int ret;
1646
1647 /* clear halt anyways, else further tests will fail */
1648 ret = usb_clear_halt(urb->dev, urb->pipe);
1649 if (ret)
1650 ERROR(tdev, "ep %02x couldn't clear halt, %d\n",
1651 ep, ret);
1652
1653 return retval;
1654 }
1655
1656 /* clear halt (tests API + protocol), verify it worked */
1657 retval = usb_clear_halt(urb->dev, urb->pipe);
1658 if (retval < 0) {
1659 ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
1660 return retval;
1661 }
1662 retval = verify_not_halted(tdev, ep, urb);
1663 if (retval < 0)
1664 return retval;
1665
1666 /* NOTE: could also verify SET_INTERFACE clear halts ... */
1667
1668 return 0;
1669 }
1670
1671 static int halt_simple(struct usbtest_dev *dev)
1672 {
1673 int ep;
1674 int retval = 0;
1675 struct urb *urb;
1676 struct usb_device *udev = testdev_to_usbdev(dev);
1677
1678 if (udev->speed == USB_SPEED_SUPER)
1679 urb = simple_alloc_urb(udev, 0, 1024, 0);
1680 else
1681 urb = simple_alloc_urb(udev, 0, 512, 0);
1682 if (urb == NULL)
1683 return -ENOMEM;
1684
1685 if (dev->in_pipe) {
1686 ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
1687 urb->pipe = dev->in_pipe;
1688 retval = test_halt(dev, ep, urb);
1689 if (retval < 0)
1690 goto done;
1691 }
1692
1693 if (dev->out_pipe) {
1694 ep = usb_pipeendpoint(dev->out_pipe);
1695 urb->pipe = dev->out_pipe;
1696 retval = test_halt(dev, ep, urb);
1697 }
1698 done:
1699 simple_free_urb(urb);
1700 return retval;
1701 }
1702
1703 /*-------------------------------------------------------------------------*/
1704
1705 /* Control OUT tests use the vendor control requests from Intel's
1706 * USB 2.0 compliance test device: write a buffer, read it back.
1707 *
1708 * Intel's spec only _requires_ that it work for one packet, which
1709 * is pretty weak. Some HCDs place limits here; most devices will
1710 * need to be able to handle more than one OUT data packet. We'll
1711 * try whatever we're told to try.
1712 */
1713 static int ctrl_out(struct usbtest_dev *dev,
1714 unsigned count, unsigned length, unsigned vary, unsigned offset)
1715 {
1716 unsigned i, j, len;
1717 int retval;
1718 u8 *buf;
1719 char *what = "?";
1720 struct usb_device *udev;
1721
1722 if (length < 1 || length > 0xffff || vary >= length)
1723 return -EINVAL;
1724
1725 buf = kmalloc(length + offset, GFP_KERNEL);
1726 if (!buf)
1727 return -ENOMEM;
1728
1729 buf += offset;
1730 udev = testdev_to_usbdev(dev);
1731 len = length;
1732 retval = 0;
1733
1734 /* NOTE: hardware might well act differently if we pushed it
1735 * with lots back-to-back queued requests.
1736 */
1737 for (i = 0; i < count; i++) {
1738 /* write patterned data */
1739 for (j = 0; j < len; j++)
1740 buf[j] = i + j;
1741 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
1742 0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
1743 0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
1744 if (retval != len) {
1745 what = "write";
1746 if (retval >= 0) {
1747 ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
1748 retval, len);
1749 retval = -EBADMSG;
1750 }
1751 break;
1752 }
1753
1754 /* read it back -- assuming nothing intervened!! */
1755 retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
1756 0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
1757 0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
1758 if (retval != len) {
1759 what = "read";
1760 if (retval >= 0) {
1761 ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
1762 retval, len);
1763 retval = -EBADMSG;
1764 }
1765 break;
1766 }
1767
1768 /* fail if we can't verify */
1769 for (j = 0; j < len; j++) {
1770 if (buf[j] != (u8) (i + j)) {
1771 ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
1772 j, buf[j], (u8) i + j);
1773 retval = -EBADMSG;
1774 break;
1775 }
1776 }
1777 if (retval < 0) {
1778 what = "verify";
1779 break;
1780 }
1781
1782 len += vary;
1783
1784 /* [real world] the "zero bytes IN" case isn't really used.
1785 * hardware can easily trip up in this weird case, since its
1786 * status stage is IN, not OUT like other ep0in transfers.
1787 */
1788 if (len > length)
1789 len = realworld ? 1 : 0;
1790 }
1791
1792 if (retval < 0)
1793 ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
1794 what, retval, i);
1795
1796 kfree(buf - offset);
1797 return retval;
1798 }
1799
1800 /*-------------------------------------------------------------------------*/
1801
1802 /* ISO tests ... mimics common usage
1803 * - buffer length is split into N packets (mostly maxpacket sized)
1804 * - multi-buffers according to sglen
1805 */
1806
1807 struct iso_context {
1808 unsigned count;
1809 unsigned pending;
1810 spinlock_t lock;
1811 struct completion done;
1812 int submit_error;
1813 unsigned long errors;
1814 unsigned long packet_count;
1815 struct usbtest_dev *dev;
1816 };
1817
1818 static void iso_callback(struct urb *urb)
1819 {
1820 struct iso_context *ctx = urb->context;
1821
1822 spin_lock(&ctx->lock);
1823 ctx->count--;
1824
1825 ctx->packet_count += urb->number_of_packets;
1826 if (urb->error_count > 0)
1827 ctx->errors += urb->error_count;
1828 else if (urb->status != 0)
1829 ctx->errors += urb->number_of_packets;
1830 else if (urb->actual_length != urb->transfer_buffer_length)
1831 ctx->errors++;
1832 else if (check_guard_bytes(ctx->dev, urb) != 0)
1833 ctx->errors++;
1834
1835 if (urb->status == 0 && ctx->count > (ctx->pending - 1)
1836 && !ctx->submit_error) {
1837 int status = usb_submit_urb(urb, GFP_ATOMIC);
1838 switch (status) {
1839 case 0:
1840 goto done;
1841 default:
1842 dev_err(&ctx->dev->intf->dev,
1843 "iso resubmit err %d\n",
1844 status);
1845 /* FALLTHROUGH */
1846 case -ENODEV: /* disconnected */
1847 case -ESHUTDOWN: /* endpoint disabled */
1848 ctx->submit_error = 1;
1849 break;
1850 }
1851 }
1852
1853 ctx->pending--;
1854 if (ctx->pending == 0) {
1855 if (ctx->errors)
1856 dev_err(&ctx->dev->intf->dev,
1857 "iso test, %lu errors out of %lu\n",
1858 ctx->errors, ctx->packet_count);
1859 complete(&ctx->done);
1860 }
1861 done:
1862 spin_unlock(&ctx->lock);
1863 }
1864
1865 static struct urb *iso_alloc_urb(
1866 struct usb_device *udev,
1867 int pipe,
1868 struct usb_endpoint_descriptor *desc,
1869 long bytes,
1870 unsigned offset
1871 )
1872 {
1873 struct urb *urb;
1874 unsigned i, maxp, packets;
1875
1876 if (bytes < 0 || !desc)
1877 return NULL;
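/* wMaxPacketSize bits 10:0 give the base packet size; for high
 * bandwidth endpoints bits 12:11 add 1 or 2 extra transactions per
 * microframe, so maxp below is the per-interval payload used to
 * split 'bytes' into ISO packets
 */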
1878 maxp = 0x7ff & usb_endpoint_maxp(desc);
1879 maxp *= 1 + (0x3 & (usb_endpoint_maxp(desc) >> 11));
1880 packets = DIV_ROUND_UP(bytes, maxp);
1881
1882 urb = usb_alloc_urb(packets, GFP_KERNEL);
1883 if (!urb)
1884 return urb;
1885 urb->dev = udev;
1886 urb->pipe = pipe;
1887
1888 urb->number_of_packets = packets;
1889 urb->transfer_buffer_length = bytes;
1890 urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
1891 GFP_KERNEL,
1892 &urb->transfer_dma);
1893 if (!urb->transfer_buffer) {
1894 usb_free_urb(urb);
1895 return NULL;
1896 }
1897 if (offset) {
1898 memset(urb->transfer_buffer, GUARD_BYTE, offset);
1899 urb->transfer_buffer += offset;
1900 urb->transfer_dma += offset;
1901 }
1902 /* For inbound transfers use guard byte so that test fails if
1903 data not correctly copied */
1904 memset(urb->transfer_buffer,
1905 usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
1906 bytes);
1907
1908 for (i = 0; i < packets; i++) {
1909 /* here, only the last packet will be short */
1910 urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
1911 bytes -= urb->iso_frame_desc[i].length;
1912
1913 urb->iso_frame_desc[i].offset = maxp * i;
1914 }
1915
1916 urb->complete = iso_callback;
1917 /* urb->context = SET BY CALLER */
1918 urb->interval = 1 << (desc->bInterval - 1);
1919 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1920 return urb;
1921 }
1922
1923 static int
1924 test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
1925 int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
1926 {
1927 struct iso_context context;
1928 struct usb_device *udev;
1929 unsigned i;
1930 unsigned long packets = 0;
1931 int status = 0;
1932 struct urb *urbs[10]; /* FIXME no limit */
1933
1934 if (param->sglen > 10)
1935 return -EDOM;
1936
1937 memset(&context, 0, sizeof(context));
1938 context.count = param->iterations * param->sglen;
1939 context.dev = dev;
1940 init_completion(&context.done);
1941 spin_lock_init(&context.lock);
1942
1943 memset(urbs, 0, sizeof(urbs));
1944 udev = testdev_to_usbdev(dev);
1945 dev_info(&dev->intf->dev,
1946 "... iso period %d %sframes, wMaxPacket %04x\n",
1947 1 << (desc->bInterval - 1),
1948 (udev->speed == USB_SPEED_HIGH) ? "micro" : "",
1949 usb_endpoint_maxp(desc));
1950
1951 for (i = 0; i < param->sglen; i++) {
1952 urbs[i] = iso_alloc_urb(udev, pipe, desc,
1953 param->length, offset);
1954 if (!urbs[i]) {
1955 status = -ENOMEM;
1956 goto fail;
1957 }
1958 packets += urbs[i]->number_of_packets;
1959 urbs[i]->context = &context;
1960 }
1961 packets *= param->iterations;
1962 dev_info(&dev->intf->dev,
1963 "... total %lu msec (%lu packets)\n",
1964 (packets * (1 << (desc->bInterval - 1)))
1965 / ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
1966 packets);
1967
1968 spin_lock_irq(&context.lock);
1969 for (i = 0; i < param->sglen; i++) {
1970 ++context.pending;
1971 status = usb_submit_urb(urbs[i], GFP_ATOMIC);
1972 if (status < 0) {
1973 ERROR(dev, "submit iso[%d], error %d\n", i, status);
1974 if (i == 0) {
1975 spin_unlock_irq(&context.lock);
1976 goto fail;
1977 }
1978
1979 simple_free_urb(urbs[i]);
1980 urbs[i] = NULL;
1981 context.pending--;
1982 context.submit_error = 1;
1983 break;
1984 }
1985 }
1986 spin_unlock_irq(&context.lock);
1987
1988 wait_for_completion(&context.done);
1989
1990 for (i = 0; i < param->sglen; i++) {
1991 if (urbs[i])
1992 simple_free_urb(urbs[i]);
1993 }
1994 /*
1995 * Isochronous transfers are expected to fail sometimes. As an
1996 * arbitrary limit, we will report an error if any submissions
1997 * fail or if the transfer failure rate is > 10%.
1998 */
1999 if (status != 0)
2000 ;
2001 else if (context.submit_error)
2002 status = -EACCES;
2003 else if (context.errors > context.packet_count / 10)
2004 status = -EIO;
2005 return status;
2006
2007 fail:
2008 for (i = 0; i < param->sglen; i++) {
2009 if (urbs[i])
2010 simple_free_urb(urbs[i]);
2011 }
2012 return status;
2013 }
2014
2015 static int test_unaligned_bulk(
2016 struct usbtest_dev *tdev,
2017 int pipe,
2018 unsigned length,
2019 int iterations,
2020 unsigned transfer_flags,
2021 const char *label)
2022 {
2023 int retval;
2024 struct urb *urb = usbtest_alloc_urb(
2025 testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1, 0);
2026
2027 if (!urb)
2028 return -ENOMEM;
2029
2030 retval = simple_io(tdev, urb, iterations, 0, 0, label);
2031 simple_free_urb(urb);
2032 return retval;
2033 }
2034
2035 /*-------------------------------------------------------------------------*/
2036
2037 /* We only have this one interface to user space, through usbfs.
2038 * User mode code can scan usbfs to find N different devices (maybe on
2039 * different busses) to use when testing, and allocate one thread per
2040 * test. So discovery is simplified, and we have no device naming issues.
2041 *
2042 * Don't use these only as stress/load tests. Use them along with
2043 * other USB bus activity: plugging, unplugging, mousing, mp3 playback,
2044 * video capture, and so on. Run different tests at different times, in
2045 * different sequences. Nothing here should interact with other devices,
2046 * except indirectly by consuming USB bandwidth and CPU resources for test
2047 * threads and request completion. But the only way to know that for sure
2048 * is to test when HC queues are in use by many devices.
2049 *
2050 * WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
2051 * it locks out usbcore in certain code paths. Notably, if you disconnect
2052 * the device-under-test, hub_wq will block forever waiting for the
2053 * ioctl to complete ... so that usb_disconnect() can abort the pending
2054 * urbs and then call usbtest_disconnect(). To abort a test, you're best
2055 * off just killing the userspace task and waiting for it to exit.
2056 */
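/*
 * Illustrative user-space sketch (not part of this driver): driver ioctls
 * are reached through usbfs by wrapping them in USBDEVFS_IOCTL, after
 * duplicating USBTEST_REQUEST and struct usbtest_param in the user program.
 * The device path and interface number below are examples only; in the
 * kernel tree the testusb utility (tools/usb/testusb.c) does this for real.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/usbdevice_fs.h>
 *
 *	struct usbtest_param param = {
 *		.test_num   = 1,	// simple non-queued bulk writes
 *		.iterations = 1000,
 *		.length     = 512,
 *	};
 *	struct usbdevfs_ioctl wrap = {
 *		.ifno	    = 0,	// interface bound to usbtest (example)
 *		.ioctl_code = USBTEST_REQUEST,
 *		.data	    = &param,
 *	};
 *	int fd = open("/dev/bus/usb/001/002", O_RDWR);	// example path
 *	if (fd >= 0 && ioctl(fd, USBDEVFS_IOCTL, &wrap) == 0)
 *		printf("took %ld.%06ld s\n",
 *			(long) param.duration.tv_sec,
 *			(long) param.duration.tv_usec);
 */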
2057
2058 static int
2059 usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
2060 {
2061 struct usbtest_dev *dev = usb_get_intfdata(intf);
2062 struct usb_device *udev = testdev_to_usbdev(dev);
2063 struct usbtest_param *param = buf;
2064 int retval = -EOPNOTSUPP;
2065 struct urb *urb;
2066 struct scatterlist *sg;
2067 struct usb_sg_request req;
2068 struct timeval start;
2069 unsigned i;
2070
2071 /* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
2072
2073 pattern = mod_pattern;
2074
2075 if (code != USBTEST_REQUEST)
2076 return -EOPNOTSUPP;
2077
2078 if (param->iterations <= 0)
2079 return -EINVAL;
2080
2081 if (mutex_lock_interruptible(&dev->lock))
2082 return -ERESTARTSYS;
2083
2084 /* FIXME: What if a system sleep starts while a test is running? */
2085
2086 /* some devices, like ez-usb default devices, need a non-default
2087 * altsetting to have any active endpoints. some tests change
2088 * altsettings; force a default so most tests don't need to check.
2089 */
2090 if (dev->info->alt >= 0) {
2091 int res;
2092
2093 if (intf->altsetting->desc.bInterfaceNumber) {
2094 mutex_unlock(&dev->lock);
2095 return -ENODEV;
2096 }
2097 res = set_altsetting(dev, dev->info->alt);
2098 if (res) {
2099 dev_err(&intf->dev,
2100 "set altsetting to %d failed, %d\n",
2101 dev->info->alt, res);
2102 mutex_unlock(&dev->lock);
2103 return res;
2104 }
2105 }
2106
2107 /*
2108 * Just a bunch of test cases that every HCD is expected to handle.
2109 *
2110 * Some may need specific firmware, though it'd be good to have
2111 * one firmware image to handle all the test cases.
2112 *
2113 * FIXME add more tests! cancel requests, verify the data, control
2114 * queueing, concurrent read+write threads, and so on.
2115 */
2116 do_gettimeofday(&start);
2117 switch (param->test_num) {
2118
2119 case 0:
2120 dev_info(&intf->dev, "TEST 0: NOP\n");
2121 retval = 0;
2122 break;
2123
2124 /* Simple non-queued bulk I/O tests */
2125 case 1:
2126 if (dev->out_pipe == 0)
2127 break;
2128 dev_info(&intf->dev,
2129 "TEST 1: write %d bytes %u times\n",
2130 param->length, param->iterations);
2131 urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
2132 if (!urb) {
2133 retval = -ENOMEM;
2134 break;
2135 }
2136 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2137 retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
2138 simple_free_urb(urb);
2139 break;
2140 case 2:
2141 if (dev->in_pipe == 0)
2142 break;
2143 dev_info(&intf->dev,
2144 "TEST 2: read %d bytes %u times\n",
2145 param->length, param->iterations);
2146 urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
2147 if (!urb) {
2148 retval = -ENOMEM;
2149 break;
2150 }
2151 /* FIRMWARE: bulk source (maybe generates short writes) */
2152 retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
2153 simple_free_urb(urb);
2154 break;
2155 case 3:
2156 if (dev->out_pipe == 0 || param->vary == 0)
2157 break;
2158 dev_info(&intf->dev,
2159 "TEST 3: write/%d 0..%d bytes %u times\n",
2160 param->vary, param->length, param->iterations);
2161 urb = simple_alloc_urb(udev, dev->out_pipe, param->length, 0);
2162 if (!urb) {
2163 retval = -ENOMEM;
2164 break;
2165 }
2166 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2167 retval = simple_io(dev, urb, param->iterations, param->vary,
2168 0, "test3");
2169 simple_free_urb(urb);
2170 break;
2171 case 4:
2172 if (dev->in_pipe == 0 || param->vary == 0)
2173 break;
2174 dev_info(&intf->dev,
2175 "TEST 4: read/%d 0..%d bytes %u times\n",
2176 param->vary, param->length, param->iterations);
2177 urb = simple_alloc_urb(udev, dev->in_pipe, param->length, 0);
2178 if (!urb) {
2179 retval = -ENOMEM;
2180 break;
2181 }
2182 /* FIRMWARE: bulk source (maybe generates short writes) */
2183 retval = simple_io(dev, urb, param->iterations, param->vary,
2184 0, "test4");
2185 simple_free_urb(urb);
2186 break;
2187
2188 /* Queued bulk I/O tests */
2189 case 5:
2190 if (dev->out_pipe == 0 || param->sglen == 0)
2191 break;
2192 dev_info(&intf->dev,
2193 "TEST 5: write %d sglists %d entries of %d bytes\n",
2194 param->iterations,
2195 param->sglen, param->length);
2196 sg = alloc_sglist(param->sglen, param->length,
2197 0, dev, dev->out_pipe);
2198 if (!sg) {
2199 retval = -ENOMEM;
2200 break;
2201 }
2202 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2203 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2204 &req, sg, param->sglen);
2205 free_sglist(sg, param->sglen);
2206 break;
2207
2208 case 6:
2209 if (dev->in_pipe == 0 || param->sglen == 0)
2210 break;
2211 dev_info(&intf->dev,
2212 "TEST 6: read %d sglists %d entries of %d bytes\n",
2213 param->iterations,
2214 param->sglen, param->length);
2215 sg = alloc_sglist(param->sglen, param->length,
2216 0, dev, dev->in_pipe);
2217 if (!sg) {
2218 retval = -ENOMEM;
2219 break;
2220 }
2221 /* FIRMWARE: bulk source (maybe generates short writes) */
2222 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2223 &req, sg, param->sglen);
2224 free_sglist(sg, param->sglen);
2225 break;
2226 case 7:
2227 if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
2228 break;
2229 dev_info(&intf->dev,
2230 "TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
2231 param->vary, param->iterations,
2232 param->sglen, param->length);
2233 sg = alloc_sglist(param->sglen, param->length,
2234 param->vary, dev, dev->out_pipe);
2235 if (!sg) {
2236 retval = -ENOMEM;
2237 break;
2238 }
2239 /* FIRMWARE: bulk sink (maybe accepts short writes) */
2240 retval = perform_sglist(dev, param->iterations, dev->out_pipe,
2241 &req, sg, param->sglen);
2242 free_sglist(sg, param->sglen);
2243 break;
2244 case 8:
2245 if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
2246 break;
2247 dev_info(&intf->dev,
2248 "TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
2249 param->vary, param->iterations,
2250 param->sglen, param->length);
2251 sg = alloc_sglist(param->sglen, param->length,
2252 param->vary, dev, dev->in_pipe);
2253 if (!sg) {
2254 retval = -ENOMEM;
2255 break;
2256 }
2257 /* FIRMWARE: bulk source (maybe generates short writes) */
2258 retval = perform_sglist(dev, param->iterations, dev->in_pipe,
2259 &req, sg, param->sglen);
2260 free_sglist(sg, param->sglen);
2261 break;
2262
2263 /* non-queued sanity tests for control (chapter 9 subset) */
2264 case 9:
2265 retval = 0;
2266 dev_info(&intf->dev,
2267 "TEST 9: ch9 (subset) control tests, %d times\n",
2268 param->iterations);
2269 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2270 retval = ch9_postconfig(dev);
2271 if (retval)
2272 dev_err(&intf->dev, "ch9 subset failed, "
2273 "iterations left %d\n", i);
2274 break;
2275
2276 /* queued control messaging */
2277 case 10:
2278 retval = 0;
2279 dev_info(&intf->dev,
2280 "TEST 10: queue %d control calls, %d times\n",
2281 param->sglen,
2282 param->iterations);
2283 retval = test_ctrl_queue(dev, param);
2284 break;
2285
2286 /* simple non-queued unlinks (ring with one urb) */
2287 case 11:
2288 if (dev->in_pipe == 0 || !param->length)
2289 break;
2290 retval = 0;
2291 dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
2292 param->iterations, param->length);
2293 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2294 retval = unlink_simple(dev, dev->in_pipe,
2295 param->length);
2296 if (retval)
2297 dev_err(&intf->dev, "unlink reads failed %d, "
2298 "iterations left %d\n", retval, i);
2299 break;
2300 case 12:
2301 if (dev->out_pipe == 0 || !param->length)
2302 break;
2303 retval = 0;
2304 dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
2305 param->iterations, param->length);
2306 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2307 retval = unlink_simple(dev, dev->out_pipe,
2308 param->length);
2309 if (retval)
2310 dev_err(&intf->dev, "unlink writes failed %d, "
2311 "iterations left %d\n", retval, i);
2312 break;
2313
2314 /* ep halt tests */
2315 case 13:
2316 if (dev->out_pipe == 0 && dev->in_pipe == 0)
2317 break;
2318 retval = 0;
2319 dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
2320 param->iterations);
2321 for (i = param->iterations; retval == 0 && i--; /* NOP */)
2322 retval = halt_simple(dev);
2323
2324 if (retval)
2325 ERROR(dev, "halts failed, iterations left %d\n", i);
2326 break;
2327
2328 /* control write tests */
2329 case 14:
2330 if (!dev->info->ctrl_out)
2331 break;
2332 dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
2333 param->iterations,
2334 realworld ? 1 : 0, param->length,
2335 param->vary);
2336 retval = ctrl_out(dev, param->iterations,
2337 param->length, param->vary, 0);
2338 break;
2339
2340 /* iso write tests */
2341 case 15:
2342 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2343 break;
2344 dev_info(&intf->dev,
2345 "TEST 15: write %d iso, %d entries of %d bytes\n",
2346 param->iterations,
2347 param->sglen, param->length);
2348 /* FIRMWARE: iso sink */
2349 retval = test_iso_queue(dev, param,
2350 dev->out_iso_pipe, dev->iso_out, 0);
2351 break;
2352
2353 /* iso read tests */
2354 case 16:
2355 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2356 break;
2357 dev_info(&intf->dev,
2358 "TEST 16: read %d iso, %d entries of %d bytes\n",
2359 param->iterations,
2360 param->sglen, param->length);
2361 /* FIRMWARE: iso source */
2362 retval = test_iso_queue(dev, param,
2363 dev->in_iso_pipe, dev->iso_in, 0);
2364 break;
2365
2366 /* FIXME scatterlist cancel (needs helper thread) */
2367
2368 /* Tests for bulk I/O using DMA mapping by core and odd address */
2369 case 17:
2370 if (dev->out_pipe == 0)
2371 break;
2372 dev_info(&intf->dev,
2373 "TEST 17: write odd addr %d bytes %u times core map\n",
2374 param->length, param->iterations);
2375
2376 retval = test_unaligned_bulk(
2377 dev, dev->out_pipe,
2378 param->length, param->iterations,
2379 0, "test17");
2380 break;
2381
2382 case 18:
2383 if (dev->in_pipe == 0)
2384 break;
2385 dev_info(&intf->dev,
2386 "TEST 18: read odd addr %d bytes %u times core map\n",
2387 param->length, param->iterations);
2388
2389 retval = test_unaligned_bulk(
2390 dev, dev->in_pipe,
2391 param->length, param->iterations,
2392 0, "test18");
2393 break;
2394
2395 /* Tests for bulk I/O using premapped coherent buffer and odd address */
2396 case 19:
2397 if (dev->out_pipe == 0)
2398 break;
2399 dev_info(&intf->dev,
2400 "TEST 19: write odd addr %d bytes %u times premapped\n",
2401 param->length, param->iterations);
2402
2403 retval = test_unaligned_bulk(
2404 dev, dev->out_pipe,
2405 param->length, param->iterations,
2406 URB_NO_TRANSFER_DMA_MAP, "test19");
2407 break;
2408
2409 case 20:
2410 if (dev->in_pipe == 0)
2411 break;
2412 dev_info(&intf->dev,
2413 "TEST 20: read odd addr %d bytes %u times premapped\n",
2414 param->length, param->iterations);
2415
2416 retval = test_unaligned_bulk(
2417 dev, dev->in_pipe,
2418 param->length, param->iterations,
2419 URB_NO_TRANSFER_DMA_MAP, "test20");
2420 break;
2421
2422 /* control write tests with unaligned buffer */
2423 case 21:
2424 if (!dev->info->ctrl_out)
2425 break;
2426 dev_info(&intf->dev,
2427 "TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
2428 param->iterations,
2429 realworld ? 1 : 0, param->length,
2430 param->vary);
2431 retval = ctrl_out(dev, param->iterations,
2432 param->length, param->vary, 1);
2433 break;
2434
2435 /* unaligned iso tests */
2436 case 22:
2437 if (dev->out_iso_pipe == 0 || param->sglen == 0)
2438 break;
2439 dev_info(&intf->dev,
2440 "TEST 22: write %d iso odd, %d entries of %d bytes\n",
2441 param->iterations,
2442 param->sglen, param->length);
2443 retval = test_iso_queue(dev, param,
2444 dev->out_iso_pipe, dev->iso_out, 1);
2445 break;
2446
2447 case 23:
2448 if (dev->in_iso_pipe == 0 || param->sglen == 0)
2449 break;
2450 dev_info(&intf->dev,
2451 "TEST 23: read %d iso odd, %d entries of %d bytes\n",
2452 param->iterations,
2453 param->sglen, param->length);
2454 retval = test_iso_queue(dev, param,
2455 dev->in_iso_pipe, dev->iso_in, 1);
2456 break;
2457
2458 /* unlink URBs from a bulk-OUT queue */
2459 case 24:
2460 if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
2461 break;
2462 retval = 0;
2463 dev_info(&intf->dev, "TEST 24: unlink from %d queues of "
2464 "%d %d-byte writes\n",
2465 param->iterations, param->sglen, param->length);
2466 for (i = param->iterations; retval == 0 && i > 0; --i) {
2467 retval = unlink_queued(dev, dev->out_pipe,
2468 param->sglen, param->length);
2469 if (retval) {
2470 dev_err(&intf->dev,
2471 "unlink queued writes failed %d, "
2472 "iterations left %d\n", retval, i);
2473 break;
2474 }
2475 }
2476 break;
2477
2478 /* Simple non-queued interrupt I/O tests */
2479 case 25:
2480 if (dev->out_int_pipe == 0)
2481 break;
2482 dev_info(&intf->dev,
2483 "TEST 25: write %d bytes %u times\n",
2484 param->length, param->iterations);
2485 urb = simple_alloc_urb(udev, dev->out_int_pipe, param->length,
2486 dev->int_out->bInterval);
2487 if (!urb) {
2488 retval = -ENOMEM;
2489 break;
2490 }
2491 /* FIRMWARE: interrupt sink (maybe accepts short writes) */
2492 retval = simple_io(dev, urb, param->iterations, 0, 0, "test25");
2493 simple_free_urb(urb);
2494 break;
2495 case 26:
2496 if (dev->in_int_pipe == 0)
2497 break;
2498 dev_info(&intf->dev,
2499 "TEST 26: read %d bytes %u times\n",
2500 param->length, param->iterations);
2501 urb = simple_alloc_urb(udev, dev->in_int_pipe, param->length,
2502 dev->int_in->bInterval);
2503 if (!urb) {
2504 retval = -ENOMEM;
2505 break;
2506 }
2507 /* FIRMWARE: interrupt source (maybe generates short writes) */
2508 retval = simple_io(dev, urb, param->iterations, 0, 0, "test26");
2509 simple_free_urb(urb);
2510 break;
2511 }
2512 do_gettimeofday(&param->duration);
2513 param->duration.tv_sec -= start.tv_sec;
2514 param->duration.tv_usec -= start.tv_usec;
2515 if (param->duration.tv_usec < 0) {
2516 param->duration.tv_usec += 1000 * 1000;
2517 param->duration.tv_sec -= 1;
2518 }
2519 mutex_unlock(&dev->lock);
2520 return retval;
2521 }
2522
2523 /*-------------------------------------------------------------------------*/
2524
2525 static unsigned force_interrupt;
2526 module_param(force_interrupt, uint, 0);
2527 MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
2528
2529 #ifdef GENERIC
2530 static unsigned short vendor;
2531 module_param(vendor, ushort, 0);
2532 MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
2533
2534 static unsigned short product;
2535 module_param(product, ushort, 0);
2536 MODULE_PARM_DESC(product, "product code (from vendor)");
2537 #endif
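/*
 * Illustrative only: together with the GENERIC entry in the id_table below,
 * an otherwise-unclaimed device can be grabbed for control-transfer tests by
 * naming it at load time, e.g.
 *
 *	modprobe usbtest vendor=0xabcd product=0x1234
 *
 * where 0xabcd/0x1234 stand in for a real vendor/product pair.
 */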
2538
2539 static int
2540 usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
2541 {
2542 struct usb_device *udev;
2543 struct usbtest_dev *dev;
2544 struct usbtest_info *info;
2545 char *rtest, *wtest;
2546 char *irtest, *iwtest;
2547 char *intrtest, *intwtest;
2548
2549 udev = interface_to_usbdev(intf);
2550
2551 #ifdef GENERIC
2552 /* specify devices by module parameters? */
2553 if (id->match_flags == 0) {
2554 /* vendor match required, product match optional */
2555 if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
2556 return -ENODEV;
2557 if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
2558 return -ENODEV;
2559 dev_info(&intf->dev, "matched module params, "
2560 "vend=0x%04x prod=0x%04x\n",
2561 le16_to_cpu(udev->descriptor.idVendor),
2562 le16_to_cpu(udev->descriptor.idProduct));
2563 }
2564 #endif
2565
2566 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2567 if (!dev)
2568 return -ENOMEM;
2569 info = (struct usbtest_info *) id->driver_info;
2570 dev->info = info;
2571 mutex_init(&dev->lock);
2572
2573 dev->intf = intf;
2574
2575 /* cacheline-aligned scratch for i/o */
2576 dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
2577 if (dev->buf == NULL) {
2578 kfree(dev);
2579 return -ENOMEM;
2580 }
2581
2582 /* NOTE this doesn't yet test the handful of differences that are
2583 * visible with high speed interrupts: bigger maxpacket (1K) and
2584 * "high bandwidth" modes (up to 3 packets/uframe).
2585 */
2586 rtest = wtest = "";
2587 irtest = iwtest = "";
2588 intrtest = intwtest = "";
2589 if (force_interrupt || udev->speed == USB_SPEED_LOW) {
2590 if (info->ep_in) {
2591 dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
2592 rtest = " intr-in";
2593 }
2594 if (info->ep_out) {
2595 dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
2596 wtest = " intr-out";
2597 }
2598 } else {
2599 if (override_alt >= 0 || info->autoconf) {
2600 int status;
2601
2602 status = get_endpoints(dev, intf);
2603 if (status < 0) {
2604 WARNING(dev, "couldn't get endpoints, %d\n",
2605 status);
2606 kfree(dev->buf);
2607 kfree(dev);
2608 return status;
2609 }
2610 /* may find bulk or ISO pipes */
2611 } else {
2612 if (info->ep_in)
2613 dev->in_pipe = usb_rcvbulkpipe(udev,
2614 info->ep_in);
2615 if (info->ep_out)
2616 dev->out_pipe = usb_sndbulkpipe(udev,
2617 info->ep_out);
2618 }
2619 if (dev->in_pipe)
2620 rtest = " bulk-in";
2621 if (dev->out_pipe)
2622 wtest = " bulk-out";
2623 if (dev->in_iso_pipe)
2624 irtest = " iso-in";
2625 if (dev->out_iso_pipe)
2626 iwtest = " iso-out";
2627 if (dev->in_int_pipe)
2628 intrtest = " int-in";
2629 if (dev->out_int_pipe)
2630 intwtest = " int-out";
2631 }
2632
2633 usb_set_intfdata(intf, dev);
2634 dev_info(&intf->dev, "%s\n", info->name);
2635 dev_info(&intf->dev, "%s {control%s%s%s%s%s%s%s} tests%s\n",
2636 usb_speed_string(udev->speed),
2637 info->ctrl_out ? " in/out" : "",
2638 rtest, wtest,
2639 irtest, iwtest,
2640 intrtest, intwtest,
2641 info->alt >= 0 ? " (+alt)" : "");
2642 return 0;
2643 }
2644
2645 static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
2646 {
2647 return 0;
2648 }
2649
2650 static int usbtest_resume(struct usb_interface *intf)
2651 {
2652 return 0;
2653 }
2654
2655
2656 static void usbtest_disconnect(struct usb_interface *intf)
2657 {
2658 struct usbtest_dev *dev = usb_get_intfdata(intf);
2659
2660 usb_set_intfdata(intf, NULL);
2661 dev_dbg(&intf->dev, "disconnect\n");
2662 kfree(dev);
2663 }
2664
2665 /* Basic testing only needs a device that can source or sink bulk traffic.
2666 * Any device can test control transfers (default with GENERIC binding).
2667 *
2668 * Several entries work with the default EP0 implementation that's built
2669 * into EZ-USB chips. There's a default vendor ID which can be overridden
2670 * by (very) small config EEPROMS, but otherwise all these devices act
2671 * identically until firmware is loaded: only EP0 works. It turns out
2672 * to be easy to make other endpoints work, without modifying that EP0
2673 * behavior. For now, we expect that kind of firmware.
2674 */
2675
2676 /* an21xx or fx versions of ez-usb */
2677 static struct usbtest_info ez1_info = {
2678 .name = "EZ-USB device",
2679 .ep_in = 2,
2680 .ep_out = 2,
2681 .alt = 1,
2682 };
2683
2684 /* fx2 version of ez-usb */
2685 static struct usbtest_info ez2_info = {
2686 .name = "FX2 device",
2687 .ep_in = 6,
2688 .ep_out = 2,
2689 .alt = 1,
2690 };
2691
2692 /* ezusb family device with dedicated usb test firmware */
2694 static struct usbtest_info fw_info = {
2695 .name = "usb test device",
2696 .ep_in = 2,
2697 .ep_out = 2,
2698 .alt = 1,
2699 .autoconf = 1, /* iso and ctrl_out need autoconf */
2700 .ctrl_out = 1,
2701 .iso = 1, /* iso_ep's are #8 in/out */
2702 };
2703
2704 /* peripheral running Linux and 'zero.c' test firmware, or
2705 * its user-mode cousin. different versions of this use
2706 * different hardware with the same vendor/product codes.
2707 * host side MUST rely on the endpoint descriptors.
2708 */
2709 static struct usbtest_info gz_info = {
2710 .name = "Linux gadget zero",
2711 .autoconf = 1,
2712 .ctrl_out = 1,
2713 .iso = 1,
2714 .intr = 1,
2715 .alt = 0,
2716 };
2717
2718 static struct usbtest_info um_info = {
2719 .name = "Linux user mode test driver",
2720 .autoconf = 1,
2721 .alt = -1,
2722 };
2723
2724 static struct usbtest_info um2_info = {
2725 .name = "Linux user mode ISO test driver",
2726 .autoconf = 1,
2727 .iso = 1,
2728 .alt = -1,
2729 };
2730
2731 #ifdef IBOT2
2732 /* this is a nice source of high speed bulk data;
2733 * uses an FX2, with firmware provided in the device
2734 */
2735 static struct usbtest_info ibot2_info = {
2736 .name = "iBOT2 webcam",
2737 .ep_in = 2,
2738 .alt = -1,
2739 };
2740 #endif
2741
2742 #ifdef GENERIC
2743 /* we can use any device to test control traffic */
2744 static struct usbtest_info generic_info = {
2745 .name = "Generic USB device",
2746 .alt = -1,
2747 };
2748 #endif
2749
2750
2751 static const struct usb_device_id id_table[] = {
2752
2753 /*-------------------------------------------------------------*/
2754
2755 /* EZ-USB devices which download firmware to replace (or in our
2756 * case augment) the default device implementation.
2757 */
2758
2759 /* generic EZ-USB FX controller */
2760 { USB_DEVICE(0x0547, 0x2235),
2761 .driver_info = (unsigned long) &ez1_info,
2762 },
2763
2764 /* CY3671 development board with EZ-USB FX */
2765 { USB_DEVICE(0x0547, 0x0080),
2766 .driver_info = (unsigned long) &ez1_info,
2767 },
2768
2769 /* generic EZ-USB FX2 controller (or development board) */
2770 { USB_DEVICE(0x04b4, 0x8613),
2771 .driver_info = (unsigned long) &ez2_info,
2772 },
2773
2774 /* re-enumerated usb test device firmware */
2775 { USB_DEVICE(0xfff0, 0xfff0),
2776 .driver_info = (unsigned long) &fw_info,
2777 },
2778
2779 /* "Gadget Zero" firmware runs under Linux */
2780 { USB_DEVICE(0x0525, 0xa4a0),
2781 .driver_info = (unsigned long) &gz_info,
2782 },
2783
2784 /* so does a user-mode variant */
2785 { USB_DEVICE(0x0525, 0xa4a4),
2786 .driver_info = (unsigned long) &um_info,
2787 },
2788
2789 /* ... and a user-mode variant that talks iso */
2790 { USB_DEVICE(0x0525, 0xa4a3),
2791 .driver_info = (unsigned long) &um2_info,
2792 },
2793
2794 #ifdef KEYSPAN_19Qi
2795 /* Keyspan 19qi uses an21xx (original EZ-USB) */
2796 /* this does not coexist with the real Keyspan 19qi driver! */
2797 { USB_DEVICE(0x06cd, 0x010b),
2798 .driver_info = (unsigned long) &ez1_info,
2799 },
2800 #endif
2801
2802 /*-------------------------------------------------------------*/
2803
2804 #ifdef IBOT2
2805 /* iBOT2 makes a nice source of high speed bulk-in data */
2806 /* this does not coexist with a real iBOT2 driver! */
2807 { USB_DEVICE(0x0b62, 0x0059),
2808 .driver_info = (unsigned long) &ibot2_info,
2809 },
2810 #endif
2811
2812 /*-------------------------------------------------------------*/
2813
2814 #ifdef GENERIC
2815 /* module params can specify devices to use for control tests */
2816 { .driver_info = (unsigned long) &generic_info, },
2817 #endif
2818
2819 /*-------------------------------------------------------------*/
2820
2821 { }
2822 };
2823 MODULE_DEVICE_TABLE(usb, id_table);
2824
2825 static struct usb_driver usbtest_driver = {
2826 .name = "usbtest",
2827 .id_table = id_table,
2828 .probe = usbtest_probe,
2829 .unlocked_ioctl = usbtest_ioctl,
2830 .disconnect = usbtest_disconnect,
2831 .suspend = usbtest_suspend,
2832 .resume = usbtest_resume,
2833 };
2834
2835 /*-------------------------------------------------------------------------*/
2836
2837 static int __init usbtest_init(void)
2838 {
2839 #ifdef GENERIC
2840 if (vendor)
2841 pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
2842 #endif
2843 return usb_register(&usbtest_driver);
2844 }
2845 module_init(usbtest_init);
2846
2847 static void __exit usbtest_exit(void)
2848 {
2849 usb_deregister(&usbtest_driver);
2850 }
2851 module_exit(usbtest_exit);
2852
2853 MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
2854 MODULE_LICENSE("GPL");
2855
2856