1 /*
2 * USB Network driver infrastructure
3 * Copyright (C) 2000-2005 by David Brownell
4 * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21 /*
22 * This is a generic "USB networking" framework that works with several
23 * kinds of full and high speed networking devices: host-to-host cables,
24 * smart usb peripherals, and actual Ethernet adapters.
25 *
26 * These devices usually differ in terms of control protocols (if they
27 * even have one!) and sometimes they define new framing to wrap or batch
28 * Ethernet packets. Otherwise, they talk to USB pretty much the same,
29 * so interface (un)binding, endpoint I/O queues, fault handling, and other
30 * issues can usefully be addressed by this framework.
31 */
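/*
 * For orientation, a minimal minidriver built on this framework looks
 * roughly like the sketch below (a hypothetical "example" device; the
 * vendor/product IDs and all "example_*" names are made up purely for
 * illustration).  It fills in a driver_info, points a usb_device_id
 * table at it, and reuses the generic usbnet entry points.
 */
#if 0	/* illustrative sketch only, not built */
static const struct driver_info example_info = {
	.description	= "Example USB Ethernet adapter",
	.flags		= FLAG_ETHER,
	/* optional hooks: .bind, .unbind, .status, .rx_fixup, .tx_fixup, ... */
};

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE(0x1234, 0x5678),		/* hypothetical VID/PID */
	  .driver_info = (unsigned long) &example_info, },
	{ },					/* END */
};
MODULE_DEVICE_TABLE(usb, example_ids);

static struct usb_driver example_driver = {
	.name		= "example_usbnet",
	.id_table	= example_ids,
	.probe		= usbnet_probe,
	.disconnect	= usbnet_disconnect,
	.suspend	= usbnet_suspend,
	.resume		= usbnet_resume,
};
/* registered with usb_register() from the minidriver's module init hook */
#endif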
32
33 // #define DEBUG // error path messages, extra info
34 // #define VERBOSE // more; success messages
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/ctype.h>
41 #include <linux/ethtool.h>
42 #include <linux/workqueue.h>
43 #include <linux/mii.h>
44 #include <linux/usb.h>
45 #include <linux/usb/usbnet.h>
46 #include <linux/slab.h>
47 #include <linux/kernel.h>
48 #include <linux/pm_runtime.h>
49
50 #define DRIVER_VERSION "22-Aug-2005"
51
52
53 /*-------------------------------------------------------------------------*/
54
55 /*
56 * At most nineteen USB 1.1 max size bulk transactions fit in each (1 msec) frame.
57 * Several dozen bytes of IPv4 data can fit in two such transactions.
58 * One maximum size Ethernet packet takes twenty four of them.
59 * For high speed, each frame comfortably fits almost 36 max size
60 * Ethernet packets (so queues should be bigger).
61 *
62 * REVISIT qlens should be members of 'struct usbnet'; the goal is to
63 * let the USB host controller be busy for 5msec or more before an irq
64 * is required, under load. Jumbograms change the equation.
65 */
66 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
67 #define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
68 (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
69 #define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
70 (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
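/*
 * Worked example (numbers are just the defaults above, not normative):
 * at high speed with rx_urb_size == hard_mtu == 1518, RX_QLEN() is
 * 60*1518/1518 = 60 urbs, while a jumbogram-sized rx_urb_size of 9000
 * gives only 10.  Non-highspeed devices always get a fixed queue of 4.
 */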
71
72 // reawaken network queue this soon after stopping; else watchdog barks
73 #define TX_TIMEOUT_JIFFIES (5*HZ)
74
75 // throttle rx/tx briefly after some faults, so khubd might disconnect()
76 // us (it polls at HZ/4 usually) before we report too many false errors.
77 #define THROTTLE_JIFFIES (HZ/8)
78
79 // between wakeups
80 #define UNLINK_TIMEOUT_MS 3
81
82 /*-------------------------------------------------------------------------*/
83
84 // randomly generated ethernet address
85 static u8 node_id [ETH_ALEN];
86
87 static const char driver_name [] = "usbnet";
88
89 /* use ethtool to change the level for any given device */
90 static int msg_level = -1;
91 module_param (msg_level, int, 0);
92 MODULE_PARM_DESC (msg_level, "Override default message level");
93
94 /*-------------------------------------------------------------------------*/
95
96 /* handles CDC Ethernet and many other network "bulk data" interfaces */
97 int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
98 {
99 int tmp;
100 struct usb_host_interface *alt = NULL;
101 struct usb_host_endpoint *in = NULL, *out = NULL;
102 struct usb_host_endpoint *status = NULL;
103
104 for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
105 unsigned ep;
106
107 in = out = status = NULL;
108 alt = intf->altsetting + tmp;
109
110 /* take the first altsetting with in-bulk + out-bulk;
111 * remember any status endpoint, just in case;
112 * ignore other endpoints and altsettings.
113 */
114 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
115 struct usb_host_endpoint *e;
116 int intr = 0;
117
118 e = alt->endpoint + ep;
119 switch (e->desc.bmAttributes) {
120 case USB_ENDPOINT_XFER_INT:
121 if (!usb_endpoint_dir_in(&e->desc))
122 continue;
123 intr = 1;
124 /* FALLTHROUGH */
125 case USB_ENDPOINT_XFER_BULK:
126 break;
127 default:
128 continue;
129 }
130 if (usb_endpoint_dir_in(&e->desc)) {
131 if (!intr && !in)
132 in = e;
133 else if (intr && !status)
134 status = e;
135 } else {
136 if (!out)
137 out = e;
138 }
139 }
140 if (in && out)
141 break;
142 }
143 if (!alt || !in || !out)
144 return -EINVAL;
145
146 if (alt->desc.bAlternateSetting != 0 ||
147 !(dev->driver_info->flags & FLAG_NO_SETINT)) {
148 tmp = usb_set_interface (dev->udev, alt->desc.bInterfaceNumber,
149 alt->desc.bAlternateSetting);
150 if (tmp < 0)
151 return tmp;
152 }
153
154 dev->in = usb_rcvbulkpipe (dev->udev,
155 in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
156 dev->out = usb_sndbulkpipe (dev->udev,
157 out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
158 dev->status = status;
159 return 0;
160 }
161 EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
162
163 int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
164 {
165 int tmp, i;
166 unsigned char buf [13];
167
168 tmp = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
169 if (tmp != 12) {
170 dev_dbg(&dev->udev->dev,
171 "bad MAC string %d fetch, %d\n", iMACAddress, tmp);
172 if (tmp >= 0)
173 tmp = -EINVAL;
174 return tmp;
175 }
176 for (i = tmp = 0; i < 6; i++, tmp += 2)
177 dev->net->dev_addr [i] =
178 (hex_to_bin(buf[tmp]) << 4) + hex_to_bin(buf[tmp + 1]);
179 return 0;
180 }
181 EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
182
183 static void intr_complete (struct urb *urb);
184
185 static int init_status (struct usbnet *dev, struct usb_interface *intf)
186 {
187 char *buf = NULL;
188 unsigned pipe = 0;
189 unsigned maxp;
190 unsigned period;
191
192 if (!dev->driver_info->status)
193 return 0;
194
195 pipe = usb_rcvintpipe (dev->udev,
196 dev->status->desc.bEndpointAddress
197 & USB_ENDPOINT_NUMBER_MASK);
198 maxp = usb_maxpacket (dev->udev, pipe, 0);
199
200 /* avoid 1 msec chatter: min 8 msec poll rate */
201 period = max ((int) dev->status->desc.bInterval,
202 (dev->udev->speed == USB_SPEED_HIGH) ? 7 : 3);
203
204 buf = kmalloc (maxp, GFP_KERNEL);
205 if (buf) {
206 dev->interrupt = usb_alloc_urb (0, GFP_KERNEL);
207 if (!dev->interrupt) {
208 kfree (buf);
209 return -ENOMEM;
210 } else {
211 usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
212 buf, maxp, intr_complete, dev, period);
213 dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
214 dev_dbg(&intf->dev,
215 "status ep%din, %d bytes period %d\n",
216 usb_pipeendpoint(pipe), maxp, period);
217 }
218 }
219 return 0;
220 }
221
222 /* Passes this packet up the stack, updating its accounting.
223 * Some link protocols batch packets, so their rx_fixup paths
224 * can return clones as well as just modify the original skb.
225 */
226 void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
227 {
228 int status;
229
230 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
231 skb_queue_tail(&dev->rxq_pause, skb);
232 return;
233 }
234
235 skb->protocol = eth_type_trans (skb, dev->net);
236 dev->net->stats.rx_packets++;
237 dev->net->stats.rx_bytes += skb->len;
238
239 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
240 skb->len + sizeof (struct ethhdr), skb->protocol);
241 memset (skb->cb, 0, sizeof (struct skb_data));
242
243 if (skb_defer_rx_timestamp(skb))
244 return;
245
246 status = netif_rx (skb);
247 if (status != NET_RX_SUCCESS)
248 netif_dbg(dev, rx_err, dev->net,
249 "netif_rx status %d\n", status);
250 }
251 EXPORT_SYMBOL_GPL(usbnet_skb_return);
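/*
 * A hypothetical rx_fixup() for a minidriver whose hardware batches
 * frames and prepends a 2-byte little-endian length header to each one
 * (the header layout is an assumption, purely for illustration).
 * Complete frames are cloned and handed to usbnet_skb_return(); such a
 * driver would also set FLAG_MULTI_PACKET in its driver_info flags.
 */
#if 0	/* illustrative sketch only, not built */
static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	while (skb->len > 2) {
		unsigned len = skb->data[0] | (skb->data[1] << 8);
		struct sk_buff *frame;

		skb_pull(skb, 2);
		if (len > skb->len)
			return 0;	/* framing error; caller counts it */

		frame = skb_clone(skb, GFP_ATOMIC);
		if (!frame)
			return 0;
		skb_trim(frame, len);
		usbnet_skb_return(dev, frame);
		skb_pull(skb, len);
	}
	return 1;	/* success; rx_process() handles any remainder */
}
#endif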
252
253
254 /*-------------------------------------------------------------------------
255 *
256 * Network Device Driver (peer link to "Host Device", from USB host)
257 *
258 *-------------------------------------------------------------------------*/
259
260 int usbnet_change_mtu (struct net_device *net, int new_mtu)
261 {
262 struct usbnet *dev = netdev_priv(net);
263 int ll_mtu = new_mtu + net->hard_header_len;
264 int old_hard_mtu = dev->hard_mtu;
265 int old_rx_urb_size = dev->rx_urb_size;
266
267 if (new_mtu <= 0)
268 return -EINVAL;
269 // no second zero-length packet read wanted after mtu-sized packets
270 if ((ll_mtu % dev->maxpacket) == 0)
271 return -EDOM;
272 net->mtu = new_mtu;
273
274 dev->hard_mtu = net->mtu + net->hard_header_len;
275 if (dev->rx_urb_size == old_hard_mtu) {
276 dev->rx_urb_size = dev->hard_mtu;
277 if (dev->rx_urb_size > old_rx_urb_size)
278 usbnet_unlink_rx_urbs(dev);
279 }
280
281 return 0;
282 }
283 EXPORT_SYMBOL_GPL(usbnet_change_mtu);
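/*
 * Example of the -EDOM check above (assuming the usual 14-byte Ethernet
 * header and a 512-byte high speed bulk maxpacket): an MTU of 498 gives
 * ll_mtu = 498 + 14 = 512, an exact multiple of maxpacket, so the request
 * is rejected; a maximum-size packet would otherwise end exactly on a USB
 * packet boundary and need a zero-length packet to terminate the transfer.
 */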
284
285 /* The caller must hold list->lock */
286 static void __usbnet_queue_skb(struct sk_buff_head *list,
287 struct sk_buff *newsk, enum skb_state state)
288 {
289 struct skb_data *entry = (struct skb_data *) newsk->cb;
290
291 __skb_queue_tail(list, newsk);
292 entry->state = state;
293 }
294
295 /*-------------------------------------------------------------------------*/
296
297 /* some LK 2.4 HCDs oopsed if we freed or resubmitted urbs from
298 * completion callbacks. 2.5 should have fixed those bugs...
299 */
300
301 static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
302 struct sk_buff_head *list, enum skb_state state)
303 {
304 unsigned long flags;
305 enum skb_state old_state;
306 struct skb_data *entry = (struct skb_data *) skb->cb;
307
308 spin_lock_irqsave(&list->lock, flags);
309 old_state = entry->state;
310 entry->state = state;
311 __skb_unlink(skb, list);
312 spin_unlock(&list->lock);
313 spin_lock(&dev->done.lock);
314 __skb_queue_tail(&dev->done, skb);
315 if (dev->done.qlen == 1)
316 tasklet_schedule(&dev->bh);
317 spin_unlock_irqrestore(&dev->done.lock, flags);
318 return old_state;
319 }
320
321 /* some work can't be done in tasklets, so we use keventd
322 *
323 * NOTE: annoying asymmetry: if the work is already queued, schedule_work()
324 * reports failure, but tasklet_schedule() doesn't. Hope that's rare.
325 */
326 void usbnet_defer_kevent (struct usbnet *dev, int work)
327 {
328 set_bit (work, &dev->flags);
329 if (!schedule_work (&dev->kevent))
330 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
331 else
332 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
333 }
334 EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
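/*
 * Minidrivers may call this from atomic context too; e.g. a status()
 * interrupt handler noticing a link change might (sketch only) do
 *
 *	usbnet_defer_kevent(dev, EVENT_LINK_RESET);
 *
 * so that a possibly-sleeping link_reset() runs later from keventd.
 */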
335
336 /*-------------------------------------------------------------------------*/
337
338 static void rx_complete (struct urb *urb);
339
340 static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
341 {
342 struct sk_buff *skb;
343 struct skb_data *entry;
344 int retval = 0;
345 unsigned long lockflags;
346 size_t size = dev->rx_urb_size;
347
348 skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
349 if (!skb) {
350 netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
351 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
352 usb_free_urb (urb);
353 return -ENOMEM;
354 }
355
356 entry = (struct skb_data *) skb->cb;
357 entry->urb = urb;
358 entry->dev = dev;
359 entry->length = 0;
360
361 usb_fill_bulk_urb (urb, dev->udev, dev->in,
362 skb->data, size, rx_complete, skb);
363
364 spin_lock_irqsave (&dev->rxq.lock, lockflags);
365
366 if (netif_running (dev->net) &&
367 netif_device_present (dev->net) &&
368 !test_bit (EVENT_RX_HALT, &dev->flags) &&
369 !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
370 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
371 case -EPIPE:
372 usbnet_defer_kevent (dev, EVENT_RX_HALT);
373 break;
374 case -ENOMEM:
375 usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
376 break;
377 case -ENODEV:
378 netif_dbg(dev, ifdown, dev->net, "device gone\n");
379 netif_device_detach (dev->net);
380 break;
381 case -EHOSTUNREACH:
382 retval = -ENOLINK;
383 break;
384 default:
385 netif_dbg(dev, rx_err, dev->net,
386 "rx submit, %d\n", retval);
387 tasklet_schedule (&dev->bh);
388 break;
389 case 0:
390 __usbnet_queue_skb(&dev->rxq, skb, rx_start);
391 }
392 } else {
393 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
394 retval = -ENOLINK;
395 }
396 spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
397 if (retval) {
398 dev_kfree_skb_any (skb);
399 usb_free_urb (urb);
400 }
401 return retval;
402 }
403
404
405 /*-------------------------------------------------------------------------*/
406
407 static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
408 {
409 if (dev->driver_info->rx_fixup &&
410 !dev->driver_info->rx_fixup (dev, skb)) {
411 /* With RX_ASSEMBLE, rx_fixup() must update counters */
412 if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
413 dev->net->stats.rx_errors++;
414 goto done;
415 }
416 // else network stack removes extra byte if we forced a short packet
417
418 if (skb->len) {
419 /* all data was already cloned from skb inside the driver */
420 if (dev->driver_info->flags & FLAG_MULTI_PACKET)
421 dev_kfree_skb_any(skb);
422 else
423 usbnet_skb_return(dev, skb);
424 return;
425 }
426
427 netif_dbg(dev, rx_err, dev->net, "drop\n");
428 dev->net->stats.rx_errors++;
429 done:
430 skb_queue_tail(&dev->done, skb);
431 }
432
433 /*-------------------------------------------------------------------------*/
434
435 static void rx_complete (struct urb *urb)
436 {
437 struct sk_buff *skb = (struct sk_buff *) urb->context;
438 struct skb_data *entry = (struct skb_data *) skb->cb;
439 struct usbnet *dev = entry->dev;
440 int urb_status = urb->status;
441 enum skb_state state;
442
443 skb_put (skb, urb->actual_length);
444 state = rx_done;
445 entry->urb = NULL;
446
447 switch (urb_status) {
448 /* success */
449 case 0:
450 if (skb->len < dev->net->hard_header_len) {
451 state = rx_cleanup;
452 dev->net->stats.rx_errors++;
453 dev->net->stats.rx_length_errors++;
454 netif_dbg(dev, rx_err, dev->net,
455 "rx length %d\n", skb->len);
456 }
457 break;
458
459 /* stalls need manual reset. this is rare ... except that
460 * when going through USB 2.0 TTs, unplug appears this way.
461 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
462 * storm, recovering as needed.
463 */
464 case -EPIPE:
465 dev->net->stats.rx_errors++;
466 usbnet_defer_kevent (dev, EVENT_RX_HALT);
467 // FALLTHROUGH
468
469 /* software-driven interface shutdown */
470 case -ECONNRESET: /* async unlink */
471 case -ESHUTDOWN: /* hardware gone */
472 netif_dbg(dev, ifdown, dev->net,
473 "rx shutdown, code %d\n", urb_status);
474 goto block;
475
476 /* we get controller i/o faults during khubd disconnect() delays.
477 * throttle down resubmits, to avoid log floods; just temporarily,
478 * so we still recover when the fault isn't a khubd delay.
479 */
480 case -EPROTO:
481 case -ETIME:
482 case -EILSEQ:
483 dev->net->stats.rx_errors++;
484 if (!timer_pending (&dev->delay)) {
485 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
486 netif_dbg(dev, link, dev->net,
487 "rx throttle %d\n", urb_status);
488 }
489 block:
490 state = rx_cleanup;
491 entry->urb = urb;
492 urb = NULL;
493 break;
494
495 /* data overrun ... flush fifo? */
496 case -EOVERFLOW:
497 dev->net->stats.rx_over_errors++;
498 // FALLTHROUGH
499
500 default:
501 state = rx_cleanup;
502 dev->net->stats.rx_errors++;
503 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
504 break;
505 }
506
507 state = defer_bh(dev, skb, &dev->rxq, state);
508
509 if (urb) {
510 if (netif_running (dev->net) &&
511 !test_bit (EVENT_RX_HALT, &dev->flags) &&
512 state != unlink_start) {
513 rx_submit (dev, urb, GFP_ATOMIC);
514 usb_mark_last_busy(dev->udev);
515 return;
516 }
517 usb_free_urb (urb);
518 }
519 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
520 }
521
522 static void intr_complete (struct urb *urb)
523 {
524 struct usbnet *dev = urb->context;
525 int status = urb->status;
526
527 switch (status) {
528 /* success */
529 case 0:
530 dev->driver_info->status(dev, urb);
531 break;
532
533 /* software-driven interface shutdown */
534 case -ENOENT: /* urb killed */
535 case -ESHUTDOWN: /* hardware gone */
536 netif_dbg(dev, ifdown, dev->net,
537 "intr shutdown, code %d\n", status);
538 return;
539
540 /* NOTE: not throttling like RX/TX, since this endpoint
541 * already polls infrequently
542 */
543 default:
544 netdev_dbg(dev->net, "intr status %d\n", status);
545 break;
546 }
547
548 if (!netif_running (dev->net))
549 return;
550
551 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
552 status = usb_submit_urb (urb, GFP_ATOMIC);
553 if (status != 0)
554 netif_err(dev, timer, dev->net,
555 "intr resubmit --> %d\n", status);
556 }
557
558 /*-------------------------------------------------------------------------*/
559 void usbnet_pause_rx(struct usbnet *dev)
560 {
561 set_bit(EVENT_RX_PAUSED, &dev->flags);
562
563 netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
564 }
565 EXPORT_SYMBOL_GPL(usbnet_pause_rx);
566
567 void usbnet_resume_rx(struct usbnet *dev)
568 {
569 struct sk_buff *skb;
570 int num = 0;
571
572 clear_bit(EVENT_RX_PAUSED, &dev->flags);
573
574 while ((skb = skb_dequeue(&dev->rxq_pause)) != NULL) {
575 usbnet_skb_return(dev, skb);
576 num++;
577 }
578
579 tasklet_schedule(&dev->bh);
580
581 netif_dbg(dev, rx_status, dev->net,
582 "paused rx queue disabled, %d skbs requeued\n", num);
583 }
584 EXPORT_SYMBOL_GPL(usbnet_resume_rx);
585
586 void usbnet_purge_paused_rxq(struct usbnet *dev)
587 {
588 skb_queue_purge(&dev->rxq_pause);
589 }
590 EXPORT_SYMBOL_GPL(usbnet_purge_paused_rxq);
591
592 /*-------------------------------------------------------------------------*/
593
594 // unlink pending rx/tx; completion handlers do all other cleanup
595
596 static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
597 {
598 unsigned long flags;
599 struct sk_buff *skb;
600 int count = 0;
601
602 spin_lock_irqsave (&q->lock, flags);
603 while (!skb_queue_empty(q)) {
604 struct skb_data *entry;
605 struct urb *urb;
606 int retval;
607
608 skb_queue_walk(q, skb) {
609 entry = (struct skb_data *) skb->cb;
610 if (entry->state != unlink_start)
611 goto found;
612 }
613 break;
614 found:
615 entry->state = unlink_start;
616 urb = entry->urb;
617
618 /*
619 * Take a reference on the URB so it cannot be freed
620 * while usb_unlink_urb runs; usb_unlink_urb always
621 * races with the .complete handler (including
622 * defer_bh), which could otherwise trigger a
623 * use-after-free.
624 */
625 usb_get_urb(urb);
626 spin_unlock_irqrestore(&q->lock, flags);
627 // during some PM-driven resume scenarios,
628 // these (async) unlinks complete immediately
629 retval = usb_unlink_urb (urb);
630 if (retval != -EINPROGRESS && retval != 0)
631 netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
632 else
633 count++;
634 usb_put_urb(urb);
635 spin_lock_irqsave(&q->lock, flags);
636 }
637 spin_unlock_irqrestore (&q->lock, flags);
638 return count;
639 }
640
641 // Flush all pending rx urbs
642 // minidrivers may need to do this when the MTU changes
643
644 void usbnet_unlink_rx_urbs(struct usbnet *dev)
645 {
646 if (netif_running(dev->net)) {
647 (void) unlink_urbs (dev, &dev->rxq);
648 tasklet_schedule(&dev->bh);
649 }
650 }
651 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
652
653 /*-------------------------------------------------------------------------*/
654
655 // precondition: never called in_interrupt
656 static void usbnet_terminate_urbs(struct usbnet *dev)
657 {
658 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
659 DECLARE_WAITQUEUE(wait, current);
660 int temp;
661
662 /* ensure there are no more active urbs */
663 add_wait_queue(&unlink_wakeup, &wait);
664 set_current_state(TASK_UNINTERRUPTIBLE);
665 dev->wait = &unlink_wakeup;
666 temp = unlink_urbs(dev, &dev->txq) +
667 unlink_urbs(dev, &dev->rxq);
668
669 /* maybe wait for deletions to finish. */
670 while (!skb_queue_empty(&dev->rxq)
671 && !skb_queue_empty(&dev->txq)
672 && !skb_queue_empty(&dev->done)) {
673 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
674 set_current_state(TASK_UNINTERRUPTIBLE);
675 netif_dbg(dev, ifdown, dev->net,
676 "waited for %d urb completions\n", temp);
677 }
678 set_current_state(TASK_RUNNING);
679 dev->wait = NULL;
680 remove_wait_queue(&unlink_wakeup, &wait);
681 }
682
683 int usbnet_stop (struct net_device *net)
684 {
685 struct usbnet *dev = netdev_priv(net);
686 struct driver_info *info = dev->driver_info;
687 int retval;
688
689 clear_bit(EVENT_DEV_OPEN, &dev->flags);
690 netif_stop_queue (net);
691
692 netif_info(dev, ifdown, dev->net,
693 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
694 net->stats.rx_packets, net->stats.tx_packets,
695 net->stats.rx_errors, net->stats.tx_errors);
696
697 /* allow the minidriver to stop correctly (e.g. so wireless
698 * devices can turn off their radio) */
699 if (info->stop) {
700 retval = info->stop(dev);
701 if (retval < 0)
702 netif_info(dev, ifdown, dev->net,
703 "stop fail (%d) usbnet usb-%s-%s, %s\n",
704 retval,
705 dev->udev->bus->bus_name, dev->udev->devpath,
706 info->description);
707 }
708
709 if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
710 usbnet_terminate_urbs(dev);
711
712 usb_kill_urb(dev->interrupt);
713
714 usbnet_purge_paused_rxq(dev);
715
716 /* deferred work (task, timer, softirq) must also stop.
717 * can't flush_scheduled_work() until we drop rtnl (later),
718 * else workers could deadlock; so make workers a NOP.
719 */
720 dev->flags = 0;
721 del_timer_sync (&dev->delay);
722 tasklet_kill (&dev->bh);
723 if (info->manage_power)
724 info->manage_power(dev, 0);
725 else
726 usb_autopm_put_interface(dev->intf);
727
728 return 0;
729 }
730 EXPORT_SYMBOL_GPL(usbnet_stop);
731
732 /*-------------------------------------------------------------------------*/
733
734 // posts reads, and enables write queuing
735
736 // precondition: never called in_interrupt
737
738 int usbnet_open (struct net_device *net)
739 {
740 struct usbnet *dev = netdev_priv(net);
741 int retval;
742 struct driver_info *info = dev->driver_info;
743
744 if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
745 netif_info(dev, ifup, dev->net,
746 "resumption fail (%d) usbnet usb-%s-%s, %s\n",
747 retval,
748 dev->udev->bus->bus_name,
749 dev->udev->devpath,
750 info->description);
751 goto done_nopm;
752 }
753
754 // put into "known safe" state
755 if (info->reset && (retval = info->reset (dev)) < 0) {
756 netif_info(dev, ifup, dev->net,
757 "open reset fail (%d) usbnet usb-%s-%s, %s\n",
758 retval,
759 dev->udev->bus->bus_name,
760 dev->udev->devpath,
761 info->description);
762 goto done;
763 }
764
765 // insist peer be connected
766 if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
767 netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
768 goto done;
769 }
770
771 /* start any status interrupt transfer */
772 if (dev->interrupt) {
773 retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
774 if (retval < 0) {
775 netif_err(dev, ifup, dev->net,
776 "intr submit %d\n", retval);
777 goto done;
778 }
779 }
780
781 set_bit(EVENT_DEV_OPEN, &dev->flags);
782 netif_start_queue (net);
783 netif_info(dev, ifup, dev->net,
784 "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
785 (int)RX_QLEN(dev), (int)TX_QLEN(dev),
786 dev->net->mtu,
787 (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
788 (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
789 (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
790 (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
791 (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
792 "simple");
793
794 // delay posting reads until we're fully open
795 tasklet_schedule (&dev->bh);
796 if (info->manage_power) {
797 retval = info->manage_power(dev, 1);
798 if (retval < 0)
799 goto done;
800 usb_autopm_put_interface(dev->intf);
801 }
802 return retval;
803
804 done:
805 usb_autopm_put_interface(dev->intf);
806 done_nopm:
807 return retval;
808 }
809 EXPORT_SYMBOL_GPL(usbnet_open);
810
811 /*-------------------------------------------------------------------------*/
812
813 /* ethtool methods; minidrivers may need to add some more, but
814 * they'll probably want to use this base set.
815 */
816
817 int usbnet_get_settings (struct net_device *net, struct ethtool_cmd *cmd)
818 {
819 struct usbnet *dev = netdev_priv(net);
820
821 if (!dev->mii.mdio_read)
822 return -EOPNOTSUPP;
823
824 return mii_ethtool_gset(&dev->mii, cmd);
825 }
826 EXPORT_SYMBOL_GPL(usbnet_get_settings);
827
828 int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
829 {
830 struct usbnet *dev = netdev_priv(net);
831 int retval;
832
833 if (!dev->mii.mdio_write)
834 return -EOPNOTSUPP;
835
836 retval = mii_ethtool_sset(&dev->mii, cmd);
837
838 /* link speed/duplex might have changed */
839 if (dev->driver_info->link_reset)
840 dev->driver_info->link_reset(dev);
841
842 return retval;
843
844 }
845 EXPORT_SYMBOL_GPL(usbnet_set_settings);
846
847 u32 usbnet_get_link (struct net_device *net)
848 {
849 struct usbnet *dev = netdev_priv(net);
850
851 /* If a check_connect is defined, return its result */
852 if (dev->driver_info->check_connect)
853 return dev->driver_info->check_connect (dev) == 0;
854
855 /* if the device has mii operations, use those */
856 if (dev->mii.mdio_read)
857 return mii_link_ok(&dev->mii);
858
859 /* Otherwise, dtrt for drivers calling netif_carrier_{on,off} */
860 return ethtool_op_get_link(net);
861 }
862 EXPORT_SYMBOL_GPL(usbnet_get_link);
863
864 int usbnet_nway_reset(struct net_device *net)
865 {
866 struct usbnet *dev = netdev_priv(net);
867
868 if (!dev->mii.mdio_write)
869 return -EOPNOTSUPP;
870
871 return mii_nway_restart(&dev->mii);
872 }
873 EXPORT_SYMBOL_GPL(usbnet_nway_reset);
874
875 void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
876 {
877 struct usbnet *dev = netdev_priv(net);
878
879 strncpy (info->driver, dev->driver_name, sizeof info->driver);
880 strncpy (info->version, DRIVER_VERSION, sizeof info->version);
881 strncpy (info->fw_version, dev->driver_info->description,
882 sizeof info->fw_version);
883 usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
884 }
885 EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
886
887 u32 usbnet_get_msglevel (struct net_device *net)
888 {
889 struct usbnet *dev = netdev_priv(net);
890
891 return dev->msg_enable;
892 }
893 EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
894
895 void usbnet_set_msglevel (struct net_device *net, u32 level)
896 {
897 struct usbnet *dev = netdev_priv(net);
898
899 dev->msg_enable = level;
900 }
901 EXPORT_SYMBOL_GPL(usbnet_set_msglevel);
902
903 /* drivers may override default ethtool_ops in their bind() routine */
904 static const struct ethtool_ops usbnet_ethtool_ops = {
905 .get_settings = usbnet_get_settings,
906 .set_settings = usbnet_set_settings,
907 .get_link = usbnet_get_link,
908 .nway_reset = usbnet_nway_reset,
909 .get_drvinfo = usbnet_get_drvinfo,
910 .get_msglevel = usbnet_get_msglevel,
911 .set_msglevel = usbnet_set_msglevel,
912 };
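/*
 * A minidriver that needs extra ethtool hooks would typically start from
 * this base set; a sketch follows (the example_get_eeprom* handlers are
 * hypothetical).
 */
#if 0	/* illustrative sketch only, not built */
static const struct ethtool_ops example_ethtool_ops = {
	.get_settings		= usbnet_get_settings,
	.set_settings		= usbnet_set_settings,
	.get_link		= usbnet_get_link,
	.nway_reset		= usbnet_nway_reset,
	.get_drvinfo		= usbnet_get_drvinfo,
	.get_msglevel		= usbnet_get_msglevel,
	.set_msglevel		= usbnet_set_msglevel,
	.get_eeprom_len		= example_get_eeprom_len,
	.get_eeprom		= example_get_eeprom,
};

/* installed from the minidriver's bind():
 *	dev->net->ethtool_ops = &example_ethtool_ops;
 */
#endif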
913
914 /*-------------------------------------------------------------------------*/
915
916 /* work that cannot be done in interrupt context uses keventd.
917 *
918 * NOTE: with 2.5 we could do more of this using completion callbacks,
919 * especially now that control transfers can be queued.
920 */
921 static void
922 kevent (struct work_struct *work)
923 {
924 struct usbnet *dev =
925 container_of(work, struct usbnet, kevent);
926 int status;
927
928 /* usb_clear_halt() needs a thread context */
929 if (test_bit (EVENT_TX_HALT, &dev->flags)) {
930 unlink_urbs (dev, &dev->txq);
931 status = usb_autopm_get_interface(dev->intf);
932 if (status < 0)
933 goto fail_pipe;
934 status = usb_clear_halt (dev->udev, dev->out);
935 usb_autopm_put_interface(dev->intf);
936 if (status < 0 &&
937 status != -EPIPE &&
938 status != -ESHUTDOWN) {
939 if (netif_msg_tx_err (dev))
940 fail_pipe:
941 netdev_err(dev->net, "can't clear tx halt, status %d\n",
942 status);
943 } else {
944 clear_bit (EVENT_TX_HALT, &dev->flags);
945 if (status != -ESHUTDOWN)
946 netif_wake_queue (dev->net);
947 }
948 }
949 if (test_bit (EVENT_RX_HALT, &dev->flags)) {
950 unlink_urbs (dev, &dev->rxq);
951 status = usb_autopm_get_interface(dev->intf);
952 if (status < 0)
953 goto fail_halt;
954 status = usb_clear_halt (dev->udev, dev->in);
955 usb_autopm_put_interface(dev->intf);
956 if (status < 0 &&
957 status != -EPIPE &&
958 status != -ESHUTDOWN) {
959 if (netif_msg_rx_err (dev))
960 fail_halt:
961 netdev_err(dev->net, "can't clear rx halt, status %d\n",
962 status);
963 } else {
964 clear_bit (EVENT_RX_HALT, &dev->flags);
965 tasklet_schedule (&dev->bh);
966 }
967 }
968
969 /* tasklet could resubmit itself forever if memory is tight */
970 if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
971 struct urb *urb = NULL;
972 int resched = 1;
973
974 if (netif_running (dev->net))
975 urb = usb_alloc_urb (0, GFP_KERNEL);
976 else
977 clear_bit (EVENT_RX_MEMORY, &dev->flags);
978 if (urb != NULL) {
979 clear_bit (EVENT_RX_MEMORY, &dev->flags);
980 status = usb_autopm_get_interface(dev->intf);
981 if (status < 0) {
982 usb_free_urb(urb);
983 goto fail_lowmem;
984 }
985 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
986 resched = 0;
987 usb_autopm_put_interface(dev->intf);
988 fail_lowmem:
989 if (resched)
990 tasklet_schedule (&dev->bh);
991 }
992 }
993
994 if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
995 struct driver_info *info = dev->driver_info;
996 int retval = 0;
997
998 clear_bit (EVENT_LINK_RESET, &dev->flags);
999 status = usb_autopm_get_interface(dev->intf);
1000 if (status < 0)
1001 goto skip_reset;
1002 if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
1003 usb_autopm_put_interface(dev->intf);
1004 skip_reset:
1005 netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
1006 retval,
1007 dev->udev->bus->bus_name,
1008 dev->udev->devpath,
1009 info->description);
1010 } else {
1011 usb_autopm_put_interface(dev->intf);
1012 }
1013 }
1014
1015 if (dev->flags)
1016 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
1017 }
1018
1019 /*-------------------------------------------------------------------------*/
1020
1021 static void tx_complete (struct urb *urb)
1022 {
1023 struct sk_buff *skb = (struct sk_buff *) urb->context;
1024 struct skb_data *entry = (struct skb_data *) skb->cb;
1025 struct usbnet *dev = entry->dev;
1026
1027 if (urb->status == 0) {
1028 if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
1029 dev->net->stats.tx_packets++;
1030 dev->net->stats.tx_bytes += entry->length;
1031 } else {
1032 dev->net->stats.tx_errors++;
1033
1034 switch (urb->status) {
1035 case -EPIPE:
1036 usbnet_defer_kevent (dev, EVENT_TX_HALT);
1037 break;
1038
1039 /* software-driven interface shutdown */
1040 case -ECONNRESET: // async unlink
1041 case -ESHUTDOWN: // hardware gone
1042 break;
1043
1044 // like rx, tx gets controller i/o faults during khubd delays
1045 // and so it uses the same throttling mechanism.
1046 case -EPROTO:
1047 case -ETIME:
1048 case -EILSEQ:
1049 usb_mark_last_busy(dev->udev);
1050 if (!timer_pending (&dev->delay)) {
1051 mod_timer (&dev->delay,
1052 jiffies + THROTTLE_JIFFIES);
1053 netif_dbg(dev, link, dev->net,
1054 "tx throttle %d\n", urb->status);
1055 }
1056 netif_stop_queue (dev->net);
1057 break;
1058 default:
1059 netif_dbg(dev, tx_err, dev->net,
1060 "tx err %d\n", entry->urb->status);
1061 break;
1062 }
1063 }
1064
1065 usb_autopm_put_interface_async(dev->intf);
1066 (void) defer_bh(dev, skb, &dev->txq, tx_done);
1067 }
1068
1069 /*-------------------------------------------------------------------------*/
1070
1071 void usbnet_tx_timeout (struct net_device *net)
1072 {
1073 struct usbnet *dev = netdev_priv(net);
1074
1075 unlink_urbs (dev, &dev->txq);
1076 tasklet_schedule (&dev->bh);
1077
1078 // FIXME: device recovery -- reset?
1079 }
1080 EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
1081
1082 /*-------------------------------------------------------------------------*/
1083
1084 netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1085 struct net_device *net)
1086 {
1087 struct usbnet *dev = netdev_priv(net);
1088 int length;
1089 struct urb *urb = NULL;
1090 struct skb_data *entry;
1091 struct driver_info *info = dev->driver_info;
1092 unsigned long flags;
1093 int retval;
1094
1095 if (skb)
1096 skb_tx_timestamp(skb);
1097
1098 // some devices want funky USB-level framing, for
1099 // win32 driver (usually) and/or hardware quirks
1100 if (info->tx_fixup) {
1101 skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
1102 if (!skb) {
1103 if (netif_msg_tx_err(dev)) {
1104 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
1105 goto drop;
1106 } else {
1107 /* cdc_ncm collected packet; waits for more */
1108 goto not_drop;
1109 }
1110 }
1111 }
1112 length = skb->len;
1113
1114 if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
1115 netif_dbg(dev, tx_err, dev->net, "no urb\n");
1116 goto drop;
1117 }
1118
1119 entry = (struct skb_data *) skb->cb;
1120 entry->urb = urb;
1121 entry->dev = dev;
1122 entry->length = length;
1123
1124 usb_fill_bulk_urb (urb, dev->udev, dev->out,
1125 skb->data, skb->len, tx_complete, skb);
1126
1127 /* don't assume the hardware handles USB_ZERO_PACKET
1128 * NOTE: strictly conforming cdc-ether devices should expect
1129 * the ZLP here, but ignore the one-byte packet.
1130 * NOTE2: CDC NCM specification is different from CDC ECM when
1131 * handling ZLP/short packets, so cdc_ncm driver will make short
1132 * packet itself if needed.
1133 */
1134 if (length % dev->maxpacket == 0) {
1135 if (!(info->flags & FLAG_SEND_ZLP)) {
1136 if (!(info->flags & FLAG_MULTI_PACKET)) {
1137 urb->transfer_buffer_length++;
1138 if (skb_tailroom(skb)) {
1139 skb->data[skb->len] = 0;
1140 __skb_put(skb, 1);
1141 }
1142 }
1143 } else
1144 urb->transfer_flags |= URB_ZERO_PACKET;
1145 }
1146
1147 spin_lock_irqsave(&dev->txq.lock, flags);
1148 retval = usb_autopm_get_interface_async(dev->intf);
1149 if (retval < 0) {
1150 spin_unlock_irqrestore(&dev->txq.lock, flags);
1151 goto drop;
1152 }
1153
1154 #ifdef CONFIG_PM
1155 /* if this triggers, the device is still asleep */
1156 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
1157 /* transmission will be done in resume */
1158 usb_anchor_urb(urb, &dev->deferred);
1159 /* no use to process more packets */
1160 netif_stop_queue(net);
1161 usb_put_urb(urb);
1162 spin_unlock_irqrestore(&dev->txq.lock, flags);
1163 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
1164 goto deferred;
1165 }
1166 #endif
1167
1168 switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
1169 case -EPIPE:
1170 netif_stop_queue (net);
1171 usbnet_defer_kevent (dev, EVENT_TX_HALT);
1172 usb_autopm_put_interface_async(dev->intf);
1173 break;
1174 default:
1175 usb_autopm_put_interface_async(dev->intf);
1176 netif_dbg(dev, tx_err, dev->net,
1177 "tx: submit urb err %d\n", retval);
1178 break;
1179 case 0:
1180 net->trans_start = jiffies;
1181 __usbnet_queue_skb(&dev->txq, skb, tx_start);
1182 if (dev->txq.qlen >= TX_QLEN (dev))
1183 netif_stop_queue (net);
1184 }
1185 spin_unlock_irqrestore (&dev->txq.lock, flags);
1186
1187 if (retval) {
1188 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
1189 drop:
1190 dev->net->stats.tx_dropped++;
1191 not_drop:
1192 if (skb)
1193 dev_kfree_skb_any (skb);
1194 usb_free_urb (urb);
1195 } else
1196 netif_dbg(dev, tx_queued, dev->net,
1197 "> tx, len %d, type 0x%x\n", length, skb->protocol);
1198 #ifdef CONFIG_PM
1199 deferred:
1200 #endif
1201 return NETDEV_TX_OK;
1202 }
1203 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
1204
1205 /*-------------------------------------------------------------------------*/
1206
1207 // tasklet (work deferred from completions, in_irq) or timer
1208
1209 static void usbnet_bh (unsigned long param)
1210 {
1211 struct usbnet *dev = (struct usbnet *) param;
1212 struct sk_buff *skb;
1213 struct skb_data *entry;
1214
1215 while ((skb = skb_dequeue (&dev->done))) {
1216 entry = (struct skb_data *) skb->cb;
1217 switch (entry->state) {
1218 case rx_done:
1219 entry->state = rx_cleanup;
1220 rx_process (dev, skb);
1221 continue;
1222 case tx_done:
1223 case rx_cleanup:
1224 usb_free_urb (entry->urb);
1225 dev_kfree_skb (skb);
1226 continue;
1227 default:
1228 netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
1229 }
1230 }
1231
1232 // waiting for all pending urbs to complete?
1233 if (dev->wait) {
1234 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
1235 wake_up (dev->wait);
1236 }
1237
1238 // or are we maybe short a few urbs?
1239 } else if (netif_running (dev->net) &&
1240 netif_device_present (dev->net) &&
1241 !timer_pending (&dev->delay) &&
1242 !test_bit (EVENT_RX_HALT, &dev->flags)) {
1243 int temp = dev->rxq.qlen;
1244 int qlen = RX_QLEN (dev);
1245
1246 if (temp < qlen) {
1247 struct urb *urb;
1248 int i;
1249
1250 // don't refill the queue all at once
1251 for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
1252 urb = usb_alloc_urb (0, GFP_ATOMIC);
1253 if (urb != NULL) {
1254 if (rx_submit (dev, urb, GFP_ATOMIC) ==
1255 -ENOLINK)
1256 return;
1257 }
1258 }
1259 if (temp != dev->rxq.qlen)
1260 netif_dbg(dev, link, dev->net,
1261 "rxqlen %d --> %d\n",
1262 temp, dev->rxq.qlen);
1263 if (dev->rxq.qlen < qlen)
1264 tasklet_schedule (&dev->bh);
1265 }
1266 if (dev->txq.qlen < TX_QLEN (dev))
1267 netif_wake_queue (dev->net);
1268 }
1269 }
1270
1271
1272 /*-------------------------------------------------------------------------
1273 *
1274 * USB Device Driver support
1275 *
1276 *-------------------------------------------------------------------------*/
1277
1278 // precondition: never called in_interrupt
1279
1280 void usbnet_disconnect (struct usb_interface *intf)
1281 {
1282 struct usbnet *dev;
1283 struct usb_device *xdev;
1284 struct net_device *net;
1285
1286 dev = usb_get_intfdata(intf);
1287 usb_set_intfdata(intf, NULL);
1288 if (!dev)
1289 return;
1290
1291 xdev = interface_to_usbdev (intf);
1292
1293 netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
1294 intf->dev.driver->name,
1295 xdev->bus->bus_name, xdev->devpath,
1296 dev->driver_info->description);
1297
1298 net = dev->net;
1299 unregister_netdev (net);
1300
1301 cancel_work_sync(&dev->kevent);
1302
1303 usb_scuttle_anchored_urbs(&dev->deferred);
1304
1305 if (dev->driver_info->unbind)
1306 dev->driver_info->unbind (dev, intf);
1307
1308 usb_kill_urb(dev->interrupt);
1309 usb_free_urb(dev->interrupt);
1310
1311 free_netdev(net);
1312 usb_put_dev (xdev);
1313 }
1314 EXPORT_SYMBOL_GPL(usbnet_disconnect);
1315
1316 static const struct net_device_ops usbnet_netdev_ops = {
1317 .ndo_open = usbnet_open,
1318 .ndo_stop = usbnet_stop,
1319 .ndo_start_xmit = usbnet_start_xmit,
1320 .ndo_tx_timeout = usbnet_tx_timeout,
1321 .ndo_change_mtu = usbnet_change_mtu,
1322 .ndo_set_mac_address = eth_mac_addr,
1323 .ndo_validate_addr = eth_validate_addr,
1324 };
1325
1326 /*-------------------------------------------------------------------------*/
1327
1328 // precondition: never called in_interrupt
1329
1330 static struct device_type wlan_type = {
1331 .name = "wlan",
1332 };
1333
1334 static struct device_type wwan_type = {
1335 .name = "wwan",
1336 };
1337
1338 int
1339 usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1340 {
1341 struct usbnet *dev;
1342 struct net_device *net;
1343 struct usb_host_interface *interface;
1344 struct driver_info *info;
1345 struct usb_device *xdev;
1346 int status;
1347 const char *name;
1348 struct usb_driver *driver = to_usb_driver(udev->dev.driver);
1349
1350 /* usbnet already uses usb runtime pm (autopm) calls, so autosuspend
1351 * support must be enabled for the usb interface; otherwise
1352 * usb_autopm_get_interface may fail when USB_SUSPEND (runtime PM) is enabled.
1353 */
1354 if (!driver->supports_autosuspend) {
1355 driver->supports_autosuspend = 1;
1356 pm_runtime_enable(&udev->dev);
1357 }
1358
1359 name = udev->dev.driver->name;
1360 info = (struct driver_info *) prod->driver_info;
1361 if (!info) {
1362 dev_dbg (&udev->dev, "blacklisted by %s\n", name);
1363 return -ENODEV;
1364 }
1365 xdev = interface_to_usbdev (udev);
1366 interface = udev->cur_altsetting;
1367
1368 usb_get_dev (xdev);
1369
1370 status = -ENOMEM;
1371
1372 // set up our own records
1373 net = alloc_etherdev(sizeof(*dev));
1374 if (!net)
1375 goto out;
1376
1377 /* netdev_printk() needs this so do it as early as possible */
1378 SET_NETDEV_DEV(net, &udev->dev);
1379
1380 dev = netdev_priv(net);
1381 dev->udev = xdev;
1382 dev->intf = udev;
1383 dev->driver_info = info;
1384 dev->driver_name = name;
1385 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1386 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1387 skb_queue_head_init (&dev->rxq);
1388 skb_queue_head_init (&dev->txq);
1389 skb_queue_head_init (&dev->done);
1390 skb_queue_head_init(&dev->rxq_pause);
1391 dev->bh.func = usbnet_bh;
1392 dev->bh.data = (unsigned long) dev;
1393 INIT_WORK (&dev->kevent, kevent);
1394 init_usb_anchor(&dev->deferred);
1395 dev->delay.function = usbnet_bh;
1396 dev->delay.data = (unsigned long) dev;
1397 init_timer (&dev->delay);
1398 mutex_init (&dev->phy_mutex);
1399
1400 dev->net = net;
1401 strcpy (net->name, "usb%d");
1402 memcpy (net->dev_addr, node_id, sizeof node_id);
1403
1404 /* rx and tx sides can use different message sizes;
1405 * bind() should set rx_urb_size in that case (see the sketch after this function).
1406 */
1407 dev->hard_mtu = net->mtu + net->hard_header_len;
1408 #if 0
1409 // dma_supported() is deeply broken on almost all architectures
1410 // possible with some EHCI controllers
1411 if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
1412 net->features |= NETIF_F_HIGHDMA;
1413 #endif
1414
1415 net->netdev_ops = &usbnet_netdev_ops;
1416 net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
1417 net->ethtool_ops = &usbnet_ethtool_ops;
1418
1419 // allow device-specific bind/init procedures
1420 // NOTE net->name still not usable ...
1421 if (info->bind) {
1422 status = info->bind (dev, udev);
1423 if (status < 0)
1424 goto out1;
1425
1426 // heuristic: "usb%d" for links we know are two-host,
1427 // else "eth%d" when there's reasonable doubt. userspace
1428 // can rename the link if it knows better.
1429 if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
1430 ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
1431 (net->dev_addr [0] & 0x02) == 0))
1432 strcpy (net->name, "eth%d");
1433 /* WLAN devices should always be named "wlan%d" */
1434 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1435 strcpy(net->name, "wlan%d");
1436 /* WWAN devices should always be named "wwan%d" */
1437 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1438 strcpy(net->name, "wwan%d");
1439
1440 /* maybe the remote can't receive an Ethernet MTU */
1441 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
1442 net->mtu = dev->hard_mtu - net->hard_header_len;
1443 } else if (!info->in || !info->out)
1444 status = usbnet_get_endpoints (dev, udev);
1445 else {
1446 dev->in = usb_rcvbulkpipe (xdev, info->in);
1447 dev->out = usb_sndbulkpipe (xdev, info->out);
1448 if (!(info->flags & FLAG_NO_SETINT))
1449 status = usb_set_interface (xdev,
1450 interface->desc.bInterfaceNumber,
1451 interface->desc.bAlternateSetting);
1452 else
1453 status = 0;
1454
1455 }
1456 if (status >= 0 && dev->status)
1457 status = init_status (dev, udev);
1458 if (status < 0)
1459 goto out3;
1460
1461 if (!dev->rx_urb_size)
1462 dev->rx_urb_size = dev->hard_mtu;
1463 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1464
1465 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1466 SET_NETDEV_DEVTYPE(net, &wlan_type);
1467 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1468 SET_NETDEV_DEVTYPE(net, &wwan_type);
1469
1470 status = register_netdev (net);
1471 if (status)
1472 goto out4;
1473 netif_info(dev, probe, dev->net,
1474 "register '%s' at usb-%s-%s, %s, %pM\n",
1475 udev->dev.driver->name,
1476 xdev->bus->bus_name, xdev->devpath,
1477 dev->driver_info->description,
1478 net->dev_addr);
1479
1480 // ok, it's ready to go.
1481 usb_set_intfdata (udev, dev);
1482
1483 netif_device_attach (net);
1484
1485 if (dev->driver_info->flags & FLAG_LINK_INTR)
1486 netif_carrier_off(net);
1487
1488 return 0;
1489
1490 out4:
1491 usb_free_urb(dev->interrupt);
1492 out3:
1493 if (info->unbind)
1494 info->unbind (dev, udev);
1495 out1:
1496 free_netdev(net);
1497 out:
1498 usb_put_dev(xdev);
1499 return status;
1500 }
1501 EXPORT_SYMBOL_GPL(usbnet_probe);
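/*
 * A hypothetical bind() illustrating the rx_urb_size note above: a device
 * that batches several frames per bulk-in transfer can size its rx urbs
 * independently of hard_mtu (all names and sizes here are illustrative
 * assumptions).
 */
#if 0	/* illustrative sketch only, not built */
static int example_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int status = usbnet_get_endpoints(dev, intf);

	if (status < 0)
		return status;

	dev->rx_urb_size = 4 * 1024;	/* several frames per rx urb */
	return 0;
}
#endif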
1502
1503 /*-------------------------------------------------------------------------*/
1504
1505 /*
1506 * suspend the whole driver as soon as the first interface is suspended
1507 * resume only when the last interface is resumed
1508 */
1509
1510 int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
1511 {
1512 struct usbnet *dev = usb_get_intfdata(intf);
1513
1514 if (!dev->suspend_count++) {
1515 spin_lock_irq(&dev->txq.lock);
1516 /* don't autosuspend while transmitting */
1517 if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
1518 spin_unlock_irq(&dev->txq.lock);
1519 return -EBUSY;
1520 } else {
1521 set_bit(EVENT_DEV_ASLEEP, &dev->flags);
1522 spin_unlock_irq(&dev->txq.lock);
1523 }
1524 /*
1525 * accelerate emptying of the rx and tx queues, to avoid
1526 * having everything error out.
1527 */
1528 netif_device_detach (dev->net);
1529 usbnet_terminate_urbs(dev);
1530 usb_kill_urb(dev->interrupt);
1531
1532 /*
1533 * reattach so runtime management can use and
1534 * wake the device
1535 */
1536 netif_device_attach (dev->net);
1537 }
1538 return 0;
1539 }
1540 EXPORT_SYMBOL_GPL(usbnet_suspend);
1541
1542 int usbnet_resume (struct usb_interface *intf)
1543 {
1544 struct usbnet *dev = usb_get_intfdata(intf);
1545 struct sk_buff *skb;
1546 struct urb *res;
1547 int retval;
1548
1549 if (!--dev->suspend_count) {
1550 /* resume interrupt URBs */
1551 if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
1552 usb_submit_urb(dev->interrupt, GFP_NOIO);
1553
1554 spin_lock_irq(&dev->txq.lock);
1555 while ((res = usb_get_from_anchor(&dev->deferred))) {
1556
1557 skb = (struct sk_buff *)res->context;
1558 retval = usb_submit_urb(res, GFP_ATOMIC);
1559 if (retval < 0) {
1560 dev_kfree_skb_any(skb);
1561 usb_free_urb(res);
1562 usb_autopm_put_interface_async(dev->intf);
1563 } else {
1564 dev->net->trans_start = jiffies;
1565 __skb_queue_tail(&dev->txq, skb);
1566 }
1567 }
1568
1569 smp_mb();
1570 clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
1571 spin_unlock_irq(&dev->txq.lock);
1572
1573 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
1574 if (!(dev->txq.qlen >= TX_QLEN(dev)))
1575 netif_tx_wake_all_queues(dev->net);
1576 tasklet_schedule (&dev->bh);
1577 }
1578 }
1579 return 0;
1580 }
1581 EXPORT_SYMBOL_GPL(usbnet_resume);
1582
1583
1584 /*-------------------------------------------------------------------------*/
1585
1586 static int __init usbnet_init(void)
1587 {
1588 /* Compiler should optimize this out. */
1589 BUILD_BUG_ON(
1590 FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
1591
1592 random_ether_addr(node_id);
1593 return 0;
1594 }
1595 module_init(usbnet_init);
1596
1597 static void __exit usbnet_exit(void)
1598 {
1599 }
1600 module_exit(usbnet_exit);
1601
1602 MODULE_AUTHOR("David Brownell");
1603 MODULE_DESCRIPTION("USB network driver framework");
1604 MODULE_LICENSE("GPL");
1605