1 /*
2 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
3 *
4 * Copyright (C) 2003-2005,2008 David Brownell
5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6 * Copyright (C) 2008 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 /* #define VERBOSE_DEBUG */
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/gfp.h>
19 #include <linux/device.h>
20 #include <linux/ctype.h>
21 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
24
25 #include "u_ether.h"
26
27
28 /*
29 * This component encapsulates the Ethernet link glue needed to provide
30 * one (!) network link through the USB gadget stack, normally "usb0".
31 *
32 * The control and data models are handled by the function driver which
33 * connects to this code, such as CDC Ethernet (ECM or EEM),
34 * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
35 * management.
36 *
37 * Link level addressing is handled by this component using module
38 * parameters; if no such parameters are provided, random link level
39 * addresses are used. Each end of the link uses one address. The
40 * host end address is exported in various ways, and is often recorded
41 * in configuration databases.
42 *
43 * The driver which assembles each configuration using such a link is
44 * responsible for ensuring that each configuration includes at most one
45 * instance of this network link.  (The network layer provides ways for
46 * this single "physical" link to be used by multiple virtual links.)
47 */
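/*
 * As a minimal sketch (not built as part of this driver; the
 * example_bind() name is purely illustrative), a gadget driver using the
 * legacy one-call path would rely only on gether_setup_name() and
 * gether_cleanup() from this file:
 */
#if 0	/* illustrative only */
static int example_bind(struct usb_gadget *g)
{
	struct eth_dev *ethdev;
	u8 host_mac[ETH_ALEN];

	/* creates and registers "usb0" (or similar); random addresses
	 * are chosen when the address strings are NULL or unparseable
	 */
	ethdev = gether_setup_name(g, NULL, NULL, host_mac,
				   QMULT_DEFAULT, "usb");
	if (IS_ERR(ethdev))
		return PTR_ERR(ethdev);

	/* ... bind function drivers to this link ... */

	gether_cleanup(ethdev);		/* at unbind time */
	return 0;
}
#endif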
48
49 #define UETH__VERSION "29-May-2008"
50
51 /* Experiments show that both Linux and Windows hosts allow up to 16k
52 * frame sizes.  Set the max size to 15k + 52 (15412 bytes) to avoid
53 * allocating 32k blocks while still handling frames efficiently. */
54 #define GETHER_MAX_ETH_FRAME_LEN 15412
55
56 static struct workqueue_struct *uether_wq;
57
58 struct eth_dev {
59 /* lock is held while accessing port_usb
60 */
61 spinlock_t lock;
62 struct gether *port_usb;
63
64 struct net_device *net;
65 struct usb_gadget *gadget;
66
67 spinlock_t req_lock; /* guard {rx,tx}_reqs */
68 struct list_head tx_reqs, rx_reqs;
69 atomic_t tx_qlen;
70 /* Minimum number of TX USB requests queued to the UDC */
71 #define TX_REQ_THRESHOLD 5
72 int no_tx_req_used;
73 int tx_skb_hold_count;
74 u32 tx_req_bufsize;
75
76 struct sk_buff_head rx_frames;
77
78 unsigned qmult;
79
80 unsigned header_len;
81 unsigned ul_max_pkts_per_xfer;
82 unsigned dl_max_pkts_per_xfer;
83 struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
84 int (*unwrap)(struct gether *,
85 struct sk_buff *skb,
86 struct sk_buff_head *list);
87
88 struct work_struct work;
89 struct work_struct rx_work;
90
91 unsigned long todo;
92 #define WORK_RX_MEMORY 0
93
94 bool zlp;
95 u8 host_mac[ETH_ALEN];
96 u8 dev_mac[ETH_ALEN];
97 };
98
99 /*-------------------------------------------------------------------------*/
100
101 #define RX_EXTRA 20 /* bytes guarding against rx overflows */
102
103 #define DEFAULT_QLEN 2 /* double buffering by default */
104
105 /* for dual-speed hardware, use deeper queues at high/super speed */
106 static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
107 {
108 if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
109 gadget->speed >= USB_SPEED_SUPER))
110 return qmult * DEFAULT_QLEN;
111 else
112 return DEFAULT_QLEN;
113 }
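/*
 * For example, assuming the usual QMULT_DEFAULT of 5 from u_ether.h, a
 * high- or super-speed link keeps up to 10 requests per direction in
 * flight, while full speed stays at the double-buffered minimum of 2.
 */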
114
115 /*-------------------------------------------------------------------------*/
116
117 /* REVISIT there must be a better way than having two sets
118 * of debug calls ...
119 */
120
121 #undef DBG
122 #undef VDBG
123 #undef ERROR
124 #undef INFO
125
126 #define xprintk(d, level, fmt, args...) \
127 printk(level "%s: " fmt , (d)->net->name , ## args)
128
129 #ifdef DEBUG
130 #undef DEBUG
131 #define DBG(dev, fmt, args...) \
132 xprintk(dev , KERN_DEBUG , fmt , ## args)
133 #else
134 #define DBG(dev, fmt, args...) \
135 do { } while (0)
136 #endif /* DEBUG */
137
138 #ifdef VERBOSE_DEBUG
139 #define VDBG DBG
140 #else
141 #define VDBG(dev, fmt, args...) \
142 do { } while (0)
143 #endif /* VERBOSE_DEBUG */
144
145 #define ERROR(dev, fmt, args...) \
146 xprintk(dev , KERN_ERR , fmt , ## args)
147 #define INFO(dev, fmt, args...) \
148 xprintk(dev , KERN_INFO , fmt , ## args)
149
150 /*-------------------------------------------------------------------------*/
151
152 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
153
154 static int ueth_change_mtu(struct net_device *net, int new_mtu)
155 {
156 struct eth_dev *dev = netdev_priv(net);
157 unsigned long flags;
158 int status = 0;
159
160 /* don't change MTU on "live" link (peer won't know) */
161 spin_lock_irqsave(&dev->lock, flags);
162 if (dev->port_usb)
163 status = -EBUSY;
164 else if (new_mtu <= ETH_HLEN || new_mtu > GETHER_MAX_ETH_FRAME_LEN)
165 status = -ERANGE;
166 else
167 net->mtu = new_mtu;
168 spin_unlock_irqrestore(&dev->lock, flags);
169
170 return status;
171 }
172
173 static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
174 {
175 struct eth_dev *dev = netdev_priv(net);
176
177 strlcpy(p->driver, "g_ether", sizeof(p->driver));
178 strlcpy(p->version, UETH__VERSION, sizeof(p->version));
179 strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
180 strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
181 }
182
183 /* REVISIT can also support:
184 * - WOL (by tracking suspends and issuing remote wakeup)
185 * - msglevel (implies updated messaging)
186 * - ... probably more ethtool ops
187 */
188
189 static const struct ethtool_ops ops = {
190 .get_drvinfo = eth_get_drvinfo,
191 .get_link = ethtool_op_get_link,
192 };
193
194 static void defer_kevent(struct eth_dev *dev, int flag)
195 {
196 if (test_and_set_bit(flag, &dev->todo))
197 return;
198 if (!schedule_work(&dev->work))
199 ERROR(dev, "kevent %d may have been dropped\n", flag);
200 else
201 DBG(dev, "kevent %d scheduled\n", flag);
202 }
203
204 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
205
206 static int
207 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
208 {
209 struct sk_buff *skb;
210 int retval = -ENOMEM;
211 size_t size = 0;
212 struct usb_ep *out;
213 unsigned long flags;
214
215 spin_lock_irqsave(&dev->lock, flags);
216 if (dev->port_usb)
217 out = dev->port_usb->out_ep;
218 else
219 out = NULL;
220
221 if (!out)
222 {
223 spin_unlock_irqrestore(&dev->lock, flags);
224 return -ENOTCONN;
225 }
226
227 /* Padding up to RX_EXTRA handles minor disagreements with host.
228 * Normally we use the USB "terminate on short read" convention;
229 * so allow up to (N*maxpacket), since that memory is normally
230 * already allocated. Some hardware doesn't deal well with short
231 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
232 * byte off the end (to force hardware errors on overflow).
233 *
234 * RNDIS uses internal framing, and explicitly allows senders to
235 * pad to end-of-packet. That's potentially nice for speed, but
236 * means receivers can't recover lost synch on their own (because
237 * new packets don't only start after a short RX).
238 */
239 size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
240 size += dev->port_usb->header_len;
241 size += out->maxpacket - 1;
242 size -= size % out->maxpacket;
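/* e.g. with a 1500-byte MTU, header_len of 0 and a 512-byte high-speed
 * bulk endpoint: 14 + 1500 + 20 = 1534 bytes, rounded up to 1536
 * (3 * 512), before any ul_max_pkts_per_xfer scaling below.
 */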
243
244 if (dev->ul_max_pkts_per_xfer)
245 size *= dev->ul_max_pkts_per_xfer;
246
247 if (dev->port_usb->is_fixed)
248 size = max_t(size_t, size, dev->port_usb->fixed_out_len);
249 spin_unlock_irqrestore(&dev->lock, flags);
250
251 DBG(dev, "%s: size: %zd\n", __func__, size);
252 skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
253 if (skb == NULL) {
254 DBG(dev, "no rx skb\n");
255 goto enomem;
256 }
257
258 /* Some platforms perform better when IP packets are aligned,
259 * but on at least one, checksumming fails otherwise. Note:
260 * RNDIS headers involve variable numbers of LE32 values.
261 */
262 skb_reserve(skb, NET_IP_ALIGN);
263
264 req->buf = skb->data;
265 req->length = size;
266 req->complete = rx_complete;
267 req->context = skb;
268
269 retval = usb_ep_queue(out, req, gfp_flags);
270 if (retval == -ENOMEM)
271 enomem:
272 defer_kevent(dev, WORK_RX_MEMORY);
273 if (retval) {
274 DBG(dev, "rx submit --> %d\n", retval);
275 if (skb)
276 dev_kfree_skb_any(skb);
277 }
278 return retval;
279 }
280
281 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
282 {
283 struct sk_buff *skb = req->context;
284 struct eth_dev *dev = ep->driver_data;
285 int status = req->status;
286 bool queue = 0;
287
288 switch (status) {
289
290 /* normal completion */
291 case 0:
292 skb_put(skb, req->actual);
293
294 if (dev->unwrap) {
295 unsigned long flags;
296
297 spin_lock_irqsave(&dev->lock, flags);
298 if (dev->port_usb) {
299 status = dev->unwrap(dev->port_usb,
300 skb,
301 &dev->rx_frames);
302 if (status == -EINVAL)
303 dev->net->stats.rx_errors++;
304 else if (status == -EOVERFLOW)
305 dev->net->stats.rx_over_errors++;
306 } else {
307 dev_kfree_skb_any(skb);
308 status = -ENOTCONN;
309 }
310 spin_unlock_irqrestore(&dev->lock, flags);
311 } else {
312 skb_queue_tail(&dev->rx_frames, skb);
313 }
314 if (!status)
315 queue = 1;
316 break;
317
318 /* software-driven interface shutdown */
319 case -ECONNRESET: /* unlink */
320 case -ESHUTDOWN: /* disconnect etc */
321 VDBG(dev, "rx shutdown, code %d\n", status);
322 goto quiesce;
323
324 /* for hardware automagic (such as pxa) */
325 case -ECONNABORTED: /* endpoint reset */
326 DBG(dev, "rx %s reset\n", ep->name);
327 defer_kevent(dev, WORK_RX_MEMORY);
328 quiesce:
329 dev_kfree_skb_any(skb);
330 goto clean;
331
332 /* data overrun */
333 case -EOVERFLOW:
334 dev->net->stats.rx_over_errors++;
335 /* FALLTHROUGH */
336
337 default:
338 queue = 1;
339 dev_kfree_skb_any(skb);
340 dev->net->stats.rx_errors++;
341 DBG(dev, "rx status %d\n", status);
342 break;
343 }
344
345 clean:
346 spin_lock(&dev->req_lock);
347 list_add(&req->list, &dev->rx_reqs);
348 spin_unlock(&dev->req_lock);
349
350 if (queue)
351 queue_work(uether_wq, &dev->rx_work);
352 }
353
354 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
355 {
356 unsigned i;
357 struct usb_request *req;
358
359 if (!n)
360 return -ENOMEM;
361
362 /* queue/recycle up to N requests */
363 i = n;
364 list_for_each_entry(req, list, list) {
365 if (i-- == 0)
366 goto extra;
367 }
368 while (i--) {
369 req = usb_ep_alloc_request(ep, GFP_ATOMIC);
370 if (!req)
371 return list_empty(list) ? -ENOMEM : 0;
372 list_add(&req->list, list);
373 }
374 return 0;
375
376 extra:
377 /* free extras */
378 for (;;) {
379 struct list_head *next;
380
381 next = req->list.next;
382 list_del(&req->list);
383 usb_ep_free_request(ep, req);
384
385 if (next == list)
386 break;
387
388 req = container_of(next, struct usb_request, list);
389 }
390 return 0;
391 }
392
393 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
394 {
395 int status;
396
397 spin_lock(&dev->req_lock);
398 status = prealloc(&dev->tx_reqs, link->in_ep, n);
399 if (status < 0)
400 goto fail;
401 status = prealloc(&dev->rx_reqs, link->out_ep, n);
402 if (status < 0)
403 goto fail;
404 goto done;
405 fail:
406 DBG(dev, "can't alloc requests\n");
407 done:
408 spin_unlock(&dev->req_lock);
409 return status;
410 }
411
412 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
413 {
414 struct usb_request *req;
415 unsigned long flags;
416 int req_cnt = 0;
417
418 /* fill unused rxq slots with some skb */
419 spin_lock_irqsave(&dev->req_lock, flags);
420 while (!list_empty(&dev->rx_reqs)) {
421 /* break the nexus of continuous completion and re-submission */
422 if (++req_cnt > qlen(dev->gadget, dev->qmult))
423 break;
424
425 req = container_of(dev->rx_reqs.next,
426 struct usb_request, list);
427 list_del_init(&req->list);
428 spin_unlock_irqrestore(&dev->req_lock, flags);
429
430 if (rx_submit(dev, req, gfp_flags) < 0) {
431 spin_lock_irqsave(&dev->req_lock, flags);
432 list_add(&req->list, &dev->rx_reqs);
433 spin_unlock_irqrestore(&dev->req_lock, flags);
434 defer_kevent(dev, WORK_RX_MEMORY);
435 return;
436 }
437
438 spin_lock_irqsave(&dev->req_lock, flags);
439 }
440 spin_unlock_irqrestore(&dev->req_lock, flags);
441 }
442
443 static void process_rx_w(struct work_struct *work)
444 {
445 struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
446 struct sk_buff *skb;
447 int status = 0;
448
449 if (!dev->port_usb)
450 return;
451
452 while ((skb = skb_dequeue(&dev->rx_frames))) {
453 if (status < 0
454 || ETH_HLEN > skb->len
455 || skb->len > ETH_FRAME_LEN) {
456 dev->net->stats.rx_errors++;
457 dev->net->stats.rx_length_errors++;
458 DBG(dev, "rx length %d\n", skb->len);
459 dev_kfree_skb_any(skb);
460 continue;
461 }
462 skb->protocol = eth_type_trans(skb, dev->net);
463 dev->net->stats.rx_packets++;
464 dev->net->stats.rx_bytes += skb->len;
465
466 status = netif_rx_ni(skb);
467 }
468
469 if (netif_running(dev->net))
470 rx_fill(dev, GFP_KERNEL);
471 }
472
473 static void eth_work(struct work_struct *work)
474 {
475 struct eth_dev *dev = container_of(work, struct eth_dev, work);
476
477 if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
478 if (netif_running(dev->net))
479 rx_fill(dev, GFP_KERNEL);
480 }
481
482 if (dev->todo)
483 DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
484 }
485
486 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
487 {
488 struct sk_buff *skb = req->context;
489 struct eth_dev *dev = ep->driver_data;
490 struct net_device *net = dev->net;
491 struct usb_request *new_req;
492 struct usb_ep *in;
493 int length;
494 int retval;
495
496 switch (req->status) {
497 default:
498 dev->net->stats.tx_errors++;
499 VDBG(dev, "tx err %d\n", req->status);
500 /* FALLTHROUGH */
501 case -ECONNRESET: /* unlink */
502 case -ESHUTDOWN: /* disconnect etc */
503 break;
504 case 0:
505 if (!req->zero)
506 dev->net->stats.tx_bytes += req->length-1;
507 else
508 dev->net->stats.tx_bytes += req->length;
509 }
510 dev->net->stats.tx_packets++;
511
512 spin_lock(&dev->req_lock);
513 list_add_tail(&req->list, &dev->tx_reqs);
514
515 if (dev->port_usb->multi_pkt_xfer) {
516 dev->no_tx_req_used--;
517 req->length = 0;
518 in = dev->port_usb->in_ep;
519
520 if (!list_empty(&dev->tx_reqs)) {
521 new_req = container_of(dev->tx_reqs.next,
522 struct usb_request, list);
523 list_del(&new_req->list);
524 spin_unlock(&dev->req_lock);
525 if (new_req->length > 0) {
526 length = new_req->length;
527
528 /* NCM requires no zlp if transfer is
529 * dwNtbInMaxSize */
530 if (dev->port_usb->is_fixed &&
531 length == dev->port_usb->fixed_in_len &&
532 (length % in->maxpacket) == 0)
533 new_req->zero = 0;
534 else
535 new_req->zero = 1;
536
537 /* use zlp framing on tx for strict CDC-Ether
538 * conformance, though any robust network rx
539 * path ignores extra padding, and some hardware
540 * doesn't like to write zlps.
541 */
542 if (new_req->zero && !dev->zlp &&
543 (length % in->maxpacket) == 0) {
544 new_req->zero = 0;
545 length++;
546 }
547
548 new_req->length = length;
549 retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
550 switch (retval) {
551 default:
552 DBG(dev, "tx queue err %d\n", retval);
553 break;
554 case 0:
555 spin_lock(&dev->req_lock);
556 dev->no_tx_req_used++;
557 spin_unlock(&dev->req_lock);
558 net->trans_start = jiffies;
559 }
560 } else {
561 spin_lock(&dev->req_lock);
562 list_add(&new_req->list, &dev->tx_reqs);
563 spin_unlock(&dev->req_lock);
564 }
565 } else {
566 spin_unlock(&dev->req_lock);
567 }
568 } else {
569 spin_unlock(&dev->req_lock);
570 dev_kfree_skb_any(skb);
571 }
572
573 atomic_dec(&dev->tx_qlen);
574 if (netif_carrier_ok(dev->net))
575 netif_wake_queue(dev->net);
576 }
577
578 static inline int is_promisc(u16 cdc_filter)
579 {
580 return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
581 }
582
583 static void alloc_tx_buffer(struct eth_dev *dev)
584 {
585 struct list_head *act;
586 struct usb_request *req;
587
588 dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
589 (dev->net->mtu
590 + sizeof(struct ethhdr)
591 /* size of rndis_packet_msg_type */
592 + 44
593 + 22));
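/* e.g. with a 1500-byte MTU and an illustrative dl_max_pkts_per_xfer of 3,
 * each request buffer is 3 * (1500 + 14 + 44 + 22) = 4740 bytes.
 */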
594
595 list_for_each(act, &dev->tx_reqs) {
596 req = container_of(act, struct usb_request, list);
597 if (!req->buf)
598 req->buf = kmalloc(dev->tx_req_bufsize,
599 GFP_ATOMIC);
600 }
601 }
602
603 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
604 struct net_device *net)
605 {
606 struct eth_dev *dev = netdev_priv(net);
607 int length = 0;
608 int retval;
609 struct usb_request *req = NULL;
610 unsigned long flags;
611 struct usb_ep *in;
612 u16 cdc_filter;
613
614 spin_lock_irqsave(&dev->lock, flags);
615 if (dev->port_usb) {
616 in = dev->port_usb->in_ep;
617 cdc_filter = dev->port_usb->cdc_filter;
618 } else {
619 in = NULL;
620 cdc_filter = 0;
621 }
622 spin_unlock_irqrestore(&dev->lock, flags);
623
624 if (!in) {
625 if (skb)
626 dev_kfree_skb_any(skb);
627 return NETDEV_TX_OK;
628 }
629
630 /* Allocate memory for tx_reqs to support multi packet transfer */
631 if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
632 alloc_tx_buffer(dev);
633
634 /* apply outgoing CDC or RNDIS filters */
635 if (skb && !is_promisc(cdc_filter)) {
636 u8 *dest = skb->data;
637
638 if (is_multicast_ether_addr(dest)) {
639 u16 type;
640
641 /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
642 * SET_ETHERNET_MULTICAST_FILTERS requests
643 */
644 if (is_broadcast_ether_addr(dest))
645 type = USB_CDC_PACKET_TYPE_BROADCAST;
646 else
647 type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
648 if (!(cdc_filter & type)) {
649 dev_kfree_skb_any(skb);
650 return NETDEV_TX_OK;
651 }
652 }
653 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
654 }
655
656 spin_lock_irqsave(&dev->req_lock, flags);
657 /*
658 * this freelist can be empty if an interrupt triggered disconnect()
659 * and reconfigured the gadget (shutting down this queue) after the
660 * network stack decided to xmit but before we got the spinlock.
661 */
662 if (list_empty(&dev->tx_reqs)) {
663 spin_unlock_irqrestore(&dev->req_lock, flags);
664 return NETDEV_TX_BUSY;
665 }
666
667 req = container_of(dev->tx_reqs.next, struct usb_request, list);
668 list_del(&req->list);
669
670 /* temporarily stop TX queue when the freelist empties */
671 if (list_empty(&dev->tx_reqs))
672 netif_stop_queue(net);
673 spin_unlock_irqrestore(&dev->req_lock, flags);
674
675 /* No buffer copies are needed unless the network stack did one,
676 * the hardware can't use skb buffers, or there's not enough
677 * space for the extra headers we need.
678 */
679 if (dev->wrap) {
680 unsigned long flags;
681
682 spin_lock_irqsave(&dev->lock, flags);
683 if (dev->port_usb)
684 skb = dev->wrap(dev->port_usb, skb);
685 spin_unlock_irqrestore(&dev->lock, flags);
686 if (!skb) {
687 /* Multi frame CDC protocols may store the frame for
688 * later which is not a dropped frame.
689 */
690 if (dev->port_usb->supports_multi_frame)
691 goto multiframe;
692 goto drop;
693 }
694 }
695
696 spin_lock_irqsave(&dev->req_lock, flags);
697 dev->tx_skb_hold_count++;
698 spin_unlock_irqrestore(&dev->req_lock, flags);
699
700 if (dev->port_usb->multi_pkt_xfer) {
701 memcpy(req->buf + req->length, skb->data, skb->len);
702 req->length = req->length + skb->len;
703 length = req->length;
704 dev_kfree_skb_any(skb);
705
706 spin_lock_irqsave(&dev->req_lock, flags);
707 if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
708 if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
709 list_add(&req->list, &dev->tx_reqs);
710 spin_unlock_irqrestore(&dev->req_lock, flags);
711 goto success;
712 }
713 }
714
715 dev->no_tx_req_used++;
716 spin_unlock_irqrestore(&dev->req_lock, flags);
717
718 spin_lock_irqsave(&dev->lock, flags);
719 dev->tx_skb_hold_count = 0;
720 spin_unlock_irqrestore(&dev->lock, flags);
721 } else {
722 length = skb->len;
723 req->buf = skb->data;
724 req->context = skb;
725 }
726
727 req->complete = tx_complete;
728
729 /* NCM requires no zlp if transfer is dwNtbInMaxSize */
730 if (dev->port_usb->is_fixed &&
731 length == dev->port_usb->fixed_in_len &&
732 (length % in->maxpacket) == 0)
733 req->zero = 0;
734 else
735 req->zero = 1;
736
737 /* use zlp framing on tx for strict CDC-Ether conformance,
738 * though any robust network rx path ignores extra padding,
739 * and some hardware doesn't like to write zlps.
740 */
741 if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
742 req->zero = 0;
743 length++;
744 }
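/* e.g. a 1024-byte transfer on a 512-byte bulk-in endpoint with zlp-less
 * hardware goes out as 1025 bytes: two full packets plus a one-byte short
 * packet, so the host's read still terminates.
 */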
745
746 req->length = length;
747
748 /* throttle high/super speed IRQ rate back slightly */
749 if (gadget_is_dualspeed(dev->gadget))
750 req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
751 dev->gadget->speed == USB_SPEED_SUPER)) &&
752 !list_empty(&dev->tx_reqs))
753 ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
754 : 0;
755
756 retval = usb_ep_queue(in, req, GFP_ATOMIC);
757 switch (retval) {
758 default:
759 DBG(dev, "tx queue err %d\n", retval);
760 break;
761 case 0:
762 net->trans_start = jiffies;
763 atomic_inc(&dev->tx_qlen);
764 }
765
766 if (retval) {
767 if (!dev->port_usb->multi_pkt_xfer)
768 dev_kfree_skb_any(skb);
769 drop:
770 dev->net->stats.tx_dropped++;
771 multiframe:
772 spin_lock_irqsave(&dev->req_lock, flags);
773 if (list_empty(&dev->tx_reqs))
774 netif_start_queue(net);
775 list_add(&req->list, &dev->tx_reqs);
776 spin_unlock_irqrestore(&dev->req_lock, flags);
777 }
778 success:
779 return NETDEV_TX_OK;
780 }
781
782 /*-------------------------------------------------------------------------*/
783
784 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
785 {
786 DBG(dev, "%s\n", __func__);
787
788 /* fill the rx queue */
789 rx_fill(dev, gfp_flags);
790
791 /* and open the tx floodgates */
792 atomic_set(&dev->tx_qlen, 0);
793 netif_wake_queue(dev->net);
794 }
795
796 static int eth_open(struct net_device *net)
797 {
798 struct eth_dev *dev = netdev_priv(net);
799 struct gether *link;
800
801 DBG(dev, "%s\n", __func__);
802 if (netif_carrier_ok(dev->net))
803 eth_start(dev, GFP_KERNEL);
804
805 spin_lock_irq(&dev->lock);
806 link = dev->port_usb;
807 if (link && link->open)
808 link->open(link);
809 spin_unlock_irq(&dev->lock);
810
811 return 0;
812 }
813
814 static int eth_stop(struct net_device *net)
815 {
816 struct eth_dev *dev = netdev_priv(net);
817 unsigned long flags;
818
819 VDBG(dev, "%s\n", __func__);
820 netif_stop_queue(net);
821
822 DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
823 dev->net->stats.rx_packets, dev->net->stats.tx_packets,
824 dev->net->stats.rx_errors, dev->net->stats.tx_errors
825 );
826
827 /* ensure there are no more active requests */
828 spin_lock_irqsave(&dev->lock, flags);
829 if (dev->port_usb) {
830 struct gether *link = dev->port_usb;
831 const struct usb_endpoint_descriptor *in;
832 const struct usb_endpoint_descriptor *out;
833
834 if (link->close)
835 link->close(link);
836
837 /* NOTE: we have no abort-queue primitive we could use
838 * to cancel all pending I/O. Instead, we disable then
839 * reenable the endpoints ... this idiom may leave toggle
840 * wrong, but that's a self-correcting error.
841 *
842 * REVISIT: we *COULD* just let the transfers complete at
843 * their own pace; the network stack can handle old packets.
844 * For the moment we leave this here, since it works.
845 */
846 in = link->in_ep->desc;
847 out = link->out_ep->desc;
848 usb_ep_disable(link->in_ep);
849 usb_ep_disable(link->out_ep);
850 if (netif_carrier_ok(net)) {
851 DBG(dev, "host still using in/out endpoints\n");
852 link->in_ep->desc = in;
853 link->out_ep->desc = out;
854 usb_ep_enable(link->in_ep);
855 usb_ep_enable(link->out_ep);
856 }
857 }
858 spin_unlock_irqrestore(&dev->lock, flags);
859
860 return 0;
861 }
862
863 /*-------------------------------------------------------------------------*/
864
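/* Parse six hex bytes, optionally separated by ':' or '.' (for example
 * "aa:bb:cc:dd:ee:f0"), into dev_addr.  Returns 0 on success; on a NULL
 * or invalid string a random address is generated instead and 1 returned.
 */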
865 static int get_ether_addr(const char *str, u8 *dev_addr)
866 {
867 if (str) {
868 unsigned i;
869
870 for (i = 0; i < 6; i++) {
871 unsigned char num;
872
873 if ((*str == '.') || (*str == ':'))
874 str++;
875 num = hex_to_bin(*str++) << 4;
876 num |= hex_to_bin(*str++);
877 dev_addr [i] = num;
878 }
879 if (is_valid_ether_addr(dev_addr))
880 return 0;
881 }
882 eth_random_addr(dev_addr);
883 return 1;
884 }
885
886 static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
887 {
888 if (len < 18)
889 return -EINVAL;
890
891 snprintf(str, len, "%pM", dev_addr);
892 return 18;
893 }
894
895 static const struct net_device_ops eth_netdev_ops = {
896 .ndo_open = eth_open,
897 .ndo_stop = eth_stop,
898 .ndo_start_xmit = eth_start_xmit,
899 .ndo_change_mtu = ueth_change_mtu,
900 .ndo_set_mac_address = eth_mac_addr,
901 .ndo_validate_addr = eth_validate_addr,
902 };
903
904 static struct device_type gadget_type = {
905 .name = "gadget",
906 };
907
908 /**
909 * gether_setup_name - initialize one ethernet-over-usb link
910 * @g: gadget to associate with this link
911 * @ethaddr: NULL, or a buffer in which the ethernet address of the
912 * host side of the link is recorded
913 * @netname: name for network device (for example, "usb")
914 * Context: may sleep
915 *
916 * This sets up the single network link that may be exported by a
917 * gadget driver using this framework. The link layer addresses are
918 * set up using module parameters.
919 *
920 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
921 */
922 struct eth_dev *gether_setup_name(struct usb_gadget *g,
923 const char *dev_addr, const char *host_addr,
924 u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
925 {
926 struct eth_dev *dev;
927 struct net_device *net;
928 int status;
929
930 net = alloc_etherdev(sizeof *dev);
931 if (!net)
932 return ERR_PTR(-ENOMEM);
933
934 dev = netdev_priv(net);
935 spin_lock_init(&dev->lock);
936 spin_lock_init(&dev->req_lock);
937 INIT_WORK(&dev->work, eth_work);
938 INIT_WORK(&dev->rx_work, process_rx_w);
939 INIT_LIST_HEAD(&dev->tx_reqs);
940 INIT_LIST_HEAD(&dev->rx_reqs);
941
942 skb_queue_head_init(&dev->rx_frames);
943
944 /* network device setup */
945 dev->net = net;
946 dev->qmult = qmult;
947 snprintf(net->name, sizeof(net->name), "%s%%d", netname);
948
949 if (get_ether_addr(dev_addr, net->dev_addr))
950 dev_warn(&g->dev,
951 "using random %s ethernet address\n", "self");
952 if (get_ether_addr(host_addr, dev->host_mac))
953 dev_warn(&g->dev,
954 "using random %s ethernet address\n", "host");
955
956 if (ethaddr)
957 memcpy(ethaddr, dev->host_mac, ETH_ALEN);
958
959 net->netdev_ops = &eth_netdev_ops;
960
961 net->ethtool_ops = &ops;
962
963 dev->gadget = g;
964 SET_NETDEV_DEV(net, &g->dev);
965 SET_NETDEV_DEVTYPE(net, &gadget_type);
966
967 status = register_netdev(net);
968 if (status < 0) {
969 dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
970 free_netdev(net);
971 dev = ERR_PTR(status);
972 } else {
973 INFO(dev, "MAC %pM\n", net->dev_addr);
974 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
975
976 /*
977 * two kinds of host-initiated state changes:
978 * - iff DATA transfer is active, carrier is "on"
979 * - tx queueing enabled if open *and* carrier is "on"
980 */
981 netif_carrier_off(net);
982 }
983
984 return dev;
985 }
986 EXPORT_SYMBOL_GPL(gether_setup_name);
987
988 struct net_device *gether_setup_name_default(const char *netname)
989 {
990 struct net_device *net;
991 struct eth_dev *dev;
992
993 net = alloc_etherdev(sizeof(*dev));
994 if (!net)
995 return ERR_PTR(-ENOMEM);
996
997 dev = netdev_priv(net);
998 spin_lock_init(&dev->lock);
999 spin_lock_init(&dev->req_lock);
1000 INIT_WORK(&dev->work, eth_work);
1001 INIT_WORK(&dev->rx_work, process_rx_w);
1002 INIT_LIST_HEAD(&dev->tx_reqs);
1003 INIT_LIST_HEAD(&dev->rx_reqs);
1004
1005 skb_queue_head_init(&dev->rx_frames);
1006
1007 /* network device setup */
1008 dev->net = net;
1009 dev->qmult = QMULT_DEFAULT;
1010 snprintf(net->name, sizeof(net->name), "%s%%d", netname);
1011
1012 eth_random_addr(dev->dev_mac);
1013 pr_warn("using random %s ethernet address\n", "self");
1014 eth_random_addr(dev->host_mac);
1015 pr_warn("using random %s ethernet address\n", "host");
1016
1017 net->netdev_ops = &eth_netdev_ops;
1018
1019 net->ethtool_ops = &ops;
1020 SET_NETDEV_DEVTYPE(net, &gadget_type);
1021
1022 return net;
1023 }
1024 EXPORT_SYMBOL_GPL(gether_setup_name_default);
1025
1026 int gether_register_netdev(struct net_device *net)
1027 {
1028 struct eth_dev *dev;
1029 struct usb_gadget *g;
1030 struct sockaddr sa;
1031 int status;
1032
1033 if (!net->dev.parent)
1034 return -EINVAL;
1035 dev = netdev_priv(net);
1036 g = dev->gadget;
1037 status = register_netdev(net);
1038 if (status < 0) {
1039 dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
1040 return status;
1041 } else {
1042 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
1043
1044 /* two kinds of host-initiated state changes:
1045 * - iff DATA transfer is active, carrier is "on"
1046 * - tx queueing enabled if open *and* carrier is "on"
1047 */
1048 netif_carrier_off(net);
1049 }
1050 sa.sa_family = net->type;
1051 memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
1052 rtnl_lock();
1053 status = dev_set_mac_address(net, &sa);
1054 rtnl_unlock();
1055 if (status)
1056 pr_warn("cannot set self ethernet address: %d\n", status);
1057 else
1058 INFO(dev, "MAC %pM\n", dev->dev_mac);
1059
1060 return status;
1061 }
1062 EXPORT_SYMBOL_GPL(gether_register_netdev);
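/*
 * A hedged sketch of the newer two-step bring-up used by configfs-style
 * function drivers: allocate the netdev first, attach the gadget once it
 * is known, then register.  Only the gether_*() calls are the real API;
 * the example_netdev_setup() name and the address string are illustrative.
 */
#if 0	/* illustrative only */
static struct net_device *example_netdev_setup(struct usb_gadget *g)
{
	struct net_device *net;
	int err;

	net = gether_setup_name_default("usb");
	if (IS_ERR(net))
		return net;

	gether_set_gadget(net, g);	/* ties the netdev to this UDC */

	/* optional: override the random "self" address */
	err = gether_set_dev_addr(net, "aa:bb:cc:dd:ee:f0");
	if (!err)
		err = gether_register_netdev(net);
	if (err) {
		free_netdev(net);	/* not yet registered */
		return ERR_PTR(err);
	}
	return net;
}
#endif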
1063
1064 void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
1065 {
1066 struct eth_dev *dev;
1067
1068 dev = netdev_priv(net);
1069 dev->gadget = g;
1070 SET_NETDEV_DEV(net, &g->dev);
1071 }
1072 EXPORT_SYMBOL_GPL(gether_set_gadget);
1073
1074 int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
1075 {
1076 struct eth_dev *dev;
1077 u8 new_addr[ETH_ALEN];
1078
1079 dev = netdev_priv(net);
1080 if (get_ether_addr(dev_addr, new_addr))
1081 return -EINVAL;
1082 memcpy(dev->dev_mac, new_addr, ETH_ALEN);
1083 return 0;
1084 }
1085 EXPORT_SYMBOL_GPL(gether_set_dev_addr);
1086
1087 int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
1088 {
1089 struct eth_dev *dev;
1090
1091 dev = netdev_priv(net);
1092 return get_ether_addr_str(dev->dev_mac, dev_addr, len);
1093 }
1094 EXPORT_SYMBOL_GPL(gether_get_dev_addr);
1095
1096 int gether_set_host_addr(struct net_device *net, const char *host_addr)
1097 {
1098 struct eth_dev *dev;
1099 u8 new_addr[ETH_ALEN];
1100
1101 dev = netdev_priv(net);
1102 if (get_ether_addr(host_addr, new_addr))
1103 return -EINVAL;
1104 memcpy(dev->host_mac, new_addr, ETH_ALEN);
1105 return 0;
1106 }
1107 EXPORT_SYMBOL_GPL(gether_set_host_addr);
1108
1109 int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
1110 {
1111 struct eth_dev *dev;
1112
1113 dev = netdev_priv(net);
1114 return get_ether_addr_str(dev->host_mac, host_addr, len);
1115 }
1116 EXPORT_SYMBOL_GPL(gether_get_host_addr);
1117
1118 int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
1119 {
1120 struct eth_dev *dev;
1121
1122 if (len < 13)
1123 return -EINVAL;
1124
1125 dev = netdev_priv(net);
1126 snprintf(host_addr, len, "%pm", dev->host_mac);
1127
1128 return strlen(host_addr);
1129 }
1130 EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
1131
1132 void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
1133 {
1134 struct eth_dev *dev;
1135
1136 dev = netdev_priv(net);
1137 memcpy(host_mac, dev->host_mac, ETH_ALEN);
1138 }
1139 EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);
1140
1141 void gether_set_qmult(struct net_device *net, unsigned qmult)
1142 {
1143 struct eth_dev *dev;
1144
1145 dev = netdev_priv(net);
1146 dev->qmult = qmult;
1147 }
1148 EXPORT_SYMBOL_GPL(gether_set_qmult);
1149
1150 unsigned gether_get_qmult(struct net_device *net)
1151 {
1152 struct eth_dev *dev;
1153
1154 dev = netdev_priv(net);
1155 return dev->qmult;
1156 }
1157 EXPORT_SYMBOL_GPL(gether_get_qmult);
1158
1159 int gether_get_ifname(struct net_device *net, char *name, int len)
1160 {
1161 rtnl_lock();
1162 strlcpy(name, netdev_name(net), len);
1163 rtnl_unlock();
1164 return strlen(name);
1165 }
1166 EXPORT_SYMBOL_GPL(gether_get_ifname);
1167
1168 /**
1169 * gether_cleanup - remove Ethernet-over-USB device
1170 * Context: may sleep
1171 *
1172 * This is called to free all resources allocated by @gether_setup().
1173 */
1174 void gether_cleanup(struct eth_dev *dev)
1175 {
1176 if (!dev)
1177 return;
1178
1179 unregister_netdev(dev->net);
1180 flush_work(&dev->work);
1181 free_netdev(dev->net);
1182 }
1183 EXPORT_SYMBOL_GPL(gether_cleanup);
1184
1185 /**
1186 * gether_connect - notify network layer that USB link is active
1187 * @link: the USB link, set up with endpoints, descriptors matching
1188 * current device speed, and any framing wrapper(s) set up.
1189 * Context: irqs blocked
1190 *
1191 * This is called to activate endpoints and let the network layer know
1192 * the connection is active ("carrier detect"). It may cause the I/O
1193 * queues to open and start letting network packets flow, but will in
1194 * any case activate the endpoints so that they respond properly to the
1195 * USB host.
1196 *
1197 * Verify net_device pointer returned using IS_ERR(). If it doesn't
1198 * indicate some error code (negative errno), ep->driver_data values
1199 * have been overwritten.
1200 */
1201 struct net_device *gether_connect(struct gether *link)
1202 {
1203 struct eth_dev *dev = link->ioport;
1204 int result = 0;
1205
1206 if (!dev)
1207 return ERR_PTR(-EINVAL);
1208
1209 link->in_ep->driver_data = dev;
1210 result = usb_ep_enable(link->in_ep);
1211 if (result != 0) {
1212 DBG(dev, "enable %s --> %d\n",
1213 link->in_ep->name, result);
1214 goto fail0;
1215 }
1216
1217 link->out_ep->driver_data = dev;
1218 result = usb_ep_enable(link->out_ep);
1219 if (result != 0) {
1220 DBG(dev, "enable %s --> %d\n",
1221 link->out_ep->name, result);
1222 goto fail1;
1223 }
1224
1225 if (result == 0)
1226 result = alloc_requests(dev, link, qlen(dev->gadget,
1227 dev->qmult));
1228
1229 if (result == 0) {
1230 dev->zlp = link->is_zlp_ok;
1231 DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));
1232
1233 dev->header_len = link->header_len;
1234 dev->unwrap = link->unwrap;
1235 dev->wrap = link->wrap;
1236 dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
1237 dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
1238
1239 spin_lock(&dev->lock);
1240 dev->tx_skb_hold_count = 0;
1241 dev->no_tx_req_used = 0;
1242 dev->tx_req_bufsize = 0;
1243 dev->port_usb = link;
1244 if (netif_running(dev->net)) {
1245 if (link->open)
1246 link->open(link);
1247 } else {
1248 if (link->close)
1249 link->close(link);
1250 }
1251 spin_unlock(&dev->lock);
1252
1253 netif_carrier_on(dev->net);
1254 if (netif_running(dev->net))
1255 eth_start(dev, GFP_ATOMIC);
1256
1257 /* on error, disable any endpoints */
1258 } else {
1259 (void) usb_ep_disable(link->out_ep);
1260 fail1:
1261 (void) usb_ep_disable(link->in_ep);
1262 }
1263 fail0:
1264 /* caller is responsible for cleanup on error */
1265 if (result < 0)
1266 return ERR_PTR(result);
1267 return dev->net;
1268 }
1269 EXPORT_SYMBOL_GPL(gether_connect);
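/*
 * A hedged sketch of how a function driver's set_alt()/disable() paths
 * typically drive this interface once its struct gether ("link") has
 * in_ep, out_ep and ioport filled in; the example_*() wrappers are
 * illustrative, only gether_connect()/gether_disconnect() are real.
 */
#if 0	/* illustrative only */
static int example_set_alt(struct gether *link)
{
	struct net_device *net;

	net = gether_connect(link);	/* enables endpoints, carrier on */
	if (IS_ERR(net))
		return PTR_ERR(net);
	return 0;
}

static void example_disable(struct gether *link)
{
	gether_disconnect(link);	/* carrier off, requests freed */
}
#endif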
1270
1271 /**
1272 * gether_disconnect - notify network layer that USB link is inactive
1273 * @link: the USB link, on which gether_connect() was called
1274 * Context: irqs blocked
1275 *
1276 * This is called to deactivate endpoints and let the network layer know
1277 * the connection went inactive ("no carrier").
1278 *
1279 * On return, the state is as if gether_connect() had never been called.
1280 * The endpoints are inactive, and accordingly without active USB I/O.
1281 * Pointers to endpoint descriptors and endpoint private data are nulled.
1282 */
1283 void gether_disconnect(struct gether *link)
1284 {
1285 struct eth_dev *dev = link->ioport;
1286 struct usb_request *req;
1287 struct sk_buff *skb;
1288
1289 WARN_ON(!dev);
1290 if (!dev)
1291 return;
1292
1293 DBG(dev, "%s\n", __func__);
1294
1295 netif_stop_queue(dev->net);
1296 netif_carrier_off(dev->net);
1297
1298 /* disable endpoints, forcing (synchronous) completion
1299 * of all pending i/o. then free the request objects
1300 * and forget about the endpoints.
1301 */
1302 usb_ep_disable(link->in_ep);
1303 spin_lock(&dev->req_lock);
1304 while (!list_empty(&dev->tx_reqs)) {
1305 req = container_of(dev->tx_reqs.next,
1306 struct usb_request, list);
1307 list_del(&req->list);
1308
1309 spin_unlock(&dev->req_lock);
1310 if (link->multi_pkt_xfer)
1311 kfree(req->buf);
1312 usb_ep_free_request(link->in_ep, req);
1313 spin_lock(&dev->req_lock);
1314 }
1315 spin_unlock(&dev->req_lock);
1316 link->in_ep->desc = NULL;
1317
1318 usb_ep_disable(link->out_ep);
1319 spin_lock(&dev->req_lock);
1320 while (!list_empty(&dev->rx_reqs)) {
1321 req = container_of(dev->rx_reqs.next,
1322 struct usb_request, list);
1323 list_del(&req->list);
1324
1325 spin_unlock(&dev->req_lock);
1326 usb_ep_free_request(link->out_ep, req);
1327 spin_lock(&dev->req_lock);
1328 }
1329 spin_unlock(&dev->req_lock);
1330
1331 spin_lock(&dev->rx_frames.lock);
1332 while ((skb = __skb_dequeue(&dev->rx_frames)))
1333 dev_kfree_skb_any(skb);
1334 spin_unlock(&dev->rx_frames.lock);
1335
1336 link->out_ep->desc = NULL;
1337
1338 /* finish forgetting about this USB link episode */
1339 dev->header_len = 0;
1340 dev->unwrap = NULL;
1341 dev->wrap = NULL;
1342
1343 spin_lock(&dev->lock);
1344 dev->port_usb = NULL;
1345 spin_unlock(&dev->lock);
1346 }
1347 EXPORT_SYMBOL_GPL(gether_disconnect);
1348
1349 static int __init gether_init(void)
1350 {
1351 uether_wq = create_singlethread_workqueue("uether");
1352 if (!uether_wq) {
1353 pr_err("%s: Unable to create workqueue: uether\n", __func__);
1354 return -ENOMEM;
1355 }
1356 return 0;
1357 }
1358 module_init(gether_init);
1359
1360 static void __exit gether_exit(void)
1361 {
1362 destroy_workqueue(uether_wq);
1363
1364 }
1365 module_exit(gether_exit);
1366 MODULE_AUTHOR("David Brownell");
1367 MODULE_DESCRIPTION("ethernet over USB driver");
1368 MODULE_LICENSE("GPL v2");
1369