1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
4  *
5  * Copyright (C) 2003-2005,2008 David Brownell
6  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
7  * Copyright (C) 2008 Nokia Corporation
8  */
9 
10 /* #define VERBOSE_DEBUG */
11 
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/gfp.h>
15 #include <linux/device.h>
16 #include <linux/ctype.h>
17 #include <linux/etherdevice.h>
18 #include <linux/ethtool.h>
19 #include <linux/if_vlan.h>
20 #include <linux/etherdevice.h>
21 #include <linux/string_helpers.h>
22 
23 #include "u_ether.h"
24 
25 
26 /*
27  * This component encapsulates the Ethernet link glue needed to provide
28  * one (!) network link through the USB gadget stack, normally "usb0".
29  *
30  * The control and data models are handled by the function driver which
31  * connects to this code; such as CDC Ethernet (ECM or EEM),
32  * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
33  * management.
34  *
35  * Link level addressing is handled by this component using module
36  * parameters; if no such parameters are provided, random link level
37  * addresses are used.  Each end of the link uses one address.  The
38  * host end address is exported in various ways, and is often recorded
39  * in configuration databases.
40  *
41  * The driver which assembles each configuration using such a link is
42  * responsible for ensuring that each configuration includes at most one
43  * instance of this network link.  (The network layer provides ways for
44  * this single "physical" link to be used by multiple virtual links.)
45  */
46 
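/*
 * Overview sketch (illustrative, not part of this driver): a function or
 * gadget driver normally uses the helpers exported below roughly in this
 * order; names in angle brackets are whatever the caller supplies.
 *
 *	gether_setup_name_default(<netname>)	allocate the net_device
 *	gether_set_gadget(net, <gadget>)	attach it to a UDC
 *	gether_register_netdev(net)		publish it, e.g. as "usb0"
 *	gether_connect(<link>)			set_alt(): carrier on, I/O starts
 *	gether_disconnect(<link>)		disable(): carrier off, I/O stops
 *	gether_cleanup(netdev_priv(net))	unregister and free the link
 */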
47 #define UETH__VERSION	"29-May-2008"
48 
49 /* Experiments show that both Linux and Windows hosts allow up to 16k
50  * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
51  * blocks and still have efficient handling. */
52 #define GETHER_MAX_MTU_SIZE 15412
53 #define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
54 
55 struct eth_dev {
56 	/* lock is held while accessing port_usb
57 	 */
58 	spinlock_t		lock;
59 	struct gether		*port_usb;
60 
61 	struct net_device	*net;
62 	struct usb_gadget	*gadget;
63 
64 	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
65 	struct list_head	tx_reqs, rx_reqs;
66 	atomic_t		tx_qlen;
67 
68 	struct sk_buff_head	rx_frames;
69 
70 	unsigned		qmult;
71 
72 	unsigned		header_len;
73 	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
74 	int			(*unwrap)(struct gether *,
75 						struct sk_buff *skb,
76 						struct sk_buff_head *list);
77 
78 	struct work_struct	work;
79 
80 	unsigned long		todo;
81 #define	WORK_RX_MEMORY		0
82 
83 	bool			zlp;
84 	bool			no_skb_reserve;
85 	bool			ifname_set;
86 	u8			host_mac[ETH_ALEN];
87 	u8			dev_mac[ETH_ALEN];
88 };
89 
90 /*-------------------------------------------------------------------------*/
91 
92 #define RX_EXTRA	20	/* bytes guarding against rx overflows */
93 
94 #define DEFAULT_QLEN	2	/* double buffering by default */
95 
96 /* for dual-speed hardware, use deeper queues at high/super speed */
97 static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
98 {
99 	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
100 					    gadget->speed >= USB_SPEED_SUPER))
101 		return qmult * DEFAULT_QLEN;
102 	else
103 		return DEFAULT_QLEN;
104 }
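/*
 * Worked example (illustrative, assuming the default qmult of 5 from
 * u_ether.h): a dual-speed gadget running at high or super speed queues
 * 5 * DEFAULT_QLEN = 10 requests per direction, while a full-speed link
 * keeps the double-buffered minimum of 2.
 */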
105 
106 /*-------------------------------------------------------------------------*/
107 
108 /* REVISIT there must be a better way than having two sets
109  * of debug calls ...
110  */
111 
112 #undef DBG
113 #undef VDBG
114 #undef ERROR
115 #undef INFO
116 
117 #define xprintk(d, level, fmt, args...) \
118 	printk(level "%s: " fmt , (d)->net->name , ## args)
119 
120 #ifdef DEBUG
121 #undef DEBUG
122 #define DBG(dev, fmt, args...) \
123 	xprintk(dev , KERN_DEBUG , fmt , ## args)
124 #else
125 #define DBG(dev, fmt, args...) \
126 	do { } while (0)
127 #endif /* DEBUG */
128 
129 #ifdef VERBOSE_DEBUG
130 #define VDBG	DBG
131 #else
132 #define VDBG(dev, fmt, args...) \
133 	do { } while (0)
134 #endif /* VERBOSE_DEBUG */
135 
136 #define ERROR(dev, fmt, args...) \
137 	xprintk(dev , KERN_ERR , fmt , ## args)
138 #define INFO(dev, fmt, args...) \
139 	xprintk(dev , KERN_INFO , fmt , ## args)
140 
141 /*-------------------------------------------------------------------------*/
142 
143 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
144 
145 static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
146 {
147 	struct eth_dev *dev = netdev_priv(net);
148 
149 	strscpy(p->driver, "g_ether", sizeof(p->driver));
150 	strscpy(p->version, UETH__VERSION, sizeof(p->version));
151 	strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
152 	strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
153 }
154 
155 /* REVISIT can also support:
156  *   - WOL (by tracking suspends and issuing remote wakeup)
157  *   - msglevel (implies updated messaging)
158  *   - ... probably more ethtool ops
159  */
160 
161 static const struct ethtool_ops ops = {
162 	.get_drvinfo = eth_get_drvinfo,
163 	.get_link = ethtool_op_get_link,
164 };
165 
166 static void defer_kevent(struct eth_dev *dev, int flag)
167 {
168 	if (test_and_set_bit(flag, &dev->todo))
169 		return;
170 	if (!schedule_work(&dev->work))
171 		ERROR(dev, "kevent %d may have been dropped\n", flag);
172 	else
173 		DBG(dev, "kevent %d scheduled\n", flag);
174 }
175 
176 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
177 
178 static int
179 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
180 {
181 	struct usb_gadget *g = dev->gadget;
182 	struct sk_buff	*skb;
183 	int		retval = -ENOMEM;
184 	size_t		size = 0;
185 	struct usb_ep	*out;
186 	unsigned long	flags;
187 
188 	spin_lock_irqsave(&dev->lock, flags);
189 	if (dev->port_usb)
190 		out = dev->port_usb->out_ep;
191 	else
192 		out = NULL;
193 
194 	if (!out)
195 	{
196 		spin_unlock_irqrestore(&dev->lock, flags);
197 		return -ENOTCONN;
198 	}
199 
200 	/* Padding up to RX_EXTRA handles minor disagreements with host.
201 	 * Normally we use the USB "terminate on short read" convention;
202 	 * so allow up to (N*maxpacket), since that memory is normally
203 	 * already allocated.  Some hardware doesn't deal well with short
204 	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
205 	 * byte off the end (to force hardware errors on overflow).
206 	 *
207 	 * RNDIS uses internal framing, and explicitly allows senders to
208 	 * pad to end-of-packet.  That's potentially nice for speed, but
209 	 * means receivers can't recover lost synch on their own (because
210 	 * new packets don't only start after a short RX).
211 	 */
212 	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
213 	size += dev->port_usb->header_len;
214 
215 	if (g->quirk_ep_out_aligned_size) {
216 		size += out->maxpacket - 1;
217 		size -= size % out->maxpacket;
218 	}
219 
220 	if (dev->port_usb->is_fixed)
221 		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
222 	spin_unlock_irqrestore(&dev->lock, flags);
223 
224 	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
225 	if (skb == NULL) {
226 		DBG(dev, "no rx skb\n");
227 		goto enomem;
228 	}
229 
230 	/* Some platforms perform better when IP packets are aligned,
231 	 * but on at least one, checksumming fails otherwise.  Note:
232 	 * RNDIS headers involve variable numbers of LE32 values.
233 	 */
234 	if (likely(!dev->no_skb_reserve))
235 		skb_reserve(skb, NET_IP_ALIGN);
236 
237 	req->buf = skb->data;
238 	req->length = size;
239 	req->complete = rx_complete;
240 	req->context = skb;
241 
242 	retval = usb_ep_queue(out, req, gfp_flags);
243 	if (retval == -ENOMEM)
244 enomem:
245 		defer_kevent(dev, WORK_RX_MEMORY);
246 	if (retval) {
247 		DBG(dev, "rx submit --> %d\n", retval);
248 		if (skb)
249 			dev_kfree_skb_any(skb);
250 		spin_lock_irqsave(&dev->req_lock, flags);
251 		list_add(&req->list, &dev->rx_reqs);
252 		spin_unlock_irqrestore(&dev->req_lock, flags);
253 	}
254 	return retval;
255 }
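/*
 * Sizing example (illustrative): with a 1500-byte MTU, no function-specific
 * header and RX_EXTRA of 20, the request above asks for 14 + 1500 + 20 =
 * 1534 bytes; a controller with quirk_ep_out_aligned_size and 512-byte bulk
 * packets rounds that up to 1536 before the skb is allocated.
 */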
256 
257 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
258 {
259 	struct sk_buff	*skb = req->context, *skb2;
260 	struct eth_dev	*dev = ep->driver_data;
261 	int		status = req->status;
262 
263 	switch (status) {
264 
265 	/* normal completion */
266 	case 0:
267 		skb_put(skb, req->actual);
268 
269 		if (dev->unwrap) {
270 			unsigned long	flags;
271 
272 			spin_lock_irqsave(&dev->lock, flags);
273 			if (dev->port_usb) {
274 				status = dev->unwrap(dev->port_usb,
275 							skb,
276 							&dev->rx_frames);
277 			} else {
278 				dev_kfree_skb_any(skb);
279 				status = -ENOTCONN;
280 			}
281 			spin_unlock_irqrestore(&dev->lock, flags);
282 		} else {
283 			skb_queue_tail(&dev->rx_frames, skb);
284 		}
285 		skb = NULL;
286 
287 		skb2 = skb_dequeue(&dev->rx_frames);
288 		while (skb2) {
289 			if (status < 0
290 					|| ETH_HLEN > skb2->len
291 					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
292 				dev->net->stats.rx_errors++;
293 				dev->net->stats.rx_length_errors++;
294 				DBG(dev, "rx length %d\n", skb2->len);
295 				dev_kfree_skb_any(skb2);
296 				goto next_frame;
297 			}
298 			skb2->protocol = eth_type_trans(skb2, dev->net);
299 			dev->net->stats.rx_packets++;
300 			dev->net->stats.rx_bytes += skb2->len;
301 
302 			/* no buffer copies needed, unless hardware can't
303 			 * use skb buffers.
304 			 */
305 			status = netif_rx(skb2);
306 next_frame:
307 			skb2 = skb_dequeue(&dev->rx_frames);
308 		}
309 		break;
310 
311 	/* software-driven interface shutdown */
312 	case -ECONNRESET:		/* unlink */
313 	case -ESHUTDOWN:		/* disconnect etc */
314 		VDBG(dev, "rx shutdown, code %d\n", status);
315 		goto quiesce;
316 
317 	/* for hardware automagic (such as pxa) */
318 	case -ECONNABORTED:		/* endpoint reset */
319 		DBG(dev, "rx %s reset\n", ep->name);
320 		defer_kevent(dev, WORK_RX_MEMORY);
321 quiesce:
322 		dev_kfree_skb_any(skb);
323 		goto clean;
324 
325 	/* data overrun */
326 	case -EOVERFLOW:
327 		dev->net->stats.rx_over_errors++;
328 		fallthrough;
329 
330 	default:
331 		dev->net->stats.rx_errors++;
332 		DBG(dev, "rx status %d\n", status);
333 		break;
334 	}
335 
336 	if (skb)
337 		dev_kfree_skb_any(skb);
338 	if (!netif_running(dev->net)) {
339 clean:
340 		spin_lock(&dev->req_lock);
341 		list_add(&req->list, &dev->rx_reqs);
342 		spin_unlock(&dev->req_lock);
343 		req = NULL;
344 	}
345 	if (req)
346 		rx_submit(dev, req, GFP_ATOMIC);
347 }
348 
349 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
350 {
351 	unsigned		i;
352 	struct usb_request	*req;
353 
354 	if (!n)
355 		return -ENOMEM;
356 
357 	/* queue/recycle up to N requests */
358 	i = n;
359 	list_for_each_entry(req, list, list) {
360 		if (i-- == 0)
361 			goto extra;
362 	}
363 	while (i--) {
364 		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
365 		if (!req)
366 			return list_empty(list) ? -ENOMEM : 0;
367 		list_add(&req->list, list);
368 	}
369 	return 0;
370 
371 extra:
372 	/* free extras */
373 	for (;;) {
374 		struct list_head	*next;
375 
376 		next = req->list.next;
377 		list_del(&req->list);
378 		usb_ep_free_request(ep, req);
379 
380 		if (next == list)
381 			break;
382 
383 		req = container_of(next, struct usb_request, list);
384 	}
385 	return 0;
386 }
387 
388 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
389 {
390 	int	status;
391 
392 	spin_lock(&dev->req_lock);
393 	status = prealloc(&dev->tx_reqs, link->in_ep, n);
394 	if (status < 0)
395 		goto fail;
396 	status = prealloc(&dev->rx_reqs, link->out_ep, n);
397 	if (status < 0)
398 		goto fail;
399 	goto done;
400 fail:
401 	DBG(dev, "can't alloc requests\n");
402 done:
403 	spin_unlock(&dev->req_lock);
404 	return status;
405 }
406 
407 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
408 {
409 	struct usb_request	*req;
410 	unsigned long		flags;
411 
412 	/* fill unused rxq slots with some skb */
413 	spin_lock_irqsave(&dev->req_lock, flags);
414 	while (!list_empty(&dev->rx_reqs)) {
415 		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
416 		list_del_init(&req->list);
417 		spin_unlock_irqrestore(&dev->req_lock, flags);
418 
419 		if (rx_submit(dev, req, gfp_flags) < 0) {
420 			defer_kevent(dev, WORK_RX_MEMORY);
421 			return;
422 		}
423 
424 		spin_lock_irqsave(&dev->req_lock, flags);
425 	}
426 	spin_unlock_irqrestore(&dev->req_lock, flags);
427 }
428 
429 static void eth_work(struct work_struct *work)
430 {
431 	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
432 
433 	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
434 		if (netif_running(dev->net))
435 			rx_fill(dev, GFP_KERNEL);
436 	}
437 
438 	if (dev->todo)
439 		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
440 }
441 
442 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
443 {
444 	struct sk_buff	*skb = req->context;
445 	struct eth_dev	*dev = ep->driver_data;
446 
447 	switch (req->status) {
448 	default:
449 		dev->net->stats.tx_errors++;
450 		VDBG(dev, "tx err %d\n", req->status);
451 		fallthrough;
452 	case -ECONNRESET:		/* unlink */
453 	case -ESHUTDOWN:		/* disconnect etc */
454 		dev_kfree_skb_any(skb);
455 		break;
456 	case 0:
457 		dev->net->stats.tx_bytes += skb->len;
458 		dev_consume_skb_any(skb);
459 	}
460 	dev->net->stats.tx_packets++;
461 
462 	spin_lock(&dev->req_lock);
463 	list_add(&req->list, &dev->tx_reqs);
464 	spin_unlock(&dev->req_lock);
465 
466 	atomic_dec(&dev->tx_qlen);
467 	if (netif_carrier_ok(dev->net))
468 		netif_wake_queue(dev->net);
469 }
470 
471 static inline int is_promisc(u16 cdc_filter)
472 {
473 	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
474 }
475 
476 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
477 					struct net_device *net)
478 {
479 	struct eth_dev		*dev = netdev_priv(net);
480 	int			length = 0;
481 	int			retval;
482 	struct usb_request	*req = NULL;
483 	unsigned long		flags;
484 	struct usb_ep		*in;
485 	u16			cdc_filter;
486 
487 	spin_lock_irqsave(&dev->lock, flags);
488 	if (dev->port_usb) {
489 		in = dev->port_usb->in_ep;
490 		cdc_filter = dev->port_usb->cdc_filter;
491 	} else {
492 		in = NULL;
493 		cdc_filter = 0;
494 	}
495 	spin_unlock_irqrestore(&dev->lock, flags);
496 
497 	if (!in) {
498 		if (skb)
499 			dev_kfree_skb_any(skb);
500 		return NETDEV_TX_OK;
501 	}
502 
503 	/* apply outgoing CDC or RNDIS filters */
504 	if (skb && !is_promisc(cdc_filter)) {
505 		u8		*dest = skb->data;
506 
507 		if (is_multicast_ether_addr(dest)) {
508 			u16	type;
509 
510 			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
511 			 * SET_ETHERNET_MULTICAST_FILTERS requests
512 			 */
513 			if (is_broadcast_ether_addr(dest))
514 				type = USB_CDC_PACKET_TYPE_BROADCAST;
515 			else
516 				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
517 			if (!(cdc_filter & type)) {
518 				dev_kfree_skb_any(skb);
519 				return NETDEV_TX_OK;
520 			}
521 		}
522 		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
523 	}
524 
525 	spin_lock_irqsave(&dev->req_lock, flags);
526 	/*
527 	 * this freelist can be empty if an interrupt triggered disconnect()
528 	 * and reconfigured the gadget (shutting down this queue) after the
529 	 * network stack decided to xmit but before we got the spinlock.
530 	 */
531 	if (list_empty(&dev->tx_reqs)) {
532 		spin_unlock_irqrestore(&dev->req_lock, flags);
533 		return NETDEV_TX_BUSY;
534 	}
535 
536 	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
537 	list_del(&req->list);
538 
539 	/* temporarily stop TX queue when the freelist empties */
540 	if (list_empty(&dev->tx_reqs))
541 		netif_stop_queue(net);
542 	spin_unlock_irqrestore(&dev->req_lock, flags);
543 
544 	/* no buffer copies needed, unless the network stack did it
545 	 * or the hardware can't use skb buffers.
546 	 * or there's not enough space for extra headers we need
547 	 */
548 	if (dev->wrap) {
549 		unsigned long	flags;
550 
551 		spin_lock_irqsave(&dev->lock, flags);
552 		if (dev->port_usb)
553 			skb = dev->wrap(dev->port_usb, skb);
554 		spin_unlock_irqrestore(&dev->lock, flags);
555 		if (!skb) {
556 			/* Multi frame CDC protocols may store the frame for
557 			 * later which is not a dropped frame.
558 			 */
559 			if (dev->port_usb &&
560 					dev->port_usb->supports_multi_frame)
561 				goto multiframe;
562 			goto drop;
563 		}
564 	}
565 
566 	length = skb->len;
567 	req->buf = skb->data;
568 	req->context = skb;
569 	req->complete = tx_complete;
570 
571 	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
572 	if (dev->port_usb &&
573 	    dev->port_usb->is_fixed &&
574 	    length == dev->port_usb->fixed_in_len &&
575 	    (length % in->maxpacket) == 0)
576 		req->zero = 0;
577 	else
578 		req->zero = 1;
579 
580 	/* use zlp framing on tx for strict CDC-Ether conformance,
581 	 * though any robust network rx path ignores extra padding.
582 	 * and some hardware doesn't like to write zlps.
583 	 */
584 	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
585 		length++;
586 
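	/* Example (illustrative): with 512-byte bulk packets, a 1024-byte
	 * transfer on hardware that cannot send ZLPs is padded above to
	 * 1025 bytes, so the host still sees a short packet marking
	 * end-of-transfer.
	 */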
587 	req->length = length;
588 
589 	retval = usb_ep_queue(in, req, GFP_ATOMIC);
590 	switch (retval) {
591 	default:
592 		DBG(dev, "tx queue err %d\n", retval);
593 		break;
594 	case 0:
595 		netif_trans_update(net);
596 		atomic_inc(&dev->tx_qlen);
597 	}
598 
599 	if (retval) {
600 		dev_kfree_skb_any(skb);
601 drop:
602 		dev->net->stats.tx_dropped++;
603 multiframe:
604 		spin_lock_irqsave(&dev->req_lock, flags);
605 		if (list_empty(&dev->tx_reqs))
606 			netif_start_queue(net);
607 		list_add(&req->list, &dev->tx_reqs);
608 		spin_unlock_irqrestore(&dev->req_lock, flags);
609 	}
610 	return NETDEV_TX_OK;
611 }
612 
613 /*-------------------------------------------------------------------------*/
614 
615 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
616 {
617 	DBG(dev, "%s\n", __func__);
618 
619 	/* fill the rx queue */
620 	rx_fill(dev, gfp_flags);
621 
622 	/* and open the tx floodgates */
623 	atomic_set(&dev->tx_qlen, 0);
624 	netif_wake_queue(dev->net);
625 }
626 
627 static int eth_open(struct net_device *net)
628 {
629 	struct eth_dev	*dev = netdev_priv(net);
630 	struct gether	*link;
631 
632 	DBG(dev, "%s\n", __func__);
633 	if (netif_carrier_ok(dev->net))
634 		eth_start(dev, GFP_KERNEL);
635 
636 	spin_lock_irq(&dev->lock);
637 	link = dev->port_usb;
638 	if (link && link->open)
639 		link->open(link);
640 	spin_unlock_irq(&dev->lock);
641 
642 	return 0;
643 }
644 
645 static int eth_stop(struct net_device *net)
646 {
647 	struct eth_dev	*dev = netdev_priv(net);
648 	unsigned long	flags;
649 
650 	VDBG(dev, "%s\n", __func__);
651 	netif_stop_queue(net);
652 
653 	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
654 		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
655 		dev->net->stats.rx_errors, dev->net->stats.tx_errors
656 		);
657 
658 	/* ensure there are no more active requests */
659 	spin_lock_irqsave(&dev->lock, flags);
660 	if (dev->port_usb) {
661 		struct gether	*link = dev->port_usb;
662 		const struct usb_endpoint_descriptor *in;
663 		const struct usb_endpoint_descriptor *out;
664 
665 		if (link->close)
666 			link->close(link);
667 
668 		/* NOTE:  we have no abort-queue primitive we could use
669 		 * to cancel all pending I/O.  Instead, we disable then
670 		 * reenable the endpoints ... this idiom may leave toggle
671 		 * wrong, but that's a self-correcting error.
672 		 *
673 		 * REVISIT:  we *COULD* just let the transfers complete at
674 		 * their own pace; the network stack can handle old packets.
675 		 * For the moment we leave this here, since it works.
676 		 */
677 		in = link->in_ep->desc;
678 		out = link->out_ep->desc;
679 		usb_ep_disable(link->in_ep);
680 		usb_ep_disable(link->out_ep);
681 		if (netif_carrier_ok(net)) {
682 			DBG(dev, "host still using in/out endpoints\n");
683 			link->in_ep->desc = in;
684 			link->out_ep->desc = out;
685 			usb_ep_enable(link->in_ep);
686 			usb_ep_enable(link->out_ep);
687 		}
688 	}
689 	spin_unlock_irqrestore(&dev->lock, flags);
690 
691 	return 0;
692 }
693 
694 /*-------------------------------------------------------------------------*/
695 
696 static int get_ether_addr(const char *str, u8 *dev_addr)
697 {
698 	if (str) {
699 		unsigned	i;
700 
701 		for (i = 0; i < 6; i++) {
702 			unsigned char num;
703 
704 			if ((*str == '.') || (*str == ':'))
705 				str++;
706 			num = hex_to_bin(*str++) << 4;
707 			num |= hex_to_bin(*str++);
708 			dev_addr [i] = num;
709 		}
710 		if (is_valid_ether_addr(dev_addr))
711 			return 0;
712 	}
713 	eth_random_addr(dev_addr);
714 	return 1;
715 }
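/*
 * Input example (illustrative): strings like "01:23:45:67:89:ab" or
 * "01.23.45.67.89.ab" are accepted above; a NULL string, or one that does
 * not parse to a valid unicast address, falls back to a random locally
 * administered address and makes the function return 1 instead of 0.
 */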
716 
717 static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
718 {
719 	if (len < 18)
720 		return -EINVAL;
721 
722 	snprintf(str, len, "%pM", dev_addr);
723 	return 18;
724 }
725 
726 static const struct net_device_ops eth_netdev_ops = {
727 	.ndo_open		= eth_open,
728 	.ndo_stop		= eth_stop,
729 	.ndo_start_xmit		= eth_start_xmit,
730 	.ndo_set_mac_address 	= eth_mac_addr,
731 	.ndo_validate_addr	= eth_validate_addr,
732 };
733 
734 static struct device_type gadget_type = {
735 	.name	= "gadget",
736 };
737 
738 /*
739  * gether_setup_name - initialize one ethernet-over-usb link
740  * @g: gadget to associate with this link
741  * @ethaddr: NULL, or a buffer in which the ethernet address of the
742  *	host side of the link is recorded
743  * @netname: name for network device (for example, "usb")
744  * Context: may sleep
745  *
746  * This sets up the single network link that may be exported by a
747  * gadget driver using this framework.  The link layer addresses are
748  * set up using module parameters.
749  *
750  * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
751  */
752 struct eth_dev *gether_setup_name(struct usb_gadget *g,
753 		const char *dev_addr, const char *host_addr,
754 		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
755 {
756 	struct eth_dev		*dev;
757 	struct net_device	*net;
758 	int			status;
759 
760 	net = alloc_etherdev(sizeof *dev);
761 	if (!net)
762 		return ERR_PTR(-ENOMEM);
763 
764 	dev = netdev_priv(net);
765 	spin_lock_init(&dev->lock);
766 	spin_lock_init(&dev->req_lock);
767 	INIT_WORK(&dev->work, eth_work);
768 	INIT_LIST_HEAD(&dev->tx_reqs);
769 	INIT_LIST_HEAD(&dev->rx_reqs);
770 
771 	skb_queue_head_init(&dev->rx_frames);
772 
773 	/* network device setup */
774 	dev->net = net;
775 	dev->qmult = qmult;
776 	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
777 
778 	if (get_ether_addr(dev_addr, net->dev_addr)) {
779 		net->addr_assign_type = NET_ADDR_RANDOM;
780 		dev_warn(&g->dev,
781 			"using random %s ethernet address\n", "self");
782 	} else {
783 		net->addr_assign_type = NET_ADDR_SET;
784 	}
785 	if (get_ether_addr(host_addr, dev->host_mac))
786 		dev_warn(&g->dev,
787 			"using random %s ethernet address\n", "host");
788 
789 	if (ethaddr)
790 		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
791 
792 	net->netdev_ops = &eth_netdev_ops;
793 
794 	net->ethtool_ops = &ops;
795 
796 	/* MTU range: 14 - 15412 */
797 	net->min_mtu = ETH_HLEN;
798 	net->max_mtu = GETHER_MAX_MTU_SIZE;
799 
800 	dev->gadget = g;
801 	SET_NETDEV_DEV(net, &g->dev);
802 	SET_NETDEV_DEVTYPE(net, &gadget_type);
803 
804 	status = register_netdev(net);
805 	if (status < 0) {
806 		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
807 		free_netdev(net);
808 		dev = ERR_PTR(status);
809 	} else {
810 		INFO(dev, "MAC %pM\n", net->dev_addr);
811 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
812 
813 		/*
814 		 * two kinds of host-initiated state changes:
815 		 *  - iff DATA transfer is active, carrier is "on"
816 		 *  - tx queueing enabled if open *and* carrier is "on"
817 		 */
818 		netif_carrier_off(net);
819 	}
820 
821 	return dev;
822 }
823 EXPORT_SYMBOL_GPL(gether_setup_name);
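/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * legacy-style gadget driver bringing the link up with gether_setup_name().
 * The bind function, its name and the error handling are assumptions; only
 * the gether_setup_name() signature matches the code above.
 */
static u8 example_host_mac[ETH_ALEN];

static int example_bind_link(struct usb_gadget *gadget)
{
	struct eth_dev *ethdev;

	/* NULL address strings mean "use random addresses", as documented */
	ethdev = gether_setup_name(gadget, NULL, NULL, example_host_mac,
				   QMULT_DEFAULT, "usb");
	if (IS_ERR(ethdev))
		return PTR_ERR(ethdev);

	/* ... keep ethdev around so gether_cleanup() can undo this later ... */
	return 0;
}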
824 
825 struct net_device *gether_setup_name_default(const char *netname)
826 {
827 	struct net_device	*net;
828 	struct eth_dev		*dev;
829 
830 	net = alloc_etherdev(sizeof(*dev));
831 	if (!net)
832 		return ERR_PTR(-ENOMEM);
833 
834 	dev = netdev_priv(net);
835 	spin_lock_init(&dev->lock);
836 	spin_lock_init(&dev->req_lock);
837 	INIT_WORK(&dev->work, eth_work);
838 	INIT_LIST_HEAD(&dev->tx_reqs);
839 	INIT_LIST_HEAD(&dev->rx_reqs);
840 
841 	/* by default we always have a random MAC address */
842 	net->addr_assign_type = NET_ADDR_RANDOM;
843 
844 	skb_queue_head_init(&dev->rx_frames);
845 
846 	/* network device setup */
847 	dev->net = net;
848 	dev->qmult = QMULT_DEFAULT;
849 	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
850 
851 	eth_random_addr(dev->dev_mac);
852 	pr_warn("using random %s ethernet address\n", "self");
853 	eth_random_addr(dev->host_mac);
854 	pr_warn("using random %s ethernet address\n", "host");
855 
856 	net->netdev_ops = &eth_netdev_ops;
857 
858 	net->ethtool_ops = &ops;
859 	SET_NETDEV_DEVTYPE(net, &gadget_type);
860 
861 	/* MTU range: 14 - 15412 */
862 	net->min_mtu = ETH_HLEN;
863 	net->max_mtu = GETHER_MAX_MTU_SIZE;
864 
865 	return net;
866 }
867 EXPORT_SYMBOL_GPL(gether_setup_name_default);
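/*
 * Illustrative sketch (hypothetical caller, not from this file): the
 * configfs-style sequence built on gether_setup_name_default().  The
 * function name, the gadget pointer and the example MAC strings are
 * assumptions; the gether_*() calls are the helpers defined in this file.
 */
static int example_instantiate_link(struct usb_gadget *g)
{
	struct net_device *net;
	int ret;

	net = gether_setup_name_default("usb");
	if (IS_ERR(net))
		return PTR_ERR(net);

	gether_set_gadget(net, g);

	/* optional: replace the random addresses before registration */
	ret = gether_set_dev_addr(net, "aa:bb:cc:dd:ee:01");
	if (!ret)
		ret = gether_set_host_addr(net, "aa:bb:cc:dd:ee:02");
	if (!ret)
		ret = gether_register_netdev(net);
	if (ret)
		free_netdev(net);
	return ret;
}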
868 
869 int gether_register_netdev(struct net_device *net)
870 {
871 	struct eth_dev *dev;
872 	struct usb_gadget *g;
873 	int status;
874 
875 	if (!net->dev.parent)
876 		return -EINVAL;
877 	dev = netdev_priv(net);
878 	g = dev->gadget;
879 
880 	eth_hw_addr_set(net, dev->dev_mac);
881 
882 	status = register_netdev(net);
883 	if (status < 0) {
884 		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
885 		return status;
886 	} else {
887 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
888 		INFO(dev, "MAC %pM\n", dev->dev_mac);
889 
890 		/* two kinds of host-initiated state changes:
891 		 *  - iff DATA transfer is active, carrier is "on"
892 		 *  - tx queueing enabled if open *and* carrier is "on"
893 		 */
894 		netif_carrier_off(net);
895 	}
896 
897 	return status;
898 }
899 EXPORT_SYMBOL_GPL(gether_register_netdev);
900 
901 void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
902 {
903 	struct eth_dev *dev;
904 
905 	dev = netdev_priv(net);
906 	dev->gadget = g;
907 	SET_NETDEV_DEV(net, &g->dev);
908 }
909 EXPORT_SYMBOL_GPL(gether_set_gadget);
910 
911 int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
912 {
913 	struct eth_dev *dev;
914 	u8 new_addr[ETH_ALEN];
915 
916 	dev = netdev_priv(net);
917 	if (get_ether_addr(dev_addr, new_addr))
918 		return -EINVAL;
919 	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
920 	net->addr_assign_type = NET_ADDR_SET;
921 	return 0;
922 }
923 EXPORT_SYMBOL_GPL(gether_set_dev_addr);
924 
925 int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
926 {
927 	struct eth_dev *dev;
928 	int ret;
929 
930 	dev = netdev_priv(net);
931 	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
932 	if (ret + 1 < len) {
933 		dev_addr[ret++] = '\n';
934 		dev_addr[ret] = '\0';
935 	}
936 
937 	return ret;
938 }
939 EXPORT_SYMBOL_GPL(gether_get_dev_addr);
940 
941 int gether_set_host_addr(struct net_device *net, const char *host_addr)
942 {
943 	struct eth_dev *dev;
944 	u8 new_addr[ETH_ALEN];
945 
946 	dev = netdev_priv(net);
947 	if (get_ether_addr(host_addr, new_addr))
948 		return -EINVAL;
949 	memcpy(dev->host_mac, new_addr, ETH_ALEN);
950 	return 0;
951 }
952 EXPORT_SYMBOL_GPL(gether_set_host_addr);
953 
954 int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
955 {
956 	struct eth_dev *dev;
957 	int ret;
958 
959 	dev = netdev_priv(net);
960 	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
961 	if (ret + 1 < len) {
962 		host_addr[ret++] = '\n';
963 		host_addr[ret] = '\0';
964 	}
965 
966 	return ret;
967 }
968 EXPORT_SYMBOL_GPL(gether_get_host_addr);
969 
970 int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
971 {
972 	struct eth_dev *dev;
973 
974 	if (len < 13)
975 		return -EINVAL;
976 
977 	dev = netdev_priv(net);
978 	snprintf(host_addr, len, "%pm", dev->host_mac);
979 
980 	string_upper(host_addr, host_addr);
981 
982 	return strlen(host_addr);
983 }
984 EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
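/*
 * Output example (illustrative): a host_mac of 01:23:45:67:89:ab comes back
 * from this helper as the 12-character string "0123456789AB", the compact
 * uppercase form used for the CDC iMACAddress string descriptor.
 */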
985 
986 void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
987 {
988 	struct eth_dev *dev;
989 
990 	dev = netdev_priv(net);
991 	memcpy(host_mac, dev->host_mac, ETH_ALEN);
992 }
993 EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);
994 
995 void gether_set_qmult(struct net_device *net, unsigned qmult)
996 {
997 	struct eth_dev *dev;
998 
999 	dev = netdev_priv(net);
1000 	dev->qmult = qmult;
1001 }
1002 EXPORT_SYMBOL_GPL(gether_set_qmult);
1003 
1004 unsigned gether_get_qmult(struct net_device *net)
1005 {
1006 	struct eth_dev *dev;
1007 
1008 	dev = netdev_priv(net);
1009 	return dev->qmult;
1010 }
1011 EXPORT_SYMBOL_GPL(gether_get_qmult);
1012 
1013 int gether_get_ifname(struct net_device *net, char *name, int len)
1014 {
1015 	struct eth_dev *dev = netdev_priv(net);
1016 	int ret;
1017 
1018 	rtnl_lock();
1019 	ret = scnprintf(name, len, "%s\n",
1020 			dev->ifname_set ? net->name : netdev_name(net));
1021 	rtnl_unlock();
1022 	return ret;
1023 }
1024 EXPORT_SYMBOL_GPL(gether_get_ifname);
1025 
1026 int gether_set_ifname(struct net_device *net, const char *name, int len)
1027 {
1028 	struct eth_dev *dev = netdev_priv(net);
1029 	char tmp[IFNAMSIZ];
1030 	const char *p;
1031 
1032 	if (name[len - 1] == '\n')
1033 		len--;
1034 
1035 	if (len >= sizeof(tmp))
1036 		return -E2BIG;
1037 
1038 	strscpy(tmp, name, len + 1);
1039 	if (!dev_valid_name(tmp))
1040 		return -EINVAL;
1041 
1042 	/* Require exactly one %d, so binding will not fail with EEXIST. */
1043 	p = strchr(name, '%');
1044 	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
1045 		return -EINVAL;
1046 
1047 	strncpy(net->name, tmp, sizeof(net->name));
1048 	dev->ifname_set = true;
1049 
1050 	return 0;
1051 }
1052 EXPORT_SYMBOL_GPL(gether_set_ifname);
1053 
1054 /*
1055  * gether_cleanup - remove Ethernet-over-USB device
1056  * Context: may sleep
1057  *
1058  * This is called to free all resources allocated by @gether_setup().
1059  */
1060 void gether_cleanup(struct eth_dev *dev)
1061 {
1062 	if (!dev)
1063 		return;
1064 
1065 	unregister_netdev(dev->net);
1066 	flush_work(&dev->work);
1067 	free_netdev(dev->net);
1068 }
1069 EXPORT_SYMBOL_GPL(gether_cleanup);
1070 
1071 /**
1072  * gether_connect - notify network layer that USB link is active
1073  * @link: the USB link, set up with endpoints, descriptors matching
1074  *	current device speed, and any framing wrapper(s) set up.
1075  * Context: irqs blocked
1076  *
1077  * This is called to activate endpoints and let the network layer know
1078  * the connection is active ("carrier detect").  It may cause the I/O
1079  * queues to open and start letting network packets flow, but will in
1080  * any case activate the endpoints so that they respond properly to the
1081  * USB host.
1082  *
1083  * Verify net_device pointer returned using IS_ERR().  If it doesn't
1084  * indicate some error code (negative errno), ep->driver_data values
1085  * have been overwritten.
1086  */
1087 struct net_device *gether_connect(struct gether *link)
1088 {
1089 	struct eth_dev		*dev = link->ioport;
1090 	int			result = 0;
1091 
1092 	if (!dev)
1093 		return ERR_PTR(-EINVAL);
1094 
1095 	link->in_ep->driver_data = dev;
1096 	result = usb_ep_enable(link->in_ep);
1097 	if (result != 0) {
1098 		DBG(dev, "enable %s --> %d\n",
1099 			link->in_ep->name, result);
1100 		goto fail0;
1101 	}
1102 
1103 	link->out_ep->driver_data = dev;
1104 	result = usb_ep_enable(link->out_ep);
1105 	if (result != 0) {
1106 		DBG(dev, "enable %s --> %d\n",
1107 			link->out_ep->name, result);
1108 		goto fail1;
1109 	}
1110 
1111 	if (result == 0)
1112 		result = alloc_requests(dev, link, qlen(dev->gadget,
1113 					dev->qmult));
1114 
1115 	if (result == 0) {
1116 		dev->zlp = link->is_zlp_ok;
1117 		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
1118 		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));
1119 
1120 		dev->header_len = link->header_len;
1121 		dev->unwrap = link->unwrap;
1122 		dev->wrap = link->wrap;
1123 
1124 		spin_lock(&dev->lock);
1125 		dev->port_usb = link;
1126 		if (netif_running(dev->net)) {
1127 			if (link->open)
1128 				link->open(link);
1129 		} else {
1130 			if (link->close)
1131 				link->close(link);
1132 		}
1133 		spin_unlock(&dev->lock);
1134 
1135 		netif_carrier_on(dev->net);
1136 		if (netif_running(dev->net))
1137 			eth_start(dev, GFP_ATOMIC);
1138 
1139 	/* on error, disable any endpoints  */
1140 	} else {
1141 		(void) usb_ep_disable(link->out_ep);
1142 fail1:
1143 		(void) usb_ep_disable(link->in_ep);
1144 	}
1145 fail0:
1146 	/* caller is responsible for cleanup on error */
1147 	if (result < 0)
1148 		return ERR_PTR(result);
1149 	return dev->net;
1150 }
1151 EXPORT_SYMBOL_GPL(gether_connect);
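/*
 * Illustrative sketch (hypothetical caller, not from this file): activating
 * the data interface from a function driver's set_alt()-style path.  "geth"
 * is the function's struct gether, already pointed at its eth_dev through
 * ->ioport and with endpoint descriptors chosen for the current speed.
 */
static int example_activate_link(struct gether *geth)
{
	struct net_device *net;

	net = gether_connect(geth);
	if (IS_ERR(net))
		return PTR_ERR(net);

	/* carrier is on; the network stack may start queueing packets now */
	return 0;
}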
1152 
1153 /**
1154  * gether_disconnect - notify network layer that USB link is inactive
1155  * @link: the USB link, on which gether_connect() was called
1156  * Context: irqs blocked
1157  *
1158  * This is called to deactivate endpoints and let the network layer know
1159  * the connection went inactive ("no carrier").
1160  *
1161  * On return, the state is as if gether_connect() had never been called.
1162  * The endpoints are inactive, and accordingly without active USB I/O.
1163  * Pointers to endpoint descriptors and endpoint private data are nulled.
1164  */
1165 void gether_disconnect(struct gether *link)
1166 {
1167 	struct eth_dev		*dev = link->ioport;
1168 	struct usb_request	*req;
1169 
1170 	WARN_ON(!dev);
1171 	if (!dev)
1172 		return;
1173 
1174 	DBG(dev, "%s\n", __func__);
1175 
1176 	netif_stop_queue(dev->net);
1177 	netif_carrier_off(dev->net);
1178 
1179 	/* disable endpoints, forcing (synchronous) completion
1180 	 * of all pending i/o.  then free the request objects
1181 	 * and forget about the endpoints.
1182 	 */
1183 	usb_ep_disable(link->in_ep);
1184 	spin_lock(&dev->req_lock);
1185 	while (!list_empty(&dev->tx_reqs)) {
1186 		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
1187 		list_del(&req->list);
1188 
1189 		spin_unlock(&dev->req_lock);
1190 		usb_ep_free_request(link->in_ep, req);
1191 		spin_lock(&dev->req_lock);
1192 	}
1193 	spin_unlock(&dev->req_lock);
1194 	link->in_ep->desc = NULL;
1195 
1196 	usb_ep_disable(link->out_ep);
1197 	spin_lock(&dev->req_lock);
1198 	while (!list_empty(&dev->rx_reqs)) {
1199 		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
1200 		list_del(&req->list);
1201 
1202 		spin_unlock(&dev->req_lock);
1203 		usb_ep_free_request(link->out_ep, req);
1204 		spin_lock(&dev->req_lock);
1205 	}
1206 	spin_unlock(&dev->req_lock);
1207 	link->out_ep->desc = NULL;
1208 
1209 	/* finish forgetting about this USB link episode */
1210 	dev->header_len = 0;
1211 	dev->unwrap = NULL;
1212 	dev->wrap = NULL;
1213 
1214 	spin_lock(&dev->lock);
1215 	dev->port_usb = NULL;
1216 	spin_unlock(&dev->lock);
1217 }
1218 EXPORT_SYMBOL_GPL(gether_disconnect);
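/*
 * Illustrative sketch (hypothetical caller, not from this file): the
 * matching teardown, typically run from a function driver's disable()
 * path.  Afterwards the endpoints are disabled, all requests are freed,
 * and the link can safely be reconfigured or unbound.
 */
static void example_deactivate_link(struct gether *geth)
{
	gether_disconnect(geth);
}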
1219 
1220 MODULE_LICENSE("GPL");
1221 MODULE_AUTHOR("David Brownell");
1222