1 /*
2  * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
3  *
4  * Copyright (C) 2003-2005,2008 David Brownell
5  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6  * Copyright (C) 2008 Nokia Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13 
14 /* #define VERBOSE_DEBUG */
15 
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/gfp.h>
19 #include <linux/device.h>
20 #include <linux/ctype.h>
21 #include <linux/etherdevice.h>
22 #include <linux/ethtool.h>
23 #include <linux/if_vlan.h>
24 
25 #include "u_ether.h"
26 
27 
28 /*
29  * This component encapsulates the Ethernet link glue needed to provide
30  * one (!) network link through the USB gadget stack, normally "usb0".
31  *
32  * The control and data models are handled by the function driver which
33  * connects to this code, such as CDC Ethernet (ECM or EEM),
34  * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
35  * management.
36  *
37  * Link level addressing is handled by this component using module
38  * parameters; if no such parameters are provided, random link level
39  * addresses are used.  Each end of the link uses one address.  The
40  * host end address is exported in various ways, and is often recorded
41  * in configuration databases.
42  *
43  * The driver which assembles each configuration using such a link is
44  * responsible for ensuring that each configuration includes at most one
45  * instance of this network link.  (The network layer provides ways for
46  * this single "physical" link to be used by multiple virtual links.)
47  */
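/* Rough sketch of the call sequence a function driver goes through against
 * this layer, built from the helpers exported below; variable names are
 * placeholders and error handling is omitted (the real call sites live in
 * the f_*.c function drivers):
 *
 *	dev = gether_setup_name(gadget, dev_addr_str, host_addr_str,
 *				host_mac, qmult, "usb");
 *	...
 *	net = gether_connect(&link);	(endpoints enabled, carrier on)
 *	...
 *	gether_disconnect(&link);	(carrier off, requests freed)
 *	gether_cleanup(dev);		(unregister and free the netdev)
 */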
48 
49 #define UETH__VERSION	"29-May-2008"
50 
51 static struct workqueue_struct	*uether_wq;
52 
53 struct eth_dev {
54 	/* lock is held while accessing port_usb
55 	 */
56 	spinlock_t		lock;
57 	struct gether		*port_usb;
58 
59 	struct net_device	*net;
60 	struct usb_gadget	*gadget;
61 
62 	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
63 	struct list_head	tx_reqs, rx_reqs;
64 	unsigned		tx_qlen;
65 /* Minimum number of TX USB requests queued to UDC */
66 #define TX_REQ_THRESHOLD	5
67 	int			no_tx_req_used;
68 	int			tx_skb_hold_count;
69 	u32			tx_req_bufsize;
70 
71 	struct sk_buff_head	rx_frames;
72 
73 	unsigned		qmult;
74 
75 	unsigned		header_len;
76 	unsigned		ul_max_pkts_per_xfer;
77 	unsigned		dl_max_pkts_per_xfer;
78 	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
79 	int			(*unwrap)(struct gether *,
80 						struct sk_buff *skb,
81 						struct sk_buff_head *list);
82 
83 	struct work_struct	work;
84 	struct work_struct	rx_work;
85 
86 	unsigned long		todo;
87 #define	WORK_RX_MEMORY		0
88 
89 	bool			zlp;
90 	u8			host_mac[ETH_ALEN];
91 	u8			dev_mac[ETH_ALEN];
92 };
93 
94 /*-------------------------------------------------------------------------*/
95 
96 #define RX_EXTRA	20	/* bytes guarding against rx overflows */
97 
98 #define DEFAULT_QLEN	2	/* double buffering by default */
99 
100 /* for dual-speed hardware, use deeper queues at high/super speed */
101 static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
102 {
103 	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
104 					    gadget->speed == USB_SPEED_SUPER))
105 		return qmult * DEFAULT_QLEN;
106 	else
107 		return DEFAULT_QLEN;
108 }
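/* Example: with a queue multiplier of 5 (the QMULT_DEFAULT value normally
 * provided by u_ether.h), a high- or super-speed link gets
 * 5 * DEFAULT_QLEN = 10 requests per direction, while a full-speed link
 * keeps just the double-buffered pair.
 */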
109 
110 /*-------------------------------------------------------------------------*/
111 
112 /* REVISIT there must be a better way than having two sets
113  * of debug calls ...
114  */
115 
116 #undef DBG
117 #undef VDBG
118 #undef ERROR
119 #undef INFO
120 
121 #define xprintk(d, level, fmt, args...) \
122 	printk(level "%s: " fmt , (d)->net->name , ## args)
123 
124 #ifdef DEBUG
125 #undef DEBUG
126 #define DBG(dev, fmt, args...) \
127 	xprintk(dev , KERN_DEBUG , fmt , ## args)
128 #else
129 #define DBG(dev, fmt, args...) \
130 	do { } while (0)
131 #endif /* DEBUG */
132 
133 #ifdef VERBOSE_DEBUG
134 #define VDBG	DBG
135 #else
136 #define VDBG(dev, fmt, args...) \
137 	do { } while (0)
138 #endif /* VERBOSE_DEBUG */
139 
140 #define ERROR(dev, fmt, args...) \
141 	xprintk(dev , KERN_ERR , fmt , ## args)
142 #define INFO(dev, fmt, args...) \
143 	xprintk(dev , KERN_INFO , fmt , ## args)
144 
145 /*-------------------------------------------------------------------------*/
146 
147 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
148 
149 static int ueth_change_mtu(struct net_device *net, int new_mtu)
150 {
151 	struct eth_dev	*dev = netdev_priv(net);
152 	unsigned long	flags;
153 	int		status = 0;
154 
155 	/* don't change MTU on "live" link (peer won't know) */
156 	spin_lock_irqsave(&dev->lock, flags);
157 	if (dev->port_usb)
158 		status = -EBUSY;
159 	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
160 		status = -ERANGE;
161 	else
162 		net->mtu = new_mtu;
163 	spin_unlock_irqrestore(&dev->lock, flags);
164 
165 	return status;
166 }
167 
168 static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
169 {
170 	struct eth_dev *dev = netdev_priv(net);
171 
172 	strlcpy(p->driver, "g_ether", sizeof(p->driver));
173 	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
174 	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
175 	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
176 }
177 
178 /* REVISIT can also support:
179  *   - WOL (by tracking suspends and issuing remote wakeup)
180  *   - msglevel (implies updated messaging)
181  *   - ... probably more ethtool ops
182  */
183 
184 static const struct ethtool_ops ops = {
185 	.get_drvinfo = eth_get_drvinfo,
186 	.get_link = ethtool_op_get_link,
187 };
188 
189 static void defer_kevent(struct eth_dev *dev, int flag)
190 {
191 	if (test_and_set_bit(flag, &dev->todo))
192 		return;
193 	if (!schedule_work(&dev->work))
194 		ERROR(dev, "kevent %d may have been dropped\n", flag);
195 	else
196 		DBG(dev, "kevent %d scheduled\n", flag);
197 }
198 
199 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
200 
201 static int
202 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
203 {
204 	struct sk_buff	*skb;
205 	int		retval = -ENOMEM;
206 	size_t		size = 0;
207 	struct usb_ep	*out;
208 	unsigned long	flags;
209 
210 	spin_lock_irqsave(&dev->lock, flags);
211 	if (dev->port_usb)
212 		out = dev->port_usb->out_ep;
213 	else
214 		out = NULL;
215 	spin_unlock_irqrestore(&dev->lock, flags);
216 
217 	if (!out)
218 		return -ENOTCONN;
219 
220 
221 	/* Padding up to RX_EXTRA handles minor disagreements with host.
222 	 * Normally we use the USB "terminate on short read" convention;
223 	 * so allow up to (N*maxpacket), since that memory is normally
224 	 * already allocated.  Some hardware doesn't deal well with short
225 	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
226 	 * byte off the end (to force hardware errors on overflow).
227 	 *
228 	 * RNDIS uses internal framing, and explicitly allows senders to
229 	 * pad to end-of-packet.  That's potentially nice for speed, but
230 	 * means receivers can't recover lost synch on their own (because
231 	 * new packets don't only start after a short RX).
232 	 */
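	/* Worked example, assuming a 1500 byte MTU, no function-driver
	 * header and a 512 byte high-speed bulk maxpacket: 14 + 1500 + 20
	 * = 1534 bytes, rounded up to 1536 (3 * 512); when
	 * ul_max_pkts_per_xfer is set, the request grows to hold that many
	 * such frames.
	 */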
233 	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
234 	size += dev->port_usb->header_len;
235 	size += out->maxpacket - 1;
236 	size -= size % out->maxpacket;
237 
238 	if (dev->ul_max_pkts_per_xfer)
239 		size *= dev->ul_max_pkts_per_xfer;
240 
241 	if (dev->port_usb->is_fixed)
242 		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
243 
244 	DBG(dev, "%s: size: %zd\n", __func__, size);
245 	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
246 	if (skb == NULL) {
247 		DBG(dev, "no rx skb\n");
248 		goto enomem;
249 	}
250 
251 	/* Some platforms perform better when IP packets are aligned,
252 	 * but on at least one, checksumming fails otherwise.  Note:
253 	 * RNDIS headers involve variable numbers of LE32 values.
254 	 */
255 	skb_reserve(skb, NET_IP_ALIGN);
256 
257 	req->buf = skb->data;
258 	req->length = size;
259 	req->complete = rx_complete;
260 	req->context = skb;
261 
262 	retval = usb_ep_queue(out, req, gfp_flags);
263 	if (retval == -ENOMEM)
264 enomem:
265 		defer_kevent(dev, WORK_RX_MEMORY);
266 	if (retval) {
267 		DBG(dev, "rx submit --> %d\n", retval);
268 		if (skb)
269 			dev_kfree_skb_any(skb);
270 	}
271 	return retval;
272 }
273 
274 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
275 {
276 	struct sk_buff	*skb = req->context;
277 	struct eth_dev	*dev = ep->driver_data;
278 	int		status = req->status;
279 	bool		queue = 0;
280 
281 	switch (status) {
282 
283 	/* normal completion */
284 	case 0:
285 		skb_put(skb, req->actual);
286 
287 		if (dev->unwrap) {
288 			unsigned long	flags;
289 
290 			spin_lock_irqsave(&dev->lock, flags);
291 			if (dev->port_usb) {
292 				status = dev->unwrap(dev->port_usb,
293 							skb,
294 							&dev->rx_frames);
295 				if (status == -EINVAL)
296 					dev->net->stats.rx_errors++;
297 				else if (status == -EOVERFLOW)
298 					dev->net->stats.rx_over_errors++;
299 			} else {
300 				dev_kfree_skb_any(skb);
301 				status = -ENOTCONN;
302 			}
303 			spin_unlock_irqrestore(&dev->lock, flags);
304 		} else {
305 			skb_queue_tail(&dev->rx_frames, skb);
306 		}
307 
308 		if (!status)
309 			queue = 1;
310 		break;
311 
312 	/* software-driven interface shutdown */
313 	case -ECONNRESET:		/* unlink */
314 	case -ESHUTDOWN:		/* disconnect etc */
315 		VDBG(dev, "rx shutdown, code %d\n", status);
316 		goto quiesce;
317 
318 	/* for hardware automagic (such as pxa) */
319 	case -ECONNABORTED:		/* endpoint reset */
320 		DBG(dev, "rx %s reset\n", ep->name);
321 		defer_kevent(dev, WORK_RX_MEMORY);
322 quiesce:
323 		dev_kfree_skb_any(skb);
324 		goto clean;
325 
326 	/* data overrun */
327 	case -EOVERFLOW:
328 		dev->net->stats.rx_over_errors++;
329 		/* FALLTHROUGH */
330 
331 	default:
332 		queue = 1;
333 		dev_kfree_skb_any(skb);
334 		dev->net->stats.rx_errors++;
335 		DBG(dev, "rx status %d\n", status);
336 		break;
337 	}
338 
339 clean:
340 	spin_lock(&dev->req_lock);
341 	list_add(&req->list, &dev->rx_reqs);
342 	spin_unlock(&dev->req_lock);
343 
344 	if (queue)
345 		queue_work(uether_wq, &dev->rx_work);
346 }
347 
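/* Adjust @list to hold exactly @n usb_requests for @ep: allocate more while
 * the list is short, free the surplus if it is long.  Returns -ENOMEM if @n
 * is zero or the list would stay empty, 0 otherwise (even when fewer than
 * @n requests could be allocated).
 */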
348 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
349 {
350 	unsigned		i;
351 	struct usb_request	*req;
352 
353 	if (!n)
354 		return -ENOMEM;
355 
356 	/* queue/recycle up to N requests */
357 	i = n;
358 	list_for_each_entry(req, list, list) {
359 		if (i-- == 0)
360 			goto extra;
361 	}
362 	while (i--) {
363 		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
364 		if (!req)
365 			return list_empty(list) ? -ENOMEM : 0;
366 		list_add(&req->list, list);
367 	}
368 	return 0;
369 
370 extra:
371 	/* free extras */
372 	for (;;) {
373 		struct list_head	*next;
374 
375 		next = req->list.next;
376 		list_del(&req->list);
377 		usb_ep_free_request(ep, req);
378 
379 		if (next == list)
380 			break;
381 
382 		req = container_of(next, struct usb_request, list);
383 	}
384 	return 0;
385 }
386 
387 static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
388 {
389 	int	status;
390 
391 	spin_lock(&dev->req_lock);
392 	status = prealloc(&dev->tx_reqs, link->in_ep, n);
393 	if (status < 0)
394 		goto fail;
395 	status = prealloc(&dev->rx_reqs, link->out_ep, n);
396 	if (status < 0)
397 		goto fail;
398 	goto done;
399 fail:
400 	DBG(dev, "can't alloc requests\n");
401 done:
402 	spin_unlock(&dev->req_lock);
403 	return status;
404 }
405 
406 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
407 {
408 	struct usb_request	*req;
409 	unsigned long		flags;
410 	int			req_cnt = 0;
411 
412 	/* fill unused rxq slots with some skb */
413 	spin_lock_irqsave(&dev->req_lock, flags);
414 	while (!list_empty(&dev->rx_reqs)) {
415 		/* break the nexus of continuous completion and re-submission */
416 		if (++req_cnt > qlen(dev->gadget, dev->qmult))
417 			break;
418 
419 		req = container_of(dev->rx_reqs.next,
420 				struct usb_request, list);
421 		list_del_init(&req->list);
422 		spin_unlock_irqrestore(&dev->req_lock, flags);
423 
424 		if (rx_submit(dev, req, gfp_flags) < 0) {
425 			spin_lock_irqsave(&dev->req_lock, flags);
426 			list_add(&req->list, &dev->rx_reqs);
427 			spin_unlock_irqrestore(&dev->req_lock, flags);
428 			defer_kevent(dev, WORK_RX_MEMORY);
429 			return;
430 		}
431 
432 		spin_lock_irqsave(&dev->req_lock, flags);
433 	}
434 	spin_unlock_irqrestore(&dev->req_lock, flags);
435 }
436 
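/* Deferred rx path: rx_complete() runs in atomic context, so it only queues
 * completed frames on dev->rx_frames and kicks this worker on the "uether"
 * workqueue.  Here the frames are validated, handed to the stack with
 * netif_rx_ni() in process context, and the rx queue is refilled.
 */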
437 static void process_rx_w(struct work_struct *work)
438 {
439 	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
440 	struct sk_buff	*skb;
441 	int		status = 0;
442 
443 	if (!dev->port_usb)
444 		return;
445 
446 	while ((skb = skb_dequeue(&dev->rx_frames))) {
447 		if (status < 0
448 				|| ETH_HLEN > skb->len
449 				|| skb->len > ETH_FRAME_LEN) {
450 			dev->net->stats.rx_errors++;
451 			dev->net->stats.rx_length_errors++;
452 			DBG(dev, "rx length %d\n", skb->len);
453 			dev_kfree_skb_any(skb);
454 			continue;
455 		}
456 		skb->protocol = eth_type_trans(skb, dev->net);
457 		dev->net->stats.rx_packets++;
458 		dev->net->stats.rx_bytes += skb->len;
459 
460 		status = netif_rx_ni(skb);
461 	}
462 
463 	if (netif_running(dev->net))
464 		rx_fill(dev, GFP_KERNEL);
465 }
466 
467 static void eth_work(struct work_struct *work)
468 {
469 	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
470 
471 	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
472 		if (netif_running(dev->net))
473 			rx_fill(dev, GFP_KERNEL);
474 	}
475 
476 	if (dev->todo)
477 		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
478 }
479 
480 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
481 {
482 	struct sk_buff	*skb = req->context;
483 	struct eth_dev	*dev = ep->driver_data;
484 	struct net_device *net = dev->net;
485 	struct usb_request *new_req;
486 	struct usb_ep *in;
487 	int length;
488 	int retval;
489 
490 	switch (req->status) {
491 	default:
492 		dev->net->stats.tx_errors++;
493 		VDBG(dev, "tx err %d\n", req->status);
494 		/* FALLTHROUGH */
495 	case -ECONNRESET:		/* unlink */
496 	case -ESHUTDOWN:		/* disconnect etc */
497 		break;
498 	case 0:
499 		if (!req->zero)
500 			dev->net->stats.tx_bytes += req->length-1;
501 		else
502 			dev->net->stats.tx_bytes += req->length;
503 	}
504 	dev->net->stats.tx_packets++;
505 
506 	spin_lock(&dev->req_lock);
507 	list_add_tail(&req->list, &dev->tx_reqs);
508 
509 	if (dev->port_usb->multi_pkt_xfer) {
510 		dev->no_tx_req_used--;
511 		req->length = 0;
512 		in = dev->port_usb->in_ep;
513 
514 		if (!list_empty(&dev->tx_reqs)) {
515 			new_req = container_of(dev->tx_reqs.next,
516 					struct usb_request, list);
517 			list_del(&new_req->list);
518 			spin_unlock(&dev->req_lock);
519 			if (new_req->length > 0) {
520 				length = new_req->length;
521 
522 				/* NCM requires no zlp if transfer is
523 				 * dwNtbInMaxSize */
524 				if (dev->port_usb->is_fixed &&
525 					length == dev->port_usb->fixed_in_len &&
526 					(length % in->maxpacket) == 0)
527 					new_req->zero = 0;
528 				else
529 					new_req->zero = 1;
530 
531 				/* use zlp framing on tx for strict CDC-Ether
532 				 * conformance, though any robust network rx
533 				 * path ignores extra padding, and some hardware
534 				 * doesn't like to write zlps.
535 				 */
536 				if (new_req->zero && !dev->zlp &&
537 						(length % in->maxpacket) == 0) {
538 					new_req->zero = 0;
539 					length++;
540 				}
541 
542 				new_req->length = length;
543 				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
544 				switch (retval) {
545 				default:
546 					DBG(dev, "tx queue err %d\n", retval);
547 					break;
548 				case 0:
549 					spin_lock(&dev->req_lock);
550 					dev->no_tx_req_used++;
551 					spin_unlock(&dev->req_lock);
552 					net->trans_start = jiffies;
553 				}
554 			} else {
555 				spin_lock(&dev->req_lock);
556 				list_add(&new_req->list, &dev->tx_reqs);
557 				spin_unlock(&dev->req_lock);
558 			}
559 		} else {
560 			spin_unlock(&dev->req_lock);
561 		}
562 	} else {
563 		spin_unlock(&dev->req_lock);
564 		dev_kfree_skb_any(skb);
565 	}
566 
567 	if (netif_carrier_ok(dev->net))
568 		netif_wake_queue(dev->net);
569 }
570 
571 static inline int is_promisc(u16 cdc_filter)
572 {
573 	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
574 }
575 
576 static void alloc_tx_buffer(struct eth_dev *dev)
577 {
578 	struct list_head	*act;
579 	struct usb_request	*req;
580 
581 	dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
582 				(dev->net->mtu
583 				+ sizeof(struct ethhdr)
584 				/* size of rndis_packet_msg_type */
585 				+ 44
586 				+ 22));
587 
588 	list_for_each(act, &dev->tx_reqs) {
589 		req = container_of(act, struct usb_request, list);
590 		if (!req->buf)
591 			req->buf = kmalloc(dev->tx_req_bufsize,
592 						GFP_ATOMIC);
593 	}
594 }
595 
596 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
597 					struct net_device *net)
598 {
599 	struct eth_dev		*dev = netdev_priv(net);
600 	int			length = 0;
601 	int			retval;
602 	struct usb_request	*req = NULL;
603 	unsigned long		flags;
604 	struct usb_ep		*in;
605 	u16			cdc_filter;
606 
607 	spin_lock_irqsave(&dev->lock, flags);
608 	if (dev->port_usb) {
609 		in = dev->port_usb->in_ep;
610 		cdc_filter = dev->port_usb->cdc_filter;
611 	} else {
612 		in = NULL;
613 		cdc_filter = 0;
614 	}
615 	spin_unlock_irqrestore(&dev->lock, flags);
616 
617 	if (skb && !in) {
618 		dev_kfree_skb_any(skb);
619 		return NETDEV_TX_OK;
620 	}
621 
622 	/* Allocate memory for tx_reqs to support multi packet transfer */
623 	if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
624 		alloc_tx_buffer(dev);
625 
626 	/* apply outgoing CDC or RNDIS filters */
627 	if (skb && !is_promisc(cdc_filter)) {
628 		u8		*dest = skb->data;
629 
630 		if (is_multicast_ether_addr(dest)) {
631 			u16	type;
632 
633 			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
634 			 * SET_ETHERNET_MULTICAST_FILTERS requests
635 			 */
636 			if (is_broadcast_ether_addr(dest))
637 				type = USB_CDC_PACKET_TYPE_BROADCAST;
638 			else
639 				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
640 			if (!(cdc_filter & type)) {
641 				dev_kfree_skb_any(skb);
642 				return NETDEV_TX_OK;
643 			}
644 		}
645 		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
646 	}
647 
648 	spin_lock_irqsave(&dev->req_lock, flags);
649 	/*
650 	 * this freelist can be empty if an interrupt triggered disconnect()
651 	 * and reconfigured the gadget (shutting down this queue) after the
652 	 * network stack decided to xmit but before we got the spinlock.
653 	 */
654 	if (list_empty(&dev->tx_reqs)) {
655 		spin_unlock_irqrestore(&dev->req_lock, flags);
656 		return NETDEV_TX_BUSY;
657 	}
658 
659 	req = container_of(dev->tx_reqs.next, struct usb_request, list);
660 	list_del(&req->list);
661 
662 	/* temporarily stop TX queue when the freelist empties */
663 	if (list_empty(&dev->tx_reqs))
664 		netif_stop_queue(net);
665 	spin_unlock_irqrestore(&dev->req_lock, flags);
666 
667 	/* no buffer copies needed, unless the network stack did it
668 	 * or the hardware can't use skb buffers,
669 	 * or there's not enough space for extra headers we need
670 	 */
671 	if (dev->wrap) {
672 		unsigned long	flags;
673 
674 		spin_lock_irqsave(&dev->lock, flags);
675 		if (dev->port_usb)
676 			skb = dev->wrap(dev->port_usb, skb);
677 		spin_unlock_irqrestore(&dev->lock, flags);
678 		if (!skb) {
679 			/* Multi frame CDC protocols may store the frame for
680 			 * later which is not a dropped frame.
681 			 */
682 			if (dev->port_usb->supports_multi_frame)
683 				goto multiframe;
684 			goto drop;
685 		}
686 	}
687 
688 	spin_lock_irqsave(&dev->req_lock, flags);
689 	dev->tx_skb_hold_count++;
690 	spin_unlock_irqrestore(&dev->req_lock, flags);
691 
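	/* Multi-packet mode (e.g. RNDIS aggregation): copy the frame into
	 * the request's preallocated buffer.  The request is held back for
	 * more frames only while the aggregate stays below
	 * dl_max_pkts_per_xfer and more than TX_REQ_THRESHOLD requests are
	 * already queued to the UDC; otherwise it is submitted right away
	 * so the controller does not run dry.
	 */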
692 	if (dev->port_usb->multi_pkt_xfer) {
693 		memcpy(req->buf + req->length, skb->data, skb->len);
694 		req->length = req->length + skb->len;
695 		length = req->length;
696 		dev_kfree_skb_any(skb);
697 
698 		spin_lock_irqsave(&dev->req_lock, flags);
699 		if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
700 			if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
701 				list_add(&req->list, &dev->tx_reqs);
702 				spin_unlock_irqrestore(&dev->req_lock, flags);
703 				goto success;
704 			}
705 		}
706 
707 		dev->no_tx_req_used++;
708 		spin_unlock_irqrestore(&dev->req_lock, flags);
709 
710 		spin_lock_irqsave(&dev->lock, flags);
711 		dev->tx_skb_hold_count = 0;
712 		spin_unlock_irqrestore(&dev->lock, flags);
713 	} else {
714 		length = skb->len;
715 		req->buf = skb->data;
716 		req->context = skb;
717 	}
718 
719 	req->complete = tx_complete;
720 
721 	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
722 	if (dev->port_usb->is_fixed &&
723 	    length == dev->port_usb->fixed_in_len &&
724 	    (length % in->maxpacket) == 0)
725 		req->zero = 0;
726 	else
727 		req->zero = 1;
728 
729 	/* use zlp framing on tx for strict CDC-Ether conformance,
730 	 * though any robust network rx path ignores extra padding,
731 	 * and some hardware doesn't like to write zlps.
732 	 */
733 	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
734 		req->zero = 0;
735 		length++;
736 	}
737 
738 	req->length = length;
739 
740 	retval = usb_ep_queue(in, req, GFP_ATOMIC);
741 	switch (retval) {
742 	default:
743 		DBG(dev, "tx queue err %d\n", retval);
744 		break;
745 	case 0:
746 		net->trans_start = jiffies;
747 	}
748 
749 	if (retval) {
750 		if (!dev->port_usb->multi_pkt_xfer)
751 			dev_kfree_skb_any(skb);
752 drop:
753 		dev->net->stats.tx_dropped++;
754 multiframe:
755 		spin_lock_irqsave(&dev->req_lock, flags);
756 		if (list_empty(&dev->tx_reqs))
757 			netif_start_queue(net);
758 		list_add(&req->list, &dev->tx_reqs);
759 		spin_unlock_irqrestore(&dev->req_lock, flags);
760 	}
761 success:
762 	return NETDEV_TX_OK;
763 }
764 
765 /*-------------------------------------------------------------------------*/
766 
767 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
768 {
769 	DBG(dev, "%s\n", __func__);
770 
771 	/* fill the rx queue */
772 	rx_fill(dev, gfp_flags);
773 
774 	/* and open the tx floodgates */
775 	dev->tx_qlen = 0;
776 	netif_wake_queue(dev->net);
777 }
778 
779 static int eth_open(struct net_device *net)
780 {
781 	struct eth_dev	*dev = netdev_priv(net);
782 	struct gether	*link;
783 
784 	DBG(dev, "%s\n", __func__);
785 	if (netif_carrier_ok(dev->net))
786 		eth_start(dev, GFP_KERNEL);
787 
788 	spin_lock_irq(&dev->lock);
789 	link = dev->port_usb;
790 	if (link && link->open)
791 		link->open(link);
792 	spin_unlock_irq(&dev->lock);
793 
794 	return 0;
795 }
796 
797 static int eth_stop(struct net_device *net)
798 {
799 	struct eth_dev	*dev = netdev_priv(net);
800 	unsigned long	flags;
801 
802 	VDBG(dev, "%s\n", __func__);
803 	netif_stop_queue(net);
804 
805 	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
806 		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
807 		dev->net->stats.rx_errors, dev->net->stats.tx_errors
808 		);
809 
810 	/* ensure there are no more active requests */
811 	spin_lock_irqsave(&dev->lock, flags);
812 	if (dev->port_usb) {
813 		struct gether	*link = dev->port_usb;
814 		const struct usb_endpoint_descriptor *in;
815 		const struct usb_endpoint_descriptor *out;
816 
817 		if (link->close)
818 			link->close(link);
819 
820 		/* NOTE:  we have no abort-queue primitive we could use
821 		 * to cancel all pending I/O.  Instead, we disable then
822 		 * reenable the endpoints ... this idiom may leave toggle
823 		 * wrong, but that's a self-correcting error.
824 		 *
825 		 * REVISIT:  we *COULD* just let the transfers complete at
826 		 * their own pace; the network stack can handle old packets.
827 		 * For the moment we leave this here, since it works.
828 		 */
829 		in = link->in_ep->desc;
830 		out = link->out_ep->desc;
831 		usb_ep_disable(link->in_ep);
832 		usb_ep_disable(link->out_ep);
833 		if (netif_carrier_ok(net)) {
834 			DBG(dev, "host still using in/out endpoints\n");
835 			link->in_ep->desc = in;
836 			link->out_ep->desc = out;
837 			usb_ep_enable(link->in_ep);
838 			usb_ep_enable(link->out_ep);
839 		}
840 	}
841 	spin_unlock_irqrestore(&dev->lock, flags);
842 
843 	return 0;
844 }
845 
846 /*-------------------------------------------------------------------------*/
847 
848 static u8 host_ethaddr[ETH_ALEN];
849 
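/* Parse a MAC address given as twelve hex digits, optionally separated by
 * ':' or '.' (e.g. "aa:bb:cc:dd:ee:ff").  If the string is missing or does
 * not yield a valid unicast address, fall back to a random address and
 * return 1 so callers can tell; return 0 on success.
 */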
850 static int get_ether_addr(const char *str, u8 *dev_addr)
851 {
852 	if (str) {
853 		unsigned	i;
854 
855 		for (i = 0; i < 6; i++) {
856 			unsigned char num;
857 
858 			if ((*str == '.') || (*str == ':'))
859 				str++;
860 			num = hex_to_bin(*str++) << 4;
861 			num |= hex_to_bin(*str++);
862 			dev_addr[i] = num;
863 		}
864 		if (is_valid_ether_addr(dev_addr))
865 			return 0;
866 	}
867 	eth_random_addr(dev_addr);
868 	return 1;
869 }
870 
871 static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
872 {
873 	if (len < 18)
874 		return -EINVAL;
875 
876 	snprintf(str, len, "%02x:%02x:%02x:%02x:%02x:%02x",
877 		 dev_addr[0], dev_addr[1], dev_addr[2],
878 		 dev_addr[3], dev_addr[4], dev_addr[5]);
879 	return 18;
880 }
881 
882 static int get_host_ether_addr(u8 *str, u8 *dev_addr)
883 {
884 	memcpy(dev_addr, str, ETH_ALEN);
885 	if (is_valid_ether_addr(dev_addr))
886 		return 0;
887 
888 	random_ether_addr(dev_addr);
889 	memcpy(str, dev_addr, ETH_ALEN);
890 	return 1;
891 }
892 
893 static const struct net_device_ops eth_netdev_ops = {
894 	.ndo_open		= eth_open,
895 	.ndo_stop		= eth_stop,
896 	.ndo_start_xmit		= eth_start_xmit,
897 	.ndo_change_mtu		= ueth_change_mtu,
898 	.ndo_set_mac_address 	= eth_mac_addr,
899 	.ndo_validate_addr	= eth_validate_addr,
900 };
901 
902 static struct device_type gadget_type = {
903 	.name	= "gadget",
904 };
905 
906 /**
907  * gether_setup_name - initialize one ethernet-over-usb link
908  * @g: gadget to associate with this link
909  * @ethaddr: NULL, or a buffer in which the ethernet address of the
910  *	host side of the link is recorded
911  * @netname: name for network device (for example, "usb")
912  * Context: may sleep
913  *
914  * This sets up the single network link that may be exported by a
915  * gadget driver using this framework.  The link layer addresses are
916  * set up using module parameters.
917  *
918  * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
919  */
920 struct eth_dev *gether_setup_name(struct usb_gadget *g,
921 		const char *dev_addr, const char *host_addr,
922 		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
923 {
924 	struct eth_dev		*dev;
925 	struct net_device	*net;
926 	int			status;
927 
928 	net = alloc_etherdev(sizeof *dev);
929 	if (!net)
930 		return ERR_PTR(-ENOMEM);
931 
932 	dev = netdev_priv(net);
933 	spin_lock_init(&dev->lock);
934 	spin_lock_init(&dev->req_lock);
935 	INIT_WORK(&dev->work, eth_work);
936 	INIT_WORK(&dev->rx_work, process_rx_w);
937 	INIT_LIST_HEAD(&dev->tx_reqs);
938 	INIT_LIST_HEAD(&dev->rx_reqs);
939 
940 	skb_queue_head_init(&dev->rx_frames);
941 
942 	/* network device setup */
943 	dev->net = net;
944 	dev->qmult = qmult;
945 	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
946 
947 	if (get_ether_addr(dev_addr, net->dev_addr))
948 		dev_warn(&g->dev,
949 			"using random %s ethernet address\n", "self");
950 
951 	if (get_host_ether_addr(host_ethaddr, dev->host_mac))
952 		dev_warn(&g->dev, "using random %s ethernet address\n", "host");
953 	else
954 		dev_warn(&g->dev, "using previous %s ethernet address\n", "host");
955 
956 	if (ethaddr)
957 		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
958 
959 	net->netdev_ops = &eth_netdev_ops;
960 
961 	net->ethtool_ops = &ops;
962 
963 	dev->gadget = g;
964 	SET_NETDEV_DEV(net, &g->dev);
965 	SET_NETDEV_DEVTYPE(net, &gadget_type);
966 
967 	status = register_netdev(net);
968 	if (status < 0) {
969 		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
970 		free_netdev(net);
971 		dev = ERR_PTR(status);
972 	} else {
973 		INFO(dev, "MAC %pM\n", net->dev_addr);
974 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
975 
976 		/*
977 		 * two kinds of host-initiated state changes:
978 		 *  - iff DATA transfer is active, carrier is "on"
979 		 *  - tx queueing enabled if open *and* carrier is "on"
980 		 */
981 		netif_carrier_off(net);
982 	}
983 
984 	return dev;
985 }
986 EXPORT_SYMBOL_GPL(gether_setup_name);
987 
988 struct net_device *gether_setup_name_default(const char *netname)
989 {
990 	struct net_device	*net;
991 	struct eth_dev		*dev;
992 
993 	net = alloc_etherdev(sizeof(*dev));
994 	if (!net)
995 		return ERR_PTR(-ENOMEM);
996 
997 	dev = netdev_priv(net);
998 	spin_lock_init(&dev->lock);
999 	spin_lock_init(&dev->req_lock);
1000 	INIT_WORK(&dev->work, eth_work);
1001 	INIT_WORK(&dev->rx_work, process_rx_w);
1002 	INIT_LIST_HEAD(&dev->tx_reqs);
1003 	INIT_LIST_HEAD(&dev->rx_reqs);
1004 
1005 	skb_queue_head_init(&dev->rx_frames);
1006 
1007 	/* network device setup */
1008 	dev->net = net;
1009 	dev->qmult = QMULT_DEFAULT;
1010 	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
1011 
1012 	eth_random_addr(dev->dev_mac);
1013 	pr_warn("using random %s ethernet address\n", "self");
1014 	if (get_host_ether_addr(host_ethaddr, dev->host_mac))
1015 		pr_warn("using random %s ethernet address\n", "host");
1016 	else
1017 		pr_warn("using previous %s ethernet address\n", "host");
1018 
1019 	net->netdev_ops = &eth_netdev_ops;
1020 
1021 	net->ethtool_ops = &ops;
1022 	SET_NETDEV_DEVTYPE(net, &gadget_type);
1023 
1024 	return net;
1025 }
1026 EXPORT_SYMBOL_GPL(gether_setup_name_default);
1027 
1028 int gether_register_netdev(struct net_device *net)
1029 {
1030 	struct eth_dev *dev;
1031 	struct usb_gadget *g;
1032 	struct sockaddr sa;
1033 	int status;
1034 
1035 	if (!net->dev.parent)
1036 		return -EINVAL;
1037 	dev = netdev_priv(net);
1038 	g = dev->gadget;
1039 	status = register_netdev(net);
1040 	if (status < 0) {
1041 		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
1042 		return status;
1043 	} else {
1044 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
1045 
1046 		/* two kinds of host-initiated state changes:
1047 		 *  - iff DATA transfer is active, carrier is "on"
1048 		 *  - tx queueing enabled if open *and* carrier is "on"
1049 		 */
1050 		netif_carrier_off(net);
1051 	}
1052 	sa.sa_family = net->type;
1053 	memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
1054 	rtnl_lock();
1055 	status = dev_set_mac_address(net, &sa);
1056 	rtnl_unlock();
1057 	if (status)
1058 		pr_warn("cannot set self ethernet address: %d\n", status);
1059 	else
1060 		INFO(dev, "MAC %pM\n", dev->dev_mac);
1061 
1062 	return status;
1063 }
1064 EXPORT_SYMBOL_GPL(gether_register_netdev);
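/* With the configfs/function-instance style of setup the link is typically
 * brought up in two stages (sketch only, error handling omitted):
 *
 *	net = gether_setup_name_default("usb");	(allocate, not yet registered)
 *	gether_set_gadget(net, gadget);
 *	gether_set_qmult(net, qmult);		(optional overrides)
 *	status = gether_register_netdev(net);	(register netdev, set MAC)
 */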
1065 
1066 void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
1067 {
1068 	struct eth_dev *dev;
1069 
1070 	dev = netdev_priv(net);
1071 	dev->gadget = g;
1072 	SET_NETDEV_DEV(net, &g->dev);
1073 }
1074 EXPORT_SYMBOL_GPL(gether_set_gadget);
1075 
1076 int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
1077 {
1078 	struct eth_dev *dev;
1079 	u8 new_addr[ETH_ALEN];
1080 
1081 	dev = netdev_priv(net);
1082 	if (get_ether_addr(dev_addr, new_addr))
1083 		return -EINVAL;
1084 	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
1085 	return 0;
1086 }
1087 EXPORT_SYMBOL_GPL(gether_set_dev_addr);
1088 
1089 int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
1090 {
1091 	struct eth_dev *dev;
1092 
1093 	dev = netdev_priv(net);
1094 	return get_ether_addr_str(dev->dev_mac, dev_addr, len);
1095 }
1096 EXPORT_SYMBOL_GPL(gether_get_dev_addr);
1097 
1098 int gether_set_host_addr(struct net_device *net, const char *host_addr)
1099 {
1100 	struct eth_dev *dev;
1101 	u8 new_addr[ETH_ALEN];
1102 
1103 	dev = netdev_priv(net);
1104 	if (get_ether_addr(host_addr, new_addr))
1105 		return -EINVAL;
1106 	memcpy(dev->host_mac, new_addr, ETH_ALEN);
1107 	return 0;
1108 }
1109 EXPORT_SYMBOL_GPL(gether_set_host_addr);
1110 
1111 int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
1112 {
1113 	struct eth_dev *dev;
1114 
1115 	dev = netdev_priv(net);
1116 	return get_ether_addr_str(dev->host_mac, host_addr, len);
1117 }
1118 EXPORT_SYMBOL_GPL(gether_get_host_addr);
1119 
1120 int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
1121 {
1122 	struct eth_dev *dev;
1123 
1124 	if (len < 13)
1125 		return -EINVAL;
1126 
1127 	dev = netdev_priv(net);
1128 	snprintf(host_addr, len, "%pm", dev->host_mac);
1129 
1130 	return strlen(host_addr);
1131 }
1132 EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
1133 
1134 void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
1135 {
1136 	struct eth_dev *dev;
1137 
1138 	dev = netdev_priv(net);
1139 	memcpy(host_mac, dev->host_mac, ETH_ALEN);
1140 }
1141 EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);
1142 
1143 void gether_set_qmult(struct net_device *net, unsigned qmult)
1144 {
1145 	struct eth_dev *dev;
1146 
1147 	dev = netdev_priv(net);
1148 	dev->qmult = qmult;
1149 }
1150 EXPORT_SYMBOL_GPL(gether_set_qmult);
1151 
1152 unsigned gether_get_qmult(struct net_device *net)
1153 {
1154 	struct eth_dev *dev;
1155 
1156 	dev = netdev_priv(net);
1157 	return dev->qmult;
1158 }
1159 EXPORT_SYMBOL_GPL(gether_get_qmult);
1160 
1161 int gether_get_ifname(struct net_device *net, char *name, int len)
1162 {
1163 	rtnl_lock();
1164 	strlcpy(name, netdev_name(net), len);
1165 	rtnl_unlock();
1166 	return strlen(name);
1167 }
1168 EXPORT_SYMBOL_GPL(gether_get_ifname);
1169 
1170 /**
1171  * gether_cleanup - remove Ethernet-over-USB device
1172  * Context: may sleep
1173  *
1174  * This is called to free all resources allocated by @gether_setup().
1175  */
1176 void gether_cleanup(struct eth_dev *dev)
1177 {
1178 	if (!dev)
1179 		return;
1180 
1181 	unregister_netdev(dev->net);
1182 	flush_work(&dev->work);
1183 	free_netdev(dev->net);
1184 }
1185 EXPORT_SYMBOL_GPL(gether_cleanup);
1186 
1187 /**
1188  * gether_connect - notify network layer that USB link is active
1189  * @link: the USB link, set up with endpoints, descriptors matching
1190  *	current device speed, and any framing wrapper(s) set up.
1191  * Context: irqs blocked
1192  *
1193  * This is called to activate endpoints and let the network layer know
1194  * the connection is active ("carrier detect").  It may cause the I/O
1195  * queues to open and start letting network packets flow, but will in
1196  * any case activate the endpoints so that they respond properly to the
1197  * USB host.
1198  *
1199  * Verify net_device pointer returned using IS_ERR().  If it doesn't
1200  * indicate some error code (negative errno), ep->driver_data values
1201  * have been overwritten.
1202  */
1203 struct net_device *gether_connect(struct gether *link)
1204 {
1205 	struct eth_dev		*dev = link->ioport;
1206 	int			result = 0;
1207 
1208 	if (!dev)
1209 		return ERR_PTR(-EINVAL);
1210 
1211 	link->in_ep->driver_data = dev;
1212 	result = usb_ep_enable(link->in_ep);
1213 	if (result != 0) {
1214 		DBG(dev, "enable %s --> %d\n",
1215 			link->in_ep->name, result);
1216 		goto fail0;
1217 	}
1218 
1219 	link->out_ep->driver_data = dev;
1220 	result = usb_ep_enable(link->out_ep);
1221 	if (result != 0) {
1222 		DBG(dev, "enable %s --> %d\n",
1223 			link->out_ep->name, result);
1224 		goto fail1;
1225 	}
1226 
1227 	if (result == 0)
1228 		result = alloc_requests(dev, link, qlen(dev->gadget,
1229 					dev->qmult));
1230 
1231 	if (result == 0) {
1232 		dev->zlp = link->is_zlp_ok;
1233 		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));
1234 
1235 		dev->header_len = link->header_len;
1236 		dev->unwrap = link->unwrap;
1237 		dev->wrap = link->wrap;
1238 		dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
1239 		dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
1240 
1241 		spin_lock(&dev->lock);
1242 		dev->tx_skb_hold_count = 0;
1243 		dev->no_tx_req_used = 0;
1244 		dev->tx_req_bufsize = 0;
1245 		dev->port_usb = link;
1246 		if (netif_running(dev->net)) {
1247 			if (link->open)
1248 				link->open(link);
1249 		} else {
1250 			if (link->close)
1251 				link->close(link);
1252 		}
1253 		spin_unlock(&dev->lock);
1254 
1255 		netif_carrier_on(dev->net);
1256 		if (netif_running(dev->net))
1257 			eth_start(dev, GFP_ATOMIC);
1258 
1259 	/* on error, disable any endpoints  */
1260 	} else {
1261 		(void) usb_ep_disable(link->out_ep);
1262 fail1:
1263 		(void) usb_ep_disable(link->in_ep);
1264 	}
1265 fail0:
1266 	/* caller is responsible for cleanup on error */
1267 	if (result < 0)
1268 		return ERR_PTR(result);
1269 	return dev->net;
1270 }
1271 EXPORT_SYMBOL_GPL(gether_connect);
1272 
1273 /**
1274  * gether_disconnect - notify network layer that USB link is inactive
1275  * @link: the USB link, on which gether_connect() was called
1276  * Context: irqs blocked
1277  *
1278  * This is called to deactivate endpoints and let the network layer know
1279  * the connection went inactive ("no carrier").
1280  *
1281  * On return, the state is as if gether_connect() had never been called.
1282  * The endpoints are inactive, and accordingly without active USB I/O.
1283  * Pointers to endpoint descriptors and endpoint private data are nulled.
1284  */
1285 void gether_disconnect(struct gether *link)
1286 {
1287 	struct eth_dev		*dev = link->ioport;
1288 	struct usb_request	*req;
1289 	struct sk_buff		*skb;
1290 
1291 	WARN_ON(!dev);
1292 	if (!dev)
1293 		return;
1294 
1295 	DBG(dev, "%s\n", __func__);
1296 
1297 	netif_stop_queue(dev->net);
1298 	netif_carrier_off(dev->net);
1299 
1300 	/* disable endpoints, forcing (synchronous) completion
1301 	 * of all pending i/o.  then free the request objects
1302 	 * and forget about the endpoints.
1303 	 */
1304 	usb_ep_disable(link->in_ep);
1305 	spin_lock(&dev->req_lock);
1306 	while (!list_empty(&dev->tx_reqs)) {
1307 		req = container_of(dev->tx_reqs.next,
1308 					struct usb_request, list);
1309 		list_del(&req->list);
1310 
1311 		spin_unlock(&dev->req_lock);
1312 		if (link->multi_pkt_xfer)
1313 			kfree(req->buf);
1314 		usb_ep_free_request(link->in_ep, req);
1315 		spin_lock(&dev->req_lock);
1316 	}
1317 	spin_unlock(&dev->req_lock);
1318 	link->in_ep->driver_data = NULL;
1319 	link->in_ep->desc = NULL;
1320 
1321 	usb_ep_disable(link->out_ep);
1322 	spin_lock(&dev->req_lock);
1323 	while (!list_empty(&dev->rx_reqs)) {
1324 		req = container_of(dev->rx_reqs.next,
1325 					struct usb_request, list);
1326 		list_del(&req->list);
1327 
1328 		spin_unlock(&dev->req_lock);
1329 		usb_ep_free_request(link->out_ep, req);
1330 		spin_lock(&dev->req_lock);
1331 	}
1332 	spin_unlock(&dev->req_lock);
1333 
1334 	spin_lock(&dev->rx_frames.lock);
1335 	while ((skb = __skb_dequeue(&dev->rx_frames)))
1336 		dev_kfree_skb_any(skb);
1337 	spin_unlock(&dev->rx_frames.lock);
1338 
1339 	link->out_ep->driver_data = NULL;
1340 	link->out_ep->desc = NULL;
1341 
1342 	/* finish forgetting about this USB link episode */
1343 	dev->header_len = 0;
1344 	dev->unwrap = NULL;
1345 	dev->wrap = NULL;
1346 
1347 	spin_lock(&dev->lock);
1348 	dev->port_usb = NULL;
1349 	spin_unlock(&dev->lock);
1350 }
1351 EXPORT_SYMBOL_GPL(gether_disconnect);
1352 
1353 static int __init gether_init(void)
1354 {
1355 	uether_wq  = create_singlethread_workqueue("uether");
1356 	if (!uether_wq) {
1357 		pr_err("%s: Unable to create workqueue: uether\n", __func__);
1358 		return -ENOMEM;
1359 	}
1360 	return 0;
1361 }
1362 module_init(gether_init);
1363 
1364 static void __exit gether_exit(void)
1365 {
1366 	destroy_workqueue(uether_wq);
1367 
1368 }
1369 module_exit(gether_exit);
1370 MODULE_AUTHOR("David Brownell");
1371 MODULE_DESCRIPTION("ethernet over USB driver");
1372 MODULE_LICENSE("GPL v2");
1373