/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/microchipphy.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.1"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID		(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Byte Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Byte Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
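
/* Keep the member order here in lockstep with lan78xx_gstrings above:
 * lan78xx_get_stats() copies this struct into the ethtool data array one
 * u32 at a time, with no per-field mapping.
 */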

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			devid;
	struct mii_bus		*mdiobus;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
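
/* Illustrative pattern (a sketch, not extra functionality): most register
 * updates in this driver are read-modify-write cycles built on the two
 * helpers above, e.g.
 *
 *	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
 *	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
 *	ret = lan78xx_write_reg(dev, MAC_CR, buf);
 *
 * The helpers stage the value in a kmalloc'd buffer rather than on the
 * stack because usb_control_msg() needs a DMA-able transfer buffer.
 */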

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

/* Loop until the read is completed, with timeout. Called with phy_mutex held. */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}
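
/* Worked example: mii_access(phy_id, MII_BMSR, MII_READ) packs the PHY
 * address and register index into their MII_ACC fields and sets the read
 * and busy bits; the hardware clears MII_ACC_MII_BUSY_ when the MDIO
 * cycle finishes, which lan78xx_phy_wait_not_busy() above polls for.
 */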

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0))
			return -EIO;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0))
			return -EIO;

		data[i] = val & 0xFF;
		offset++;
	}

	return 0;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}
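
/* Usage sketch: lan78xx_init_mac_address() below fetches the station
 * address through this path as, roughly,
 *
 *	u8 addr[ETH_ALEN];
 *	ret = lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr);
 *
 * i.e. byte 0 must hold EEPROM_INDICATOR (0xA5) before the per-byte
 * E2P_CMD/E2P_DATA reads are issued.
 */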

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	int i, ret;

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0))
		return -EIO;

	ret = lan78xx_wait_eeprom(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		if (ret < 0)
			return ret;

		offset++;
	}

	return 0;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32	temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}
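
/* The two words built above mirror the MAF_HI/MAF_LO register pair:
 * pfilter_table[index][1] holds addr[3..0] (addr[0] in the low byte) and
 * goes to MAF_LO(index), while pfilter_table[index][0] holds addr[5..4]
 * plus the valid and destination-type flags and goes to MAF_HI(index);
 * see lan78xx_deferred_multicast_write() below.
 */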

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
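
/* Worked example: ether_crc() yields the 32-bit FCS CRC of the address;
 * keeping bits 31..23 gives a 9-bit value (0..511) indexing the 512-bit
 * multicast hash table, where word (bitnum / 32) of mchash_table gets bit
 * (bitnum % 32) set (see lan78xx_set_multicast() below).
 */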

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
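
/* Note on the split above: ndo_set_rx_mode callbacks run in atomic
 * context (with the address list lock held), so this function only
 * rebuilds the shadow tables under rfe_ctl_lock; the USB register writes,
 * which may sleep, are left to lan78xx_deferred_multicast_write() via the
 * scheduled work item.
 */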

static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;

	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

	if (cap & FLOW_CTRL_TX)
		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;
		netif_carrier_off(dev->net);

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		netif_carrier_on(dev->net);

		tasklet_schedule(&dev->bh);
	}

	return ret;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Allow entire eeprom update only */
	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
	    (ee->offset == 0) &&
	    (ee->len == 512) &&
	    (data[0] == EEPROM_INDICATOR))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);

	return -EINVAL;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_statstage lan78xx_stat;
	u32 *p;
	int i;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
		p = (u32 *)&lan78xx_stat;
		for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
			data[i] = p[i];
	}

	usb_autopm_put_interface(dev->intf);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

1047 	ret = usb_autopm_get_interface(dev->intf);
1048 	if (ret < 0)
1049 		return ret;
1050 
1051 	if (wol->wolopts & ~WAKE_ALL)
1052 		return -EINVAL;
1053 
1054 	pdata->wol = wol->wolopts;
1055 
1056 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1057 
1058 	phy_ethtool_set_wol(netdev->phydev, wol);
1059 
1060 	usb_autopm_put_interface(dev->intf);
1061 
1062 	return ret;
1063 }
1064 
lan78xx_get_eee(struct net_device * net,struct ethtool_eee * edata)1065 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1066 {
1067 	struct lan78xx_net *dev = netdev_priv(net);
1068 	struct phy_device *phydev = net->phydev;
1069 	int ret;
1070 	u32 buf;
1071 
1072 	ret = usb_autopm_get_interface(dev->intf);
1073 	if (ret < 0)
1074 		return ret;
1075 
1076 	ret = phy_ethtool_get_eee(phydev, edata);
1077 	if (ret < 0)
1078 		goto exit;
1079 
1080 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1081 	if (buf & MAC_CR_EEE_EN_) {
1082 		edata->eee_enabled = true;
1083 		edata->eee_active = !!(edata->advertised &
1084 				       edata->lp_advertised);
1085 		edata->tx_lpi_enabled = true;
1086 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1087 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1088 		edata->tx_lpi_timer = buf;
1089 	} else {
1090 		edata->eee_enabled = false;
1091 		edata->eee_active = false;
1092 		edata->tx_lpi_enabled = false;
1093 		edata->tx_lpi_timer = 0;
1094 	}
1095 
1096 	ret = 0;
1097 exit:
1098 	usb_autopm_put_interface(dev->intf);
1099 
1100 	return ret;
1101 }
1102 
lan78xx_set_eee(struct net_device * net,struct ethtool_eee * edata)1103 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1104 {
1105 	struct lan78xx_net *dev = netdev_priv(net);
1106 	int ret;
1107 	u32 buf;
1108 
1109 	ret = usb_autopm_get_interface(dev->intf);
1110 	if (ret < 0)
1111 		return ret;
1112 
1113 	if (edata->eee_enabled) {
1114 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1115 		buf |= MAC_CR_EEE_EN_;
1116 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1117 
1118 		phy_ethtool_set_eee(net->phydev, edata);
1119 
1120 		buf = (u32)edata->tx_lpi_timer;
1121 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1122 	} else {
1123 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1124 		buf &= ~MAC_CR_EEE_EN_;
1125 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1126 	}
1127 
1128 	usb_autopm_put_interface(dev->intf);
1129 
1130 	return 0;
1131 }
1132 
lan78xx_get_link(struct net_device * net)1133 static u32 lan78xx_get_link(struct net_device *net)
1134 {
1135 	phy_read_status(net->phydev);
1136 
1137 	return net->phydev->link;
1138 }
1139 
lan78xx_nway_reset(struct net_device * net)1140 int lan78xx_nway_reset(struct net_device *net)
1141 {
1142 	return phy_start_aneg(net->phydev);
1143 }
1144 
lan78xx_get_drvinfo(struct net_device * net,struct ethtool_drvinfo * info)1145 static void lan78xx_get_drvinfo(struct net_device *net,
1146 				struct ethtool_drvinfo *info)
1147 {
1148 	struct lan78xx_net *dev = netdev_priv(net);
1149 
1150 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1151 	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1152 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1153 }
1154 
lan78xx_get_msglevel(struct net_device * net)1155 static u32 lan78xx_get_msglevel(struct net_device *net)
1156 {
1157 	struct lan78xx_net *dev = netdev_priv(net);
1158 
1159 	return dev->msg_enable;
1160 }
1161 
lan78xx_set_msglevel(struct net_device * net,u32 level)1162 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1163 {
1164 	struct lan78xx_net *dev = netdev_priv(net);
1165 
1166 	dev->msg_enable = level;
1167 }
1168 
lan78xx_get_mdix_status(struct net_device * net)1169 static int lan78xx_get_mdix_status(struct net_device *net)
1170 {
1171 	struct phy_device *phydev = net->phydev;
1172 	int buf;
1173 
1174 	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1175 	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1176 	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1177 
1178 	return buf;
1179 }
1180 
lan78xx_set_mdix_status(struct net_device * net,__u8 mdix_ctrl)1181 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1182 {
1183 	struct lan78xx_net *dev = netdev_priv(net);
1184 	struct phy_device *phydev = net->phydev;
1185 	int buf;
1186 
1187 	if (mdix_ctrl == ETH_TP_MDI) {
1188 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1189 			  LAN88XX_EXT_PAGE_SPACE_1);
1190 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1191 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1192 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1193 			  buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1194 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1195 			  LAN88XX_EXT_PAGE_SPACE_0);
1196 	} else if (mdix_ctrl == ETH_TP_MDI_X) {
1197 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1198 			  LAN88XX_EXT_PAGE_SPACE_1);
1199 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1200 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1201 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1202 			  buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1203 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1204 			  LAN88XX_EXT_PAGE_SPACE_0);
1205 	} else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1206 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1207 			  LAN88XX_EXT_PAGE_SPACE_1);
1208 		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1209 		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1210 		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1211 			  buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1212 		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1213 			  LAN88XX_EXT_PAGE_SPACE_0);
1214 	}
1215 	dev->mdix_ctrl = mdix_ctrl;
1216 }
1217 
lan78xx_get_settings(struct net_device * net,struct ethtool_cmd * cmd)1218 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1219 {
1220 	struct lan78xx_net *dev = netdev_priv(net);
1221 	struct phy_device *phydev = net->phydev;
1222 	int ret;
1223 	int buf;
1224 
1225 	ret = usb_autopm_get_interface(dev->intf);
1226 	if (ret < 0)
1227 		return ret;
1228 
1229 	ret = phy_ethtool_gset(phydev, cmd);
1230 
1231 	buf = lan78xx_get_mdix_status(net);
1232 
1233 	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1234 	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1235 		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1236 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1237 	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1238 		cmd->eth_tp_mdix = ETH_TP_MDI;
1239 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1240 	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1241 		cmd->eth_tp_mdix = ETH_TP_MDI_X;
1242 		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1243 	}
1244 
1245 	usb_autopm_put_interface(dev->intf);
1246 
1247 	return ret;
1248 }
1249 
lan78xx_set_settings(struct net_device * net,struct ethtool_cmd * cmd)1250 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1251 {
1252 	struct lan78xx_net *dev = netdev_priv(net);
1253 	struct phy_device *phydev = net->phydev;
1254 	int ret = 0;
1255 	int temp;
1256 
1257 	ret = usb_autopm_get_interface(dev->intf);
1258 	if (ret < 0)
1259 		return ret;
1260 
1261 	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1262 		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
1263 	}
1264 
1265 	/* change speed & duplex */
1266 	ret = phy_ethtool_sset(phydev, cmd);
1267 
1268 	if (!cmd->autoneg) {
1269 		/* force link down */
1270 		temp = phy_read(phydev, MII_BMCR);
1271 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1272 		mdelay(1);
1273 		phy_write(phydev, MII_BMCR, temp);
1274 	}
1275 
1276 	usb_autopm_put_interface(dev->intf);
1277 
1278 	return ret;
1279 }
1280 
1281 static const struct ethtool_ops lan78xx_ethtool_ops = {
1282 	.get_link	= lan78xx_get_link,
1283 	.nway_reset	= lan78xx_nway_reset,
1284 	.get_drvinfo	= lan78xx_get_drvinfo,
1285 	.get_msglevel	= lan78xx_get_msglevel,
1286 	.set_msglevel	= lan78xx_set_msglevel,
1287 	.get_settings	= lan78xx_get_settings,
1288 	.set_settings	= lan78xx_set_settings,
1289 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1290 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1291 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1292 	.get_ethtool_stats = lan78xx_get_stats,
1293 	.get_sset_count = lan78xx_get_sset_count,
1294 	.get_strings	= lan78xx_get_strings,
1295 	.get_wol	= lan78xx_get_wol,
1296 	.set_wol	= lan78xx_set_wol,
1297 	.get_eee	= lan78xx_get_eee,
1298 	.set_eee	= lan78xx_set_eee,
1299 };
1300 
lan78xx_ioctl(struct net_device * netdev,struct ifreq * rq,int cmd)1301 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1302 {
1303 	if (!netif_running(netdev))
1304 		return -EINVAL;
1305 
1306 	return phy_mii_ioctl(netdev->phydev, rq, cmd);
1307 }
1308 
lan78xx_init_mac_address(struct lan78xx_net * dev)1309 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1310 {
1311 	u32 addr_lo, addr_hi;
1312 	int ret;
1313 	u8 addr[6];
1314 
1315 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1316 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1317 
1318 	addr[0] = addr_lo & 0xFF;
1319 	addr[1] = (addr_lo >> 8) & 0xFF;
1320 	addr[2] = (addr_lo >> 16) & 0xFF;
1321 	addr[3] = (addr_lo >> 24) & 0xFF;
1322 	addr[4] = addr_hi & 0xFF;
1323 	addr[5] = (addr_hi >> 8) & 0xFF;
1324 
1325 	if (!is_valid_ether_addr(addr)) {
1326 		/* reading mac address from EEPROM or OTP */
1327 		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1328 					 addr) == 0) ||
1329 		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1330 				      addr) == 0)) {
1331 			if (is_valid_ether_addr(addr)) {
1332 				/* eeprom values are valid so use them */
1333 				netif_dbg(dev, ifup, dev->net,
1334 					  "MAC address read from EEPROM");
1335 			} else {
1336 				/* generate random MAC */
1337 				random_ether_addr(addr);
1338 				netif_dbg(dev, ifup, dev->net,
1339 					  "MAC address set to random addr");
1340 			}
1341 
1342 			addr_lo = addr[0] | (addr[1] << 8) |
1343 				  (addr[2] << 16) | (addr[3] << 24);
1344 			addr_hi = addr[4] | (addr[5] << 8);
1345 
1346 			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1347 			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1348 		} else {
1349 			/* generate random MAC */
1350 			random_ether_addr(addr);
1351 			netif_dbg(dev, ifup, dev->net,
1352 				  "MAC address set to random addr");
1353 		}
1354 	}
1355 
1356 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1357 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1358 
1359 	ether_addr_copy(dev->net->dev_addr, addr);
1360 }
1361 
1362 /* MDIO read and write wrappers for phylib */
lan78xx_mdiobus_read(struct mii_bus * bus,int phy_id,int idx)1363 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1364 {
1365 	struct lan78xx_net *dev = bus->priv;
1366 	u32 val, addr;
1367 	int ret;
1368 
1369 	ret = usb_autopm_get_interface(dev->intf);
1370 	if (ret < 0)
1371 		return ret;
1372 
1373 	mutex_lock(&dev->phy_mutex);
1374 
1375 	/* confirm MII not busy */
1376 	ret = lan78xx_phy_wait_not_busy(dev);
1377 	if (ret < 0)
1378 		goto done;
1379 
1380 	/* set the address, index & direction (read from PHY) */
1381 	addr = mii_access(phy_id, idx, MII_READ);
1382 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1383 
1384 	ret = lan78xx_phy_wait_not_busy(dev);
1385 	if (ret < 0)
1386 		goto done;
1387 
1388 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
1389 
1390 	ret = (int)(val & 0xFFFF);
1391 
1392 done:
1393 	mutex_unlock(&dev->phy_mutex);
1394 	usb_autopm_put_interface(dev->intf);
1395 	return ret;
1396 }
1397 
lan78xx_mdiobus_write(struct mii_bus * bus,int phy_id,int idx,u16 regval)1398 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1399 				 u16 regval)
1400 {
1401 	struct lan78xx_net *dev = bus->priv;
1402 	u32 val, addr;
1403 	int ret;
1404 
1405 	ret = usb_autopm_get_interface(dev->intf);
1406 	if (ret < 0)
1407 		return ret;
1408 
1409 	mutex_lock(&dev->phy_mutex);
1410 
1411 	/* confirm MII not busy */
1412 	ret = lan78xx_phy_wait_not_busy(dev);
1413 	if (ret < 0)
1414 		goto done;
1415 
1416 	val = (u32)regval;
1417 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1418 
1419 	/* set the address, index & direction (write to PHY) */
1420 	addr = mii_access(phy_id, idx, MII_WRITE);
1421 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1422 
1423 	ret = lan78xx_phy_wait_not_busy(dev);
1424 	if (ret < 0)
1425 		goto done;
1426 
1427 done:
1428 	mutex_unlock(&dev->phy_mutex);
1429 	usb_autopm_put_interface(dev->intf);
	return ret;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;
	int i;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	dev->mdiobus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!dev->mdiobus->irq) {
		ret = -ENOMEM;
		goto exit1;
	}

	/* handle our own interrupt */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;

	switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
	case 0x78000000:
	case 0x78500000:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit2;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit2:
	kfree(dev->mdiobus->irq);
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	kfree(dev->mdiobus->irq);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}

static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_err(dev->net, "no PHY found\n");
		return -EIO;
	}

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 PHY_INTERFACE_MODE_GMII);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		return -EIO;
	}

	/* set to AUTOMDIX */
	lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;
	phydev->supported |= (SUPPORTED_10baseT_Half |
			      SUPPORTED_10baseT_Full |
			      SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_1000baseT_Full |
			      SUPPORTED_Pause | SUPPORTED_Asym_Pause);
	genphy_config_aneg(phydev);

	/* Workaround to enable the PHY interrupt.
	 * phy_start_interrupts() is the API for requesting and enabling
	 * PHY interrupts, but a USB-to-Ethernet device can't use the
	 * request_irq() that phy_start_interrupts() calls.
	 * So leave the PHY in PHY_HALTED via phy_stop() and then call
	 * phy_start(), which ends up calling phy_enable_interrupts().
	 */
	phy_stop(phydev);
	phy_start(phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	return 0;
}

static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}

static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;
		/* Take a reference on the URB so it cannot be freed while
		 * usb_unlink_urb() runs; usb_unlink_urb() always races with
		 * the .complete handler (including defer_bh), which could
		 * otherwise lead to a use-after-free.
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}

static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
		return -EINVAL;

	if (new_mtu <= 0)
		return -EINVAL;
	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
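
/* Why -EDOM above: a USB bulk transfer whose length is an exact multiple
 * of the endpoint's wMaxPacketSize must be terminated with a zero-length
 * packet. For example, with a 512-byte high-speed bulk endpoint, an
 * ll_mtu of 1536 (3 * 512) would require such a ZLP, so those MTUs are
 * rejected.
 */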

int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}

/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}

static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}

static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}

static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
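
/* VID bookkeeping example: vid 100 maps to vlan_table word 100 >> 5 = 3,
 * bit 100 & 0x1F = 4; the table holds one bit for each of the 4096
 * possible VIDs.
 */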
1761 
lan78xx_init_ltm(struct lan78xx_net * dev)1762 static void lan78xx_init_ltm(struct lan78xx_net *dev)
1763 {
1764 	int ret;
1765 	u32 buf;
1766 	u32 regs[6] = { 0 };
1767 
1768 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1769 	if (buf & USB_CFG1_LTM_ENABLE_) {
1770 		u8 temp[2];
1771 		/* Get values from EEPROM first */
1772 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
1773 			if (temp[0] == 24) {
1774 				ret = lan78xx_read_raw_eeprom(dev,
1775 							      temp[1] * 2,
1776 							      24,
1777 							      (u8 *)regs);
1778 				if (ret < 0)
1779 					return;
1780 			}
1781 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
1782 			if (temp[0] == 24) {
1783 				ret = lan78xx_read_raw_otp(dev,
1784 							   temp[1] * 2,
1785 							   24,
1786 							   (u8 *)regs);
1787 				if (ret < 0)
1788 					return;
1789 			}
1790 		}
1791 	}
1792 
1793 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
1794 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
1795 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
1796 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
1797 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
1798 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
1799 }
1800 
lan78xx_reset(struct lan78xx_net * dev)1801 static int lan78xx_reset(struct lan78xx_net *dev)
1802 {
1803 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1804 	u32 buf;
1805 	int ret = 0;
1806 	unsigned long timeout;
1807 
1808 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1809 	buf |= HW_CFG_LRST_;
1810 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
1811 
1812 	timeout = jiffies + HZ;
1813 	do {
1814 		mdelay(1);
1815 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1816 		if (time_after(jiffies, timeout)) {
1817 			netdev_warn(dev->net,
1818 				    "timeout on completion of LiteReset");
1819 			return -EIO;
1820 		}
1821 	} while (buf & HW_CFG_LRST_);
1822 
1823 	lan78xx_init_mac_address(dev);
1824 
1825 	/* save DEVID for later usage */
1826 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
1827 	dev->devid = buf;
1828 
1829 	/* Respond to the IN token with a NAK */
1830 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1831 	buf |= USB_CFG_BIR_;
1832 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1833 
1834 	/* Init LTM */
1835 	lan78xx_init_ltm(dev);
1836 
1837 	dev->net->hard_header_len += TX_OVERHEAD;
1838 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1839 
1840 	if (dev->udev->speed == USB_SPEED_SUPER) {
1841 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
1842 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1843 		dev->rx_qlen = 4;
1844 		dev->tx_qlen = 4;
1845 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
1846 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
1847 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1848 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
1849 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
1850 	} else {
1851 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
1852 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
1853 		dev->rx_qlen = 4;
1854 		dev->tx_qlen = 4;
1855 	}
1856 
1857 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
1858 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
1859 
1860 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
1861 	buf |= HW_CFG_MEF_;
1862 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
1863 
1864 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1865 	buf |= USB_CFG_BCE_;
1866 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
1867 
1868 	/* set FIFO sizes */
1869 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
1870 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
1871 
1872 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
1873 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
1874 
1875 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
1876 	ret = lan78xx_write_reg(dev, FLOW, 0);
1877 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
1878 
1879 	/* Don't need rfe_ctl_lock during initialisation */
1880 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
1881 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
1882 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1883 
1884 	/* Enable or disable checksum offload engines */
1885 	lan78xx_set_features(dev->net, dev->net->features);
1886 
1887 	lan78xx_set_multicast(dev->net);
1888 
1889 	/* reset PHY */
1890 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1891 	buf |= PMT_CTL_PHY_RST_;
1892 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
1893 
1894 	timeout = jiffies + HZ;
1895 	do {
1896 		mdelay(1);
1897 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
1898 		if (time_after(jiffies, timeout)) {
1899 			netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
1900 			return -EIO;
1901 		}
1902 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
1903 
1904 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1905 	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
1906 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
1907 
1908 	/* enable PHY interrupts */
1909 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1910 	buf |= INT_ENP_PHY_INT;
1911 	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
1912 
1913 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
1914 	buf |= MAC_TX_TXEN_;
1915 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
1916 
1917 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
1918 	buf |= FCT_TX_CTL_EN_;
1919 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
1920 
1921 	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
1922 
1923 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1924 	buf |= MAC_RX_RXEN_;
1925 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
1926 
1927 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
1928 	buf |= FCT_RX_CTL_EN_;
1929 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
1930 
1931 	return 0;
1932 }
1933 
1934 static int lan78xx_open(struct net_device *net)
1935 {
1936 	struct lan78xx_net *dev = netdev_priv(net);
1937 	int ret;
1938 
1939 	ret = usb_autopm_get_interface(dev->intf);
1940 	if (ret < 0)
1941 		goto out;
1942 
1943 	ret = lan78xx_reset(dev);
1944 	if (ret < 0)
1945 		goto done;
1946 
1947 	ret = lan78xx_phy_init(dev);
1948 	if (ret < 0)
1949 		goto done;
1950 
1951 	/* for Link Check */
1952 	if (dev->urb_intr) {
1953 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
1954 		if (ret < 0) {
1955 			netif_err(dev, ifup, dev->net,
1956 				  "intr submit %d\n", ret);
1957 			goto done;
1958 		}
1959 	}
1960 
1961 	set_bit(EVENT_DEV_OPEN, &dev->flags);
1962 
1963 	netif_start_queue(net);
1964 
1965 	dev->link_on = false;
1966 
1967 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1968 done:
1969 	usb_autopm_put_interface(dev->intf);
1970 
1971 out:
1972 	return ret;
1973 }
1974 
1975 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
1976 {
1977 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
1978 	DECLARE_WAITQUEUE(wait, current);
1979 	int temp;
1980 
1981 	/* ensure there are no more active urbs */
1982 	add_wait_queue(&unlink_wakeup, &wait);
1983 	set_current_state(TASK_UNINTERRUPTIBLE);
1984 	dev->wait = &unlink_wakeup;
1985 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
1986 
1987 	/* maybe wait for deletions to finish. */
1988 	while (!skb_queue_empty(&dev->rxq) ||
1989 	       !skb_queue_empty(&dev->txq) ||
1990 	       !skb_queue_empty(&dev->done)) {
1991 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
1992 		set_current_state(TASK_UNINTERRUPTIBLE);
1993 		netif_dbg(dev, ifdown, dev->net,
1994 			  "waited for %d urb completions\n", temp);
1995 	}
1996 	set_current_state(TASK_RUNNING);
1997 	dev->wait = NULL;
1998 	remove_wait_queue(&unlink_wakeup, &wait);
1999 }
2000 
2001 int lan78xx_stop(struct net_device *net)
2002 {
2003 	struct lan78xx_net		*dev = netdev_priv(net);
2004 
2005 	phy_stop(net->phydev);
2006 	phy_disconnect(net->phydev);
2007 	net->phydev = NULL;
2008 
2009 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2010 	netif_stop_queue(net);
2011 
2012 	netif_info(dev, ifdown, dev->net,
2013 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2014 		   net->stats.rx_packets, net->stats.tx_packets,
2015 		   net->stats.rx_errors, net->stats.tx_errors);
2016 
2017 	lan78xx_terminate_urbs(dev);
2018 
2019 	usb_kill_urb(dev->urb_intr);
2020 
2021 	skb_queue_purge(&dev->rxq_pause);
2022 
2023 	/* deferred work (task, timer, softirq) must also stop.
2024 	 * can't flush_scheduled_work() until we drop rtnl (later),
2025 	 * else workers could deadlock; so make workers a NOP.
2026 	 */
2027 	dev->flags = 0;
2028 	cancel_delayed_work_sync(&dev->wq);
2029 	tasklet_kill(&dev->bh);
2030 
2031 	usb_autopm_put_interface(dev->intf);
2032 
2033 	return 0;
2034 }
2035 
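/* Every outgoing frame gets an 8-byte command header (TX_OVERHEAD)
 * prepended in little-endian order: tx_cmd_a carries the frame length,
 * FCS insertion and checksum/LSO flags, and tx_cmd_b carries the TCP
 * MSS and an optional VLAN tag.  The two skb_push(skb, 4) calls below
 * lay the words out so that tx_cmd_a ends up first on the wire.
 */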
2036 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2037 				       struct sk_buff *skb, gfp_t flags)
2038 {
2039 	u32 tx_cmd_a, tx_cmd_b;
2040 
2041 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2042 		dev_kfree_skb_any(skb);
2043 		return NULL;
2044 	}
2045 
2046 	if (skb_linearize(skb)) {
2047 		dev_kfree_skb_any(skb);
2048 		return NULL;
2049 	}
2050 
2051 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2052 
2053 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2054 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2055 
2056 	tx_cmd_b = 0;
2057 	if (skb_is_gso(skb)) {
2058 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2059 
2060 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2061 
2062 		tx_cmd_a |= TX_CMD_A_LSO_;
2063 	}
2064 
2065 	if (skb_vlan_tag_present(skb)) {
2066 		tx_cmd_a |= TX_CMD_A_IVTG_;
2067 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2068 	}
2069 
2070 	skb_push(skb, 4);
2071 	cpu_to_le32s(&tx_cmd_b);
2072 	memcpy(skb->data, &tx_cmd_b, 4);
2073 
2074 	skb_push(skb, 4);
2075 	cpu_to_le32s(&tx_cmd_a);
2076 	memcpy(skb->data, &tx_cmd_a, 4);
2077 
2078 	return skb;
2079 }
2080 
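/* Moves one skb from an active queue (rxq or txq) onto dev->done and
 * records its new state.  Note the hand-over-hand locking: the source
 * list lock is dropped before the done-list lock is taken, with the
 * saved irq flags restored only at the end.  The bottom half is
 * scheduled only on the empty-to-non-empty transition, to avoid
 * redundant tasklet runs.
 */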
2081 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2082 			       struct sk_buff_head *list, enum skb_state state)
2083 {
2084 	unsigned long flags;
2085 	enum skb_state old_state;
2086 	struct skb_data *entry = (struct skb_data *)skb->cb;
2087 
2088 	spin_lock_irqsave(&list->lock, flags);
2089 	old_state = entry->state;
2090 	entry->state = state;
2091 
2092 	__skb_unlink(skb, list);
2093 	spin_unlock(&list->lock);
2094 	spin_lock(&dev->done.lock);
2095 
2096 	__skb_queue_tail(&dev->done, skb);
2097 	if (skb_queue_len(&dev->done) == 1)
2098 		tasklet_schedule(&dev->bh);
2099 	spin_unlock_irqrestore(&dev->done.lock, flags);
2100 
2101 	return old_state;
2102 }
2103 
2104 static void tx_complete(struct urb *urb)
2105 {
2106 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2107 	struct skb_data *entry = (struct skb_data *)skb->cb;
2108 	struct lan78xx_net *dev = entry->dev;
2109 
2110 	if (urb->status == 0) {
2111 		dev->net->stats.tx_packets++;
2112 		dev->net->stats.tx_bytes += entry->length;
2113 	} else {
2114 		dev->net->stats.tx_errors++;
2115 
2116 		switch (urb->status) {
2117 		case -EPIPE:
2118 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2119 			break;
2120 
2121 		/* software-driven interface shutdown */
2122 		case -ECONNRESET:
2123 		case -ESHUTDOWN:
2124 			break;
2125 
2126 		case -EPROTO:
2127 		case -ETIME:
2128 		case -EILSEQ:
2129 			netif_stop_queue(dev->net);
2130 			break;
2131 		default:
2132 			netif_dbg(dev, tx_err, dev->net,
2133 				  "tx err %d\n", entry->urb->status);
2134 			break;
2135 		}
2136 	}
2137 
2138 	usb_autopm_put_interface_async(dev->intf);
2139 
2140 	defer_bh(dev, skb, &dev->txq, tx_done);
2141 }
2142 
2143 static void lan78xx_queue_skb(struct sk_buff_head *list,
2144 			      struct sk_buff *newsk, enum skb_state state)
2145 {
2146 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2147 
2148 	__skb_queue_tail(list, newsk);
2149 	entry->state = state;
2150 }
2151 
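/* ndo_start_xmit never touches the USB stack directly: frames are
 * staged on txq_pend (with the queue stopped once more than ten
 * accumulate) and the bh tasklet later batches them into bulk-out
 * URBs in lan78xx_tx_bh().
 */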
2152 netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2153 {
2154 	struct lan78xx_net *dev = netdev_priv(net);
2155 	struct sk_buff *skb2 = NULL;
2156 
2157 	if (skb) {
2158 		skb_tx_timestamp(skb);
2159 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2160 	}
2161 
2162 	if (skb2) {
2163 		skb_queue_tail(&dev->txq_pend, skb2);
2164 
2165 		if (skb_queue_len(&dev->txq_pend) > 10)
2166 			netif_stop_queue(net);
2167 	} else {
2168 		netif_dbg(dev, tx_err, dev->net,
2169 			  "lan78xx_tx_prep returned NULL\n");
2170 		dev->net->stats.tx_errors++;
2171 		dev->net->stats.tx_dropped++;
2172 	}
2173 
2174 	tasklet_schedule(&dev->bh);
2175 
2176 	return NETDEV_TX_OK;
2177 }
2178 
2179 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2180 {
2181 	struct lan78xx_priv *pdata = NULL;
2182 	int ret;
2183 	int i;
2184 
2185 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2186 
2187 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2188 	if (!pdata) {
2189 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2190 		return -ENOMEM;
2191 	}
2192 
2193 	pdata->dev = dev;
2194 
2195 	spin_lock_init(&pdata->rfe_ctl_lock);
2196 	mutex_init(&pdata->dataport_mutex);
2197 
2198 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2199 
2200 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2201 		pdata->vlan_table[i] = 0;
2202 
2203 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2204 
2205 	dev->net->features = 0;
2206 
2207 	if (DEFAULT_TX_CSUM_ENABLE)
2208 		dev->net->features |= NETIF_F_HW_CSUM;
2209 
2210 	if (DEFAULT_RX_CSUM_ENABLE)
2211 		dev->net->features |= NETIF_F_RXCSUM;
2212 
2213 	if (DEFAULT_TSO_CSUM_ENABLE)
2214 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2215 
2216 	dev->net->hw_features = dev->net->features;
2217 
2218 	/* Init all registers */
2219 	ret = lan78xx_reset(dev);
2220 
2221 	lan78xx_mdio_init(dev);
2222 
2223 	dev->net->flags |= IFF_MULTICAST;
2224 
2225 	pdata->wol = WAKE_MAGIC;
2226 
2227 	return 0;
2228 }
2229 
2230 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2231 {
2232 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2233 
2234 	lan78xx_remove_mdio(dev);
2235 
2236 	if (pdata) {
2237 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2238 		kfree(pdata);
2239 		pdata = NULL;
2240 		dev->data[0] = 0;
2241 	}
2242 }
2243 
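/* The upper 16 bits of rx_cmd_b hold a checksum the hardware computed
 * over the received frame, handed to the stack as CHECKSUM_COMPLETE.
 * If the ICSM bit is set (checksum apparently not valid for this
 * frame) or rx checksum offload is off, fall back to CHECKSUM_NONE
 * and let software verify it.
 */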
2244 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2245 				    struct sk_buff *skb,
2246 				    u32 rx_cmd_a, u32 rx_cmd_b)
2247 {
2248 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2249 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2250 		skb->ip_summed = CHECKSUM_NONE;
2251 	} else {
2252 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2253 		skb->ip_summed = CHECKSUM_COMPLETE;
2254 	}
2255 }
2256 
2257 void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2258 {
2259 	int		status;
2260 
2261 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2262 		skb_queue_tail(&dev->rxq_pause, skb);
2263 		return;
2264 	}
2265 
2266 	skb->protocol = eth_type_trans(skb, dev->net);
2267 	dev->net->stats.rx_packets++;
2268 	dev->net->stats.rx_bytes += skb->len;
2269 
2270 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2271 		  skb->len + sizeof(struct ethhdr), skb->protocol);
2272 	memset(skb->cb, 0, sizeof(struct skb_data));
2273 
2274 	if (skb_defer_rx_timestamp(skb))
2275 		return;
2276 
2277 	status = netif_rx(skb);
2278 	if (status != NET_RX_SUCCESS)
2279 		netif_dbg(dev, rx_err, dev->net,
2280 			  "netif_rx status %d\n", status);
2281 }
2282 
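/* A single bulk-in buffer can hold several frames.  Each one is
 * preceded by a 10-byte header: rx_cmd_a (4 bytes, length and error
 * flags), rx_cmd_b (4 bytes, checksum), rx_cmd_c (2 bytes), and each
 * payload is padded so the next header starts on a 4-byte boundary.
 * With RXW_PADDING = 2 the pad is (4 - ((size + 2) % 4)) % 4; a
 * 60-byte frame, for instance, is followed by 2 alignment bytes.
 */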
2283 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2284 {
2285 	if (skb->len < dev->net->hard_header_len)
2286 		return 0;
2287 
2288 	while (skb->len > 0) {
2289 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
2290 		u16 rx_cmd_c;
2291 		struct sk_buff *skb2;
2292 		unsigned char *packet;
2293 
2294 		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2295 		le32_to_cpus(&rx_cmd_a);
2296 		skb_pull(skb, sizeof(rx_cmd_a));
2297 
2298 		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2299 		le32_to_cpus(&rx_cmd_b);
2300 		skb_pull(skb, sizeof(rx_cmd_b));
2301 
2302 		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2303 		le16_to_cpus(&rx_cmd_c);
2304 		skb_pull(skb, sizeof(rx_cmd_c));
2305 
2306 		packet = skb->data;
2307 
2308 		/* get the packet length */
2309 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2310 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2311 
2312 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2313 			netif_dbg(dev, rx_err, dev->net,
2314 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
2315 		} else {
2316 			/* last frame in this batch */
2317 			if (skb->len == size) {
2318 				lan78xx_rx_csum_offload(dev, skb,
2319 							rx_cmd_a, rx_cmd_b);
2320 
2321 				skb_trim(skb, skb->len - 4); /* remove fcs */
2322 				skb->truesize = size + sizeof(struct sk_buff);
2323 
2324 				return 1;
2325 			}
2326 
2327 			skb2 = skb_clone(skb, GFP_ATOMIC);
2328 			if (unlikely(!skb2)) {
2329 				netdev_warn(dev->net, "Error allocating skb");
2330 				return 0;
2331 			}
2332 
2333 			skb2->len = size;
2334 			skb2->data = packet;
2335 			skb_set_tail_pointer(skb2, size);
2336 
2337 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2338 
2339 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
2340 			skb2->truesize = size + sizeof(struct sk_buff);
2341 
2342 			lan78xx_skb_return(dev, skb2);
2343 		}
2344 
2345 		skb_pull(skb, size);
2346 
2347 		/* padding bytes before the next frame starts */
2348 		if (skb->len)
2349 			skb_pull(skb, align_count);
2350 	}
2351 
2352 	return 1;
2353 }
2354 
2355 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2356 {
2357 	if (!lan78xx_rx(dev, skb)) {
2358 		dev->net->stats.rx_errors++;
2359 		goto done;
2360 	}
2361 
2362 	if (skb->len) {
2363 		lan78xx_skb_return(dev, skb);
2364 		return;
2365 	}
2366 
2367 	netif_dbg(dev, rx_err, dev->net, "drop\n");
2368 	dev->net->stats.rx_errors++;
2369 done:
2370 	skb_queue_tail(&dev->done, skb);
2371 }
2372 
2373 static void rx_complete(struct urb *urb);
2374 
2375 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2376 {
2377 	struct sk_buff *skb;
2378 	struct skb_data *entry;
2379 	unsigned long lockflags;
2380 	size_t size = dev->rx_urb_size;
2381 	int ret = 0;
2382 
2383 	skb = netdev_alloc_skb_ip_align(dev->net, size);
2384 	if (!skb) {
2385 		usb_free_urb(urb);
2386 		return -ENOMEM;
2387 	}
2388 
2389 	entry = (struct skb_data *)skb->cb;
2390 	entry->urb = urb;
2391 	entry->dev = dev;
2392 	entry->length = 0;
2393 
2394 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2395 			  skb->data, size, rx_complete, skb);
2396 
2397 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
2398 
2399 	if (netif_device_present(dev->net) &&
2400 	    netif_running(dev->net) &&
2401 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
2402 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2403 		ret = usb_submit_urb(urb, GFP_ATOMIC);
2404 		switch (ret) {
2405 		case 0:
2406 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2407 			break;
2408 		case -EPIPE:
2409 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2410 			break;
2411 		case -ENODEV:
2412 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
2413 			netif_device_detach(dev->net);
2414 			break;
2415 		case -EHOSTUNREACH:
2416 			ret = -ENOLINK;
2417 			break;
2418 		default:
2419 			netif_dbg(dev, rx_err, dev->net,
2420 				  "rx submit, %d\n", ret);
2421 			tasklet_schedule(&dev->bh);
2422 		}
2423 	} else {
2424 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2425 		ret = -ENOLINK;
2426 	}
2427 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2428 	if (ret) {
2429 		dev_kfree_skb_any(skb);
2430 		usb_free_urb(urb);
2431 	}
2432 	return ret;
2433 }
2434 
2435 static void rx_complete(struct urb *urb)
2436 {
2437 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
2438 	struct skb_data	*entry = (struct skb_data *)skb->cb;
2439 	struct lan78xx_net *dev = entry->dev;
2440 	int urb_status = urb->status;
2441 	enum skb_state state;
2442 
2443 	skb_put(skb, urb->actual_length);
2444 	state = rx_done;
2445 	entry->urb = NULL;
2446 
2447 	switch (urb_status) {
2448 	case 0:
2449 		if (skb->len < dev->net->hard_header_len) {
2450 			state = rx_cleanup;
2451 			dev->net->stats.rx_errors++;
2452 			dev->net->stats.rx_length_errors++;
2453 			netif_dbg(dev, rx_err, dev->net,
2454 				  "rx length %d\n", skb->len);
2455 		}
2456 		usb_mark_last_busy(dev->udev);
2457 		break;
2458 	case -EPIPE:
2459 		dev->net->stats.rx_errors++;
2460 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2461 		/* FALLTHROUGH */
2462 	case -ECONNRESET:				/* async unlink */
2463 	case -ESHUTDOWN:				/* hardware gone */
2464 		netif_dbg(dev, ifdown, dev->net,
2465 			  "rx shutdown, code %d\n", urb_status);
2466 		state = rx_cleanup;
2467 		entry->urb = urb;
2468 		urb = NULL;
2469 		break;
2470 	case -EPROTO:
2471 	case -ETIME:
2472 	case -EILSEQ:
2473 		dev->net->stats.rx_errors++;
2474 		state = rx_cleanup;
2475 		entry->urb = urb;
2476 		urb = NULL;
2477 		break;
2478 
2479 	/* data overrun ... flush fifo? */
2480 	case -EOVERFLOW:
2481 		dev->net->stats.rx_over_errors++;
2482 		/* FALLTHROUGH */
2483 
2484 	default:
2485 		state = rx_cleanup;
2486 		dev->net->stats.rx_errors++;
2487 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2488 		break;
2489 	}
2490 
2491 	state = defer_bh(dev, skb, &dev->rxq, state);
2492 
2493 	if (urb) {
2494 		if (netif_running(dev->net) &&
2495 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
2496 		    state != unlink_start) {
2497 			rx_submit(dev, urb, GFP_ATOMIC);
2498 			return;
2499 		}
2500 		usb_free_urb(urb);
2501 	}
2502 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
2503 }
2504 
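/* Drains txq_pend into a single bulk-out URB: non-GSO frames (each
 * already carrying its 8-byte command header) are copied back to back
 * at 4-byte-aligned offsets until MAX_SINGLE_PACKET_SIZE would be
 * exceeded, while a GSO frame is always sent in a URB of its own.
 */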
2505 static void lan78xx_tx_bh(struct lan78xx_net *dev)
2506 {
2507 	int length;
2508 	struct urb *urb = NULL;
2509 	struct skb_data *entry;
2510 	unsigned long flags;
2511 	struct sk_buff_head *tqp = &dev->txq_pend;
2512 	struct sk_buff *skb, *skb2;
2513 	int ret;
2514 	int count, pos;
2515 	int skb_totallen, pkt_cnt;
2516 
2517 	skb_totallen = 0;
2518 	pkt_cnt = 0;
2519 	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2520 		if (skb_is_gso(skb)) {
2521 			if (pkt_cnt) {
2522 				/* handle previous packets first */
2523 				break;
2524 			}
2525 			length = skb->len;
2526 			skb2 = skb_dequeue(tqp);
2527 			goto gso_skb;
2528 		}
2529 
2530 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2531 			break;
2532 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2533 		pkt_cnt++;
2534 	}
2535 
2536 	/* copy to a single skb */
2537 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2538 	if (!skb)
2539 		goto drop;
2540 
2541 	skb_put(skb, skb_totallen);
2542 
2543 	for (count = pos = 0; count < pkt_cnt; count++) {
2544 		skb2 = skb_dequeue(tqp);
2545 		if (skb2) {
2546 			memcpy(skb->data + pos, skb2->data, skb2->len);
2547 			pos += roundup(skb2->len, sizeof(u32));
2548 			dev_kfree_skb(skb2);
2549 		}
2550 	}
2551 
2552 	length = skb_totallen;
2553 
2554 gso_skb:
2555 	urb = usb_alloc_urb(0, GFP_ATOMIC);
2556 	if (!urb) {
2557 		netif_dbg(dev, tx_err, dev->net, "no urb\n");
2558 		goto drop;
2559 	}
2560 
2561 	entry = (struct skb_data *)skb->cb;
2562 	entry->urb = urb;
2563 	entry->dev = dev;
2564 	entry->length = length;
2565 
2566 	spin_lock_irqsave(&dev->txq.lock, flags);
2567 	ret = usb_autopm_get_interface_async(dev->intf);
2568 	if (ret < 0) {
2569 		spin_unlock_irqrestore(&dev->txq.lock, flags);
2570 		goto drop;
2571 	}
2572 
2573 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
2574 			  skb->data, skb->len, tx_complete, skb);
2575 
2576 	if (length % dev->maxpacket == 0) {
2577 		/* send USB_ZERO_PACKET */
2578 		urb->transfer_flags |= URB_ZERO_PACKET;
2579 	}
2580 
2581 #ifdef CONFIG_PM
2582 	/* if this triggers, the device is still asleep */
2583 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2584 		/* transmission will be done in resume */
2585 		usb_anchor_urb(urb, &dev->deferred);
2586 		/* no point in processing more packets */
2587 		netif_stop_queue(dev->net);
2588 		usb_put_urb(urb);
2589 		spin_unlock_irqrestore(&dev->txq.lock, flags);
2590 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
2591 		return;
2592 	}
2593 #endif
2594 
2595 	ret = usb_submit_urb(urb, GFP_ATOMIC);
2596 	switch (ret) {
2597 	case 0:
2598 		dev->net->trans_start = jiffies;
2599 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
2600 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
2601 			netif_stop_queue(dev->net);
2602 		break;
2603 	case -EPIPE:
2604 		netif_stop_queue(dev->net);
2605 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2606 		usb_autopm_put_interface_async(dev->intf);
2607 		break;
2608 	default:
2609 		usb_autopm_put_interface_async(dev->intf);
2610 		netif_dbg(dev, tx_err, dev->net,
2611 			  "tx: submit urb err %d\n", ret);
2612 		break;
2613 	}
2614 
2615 	spin_unlock_irqrestore(&dev->txq.lock, flags);
2616 
2617 	if (ret) {
2618 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
2619 drop:
2620 		dev->net->stats.tx_dropped++;
2621 		if (skb)
2622 			dev_kfree_skb_any(skb);
2623 		usb_free_urb(urb);
2624 	} else
2625 		netif_dbg(dev, tx_queued, dev->net,
2626 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
2627 }
2628 
2629 static void lan78xx_rx_bh(struct lan78xx_net *dev)
2630 {
2631 	struct urb *urb;
2632 	int i;
2633 
2634 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
2635 		for (i = 0; i < 10; i++) {
2636 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
2637 				break;
2638 			urb = usb_alloc_urb(0, GFP_ATOMIC);
2639 			if (urb)
2640 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
2641 					return;
2642 		}
2643 
2644 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
2645 			tasklet_schedule(&dev->bh);
2646 	}
2647 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
2648 		netif_wake_queue(dev->net);
2649 }
2650 
2651 static void lan78xx_bh(unsigned long param)
2652 {
2653 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
2654 	struct sk_buff *skb;
2655 	struct skb_data *entry;
2656 
2657 	while ((skb = skb_dequeue(&dev->done))) {
2658 		entry = (struct skb_data *)(skb->cb);
2659 		switch (entry->state) {
2660 		case rx_done:
2661 			entry->state = rx_cleanup;
2662 			rx_process(dev, skb);
2663 			continue;
2664 		case tx_done:
2665 			usb_free_urb(entry->urb);
2666 			dev_kfree_skb(skb);
2667 			continue;
2668 		case rx_cleanup:
2669 			usb_free_urb(entry->urb);
2670 			dev_kfree_skb(skb);
2671 			continue;
2672 		default:
2673 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
2674 			return;
2675 		}
2676 	}
2677 
2678 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
2679 		if (!skb_queue_empty(&dev->txq_pend))
2680 			lan78xx_tx_bh(dev);
2681 
2682 		if (!timer_pending(&dev->delay) &&
2683 		    !test_bit(EVENT_RX_HALT, &dev->flags))
2684 			lan78xx_rx_bh(dev);
2685 	}
2686 }
2687 
2688 static void lan78xx_delayedwork(struct work_struct *work)
2689 {
2690 	int status;
2691 	struct lan78xx_net *dev;
2692 
2693 	dev = container_of(work, struct lan78xx_net, wq.work);
2694 
2695 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
2696 		unlink_urbs(dev, &dev->txq);
2697 		status = usb_autopm_get_interface(dev->intf);
2698 		if (status < 0)
2699 			goto fail_pipe;
2700 		status = usb_clear_halt(dev->udev, dev->pipe_out);
2701 		usb_autopm_put_interface(dev->intf);
2702 		if (status < 0 &&
2703 		    status != -EPIPE &&
2704 		    status != -ESHUTDOWN) {
2705 			if (netif_msg_tx_err(dev))
2706 fail_pipe:
2707 				netdev_err(dev->net,
2708 					   "can't clear tx halt, status %d\n",
2709 					   status);
2710 		} else {
2711 			clear_bit(EVENT_TX_HALT, &dev->flags);
2712 			if (status != -ESHUTDOWN)
2713 				netif_wake_queue(dev->net);
2714 		}
2715 	}
2716 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
2717 		unlink_urbs(dev, &dev->rxq);
2718 		status = usb_autopm_get_interface(dev->intf);
2719 		if (status < 0)
2720 			goto fail_halt;
2721 		status = usb_clear_halt(dev->udev, dev->pipe_in);
2722 		usb_autopm_put_interface(dev->intf);
2723 		if (status < 0 &&
2724 		    status != -EPIPE &&
2725 		    status != -ESHUTDOWN) {
2726 			if (netif_msg_rx_err(dev))
2727 fail_halt:
2728 				netdev_err(dev->net,
2729 					   "can't clear rx halt, status %d\n",
2730 					   status);
2731 		} else {
2732 			clear_bit(EVENT_RX_HALT, &dev->flags);
2733 			tasklet_schedule(&dev->bh);
2734 		}
2735 	}
2736 
2737 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
2738 		int ret = 0;
2739 
2740 		clear_bit(EVENT_LINK_RESET, &dev->flags);
2741 		status = usb_autopm_get_interface(dev->intf);
2742 		if (status < 0)
2743 			goto skip_reset;
2744 		if (lan78xx_link_reset(dev) < 0) {
2745 			usb_autopm_put_interface(dev->intf);
2746 skip_reset:
2747 			netdev_info(dev->net, "link reset failed (%d)\n",
2748 				    ret);
2749 		} else {
2750 			usb_autopm_put_interface(dev->intf);
2751 		}
2752 	}
2753 }
2754 
2755 static void intr_complete(struct urb *urb)
2756 {
2757 	struct lan78xx_net *dev = urb->context;
2758 	int status = urb->status;
2759 
2760 	switch (status) {
2761 	/* success */
2762 	case 0:
2763 		lan78xx_status(dev, urb);
2764 		break;
2765 
2766 	/* software-driven interface shutdown */
2767 	case -ENOENT:			/* urb killed */
2768 	case -ESHUTDOWN:		/* hardware gone */
2769 		netif_dbg(dev, ifdown, dev->net,
2770 			  "intr shutdown, code %d\n", status);
2771 		return;
2772 
2773 	/* NOTE:  not throttling like RX/TX, since this endpoint
2774 	 * already polls infrequently
2775 	 */
2776 	default:
2777 		netdev_dbg(dev->net, "intr status %d\n", status);
2778 		break;
2779 	}
2780 
2781 	if (!netif_running(dev->net))
2782 		return;
2783 
2784 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
2785 	status = usb_submit_urb(urb, GFP_ATOMIC);
2786 	if (status != 0)
2787 		netif_err(dev, timer, dev->net,
2788 			  "intr resubmit --> %d\n", status);
2789 }
2790 
2791 static void lan78xx_disconnect(struct usb_interface *intf)
2792 {
2793 	struct lan78xx_net		*dev;
2794 	struct usb_device		*udev;
2795 	struct net_device		*net;
2796 
2797 	dev = usb_get_intfdata(intf);
2798 	usb_set_intfdata(intf, NULL);
2799 	if (!dev)
2800 		return;
2801 
2802 	udev = interface_to_usbdev(intf);
2803 
2804 	net = dev->net;
2805 	unregister_netdev(net);
2806 
2807 	cancel_delayed_work_sync(&dev->wq);
2808 
2809 	usb_scuttle_anchored_urbs(&dev->deferred);
2810 
2811 	lan78xx_unbind(dev, intf);
2812 
2813 	usb_kill_urb(dev->urb_intr);
2814 	usb_free_urb(dev->urb_intr);
2815 
2816 	free_netdev(net);
2817 	usb_put_dev(udev);
2818 }
2819 
2820 void lan78xx_tx_timeout(struct net_device *net)
2821 {
2822 	struct lan78xx_net *dev = netdev_priv(net);
2823 
2824 	unlink_urbs(dev, &dev->txq);
2825 	tasklet_schedule(&dev->bh);
2826 }
2827 
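/* If a GSO super-packet plus its 8-byte command header would not fit
 * in the 9000-byte single-packet limit, drop the GSO feature bits so
 * the core segments it in software before it reaches the driver.
 */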
2828 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
2829 						struct net_device *netdev,
2830 						netdev_features_t features)
2831 {
2832 	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
2833 		features &= ~NETIF_F_GSO_MASK;
2834 
2835 	features = vlan_features_check(skb, features);
2836 	features = vxlan_features_check(skb, features);
2837 
2838 	return features;
2839 }
2840 
2841 static const struct net_device_ops lan78xx_netdev_ops = {
2842 	.ndo_open		= lan78xx_open,
2843 	.ndo_stop		= lan78xx_stop,
2844 	.ndo_start_xmit		= lan78xx_start_xmit,
2845 	.ndo_tx_timeout		= lan78xx_tx_timeout,
2846 	.ndo_change_mtu		= lan78xx_change_mtu,
2847 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
2848 	.ndo_validate_addr	= eth_validate_addr,
2849 	.ndo_do_ioctl		= lan78xx_ioctl,
2850 	.ndo_set_rx_mode	= lan78xx_set_multicast,
2851 	.ndo_set_features	= lan78xx_set_features,
2852 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
2853 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
2854 	.ndo_features_check	= lan78xx_features_check,
2855 };
2856 
2857 static int lan78xx_probe(struct usb_interface *intf,
2858 			 const struct usb_device_id *id)
2859 {
2860 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
2861 	struct lan78xx_net *dev;
2862 	struct net_device *netdev;
2863 	struct usb_device *udev;
2864 	int ret;
2865 	unsigned maxp;
2866 	unsigned period;
2867 	u8 *buf = NULL;
2868 
2869 	udev = interface_to_usbdev(intf);
2870 	udev = usb_get_dev(udev);
2871 
2872 	ret = -ENOMEM;
2873 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
2874 	if (!netdev) {
2875 		dev_err(&intf->dev, "Error: OOM\n");
2876 		goto out1;
2877 	}
2878 
2879 	/* netdev_printk() needs this */
2880 	SET_NETDEV_DEV(netdev, &intf->dev);
2881 
2882 	dev = netdev_priv(netdev);
2883 	dev->udev = udev;
2884 	dev->intf = intf;
2885 	dev->net = netdev;
2886 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
2887 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
2888 
2889 	skb_queue_head_init(&dev->rxq);
2890 	skb_queue_head_init(&dev->txq);
2891 	skb_queue_head_init(&dev->done);
2892 	skb_queue_head_init(&dev->rxq_pause);
2893 	skb_queue_head_init(&dev->txq_pend);
2894 	mutex_init(&dev->phy_mutex);
2895 
2896 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
2897 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
2898 	init_usb_anchor(&dev->deferred);
2899 
2900 	netdev->netdev_ops = &lan78xx_netdev_ops;
2901 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
2902 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
2903 
2904 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
2905 		ret = -ENODEV;
2906 		goto out2;
2907 	}
2908 
2909 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
2910 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
2911 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
2912 		ret = -ENODEV;
2913 		goto out2;
2914 	}
2915 
2916 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
2917 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
2918 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
2919 		ret = -ENODEV;
2920 		goto out2;
2921 	}
2922 
2923 	ep_intr = &intf->cur_altsetting->endpoint[2];
2924 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
2925 		ret = -ENODEV;
2926 		goto out2;
2927 	}
2928 
2929 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
2930 					usb_endpoint_num(&ep_intr->desc));
2931 
2932 	ret = lan78xx_bind(dev, intf);
2933 	if (ret < 0)
2934 		goto out2;
2935 	strcpy(netdev->name, "eth%d");
2936 
2937 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
2938 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
2939 	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
2940 
2941 	period = ep_intr->desc.bInterval;
2942 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
2943 	buf = kmalloc(maxp, GFP_KERNEL);
2944 	if (buf) {
2945 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
2946 		if (!dev->urb_intr) {
2947 			kfree(buf);
2948 			goto out3;
2949 		} else {
2950 			usb_fill_int_urb(dev->urb_intr, dev->udev,
2951 					 dev->pipe_intr, buf, maxp,
2952 					 intr_complete, dev, period);
2953 			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
2954 		}
2955 	}
2956 
2957 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
2958 
2959 	/* Reject broken descriptors. */
2960 	if (dev->maxpacket == 0) {
2961 		ret = -ENODEV;
2962 		goto out3;
2963 	}
2964 
2965 	/* driver requires remote-wakeup capability during autosuspend. */
2966 	intf->needs_remote_wakeup = 1;
2967 
2968 	ret = register_netdev(netdev);
2969 	if (ret != 0) {
2970 		netif_err(dev, probe, netdev, "couldn't register the device\n");
2971 		goto out2;
2972 	}
2973 
2974 	usb_set_intfdata(intf, dev);
2975 
2976 	ret = device_set_wakeup_enable(&udev->dev, true);
2977 
2978 	/* The default autosuspend delay of 2 s costs more than it saves;
2979 	 * use 10 s instead.
2980 	 */
2981 	pm_runtime_set_autosuspend_delay(&udev->dev,
2982 					 DEFAULT_AUTOSUSPEND_DELAY);
2983 
2984 	return 0;
2985 
2986 out3:
2987 	lan78xx_unbind(dev, intf);
2988 out2:
2989 	free_netdev(netdev);
2990 out1:
2991 	usb_put_dev(udev);
2992 
2993 	return ret;
2994 }
2995 
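/* CRC-16 over the leading bytes of a wake-up frame: polynomial 0x8005,
 * initial value 0xFFFF, data consumed LSB first.  The result is
 * programmed into WUF_CFG so the wake-up-frame filter hardware can
 * match it against incoming frames.  Callers below feed it, e.g., the
 * three-byte IPv4 multicast OUI 01:00:5e or the two-byte ARP
 * EtherType 08 06.
 */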
2996 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
2997 {
2998 	const u16 crc16poly = 0x8005;
2999 	int i;
3000 	u16 bit, crc, msb;
3001 	u8 data;
3002 
3003 	crc = 0xFFFF;
3004 	for (i = 0; i < len; i++) {
3005 		data = *buf++;
3006 		for (bit = 0; bit < 8; bit++) {
3007 			msb = crc >> 15;
3008 			crc <<= 1;
3009 
3010 			if (msb ^ (u16)(data & 1)) {
3011 				crc ^= crc16poly;
3012 				crc |= (u16)0x0001U;
3013 			}
3014 			data >>= 1;
3015 		}
3016 	}
3017 
3018 	return crc;
3019 }
3020 
3021 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3022 {
3023 	u32 buf;
3024 	int ret;
3025 	int mask_index;
3026 	u16 crc;
3027 	u32 temp_wucsr;
3028 	u32 temp_pmt_ctl;
3029 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3030 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3031 	const u8 arp_type[2] = { 0x08, 0x06 };
3032 
3033 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3034 	buf &= ~MAC_TX_TXEN_;
3035 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3036 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3037 	buf &= ~MAC_RX_RXEN_;
3038 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3039 
3040 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3041 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3042 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3043 
3044 	temp_wucsr = 0;
3045 
3046 	temp_pmt_ctl = 0;
3047 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3048 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3049 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3050 
3051 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3052 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3053 
3054 	mask_index = 0;
3055 	if (wol & WAKE_PHY) {
3056 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3057 
3058 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3059 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3060 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3061 	}
3062 	if (wol & WAKE_MAGIC) {
3063 		temp_wucsr |= WUCSR_MPEN_;
3064 
3065 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3066 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3067 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3068 	}
3069 	if (wol & WAKE_BCAST) {
3070 		temp_wucsr |= WUCSR_BCST_EN_;
3071 
3072 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3073 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3074 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3075 	}
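	/* For the filter-based wakeups below, each set bit in WUF_MASKn
	 * appears to select one frame byte to feed into the CRC16: mask 7
	 * covers bytes 0-2 (the IPv4 multicast OUI), mask 3 covers bytes
	 * 0-1 (the 33:33 IPv6 multicast prefix) and mask 0x3000 covers
	 * bytes 12-13 (the ARP EtherType).
	 */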
3076 	if (wol & WAKE_MCAST) {
3077 		temp_wucsr |= WUCSR_WAKE_EN_;
3078 
3079 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3080 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3081 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3082 					WUF_CFGX_EN_ |
3083 					WUF_CFGX_TYPE_MCAST_ |
3084 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3085 					(crc & WUF_CFGX_CRC16_MASK_));
3086 
3087 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3088 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3089 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3090 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3091 		mask_index++;
3092 
3093 		/* for IPv6 Multicast */
3094 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3095 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3096 					WUF_CFGX_EN_ |
3097 					WUF_CFGX_TYPE_MCAST_ |
3098 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3099 					(crc & WUF_CFGX_CRC16_MASK_));
3100 
3101 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3102 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3103 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3104 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3105 		mask_index++;
3106 
3107 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3108 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3109 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3110 	}
3111 	if (wol & WAKE_UCAST) {
3112 		temp_wucsr |= WUCSR_PFDA_EN_;
3113 
3114 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3115 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3116 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3117 	}
3118 	if (wol & WAKE_ARP) {
3119 		temp_wucsr |= WUCSR_WAKE_EN_;
3120 
3121 		/* set WUF_CFG & WUF_MASK
3122 		 * for packettype (offset 12,13) = ARP (0x0806)
3123 		 */
3124 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3125 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3126 					WUF_CFGX_EN_ |
3127 					WUF_CFGX_TYPE_ALL_ |
3128 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3129 					(crc & WUF_CFGX_CRC16_MASK_));
3130 
3131 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3132 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3133 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3134 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3135 		mask_index++;
3136 
3137 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3138 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3139 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3140 	}
3141 
3142 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3143 
3144 	/* when multiple WOL bits are set */
3145 	if (hweight_long((unsigned long)wol) > 1) {
3146 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3147 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3148 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3149 	}
3150 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3151 
3152 	/* clear WUPS */
3153 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3154 	buf |= PMT_CTL_WUPS_MASK_;
3155 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3156 
3157 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3158 	buf |= MAC_RX_RXEN_;
3159 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3160 
3161 	return 0;
3162 }
3163 
3164 int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3165 {
3166 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3167 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3168 	u32 buf;
3169 	int ret;
3170 	int event;
3171 
3172 	event = message.event;
3173 
3174 	if (!dev->suspend_count++) {
3175 		spin_lock_irq(&dev->txq.lock);
3176 		/* don't autosuspend while transmitting */
3177 		if ((skb_queue_len(&dev->txq) ||
3178 		     skb_queue_len(&dev->txq_pend)) &&
3179 		     PMSG_IS_AUTO(message)) {
3180 			spin_unlock_irq(&dev->txq.lock);
3181 			ret = -EBUSY;
3182 			goto out;
3183 		} else {
3184 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3185 			spin_unlock_irq(&dev->txq.lock);
3186 		}
3187 
3188 		/* stop TX & RX */
3189 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3190 		buf &= ~MAC_TX_TXEN_;
3191 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
3192 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3193 		buf &= ~MAC_RX_RXEN_;
3194 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
3195 
3196 		/* empty out the rx and tx queues */
3197 		netif_device_detach(dev->net);
3198 		lan78xx_terminate_urbs(dev);
3199 		usb_kill_urb(dev->urb_intr);
3200 
3201 		/* reattach */
3202 		netif_device_attach(dev->net);
3203 	}
3204 
3205 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3206 		if (PMSG_IS_AUTO(message)) {
3207 			/* auto suspend (selective suspend) */
3208 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3209 			buf &= ~MAC_TX_TXEN_;
3210 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
3211 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3212 			buf &= ~MAC_RX_RXEN_;
3213 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3214 
3215 			ret = lan78xx_write_reg(dev, WUCSR, 0);
3216 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
3217 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3218 
3219 			/* set goodframe wakeup */
3220 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
3221 
3222 			buf |= WUCSR_RFE_WAKE_EN_;
3223 			buf |= WUCSR_STORE_WAKE_;
3224 
3225 			ret = lan78xx_write_reg(dev, WUCSR, buf);
3226 
3227 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3228 
3229 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3230 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
3231 
3232 			buf |= PMT_CTL_PHY_WAKE_EN_;
3233 			buf |= PMT_CTL_WOL_EN_;
3234 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
3235 			buf |= PMT_CTL_SUS_MODE_3_;
3236 
3237 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3238 
3239 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3240 
3241 			buf |= PMT_CTL_WUPS_MASK_;
3242 
3243 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3244 
3245 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3246 			buf |= MAC_RX_RXEN_;
3247 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3248 		} else {
3249 			lan78xx_set_suspend(dev, pdata->wol);
3250 		}
3251 	}
3252 
3253 	ret = 0;
3254 out:
3255 	return ret;
3256 }
3257 
3258 int lan78xx_resume(struct usb_interface *intf)
3259 {
3260 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3261 	struct sk_buff *skb;
3262 	struct urb *res;
3263 	int ret;
3264 	u32 buf;
3265 
3266 	if (!--dev->suspend_count) {
3267 		/* resume interrupt URBs */
3268 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3269 			usb_submit_urb(dev->urb_intr, GFP_NOIO);
3270 
3271 		spin_lock_irq(&dev->txq.lock);
3272 		while ((res = usb_get_from_anchor(&dev->deferred))) {
3273 			skb = (struct sk_buff *)res->context;
3274 			ret = usb_submit_urb(res, GFP_ATOMIC);
3275 			if (ret < 0) {
3276 				dev_kfree_skb_any(skb);
3277 				usb_free_urb(res);
3278 				usb_autopm_put_interface_async(dev->intf);
3279 			} else {
3280 				dev->net->trans_start = jiffies;
3281 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
3282 			}
3283 		}
3284 
3285 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3286 		spin_unlock_irq(&dev->txq.lock);
3287 
3288 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3289 			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
3290 				netif_start_queue(dev->net);
3291 			tasklet_schedule(&dev->bh);
3292 		}
3293 	}
3294 
3295 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3296 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3297 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3298 
3299 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3300 					     WUCSR2_ARP_RCD_ |
3301 					     WUCSR2_IPV6_TCPSYN_RCD_ |
3302 					     WUCSR2_IPV4_TCPSYN_RCD_);
3303 
3304 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3305 					    WUCSR_EEE_RX_WAKE_ |
3306 					    WUCSR_PFDA_FR_ |
3307 					    WUCSR_RFE_WAKE_FR_ |
3308 					    WUCSR_WUFR_ |
3309 					    WUCSR_MPR_ |
3310 					    WUCSR_BCST_FR_);
3311 
3312 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3313 	buf |= MAC_TX_TXEN_;
3314 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3315 
3316 	return 0;
3317 }
3318 
3319 int lan78xx_reset_resume(struct usb_interface *intf)
3320 {
3321 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3322 
3323 	lan78xx_reset(dev);
3324 
3325 	lan78xx_phy_init(dev);
3326 
3327 	return lan78xx_resume(intf);
3328 }
3329 
3330 static const struct usb_device_id products[] = {
3331 	{
3332 	/* LAN7800 USB Gigabit Ethernet Device */
3333 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3334 	},
3335 	{
3336 	/* LAN7850 USB Gigabit Ethernet Device */
3337 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3338 	},
3339 	{
3340 	/* ATM2-AF USB Gigabit Ethernet Device */
3341 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
3342 	},
3343 	{},
3344 };
3345 MODULE_DEVICE_TABLE(usb, products);
3346 
3347 static struct usb_driver lan78xx_driver = {
3348 	.name			= DRIVER_NAME,
3349 	.id_table		= products,
3350 	.probe			= lan78xx_probe,
3351 	.disconnect		= lan78xx_disconnect,
3352 	.suspend		= lan78xx_suspend,
3353 	.resume			= lan78xx_resume,
3354 	.reset_resume		= lan78xx_reset_resume,
3355 	.supports_autosuspend	= 1,
3356 	.disable_hub_initiated_lpm = 1,
3357 };
3358 
3359 module_usb_driver(lan78xx_driver);
3360 
3361 MODULE_AUTHOR(DRIVER_AUTHOR);
3362 MODULE_DESCRIPTION(DRIVER_DESC);
3363 MODULE_LICENSE("GPL");
3364