// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))
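
/* FLOW_THRESHOLD() rounds a byte count up to 512-byte units, so e.g.
 * FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS) encodes 9216 -> 0x12 and
 * 4096 -> 0x08, giving the combined threshold value 0x0812.
 */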

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID	(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)
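
/* WAKE_ALL is every wake source the hardware can be programmed for;
 * lan78xx_set_wol() below rejects requests outside this mask.
 */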

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (ms) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (ms) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))
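
/* one u32 slot for each of the 32 PHY registers that lan78xx_get_regs()
 * appends to the MAC register dump below
 */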

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

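/* bit numbers for lan78xx_net::flags, manipulated with set_bit()/test_bit();
 * several of these are serviced from the deferred-work path, see
 * lan78xx_defer_kevent() below
 */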
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	ret = lan78xx_write_reg(dev, reg, buf);
	if (ret < 0)
		return ret;

	return 0;
}
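
/* lan78xx_update_reg() is the read-modify-write helper; for example
 *
 *	ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, 0);
 *
 * would clear the EEE enable bit while leaving the rest of MAC_CR
 * untouched.
 */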

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
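
/* The hardware counters are 32-bit and wrap: a fresh reading below the
 * previously saved value means one more wrap has happened.
 * lan78xx_update_stats() folds the wraps back in as
 * rollover_count * (rollover_max + 1) when building the 64-bit totals.
 */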

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop with a one-second timeout until the MII access completes;
 * must be called with phy_mutex held.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}
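
/* e.g. mii_access(phy_id, MII_BMSR, MII_READ) packs the PHY address,
 * register index, read direction and BUSY bit into a single MII_ACC
 * command word; writing it to MII_ACC starts the MDIO transaction.
 */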

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;
	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function; disable the LEDs here and restore them once the EEPROM
	 * access is done.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;
	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function; disable the LEDs here and restore them once the EEPROM
	 * access is done.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
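
/* i.e. the top nine bits of the Ethernet CRC select one of 512 bits in
 * the multicast hash table; lan78xx_set_multicast() below uses
 * bitnum / 32 to pick the table word and bitnum % 32 the bit within it.
 */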

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return 0;
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
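
/* e.g. the interrupt URB completion in lan78xx_status() below calls
 * lan78xx_defer_kevent(dev, EVENT_LINK_RESET) because link handling
 * needs a context that may sleep.
 */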

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
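
/* the memcpy above works because lan78xx_gstrings and struct
 * lan78xx_statstage64 list the counters in exactly the same order
 */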

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* don't return early here: the autopm reference taken above
	 * must be dropped on every exit path
	 */
	if (wol->wolopts & ~WAKE_ALL) {
		ret = -EINVAL;
		goto out;
	}

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

out:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1510 
lan78xx_get_eee(struct net_device * net,struct ethtool_eee * edata)1511 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1512 {
1513 	struct lan78xx_net *dev = netdev_priv(net);
1514 	struct phy_device *phydev = net->phydev;
1515 	int ret;
1516 	u32 buf;
1517 
1518 	ret = usb_autopm_get_interface(dev->intf);
1519 	if (ret < 0)
1520 		return ret;
1521 
1522 	ret = phy_ethtool_get_eee(phydev, edata);
1523 	if (ret < 0)
1524 		goto exit;
1525 
1526 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1527 	if (buf & MAC_CR_EEE_EN_) {
1528 		edata->eee_enabled = true;
1529 		edata->eee_active = !!(edata->advertised &
1530 				       edata->lp_advertised);
1531 		edata->tx_lpi_enabled = true;
1532 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1533 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1534 		edata->tx_lpi_timer = buf;
1535 	} else {
1536 		edata->eee_enabled = false;
1537 		edata->eee_active = false;
1538 		edata->tx_lpi_enabled = false;
1539 		edata->tx_lpi_timer = 0;
1540 	}
1541 
1542 	ret = 0;
1543 exit:
1544 	usb_autopm_put_interface(dev->intf);
1545 
1546 	return ret;
1547 }
1548 
lan78xx_set_eee(struct net_device * net,struct ethtool_eee * edata)1549 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1550 {
1551 	struct lan78xx_net *dev = netdev_priv(net);
1552 	int ret;
1553 	u32 buf;
1554 
1555 	ret = usb_autopm_get_interface(dev->intf);
1556 	if (ret < 0)
1557 		return ret;
1558 
1559 	if (edata->eee_enabled) {
1560 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1561 		buf |= MAC_CR_EEE_EN_;
1562 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1563 
1564 		phy_ethtool_set_eee(net->phydev, edata);
1565 
1566 		buf = (u32)edata->tx_lpi_timer;
1567 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1568 	} else {
1569 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1570 		buf &= ~MAC_CR_EEE_EN_;
1571 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1572 	}
1573 
1574 	usb_autopm_put_interface(dev->intf);
1575 
1576 	return 0;
1577 }
1578 
lan78xx_get_link(struct net_device * net)1579 static u32 lan78xx_get_link(struct net_device *net)
1580 {
1581 	u32 link;
1582 
1583 	mutex_lock(&net->phydev->lock);
1584 	phy_read_status(net->phydev);
1585 	link = net->phydev->link;
1586 	mutex_unlock(&net->phydev->lock);
1587 
1588 	return link;
1589 }
1590 
lan78xx_get_drvinfo(struct net_device * net,struct ethtool_drvinfo * info)1591 static void lan78xx_get_drvinfo(struct net_device *net,
1592 				struct ethtool_drvinfo *info)
1593 {
1594 	struct lan78xx_net *dev = netdev_priv(net);
1595 
1596 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1597 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1598 }
1599 
lan78xx_get_msglevel(struct net_device * net)1600 static u32 lan78xx_get_msglevel(struct net_device *net)
1601 {
1602 	struct lan78xx_net *dev = netdev_priv(net);
1603 
1604 	return dev->msg_enable;
1605 }
1606 
lan78xx_set_msglevel(struct net_device * net,u32 level)1607 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1608 {
1609 	struct lan78xx_net *dev = netdev_priv(net);
1610 
1611 	dev->msg_enable = level;
1612 }
1613 
lan78xx_get_link_ksettings(struct net_device * net,struct ethtool_link_ksettings * cmd)1614 static int lan78xx_get_link_ksettings(struct net_device *net,
1615 				      struct ethtool_link_ksettings *cmd)
1616 {
1617 	struct lan78xx_net *dev = netdev_priv(net);
1618 	struct phy_device *phydev = net->phydev;
1619 	int ret;
1620 
1621 	ret = usb_autopm_get_interface(dev->intf);
1622 	if (ret < 0)
1623 		return ret;
1624 
1625 	phy_ethtool_ksettings_get(phydev, cmd);
1626 
1627 	usb_autopm_put_interface(dev->intf);
1628 
1629 	return ret;
1630 }
1631 
lan78xx_set_link_ksettings(struct net_device * net,const struct ethtool_link_ksettings * cmd)1632 static int lan78xx_set_link_ksettings(struct net_device *net,
1633 				      const struct ethtool_link_ksettings *cmd)
1634 {
1635 	struct lan78xx_net *dev = netdev_priv(net);
1636 	struct phy_device *phydev = net->phydev;
1637 	int ret = 0;
1638 	int temp;
1639 
1640 	ret = usb_autopm_get_interface(dev->intf);
1641 	if (ret < 0)
1642 		return ret;
1643 
1644 	/* change speed & duplex */
1645 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1646 
1647 	if (!cmd->base.autoneg) {
1648 		/* force link down */
1649 		temp = phy_read(phydev, MII_BMCR);
1650 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1651 		mdelay(1);
1652 		phy_write(phydev, MII_BMCR, temp);
1653 	}
1654 
1655 	usb_autopm_put_interface(dev->intf);
1656 
1657 	return ret;
1658 }
1659 
lan78xx_get_pause(struct net_device * net,struct ethtool_pauseparam * pause)1660 static void lan78xx_get_pause(struct net_device *net,
1661 			      struct ethtool_pauseparam *pause)
1662 {
1663 	struct lan78xx_net *dev = netdev_priv(net);
1664 	struct phy_device *phydev = net->phydev;
1665 	struct ethtool_link_ksettings ecmd;
1666 
1667 	phy_ethtool_ksettings_get(phydev, &ecmd);
1668 
1669 	pause->autoneg = dev->fc_autoneg;
1670 
1671 	if (dev->fc_request_control & FLOW_CTRL_TX)
1672 		pause->tx_pause = 1;
1673 
1674 	if (dev->fc_request_control & FLOW_CTRL_RX)
1675 		pause->rx_pause = 1;
1676 }
1677 
lan78xx_set_pause(struct net_device * net,struct ethtool_pauseparam * pause)1678 static int lan78xx_set_pause(struct net_device *net,
1679 			     struct ethtool_pauseparam *pause)
1680 {
1681 	struct lan78xx_net *dev = netdev_priv(net);
1682 	struct phy_device *phydev = net->phydev;
1683 	struct ethtool_link_ksettings ecmd;
1684 	int ret;
1685 
1686 	phy_ethtool_ksettings_get(phydev, &ecmd);
1687 
1688 	if (pause->autoneg && !ecmd.base.autoneg) {
1689 		ret = -EINVAL;
1690 		goto exit;
1691 	}
1692 
1693 	dev->fc_request_control = 0;
1694 	if (pause->rx_pause)
1695 		dev->fc_request_control |= FLOW_CTRL_RX;
1696 
1697 	if (pause->tx_pause)
1698 		dev->fc_request_control |= FLOW_CTRL_TX;
1699 
1700 	if (ecmd.base.autoneg) {
1701 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
1702 		u32 mii_adv;
1703 
1704 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1705 				   ecmd.link_modes.advertising);
1706 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1707 				   ecmd.link_modes.advertising);
1708 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1709 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
1710 		linkmode_or(ecmd.link_modes.advertising, fc,
1711 			    ecmd.link_modes.advertising);
1712 
1713 		phy_ethtool_ksettings_set(phydev, &ecmd);
1714 	}
1715 
1716 	dev->fc_autoneg = pause->autoneg;
1717 
1718 	ret = 0;
1719 exit:
1720 	return ret;
1721 }
1722 
lan78xx_get_regs_len(struct net_device * netdev)1723 static int lan78xx_get_regs_len(struct net_device *netdev)
1724 {
1725 	if (!netdev->phydev)
1726 		return (sizeof(lan78xx_regs));
1727 	else
1728 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1729 }
1730 
1731 static void
lan78xx_get_regs(struct net_device * netdev,struct ethtool_regs * regs,void * buf)1732 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1733 		 void *buf)
1734 {
1735 	u32 *data = buf;
1736 	int i, j;
1737 	struct lan78xx_net *dev = netdev_priv(netdev);
1738 
1739 	/* Read Device/MAC registers */
1740 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1741 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1742 
1743 	if (!netdev->phydev)
1744 		return;
1745 
1746 	/* Read PHY registers */
1747 	for (j = 0; j < 32; i++, j++)
1748 		data[i] = phy_read(netdev->phydev, j);
1749 }
1750 
1751 static const struct ethtool_ops lan78xx_ethtool_ops = {
1752 	.get_link	= lan78xx_get_link,
1753 	.nway_reset	= phy_ethtool_nway_reset,
1754 	.get_drvinfo	= lan78xx_get_drvinfo,
1755 	.get_msglevel	= lan78xx_get_msglevel,
1756 	.set_msglevel	= lan78xx_set_msglevel,
1757 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1758 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1759 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1760 	.get_ethtool_stats = lan78xx_get_stats,
1761 	.get_sset_count = lan78xx_get_sset_count,
1762 	.get_strings	= lan78xx_get_strings,
1763 	.get_wol	= lan78xx_get_wol,
1764 	.set_wol	= lan78xx_set_wol,
1765 	.get_ts_info	= ethtool_op_get_ts_info,
1766 	.get_eee	= lan78xx_get_eee,
1767 	.set_eee	= lan78xx_set_eee,
1768 	.get_pauseparam	= lan78xx_get_pause,
1769 	.set_pauseparam	= lan78xx_set_pause,
1770 	.get_link_ksettings = lan78xx_get_link_ksettings,
1771 	.set_link_ksettings = lan78xx_set_link_ksettings,
1772 	.get_regs_len	= lan78xx_get_regs_len,
1773 	.get_regs	= lan78xx_get_regs,
1774 };
1775 
lan78xx_init_mac_address(struct lan78xx_net * dev)1776 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1777 {
1778 	u32 addr_lo, addr_hi;
1779 	u8 addr[6];
1780 
1781 	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1782 	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1783 
1784 	addr[0] = addr_lo & 0xFF;
1785 	addr[1] = (addr_lo >> 8) & 0xFF;
1786 	addr[2] = (addr_lo >> 16) & 0xFF;
1787 	addr[3] = (addr_lo >> 24) & 0xFF;
1788 	addr[4] = addr_hi & 0xFF;
1789 	addr[5] = (addr_hi >> 8) & 0xFF;
1790 
1791 	if (!is_valid_ether_addr(addr)) {
1792 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1793 			/* valid address present in Device Tree */
1794 			netif_dbg(dev, ifup, dev->net,
1795 				  "MAC address read from Device Tree");
1796 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1797 						 ETH_ALEN, addr) == 0) ||
1798 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1799 					      ETH_ALEN, addr) == 0)) &&
1800 			   is_valid_ether_addr(addr)) {
1801 			/* eeprom values are valid so use them */
1802 			netif_dbg(dev, ifup, dev->net,
1803 				  "MAC address read from EEPROM");
1804 		} else {
1805 			/* generate random MAC */
1806 			eth_random_addr(addr);
1807 			netif_dbg(dev, ifup, dev->net,
1808 				  "MAC address set to random addr");
1809 		}
1810 
1811 		addr_lo = addr[0] | (addr[1] << 8) |
1812 			  (addr[2] << 16) | (addr[3] << 24);
1813 		addr_hi = addr[4] | (addr[5] << 8);
1814 
1815 		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1816 		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1817 	}
1818 
1819 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1820 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1821 
1822 	ether_addr_copy(dev->net->dev_addr, addr);
1823 }
1824 
1825 /* MDIO read and write wrappers for phylib */
lan78xx_mdiobus_read(struct mii_bus * bus,int phy_id,int idx)1826 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1827 {
1828 	struct lan78xx_net *dev = bus->priv;
1829 	u32 val, addr;
1830 	int ret;
1831 
1832 	ret = usb_autopm_get_interface(dev->intf);
1833 	if (ret < 0)
1834 		return ret;
1835 
1836 	mutex_lock(&dev->phy_mutex);
1837 
1838 	/* confirm MII not busy */
1839 	ret = lan78xx_phy_wait_not_busy(dev);
1840 	if (ret < 0)
1841 		goto done;
1842 
1843 	/* set the address, index & direction (read from PHY) */
1844 	addr = mii_access(phy_id, idx, MII_READ);
1845 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1846 
1847 	ret = lan78xx_phy_wait_not_busy(dev);
1848 	if (ret < 0)
1849 		goto done;
1850 
1851 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
1852 
1853 	ret = (int)(val & 0xFFFF);
1854 
1855 done:
1856 	mutex_unlock(&dev->phy_mutex);
1857 	usb_autopm_put_interface(dev->intf);
1858 
1859 	return ret;
1860 }
1861 
1862 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1863 				 u16 regval)
1864 {
1865 	struct lan78xx_net *dev = bus->priv;
1866 	u32 val, addr;
1867 	int ret;
1868 
1869 	ret = usb_autopm_get_interface(dev->intf);
1870 	if (ret < 0)
1871 		return ret;
1872 
1873 	mutex_lock(&dev->phy_mutex);
1874 
1875 	/* confirm MII not busy */
1876 	ret = lan78xx_phy_wait_not_busy(dev);
1877 	if (ret < 0)
1878 		goto done;
1879 
1880 	val = (u32)regval;
1881 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1882 
1883 	/* set the address, index & direction (write to PHY) */
1884 	addr = mii_access(phy_id, idx, MII_WRITE);
1885 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1886 
1887 	ret = lan78xx_phy_wait_not_busy(dev);
1888 	if (ret < 0)
1889 		goto done;
1890 
1891 done:
1892 	mutex_unlock(&dev->phy_mutex);
1893 	usb_autopm_put_interface(dev->intf);
1894 	return 0;
1895 }
1896 
1897 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1898 {
1899 	struct device_node *node;
1900 	int ret;
1901 
1902 	dev->mdiobus = mdiobus_alloc();
1903 	if (!dev->mdiobus) {
1904 		netdev_err(dev->net, "can't allocate MDIO bus\n");
1905 		return -ENOMEM;
1906 	}
1907 
1908 	dev->mdiobus->priv = (void *)dev;
1909 	dev->mdiobus->read = lan78xx_mdiobus_read;
1910 	dev->mdiobus->write = lan78xx_mdiobus_write;
1911 	dev->mdiobus->name = "lan78xx-mdiobus";
1912 	dev->mdiobus->parent = &dev->udev->dev;
1913 
1914 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1915 		 dev->udev->bus->busnum, dev->udev->devnum);
1916 
1917 	switch (dev->chipid) {
1918 	case ID_REV_CHIP_ID_7800_:
1919 	case ID_REV_CHIP_ID_7850_:
1920 		/* set to internal PHY id */
1921 		dev->mdiobus->phy_mask = ~(1 << 1);
1922 		break;
1923 	case ID_REV_CHIP_ID_7801_:
1924 		/* scan through PHYAD[2..0] */
1925 		dev->mdiobus->phy_mask = ~(0xFF);
1926 		break;
1927 	}
1928 
1929 	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
1930 	ret = of_mdiobus_register(dev->mdiobus, node);
1931 	of_node_put(node);
1932 	if (ret) {
1933 		netdev_err(dev->net, "can't register MDIO bus\n");
1934 		goto exit1;
1935 	}
1936 
1937 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1938 	return 0;
1939 exit1:
1940 	mdiobus_free(dev->mdiobus);
1941 	return ret;
1942 }
1943 
1944 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1945 {
1946 	mdiobus_unregister(dev->mdiobus);
1947 	mdiobus_free(dev->mdiobus);
1948 }
1949 
1950 static void lan78xx_link_status_change(struct net_device *net)
1951 {
1952 	struct phy_device *phydev = net->phydev;
1953 
1954 	phy_print_status(phydev);
1955 }
1956 
1957 static int irq_map(struct irq_domain *d, unsigned int irq,
1958 		   irq_hw_number_t hwirq)
1959 {
1960 	struct irq_domain_data *data = d->host_data;
1961 
1962 	irq_set_chip_data(irq, data);
1963 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1964 	irq_set_noprobe(irq);
1965 
1966 	return 0;
1967 }
1968 
1969 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1970 {
1971 	irq_set_chip_and_handler(irq, NULL, NULL);
1972 	irq_set_chip_data(irq, NULL);
1973 }
1974 
1975 static const struct irq_domain_ops chip_domain_ops = {
1976 	.map	= irq_map,
1977 	.unmap	= irq_unmap,
1978 };
1979 
1980 static void lan78xx_irq_mask(struct irq_data *irqd)
1981 {
1982 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1983 
1984 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1985 }
1986 
1987 static void lan78xx_irq_unmask(struct irq_data *irqd)
1988 {
1989 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1990 
1991 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
1992 }
1993 
1994 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1995 {
1996 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1997 
1998 	mutex_lock(&data->irq_lock);
1999 }
2000 
2001 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2002 {
2003 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2004 	struct lan78xx_net *dev =
2005 			container_of(data, struct lan78xx_net, domain_data);
2006 	u32 buf;
2007 
2008 	/* do the register access here: irq_bus_lock & irq_bus_sync_unlock
2009 	 * are the only two callbacks executed in a non-atomic context.
2010 	 */
2011 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2012 	if (buf != data->irqenable)
2013 		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2014 
2015 	mutex_unlock(&data->irq_lock);
2016 }
2017 
2018 static struct irq_chip lan78xx_irqchip = {
2019 	.name			= "lan78xx-irqs",
2020 	.irq_mask		= lan78xx_irq_mask,
2021 	.irq_unmask		= lan78xx_irq_unmask,
2022 	.irq_bus_lock		= lan78xx_irq_bus_lock,
2023 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
2024 };
2025 
2026 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2027 {
2028 	struct device_node *of_node;
2029 	struct irq_domain *irqdomain;
2030 	unsigned int irqmap = 0;
2031 	u32 buf;
2032 	int ret = 0;
2033 
2034 	of_node = dev->udev->dev.parent->of_node;
2035 
2036 	mutex_init(&dev->domain_data.irq_lock);
2037 
2038 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2039 	dev->domain_data.irqenable = buf;
2040 
2041 	dev->domain_data.irqchip = &lan78xx_irqchip;
2042 	dev->domain_data.irq_handler = handle_simple_irq;
2043 
2044 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
2045 					  &chip_domain_ops, &dev->domain_data);
2046 	if (irqdomain) {
2047 		/* create mapping for PHY interrupt */
2048 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2049 		if (!irqmap) {
2050 			irq_domain_remove(irqdomain);
2051 
2052 			irqdomain = NULL;
2053 			ret = -EINVAL;
2054 		}
2055 	} else {
2056 		ret = -EINVAL;
2057 	}
2058 
2059 	dev->domain_data.irqdomain = irqdomain;
2060 	dev->domain_data.phyirq = irqmap;
2061 
2062 	return ret;
2063 }
2064 
2065 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2066 {
2067 	if (dev->domain_data.phyirq > 0) {
2068 		irq_dispose_mapping(dev->domain_data.phyirq);
2069 
2070 		if (dev->domain_data.irqdomain)
2071 			irq_domain_remove(dev->domain_data.irqdomain);
2072 	}
2073 	dev->domain_data.phyirq = 0;
2074 	dev->domain_data.irqdomain = NULL;
2075 }
2076 
2077 static int lan8835_fixup(struct phy_device *phydev)
2078 {
2079 	int buf;
2080 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2081 
2082 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2083 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
2084 	buf &= ~0x1800;
2085 	buf |= 0x0800;
2086 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
2087 
2088 	/* RGMII MAC TXC Delay Enable */
2089 	lan78xx_write_reg(dev, MAC_RGMII_ID,
2090 			  MAC_RGMII_ID_TXC_DELAY_EN_);
2091 
2092 	/* RGMII TX DLL Tune Adjust */
2093 	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2094 
2095 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
2096 
2097 	return 1;
2098 }
2099 
2100 static int ksz9031rnx_fixup(struct phy_device *phydev)
2101 {
2102 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
2103 
2104 	/* Micrel KSZ9031RNX PHY configuration */
2105 	/* RGMII Control Signal Pad Skew */
2106 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2107 	/* RGMII RX Data Pad Skew */
2108 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2109 	/* RGMII RX Clock Pad Skew */
2110 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2111 
2112 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2113 
2114 	return 1;
2115 }
2116 
2117 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2118 {
2119 	u32 buf;
2120 	int ret;
2121 	struct fixed_phy_status fphy_status = {
2122 		.link = 1,
2123 		.speed = SPEED_1000,
2124 		.duplex = DUPLEX_FULL,
2125 	};
2126 	struct phy_device *phydev;
2127 
2128 	phydev = phy_find_first(dev->mdiobus);
2129 	if (!phydev) {
2130 		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2131 		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2132 		if (IS_ERR(phydev)) {
2133 			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2134 			return NULL;
2135 		}
2136 		netdev_dbg(dev->net, "Registered FIXED PHY\n");
2137 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2138 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2139 					MAC_RGMII_ID_TXC_DELAY_EN_);
2140 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2141 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2142 		buf |= HW_CFG_CLK125_EN_;
2143 		buf |= HW_CFG_REFCLK25_EN_;
2144 		ret = lan78xx_write_reg(dev, HW_CFG, buf);
2145 	} else {
2146 		if (!phydev->drv) {
2147 			netdev_err(dev->net, "no PHY driver found\n");
2148 			return NULL;
2149 		}
2150 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2151 		/* external PHY fixup for KSZ9031RNX */
2152 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2153 						 ksz9031rnx_fixup);
2154 		if (ret < 0) {
2155 			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2156 			return NULL;
2157 		}
2158 		/* external PHY fixup for LAN8835 */
2159 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2160 						 lan8835_fixup);
2161 		if (ret < 0) {
2162 			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2163 			return NULL;
2164 		}
2165 		/* add more external PHY fixup here if needed */
2166 
2167 		phydev->is_internal = false;
2168 	}
2169 	return phydev;
2170 }
2171 
2172 static int lan78xx_phy_init(struct lan78xx_net *dev)
2173 {
2174 	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2175 	int ret;
2176 	u32 mii_adv;
2177 	struct phy_device *phydev;
2178 
2179 	switch (dev->chipid) {
2180 	case ID_REV_CHIP_ID_7801_:
2181 		phydev = lan7801_phy_init(dev);
2182 		if (!phydev) {
2183 			netdev_err(dev->net, "lan7801: PHY Init Failed");
2184 			return -EIO;
2185 		}
2186 		break;
2187 
2188 	case ID_REV_CHIP_ID_7800_:
2189 	case ID_REV_CHIP_ID_7850_:
2190 		phydev = phy_find_first(dev->mdiobus);
2191 		if (!phydev) {
2192 			netdev_err(dev->net, "no PHY found\n");
2193 			return -EIO;
2194 		}
2195 		phydev->is_internal = true;
2196 		dev->interface = PHY_INTERFACE_MODE_GMII;
2197 		break;
2198 
2199 	default:
2200 		netdev_err(dev->net, "Unknown CHIP ID found\n");
2201 		return -EIO;
2202 	}
2203 
2204 	/* if phyirq is not set, use polling mode in phylib */
2205 	if (dev->domain_data.phyirq > 0)
2206 		phydev->irq = dev->domain_data.phyirq;
2207 	else
2208 		phydev->irq = PHY_POLL;
2209 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2210 
2211 	/* set to AUTOMDIX */
2212 	phydev->mdix = ETH_TP_MDI_AUTO;
2213 
2214 	ret = phy_connect_direct(dev->net, phydev,
2215 				 lan78xx_link_status_change,
2216 				 dev->interface);
2217 	if (ret) {
2218 		netdev_err(dev->net, "can't attach PHY to %s\n",
2219 			   dev->mdiobus->id);
2220 		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2221 			if (phy_is_pseudo_fixed_link(phydev)) {
2222 				fixed_phy_unregister(phydev);
2223 			} else {
2224 				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2225 							     0xfffffff0);
2226 				phy_unregister_fixup_for_uid(PHY_LAN8835,
2227 							     0xfffffff0);
2228 			}
2229 		}
2230 		return -EIO;
2231 	}
2232 
2233 	/* MAC doesn't support 1000T Half */
2234 	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2235 
2236 	/* support both flow controls */
2237 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2238 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2239 			   phydev->advertising);
2240 	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2241 			   phydev->advertising);
2242 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2243 	mii_adv_to_linkmode_adv_t(fc, mii_adv);
2244 	linkmode_or(phydev->advertising, fc, phydev->advertising);
2245 
2246 	if (phydev->mdio.dev.of_node) {
2247 		u32 reg;
2248 		int len;
2249 
2250 		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2251 						      "microchip,led-modes",
2252 						      sizeof(u32));
2253 		if (len >= 0) {
2254 			/* Ensure the appropriate LEDs are enabled */
2255 			lan78xx_read_reg(dev, HW_CFG, &reg);
2256 			reg &= ~(HW_CFG_LED0_EN_ |
2257 				 HW_CFG_LED1_EN_ |
2258 				 HW_CFG_LED2_EN_ |
2259 				 HW_CFG_LED3_EN_);
2260 			reg |= (len > 0) * HW_CFG_LED0_EN_ |
2261 				(len > 1) * HW_CFG_LED1_EN_ |
2262 				(len > 2) * HW_CFG_LED2_EN_ |
2263 				(len > 3) * HW_CFG_LED3_EN_;
2264 			lan78xx_write_reg(dev, HW_CFG, reg);
2265 		}
2266 	}
2267 
2268 	genphy_config_aneg(phydev);
2269 
2270 	dev->fc_autoneg = phydev->autoneg;
2271 
2272 	return 0;
2273 }
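
/* With both FLOW_CTRL_RX and FLOW_CTRL_TX requested, the flow-control
 * advertisement resolves to symmetric pause.  Sketch of the
 * mii_advertise_flowctrl() mapping relied on above (paraphrased from
 * <linux/mii.h>; shown only to document the result):
 *
 *	adv = 0;
 *	if (cap & FLOW_CTRL_RX)
 *		adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 *	if (cap & FLOW_CTRL_TX)
 *		adv ^= ADVERTISE_PAUSE_ASYM;
 *
 * so RX|TX -> PAUSE_CAP, RX only -> PAUSE_CAP|PAUSE_ASYM,
 * TX only -> PAUSE_ASYM.
 */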
2274 
2275 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2276 {
2277 	u32 buf;
2278 	bool rxenabled;
2279 
2280 	lan78xx_read_reg(dev, MAC_RX, &buf);
2281 
2282 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2283 
2284 	if (rxenabled) {
2285 		buf &= ~MAC_RX_RXEN_;
2286 		lan78xx_write_reg(dev, MAC_RX, buf);
2287 	}
2288 
2289 	/* add 4 to size for FCS */
2290 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2291 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2292 
2293 	lan78xx_write_reg(dev, MAC_RX, buf);
2294 
2295 	if (rxenabled) {
2296 		buf |= MAC_RX_RXEN_;
2297 		lan78xx_write_reg(dev, MAC_RX, buf);
2298 	}
2299 
2300 	return 0;
2301 }
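
/* Worked example of the update above with the default MTU of 1500:
 * size = 1500 + VLAN_ETH_HLEN (18) = 1518, and 1518 + 4 (FCS) = 1522
 * is programmed into MAC_RX_MAX_SIZE via the usual read-modify-write
 * form:
 *
 *	new = (old & ~MASK) | ((val << SHIFT) & MASK);
 */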
2302 
2303 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2304 {
2305 	struct sk_buff *skb;
2306 	unsigned long flags;
2307 	int count = 0;
2308 
2309 	spin_lock_irqsave(&q->lock, flags);
2310 	while (!skb_queue_empty(q)) {
2311 		struct skb_data	*entry;
2312 		struct urb *urb;
2313 		int ret;
2314 
2315 		skb_queue_walk(q, skb) {
2316 			entry = (struct skb_data *)skb->cb;
2317 			if (entry->state != unlink_start)
2318 				goto found;
2319 		}
2320 		break;
2321 found:
2322 		entry->state = unlink_start;
2323 		urb = entry->urb;
2324 
2325 		/* Take a reference on the URB so that it cannot be
2326 		 * freed while usb_unlink_urb is in progress, which could
2327 		 * otherwise trigger a use-after-free inside usb_unlink_urb:
2328 		 * usb_unlink_urb always races with the .complete
2329 		 * handler (including defer_bh).
2330 		 */
2331 		usb_get_urb(urb);
2332 		spin_unlock_irqrestore(&q->lock, flags);
2333 		/* during some PM-driven resume scenarios,
2334 		 * these (async) unlinks complete immediately
2335 		 */
2336 		ret = usb_unlink_urb(urb);
2337 		if (ret != -EINPROGRESS && ret != 0)
2338 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2339 		else
2340 			count++;
2341 		usb_put_urb(urb);
2342 		spin_lock_irqsave(&q->lock, flags);
2343 	}
2344 	spin_unlock_irqrestore(&q->lock, flags);
2345 	return count;
2346 }
2347 
2348 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2349 {
2350 	struct lan78xx_net *dev = netdev_priv(netdev);
2351 	int ll_mtu = new_mtu + netdev->hard_header_len;
2352 	int old_hard_mtu = dev->hard_mtu;
2353 	int old_rx_urb_size = dev->rx_urb_size;
2354 	int ret;
2355 
2356 	/* no second zero-length packet read wanted after mtu-sized packets */
2357 	if ((ll_mtu % dev->maxpacket) == 0)
2358 		return -EDOM;
2359 
2360 	ret = usb_autopm_get_interface(dev->intf);
2361 	if (ret < 0)
2362 		return ret;
2363 
2364 	lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2365 
2366 	netdev->mtu = new_mtu;
2367 
2368 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2369 	if (dev->rx_urb_size == old_hard_mtu) {
2370 		dev->rx_urb_size = dev->hard_mtu;
2371 		if (dev->rx_urb_size > old_rx_urb_size) {
2372 			if (netif_running(dev->net)) {
2373 				unlink_urbs(dev, &dev->rxq);
2374 				tasklet_schedule(&dev->bh);
2375 			}
2376 		}
2377 	}
2378 
2379 	usb_autopm_put_interface(dev->intf);
2380 
2381 	return 0;
2382 }
2383 
2384 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2385 {
2386 	struct lan78xx_net *dev = netdev_priv(netdev);
2387 	struct sockaddr *addr = p;
2388 	u32 addr_lo, addr_hi;
2389 
2390 	if (netif_running(netdev))
2391 		return -EBUSY;
2392 
2393 	if (!is_valid_ether_addr(addr->sa_data))
2394 		return -EADDRNOTAVAIL;
2395 
2396 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2397 
2398 	addr_lo = netdev->dev_addr[0] |
2399 		  netdev->dev_addr[1] << 8 |
2400 		  netdev->dev_addr[2] << 16 |
2401 		  netdev->dev_addr[3] << 24;
2402 	addr_hi = netdev->dev_addr[4] |
2403 		  netdev->dev_addr[5] << 8;
2404 
2405 	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2406 	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2407 
2408 	/* Added to support MAC address changes */
2409 	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2410 	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2411 
2412 	return 0;
2413 }
2414 
2415 /* Enable or disable Rx checksum offload engine */
2416 static int lan78xx_set_features(struct net_device *netdev,
2417 				netdev_features_t features)
2418 {
2419 	struct lan78xx_net *dev = netdev_priv(netdev);
2420 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2421 	unsigned long flags;
2422 
2423 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2424 
2425 	if (features & NETIF_F_RXCSUM) {
2426 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2427 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2428 	} else {
2429 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2430 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2431 	}
2432 
2433 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2434 		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2435 	else
2436 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2437 
2438 	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2439 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2440 	else
2441 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2442 
2443 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2444 
2445 	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2446 
2447 	return 0;
2448 }
2449 
2450 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2451 {
2452 	struct lan78xx_priv *pdata =
2453 			container_of(param, struct lan78xx_priv, set_vlan);
2454 	struct lan78xx_net *dev = pdata->dev;
2455 
2456 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2457 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2458 }
2459 
2460 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2461 				   __be16 proto, u16 vid)
2462 {
2463 	struct lan78xx_net *dev = netdev_priv(netdev);
2464 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2465 	u16 vid_bit_index;
2466 	u16 vid_dword_index;
2467 
2468 	vid_dword_index = (vid >> 5) & 0x7F;
2469 	vid_bit_index = vid & 0x1F;
2470 
2471 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2472 
2473 	/* defer register writes to a sleepable context */
2474 	schedule_work(&pdata->set_vlan);
2475 
2476 	return 0;
2477 }
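
/* The 4096 possible VLAN IDs are packed 32 per u32 entry, hence the
 * ">> 5" / "& 0x1F" split above.  Self-contained sketch of the same
 * arithmetic (hypothetical userspace demo, not driver code):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		uint32_t table[128] = { 0 };
 *		uint16_t vid = 100;
 *
 *		table[(vid >> 5) & 0x7F] |= 1u << (vid & 0x1F);
 *		// vid 100 -> dword 3, bit 4
 *		printf("vid %d -> dword %d, bit %d\n", vid,
 *		       (vid >> 5) & 0x7F, vid & 0x1F);
 *		return 0;
 *	}
 */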
2478 
2479 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2480 				    __be16 proto, u16 vid)
2481 {
2482 	struct lan78xx_net *dev = netdev_priv(netdev);
2483 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2484 	u16 vid_bit_index;
2485 	u16 vid_dword_index;
2486 
2487 	vid_dword_index = (vid >> 5) & 0x7F;
2488 	vid_bit_index = vid & 0x1F;
2489 
2490 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2491 
2492 	/* defer register writes to a sleepable context */
2493 	schedule_work(&pdata->set_vlan);
2494 
2495 	return 0;
2496 }
2497 
2498 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2499 {
2500 	int ret;
2501 	u32 buf;
2502 	u32 regs[6] = { 0 };
2503 
2504 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2505 	if (buf & USB_CFG1_LTM_ENABLE_) {
2506 		u8 temp[2];
2507 		/* Get values from EEPROM first */
2508 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2509 			if (temp[0] == 24) {
2510 				ret = lan78xx_read_raw_eeprom(dev,
2511 							      temp[1] * 2,
2512 							      24,
2513 							      (u8 *)regs);
2514 				if (ret < 0)
2515 					return;
2516 			}
2517 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2518 			if (temp[0] == 24) {
2519 				ret = lan78xx_read_raw_otp(dev,
2520 							   temp[1] * 2,
2521 							   24,
2522 							   (u8 *)regs);
2523 				if (ret < 0)
2524 					return;
2525 			}
2526 		}
2527 	}
2528 
2529 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2530 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2531 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2532 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2533 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2534 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2535 }
2536 
2537 static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
2538 {
2539 	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
2540 }
2541 
2542 static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
2543 			   u32 hw_disabled)
2544 {
2545 	unsigned long timeout;
2546 	bool stopped = true;
2547 	int ret;
2548 	u32 buf;
2549 
2550 	/* Stop the h/w block (if not already stopped) */
2551 
2552 	ret = lan78xx_read_reg(dev, reg, &buf);
2553 	if (ret < 0)
2554 		return ret;
2555 
2556 	if (buf & hw_enabled) {
2557 		buf &= ~hw_enabled;
2558 
2559 		ret = lan78xx_write_reg(dev, reg, buf);
2560 		if (ret < 0)
2561 			return ret;
2562 
2563 		stopped = false;
2564 		timeout = jiffies + HW_DISABLE_TIMEOUT;
2565 		do  {
2566 			ret = lan78xx_read_reg(dev, reg, &buf);
2567 			if (ret < 0)
2568 				return ret;
2569 
2570 			if (buf & hw_disabled)
2571 				stopped = true;
2572 			else
2573 				msleep(HW_DISABLE_DELAY_MS);
2574 		} while (!stopped && !time_after(jiffies, timeout));
2575 	}
2576 
2577 	ret = stopped ? 0 : -ETIME;
2578 
2579 	return ret;
2580 }
2581 
2582 static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
2583 {
2584 	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
2585 }
2586 
2587 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
2588 {
2589 	int ret;
2590 
2591 	netif_dbg(dev, drv, dev->net, "start tx path");
2592 
2593 	/* Start the MAC transmitter */
2594 
2595 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
2596 	if (ret < 0)
2597 		return ret;
2598 
2599 	/* Start the Tx FIFO */
2600 
2601 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
2602 	if (ret < 0)
2603 		return ret;
2604 
2605 	return 0;
2606 }
2607 
2608 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
2609 {
2610 	int ret;
2611 
2612 	netif_dbg(dev, drv, dev->net, "stop tx path");
2613 
2614 	/* Stop the Tx FIFO */
2615 
2616 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
2617 	if (ret < 0)
2618 		return ret;
2619 
2620 	/* Stop the MAC transmitter */
2621 
2622 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
2623 	if (ret < 0)
2624 		return ret;
2625 
2626 	return 0;
2627 }
2628 
2629 /* The caller must ensure the Tx path is stopped before calling
2630  * lan78xx_flush_tx_fifo().
2631  */
2632 static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
2633 {
2634 	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
2635 }
2636 
2637 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
2638 {
2639 	int ret;
2640 
2641 	netif_dbg(dev, drv, dev->net, "start rx path");
2642 
2643 	/* Start the Rx FIFO */
2644 
2645 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
2646 	if (ret < 0)
2647 		return ret;
2648 
2649 	/* Start the MAC receiver */
2650 
2651 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
2652 	if (ret < 0)
2653 		return ret;
2654 
2655 	return 0;
2656 }
2657 
2658 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
2659 {
2660 	int ret;
2661 
2662 	netif_dbg(dev, drv, dev->net, "stop rx path");
2663 
2664 	/* Stop the MAC receiver */
2665 
2666 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
2667 	if (ret < 0)
2668 		return ret;
2669 
2670 	/* Stop the Rx FIFO */
2671 
2672 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
2673 	if (ret < 0)
2674 		return ret;
2675 
2676 	return 0;
2677 }
2678 
2679 /* The caller must ensure the Rx path is stopped before calling
2680  * lan78xx_flush_rx_fifo().
2681  */
2682 static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
2683 {
2684 	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
2685 }
2686 
2687 static int lan78xx_reset(struct lan78xx_net *dev)
2688 {
2689 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2690 	unsigned long timeout;
2691 	int ret;
2692 	u32 buf;
2693 	u8 sig;
2694 
2695 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2696 	if (ret < 0)
2697 		return ret;
2698 
2699 	buf |= HW_CFG_LRST_;
2700 
2701 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2702 	if (ret < 0)
2703 		return ret;
2704 
2705 	timeout = jiffies + HZ;
2706 	do {
2707 		mdelay(1);
2708 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2709 		if (ret < 0)
2710 			return ret;
2711 
2712 		if (time_after(jiffies, timeout)) {
2713 			netdev_warn(dev->net,
2714 				    "timeout on completion of LiteReset");
2715 			ret = -ETIMEDOUT;
2716 			return ret;
2717 		}
2718 	} while (buf & HW_CFG_LRST_);
2719 
2720 	lan78xx_init_mac_address(dev);
2721 
2722 	/* save DEVID for later usage */
2723 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2724 	if (ret < 0)
2725 		return ret;
2726 
2727 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2728 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2729 
2730 	/* Respond to the IN token with a NAK */
2731 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2732 	if (ret < 0)
2733 		return ret;
2734 
2735 	buf |= USB_CFG_BIR_;
2736 
2737 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2738 	if (ret < 0)
2739 		return ret;
2740 
2741 	/* Init LTM */
2742 	lan78xx_init_ltm(dev);
2743 
2744 	if (dev->udev->speed == USB_SPEED_SUPER) {
2745 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2746 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2747 		dev->rx_qlen = 4;
2748 		dev->tx_qlen = 4;
2749 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2750 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2751 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2752 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2753 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2754 	} else {
2755 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2756 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2757 		dev->rx_qlen = 4;
2758 		dev->tx_qlen = 4;
2759 	}
2760 
2761 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2762 	if (ret < 0)
2763 		return ret;
2764 
2765 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2766 	if (ret < 0)
2767 		return ret;
2768 
2769 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2770 	if (ret < 0)
2771 		return ret;
2772 
2773 	buf |= HW_CFG_MEF_;
2774 
2775 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2776 	if (ret < 0)
2777 		return ret;
2778 
2779 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2780 	if (ret < 0)
2781 		return ret;
2782 
2783 	buf |= USB_CFG_BCE_;
2784 
2785 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2786 	if (ret < 0)
2787 		return ret;
2788 
2789 	/* set FIFO sizes */
2790 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2791 
2792 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2793 	if (ret < 0)
2794 		return ret;
2795 
2796 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2797 
2798 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2799 	if (ret < 0)
2800 		return ret;
2801 
2802 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2803 	if (ret < 0)
2804 		return ret;
2805 
2806 	ret = lan78xx_write_reg(dev, FLOW, 0);
2807 	if (ret < 0)
2808 		return ret;
2809 
2810 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2811 	if (ret < 0)
2812 		return ret;
2813 
2814 	/* Don't need rfe_ctl_lock during initialisation */
2815 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2816 	if (ret < 0)
2817 		return ret;
2818 
2819 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2820 
2821 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2822 	if (ret < 0)
2823 		return ret;
2824 
2825 	/* Enable or disable checksum offload engines */
2826 	ret = lan78xx_set_features(dev->net, dev->net->features);
2827 	if (ret < 0)
2828 		return ret;
2829 
2830 	lan78xx_set_multicast(dev->net);
2831 
2832 	/* reset PHY */
2833 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2834 	if (ret < 0)
2835 		return ret;
2836 
2837 	buf |= PMT_CTL_PHY_RST_;
2838 
2839 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2840 	if (ret < 0)
2841 		return ret;
2842 
2843 	timeout = jiffies + HZ;
2844 	do {
2845 		mdelay(1);
2846 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2847 		if (ret < 0)
2848 			return ret;
2849 
2850 		if (time_after(jiffies, timeout)) {
2851 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
2852 			ret = -ETIMEDOUT;
2853 			return ret;
2854 		}
2855 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2856 
2857 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2858 	if (ret < 0)
2859 		return ret;
2860 
2861 	/* LAN7801 only has RGMII mode */
2862 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2863 		buf &= ~MAC_CR_GMII_EN_;
2864 
2865 	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
2866 	    dev->chipid == ID_REV_CHIP_ID_7850_) {
2867 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2868 		if (!ret && sig != EEPROM_INDICATOR) {
2869 			/* implies there is no external EEPROM; set MAC speed */
2870 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2871 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2872 		}
2873 	}
2874 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2875 	if (ret < 0)
2876 		return ret;
2877 
2878 	ret = lan78xx_set_rx_max_frame_length(dev,
2879 					      dev->net->mtu + VLAN_ETH_HLEN);
2880 
2881 	return ret;
2882 }
2883 
2884 static void lan78xx_init_stats(struct lan78xx_net *dev)
2885 {
2886 	u32 *p;
2887 	int i;
2888 
2889 	/* initialize for stats update:
2890 	 * some counters are 20 bits wide and some are 32 bits
2891 	 */
2892 	p = (u32 *)&dev->stats.rollover_max;
2893 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2894 		p[i] = 0xFFFFF;
2895 
2896 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2897 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2898 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2899 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2900 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2901 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2902 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2903 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2904 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2905 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2906 
2907 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
2908 }
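
/* rollover_max therefore holds the per-counter wrap value: 2^20 - 1
 * for most hardware counters and 2^32 - 1 for the byte and LPI
 * counters listed above.  Assuming the stat updater uses the usual
 * wrap-safe form (sketch only; both values are 2^n - 1):
 *
 *	delta = (new - old) & rollover_max;
 */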
2909 
2910 static int lan78xx_open(struct net_device *net)
2911 {
2912 	struct lan78xx_net *dev = netdev_priv(net);
2913 	int ret;
2914 
2915 	netif_dbg(dev, ifup, dev->net, "open device");
2916 
2917 	ret = usb_autopm_get_interface(dev->intf);
2918 	if (ret < 0)
2919 		return ret;
2920 
2921 	mutex_lock(&dev->dev_mutex);
2922 
2923 	phy_start(net->phydev);
2924 
2925 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2926 
2927 	/* for Link Check */
2928 	if (dev->urb_intr) {
2929 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2930 		if (ret < 0) {
2931 			netif_err(dev, ifup, dev->net,
2932 				  "intr submit %d\n", ret);
2933 			goto done;
2934 		}
2935 	}
2936 
2937 	ret = lan78xx_flush_rx_fifo(dev);
2938 	if (ret < 0)
2939 		goto done;
2940 	ret = lan78xx_flush_tx_fifo(dev);
2941 	if (ret < 0)
2942 		goto done;
2943 
2944 	ret = lan78xx_start_tx_path(dev);
2945 	if (ret < 0)
2946 		goto done;
2947 	ret = lan78xx_start_rx_path(dev);
2948 	if (ret < 0)
2949 		goto done;
2950 
2951 	lan78xx_init_stats(dev);
2952 
2953 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2954 
2955 	netif_start_queue(net);
2956 
2957 	dev->link_on = false;
2958 
2959 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2960 done:
2961 	mutex_unlock(&dev->dev_mutex);
2962 
2963 	usb_autopm_put_interface(dev->intf);
2964 
2965 	return ret;
2966 }
2967 
2968 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2969 {
2970 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2971 	DECLARE_WAITQUEUE(wait, current);
2972 	int temp;
2973 
2974 	/* ensure there are no more active urbs */
2975 	add_wait_queue(&unlink_wakeup, &wait);
2976 	set_current_state(TASK_UNINTERRUPTIBLE);
2977 	dev->wait = &unlink_wakeup;
2978 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2979 
2980 	/* maybe wait for deletions to finish. */
2981 	while (!skb_queue_empty(&dev->rxq) ||
2982 	       !skb_queue_empty(&dev->txq)) {
2983 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2984 		set_current_state(TASK_UNINTERRUPTIBLE);
2985 		netif_dbg(dev, ifdown, dev->net,
2986 			  "waited for %d urb completions", temp);
2987 	}
2988 	set_current_state(TASK_RUNNING);
2989 	dev->wait = NULL;
2990 	remove_wait_queue(&unlink_wakeup, &wait);
2991 
2992 	while (!skb_queue_empty(&dev->done)) {
2993 		struct skb_data *entry;
2994 		struct sk_buff *skb;
2995 
2996 		skb = skb_dequeue(&dev->done);
2997 		entry = (struct skb_data *)(skb->cb);
2998 		usb_free_urb(entry->urb);
2999 		dev_kfree_skb(skb);
3000 	}
3001 }
3002 
3003 static int lan78xx_stop(struct net_device *net)
3004 {
3005 	struct lan78xx_net *dev = netdev_priv(net);
3006 
3007 	netif_dbg(dev, ifup, dev->net, "stop device");
3008 
3009 	mutex_lock(&dev->dev_mutex);
3010 
3011 	if (timer_pending(&dev->stat_monitor))
3012 		del_timer_sync(&dev->stat_monitor);
3013 
3014 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
3015 	netif_stop_queue(net);
3016 	tasklet_kill(&dev->bh);
3017 
3018 	lan78xx_terminate_urbs(dev);
3019 
3020 	netif_info(dev, ifdown, dev->net,
3021 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3022 		   net->stats.rx_packets, net->stats.tx_packets,
3023 		   net->stats.rx_errors, net->stats.tx_errors);
3024 
3025 	/* ignore errors that occur stopping the Tx and Rx data paths */
3026 	lan78xx_stop_tx_path(dev);
3027 	lan78xx_stop_rx_path(dev);
3028 
3029 	if (net->phydev)
3030 		phy_stop(net->phydev);
3031 
3032 	usb_kill_urb(dev->urb_intr);
3033 
3034 	/* deferred work (task, timer, softirq) must also stop.
3035 	 * can't flush_scheduled_work() until we drop rtnl (later),
3036 	 * else workers could deadlock; so make workers a NOP.
3037 	 */
3038 	clear_bit(EVENT_TX_HALT, &dev->flags);
3039 	clear_bit(EVENT_RX_HALT, &dev->flags);
3040 	clear_bit(EVENT_LINK_RESET, &dev->flags);
3041 	clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3042 
3043 	cancel_delayed_work_sync(&dev->wq);
3044 
3045 	usb_autopm_put_interface(dev->intf);
3046 
3047 	mutex_unlock(&dev->dev_mutex);
3048 
3049 	return 0;
3050 }
3051 
3052 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
3053 				       struct sk_buff *skb, gfp_t flags)
3054 {
3055 	u32 tx_cmd_a, tx_cmd_b;
3056 	void *ptr;
3057 
3058 	if (skb_cow_head(skb, TX_OVERHEAD)) {
3059 		dev_kfree_skb_any(skb);
3060 		return NULL;
3061 	}
3062 
3063 	if (skb_linearize(skb)) {
3064 		dev_kfree_skb_any(skb);
3065 		return NULL;
3066 	}
3067 
3068 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3069 
3070 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3071 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3072 
3073 	tx_cmd_b = 0;
3074 	if (skb_is_gso(skb)) {
3075 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3076 
3077 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3078 
3079 		tx_cmd_a |= TX_CMD_A_LSO_;
3080 	}
3081 
3082 	if (skb_vlan_tag_present(skb)) {
3083 		tx_cmd_a |= TX_CMD_A_IVTG_;
3084 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3085 	}
3086 
3087 	ptr = skb_push(skb, 8);
3088 	put_unaligned_le32(tx_cmd_a, ptr);
3089 	put_unaligned_le32(tx_cmd_b, ptr + 4);
3090 
3091 	return skb;
3092 }
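
/* Each frame is prefixed with two little-endian command words.  Worked
 * example for a plain 60-byte frame with partial checksum requested
 * and no VLAN tag or GSO (field names per lan78xx.h):
 *
 *	tx_cmd_a = (60 & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_ |
 *		   TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
 *	tx_cmd_b = 0;
 */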
3093 
3094 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3095 			       struct sk_buff_head *list, enum skb_state state)
3096 {
3097 	unsigned long flags;
3098 	enum skb_state old_state;
3099 	struct skb_data *entry = (struct skb_data *)skb->cb;
3100 
3101 	spin_lock_irqsave(&list->lock, flags);
3102 	old_state = entry->state;
3103 	entry->state = state;
3104 
3105 	__skb_unlink(skb, list);
3106 	spin_unlock(&list->lock);
3107 	spin_lock(&dev->done.lock);
3108 
3109 	__skb_queue_tail(&dev->done, skb);
3110 	if (skb_queue_len(&dev->done) == 1)
3111 		tasklet_schedule(&dev->bh);
3112 	spin_unlock_irqrestore(&dev->done.lock, flags);
3113 
3114 	return old_state;
3115 }
3116 
3117 static void tx_complete(struct urb *urb)
3118 {
3119 	struct sk_buff *skb = (struct sk_buff *)urb->context;
3120 	struct skb_data *entry = (struct skb_data *)skb->cb;
3121 	struct lan78xx_net *dev = entry->dev;
3122 
3123 	if (urb->status == 0) {
3124 		dev->net->stats.tx_packets += entry->num_of_packet;
3125 		dev->net->stats.tx_bytes += entry->length;
3126 	} else {
3127 		dev->net->stats.tx_errors++;
3128 
3129 		switch (urb->status) {
3130 		case -EPIPE:
3131 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3132 			break;
3133 
3134 		/* software-driven interface shutdown */
3135 		case -ECONNRESET:
3136 		case -ESHUTDOWN:
3137 			netif_dbg(dev, tx_err, dev->net,
3138 				  "tx err interface gone %d\n",
3139 				  entry->urb->status);
3140 			break;
3141 
3142 		case -EPROTO:
3143 		case -ETIME:
3144 		case -EILSEQ:
3145 			netif_stop_queue(dev->net);
3146 			netif_dbg(dev, tx_err, dev->net,
3147 				  "tx err queue stopped %d\n",
3148 				  entry->urb->status);
3149 			break;
3150 		default:
3151 			netif_dbg(dev, tx_err, dev->net,
3152 				  "unknown tx err %d\n",
3153 				  entry->urb->status);
3154 			break;
3155 		}
3156 	}
3157 
3158 	usb_autopm_put_interface_async(dev->intf);
3159 
3160 	defer_bh(dev, skb, &dev->txq, tx_done);
3161 }
3162 
3163 static void lan78xx_queue_skb(struct sk_buff_head *list,
3164 			      struct sk_buff *newsk, enum skb_state state)
3165 {
3166 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3167 
3168 	__skb_queue_tail(list, newsk);
3169 	entry->state = state;
3170 }
3171 
3172 static netdev_tx_t
3173 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3174 {
3175 	struct lan78xx_net *dev = netdev_priv(net);
3176 	struct sk_buff *skb2 = NULL;
3177 
3178 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3179 		schedule_delayed_work(&dev->wq, 0);
3180 
3181 	if (skb) {
3182 		skb_tx_timestamp(skb);
3183 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
3184 	}
3185 
3186 	if (skb2) {
3187 		skb_queue_tail(&dev->txq_pend, skb2);
3188 
3189 		/* throttle the TX path at slower than SUPER SPEED USB */
3190 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
3191 		    (skb_queue_len(&dev->txq_pend) > 10))
3192 			netif_stop_queue(net);
3193 	} else {
3194 		netif_dbg(dev, tx_err, dev->net,
3195 			  "lan78xx_tx_prep return NULL\n");
3196 		dev->net->stats.tx_errors++;
3197 		dev->net->stats.tx_dropped++;
3198 	}
3199 
3200 	tasklet_schedule(&dev->bh);
3201 
3202 	return NETDEV_TX_OK;
3203 }
3204 
3205 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3206 {
3207 	struct lan78xx_priv *pdata = NULL;
3208 	int ret;
3209 	int i;
3210 
3211 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3212 
3213 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3214 	if (!pdata) {
3215 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3216 		return -ENOMEM;
3217 	}
3218 
3219 	pdata->dev = dev;
3220 
3221 	spin_lock_init(&pdata->rfe_ctl_lock);
3222 	mutex_init(&pdata->dataport_mutex);
3223 
3224 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3225 
3226 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3227 		pdata->vlan_table[i] = 0;
3228 
3229 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3230 
3231 	dev->net->features = 0;
3232 
3233 	if (DEFAULT_TX_CSUM_ENABLE)
3234 		dev->net->features |= NETIF_F_HW_CSUM;
3235 
3236 	if (DEFAULT_RX_CSUM_ENABLE)
3237 		dev->net->features |= NETIF_F_RXCSUM;
3238 
3239 	if (DEFAULT_TSO_CSUM_ENABLE)
3240 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3241 
3242 	if (DEFAULT_VLAN_RX_OFFLOAD)
3243 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3244 
3245 	if (DEFAULT_VLAN_FILTER_ENABLE)
3246 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3247 
3248 	dev->net->hw_features = dev->net->features;
3249 
3250 	ret = lan78xx_setup_irq_domain(dev);
3251 	if (ret < 0) {
3252 		netdev_warn(dev->net,
3253 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3254 		goto out1;
3255 	}
3256 
3257 	dev->net->hard_header_len += TX_OVERHEAD;
3258 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3259 
3260 	/* Init all registers */
3261 	ret = lan78xx_reset(dev);
3262 	if (ret) {
3263 		netdev_warn(dev->net, "Registers INIT FAILED....");
3264 		goto out2;
3265 	}
3266 
3267 	ret = lan78xx_mdio_init(dev);
3268 	if (ret) {
3269 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3270 		goto out2;
3271 	}
3272 
3273 	dev->net->flags |= IFF_MULTICAST;
3274 
3275 	pdata->wol = WAKE_MAGIC;
3276 
3277 	return ret;
3278 
3279 out2:
3280 	lan78xx_remove_irq_domain(dev);
3281 
3282 out1:
3283 	netdev_warn(dev->net, "Bind routine FAILED");
3284 	cancel_work_sync(&pdata->set_multicast);
3285 	cancel_work_sync(&pdata->set_vlan);
3286 	kfree(pdata);
3287 	return ret;
3288 }
3289 
3290 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3291 {
3292 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3293 
3294 	lan78xx_remove_irq_domain(dev);
3295 
3296 	lan78xx_remove_mdio(dev);
3297 
3298 	if (pdata) {
3299 		cancel_work_sync(&pdata->set_multicast);
3300 		cancel_work_sync(&pdata->set_vlan);
3301 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3302 		kfree(pdata);
3303 		pdata = NULL;
3304 		dev->data[0] = 0;
3305 	}
3306 }
3307 
3308 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3309 				    struct sk_buff *skb,
3310 				    u32 rx_cmd_a, u32 rx_cmd_b)
3311 {
3312 	/* HW Checksum offload appears to be flawed if used when not stripping
3313 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3314 	 */
3315 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3316 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3317 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3318 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3319 		skb->ip_summed = CHECKSUM_NONE;
3320 	} else {
3321 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3322 		skb->ip_summed = CHECKSUM_COMPLETE;
3323 	}
3324 }
3325 
3326 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3327 				    struct sk_buff *skb,
3328 				    u32 rx_cmd_a, u32 rx_cmd_b)
3329 {
3330 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3331 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3332 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3333 				       (rx_cmd_b & 0xffff));
3334 }
3335 
3336 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3337 {
3338 	int status;
3339 
3340 	dev->net->stats.rx_packets++;
3341 	dev->net->stats.rx_bytes += skb->len;
3342 
3343 	skb->protocol = eth_type_trans(skb, dev->net);
3344 
3345 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3346 		  skb->len + sizeof(struct ethhdr), skb->protocol);
3347 	memset(skb->cb, 0, sizeof(struct skb_data));
3348 
3349 	if (skb_defer_rx_timestamp(skb))
3350 		return;
3351 
3352 	status = netif_rx(skb);
3353 	if (status != NET_RX_SUCCESS)
3354 		netif_dbg(dev, rx_err, dev->net,
3355 			  "netif_rx status %d\n", status);
3356 }
3357 
3358 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3359 {
3360 	if (skb->len < dev->net->hard_header_len)
3361 		return 0;
3362 
3363 	while (skb->len > 0) {
3364 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3365 		u16 rx_cmd_c;
3366 		struct sk_buff *skb2;
3367 		unsigned char *packet;
3368 
3369 		rx_cmd_a = get_unaligned_le32(skb->data);
3370 		skb_pull(skb, sizeof(rx_cmd_a));
3371 
3372 		rx_cmd_b = get_unaligned_le32(skb->data);
3373 		skb_pull(skb, sizeof(rx_cmd_b));
3374 
3375 		rx_cmd_c = get_unaligned_le16(skb->data);
3376 		skb_pull(skb, sizeof(rx_cmd_c));
3377 
3378 		packet = skb->data;
3379 
3380 		/* get the packet length */
3381 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3382 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3383 
3384 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3385 			netif_dbg(dev, rx_err, dev->net,
3386 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3387 		} else {
3388 			/* last frame in this batch */
3389 			if (skb->len == size) {
3390 				lan78xx_rx_csum_offload(dev, skb,
3391 							rx_cmd_a, rx_cmd_b);
3392 				lan78xx_rx_vlan_offload(dev, skb,
3393 							rx_cmd_a, rx_cmd_b);
3394 
3395 				skb_trim(skb, skb->len - 4); /* remove fcs */
3396 				skb->truesize = size + sizeof(struct sk_buff);
3397 
3398 				return 1;
3399 			}
3400 
3401 			skb2 = skb_clone(skb, GFP_ATOMIC);
3402 			if (unlikely(!skb2)) {
3403 				netdev_warn(dev->net, "Error allocating skb");
3404 				return 0;
3405 			}
3406 
3407 			skb2->len = size;
3408 			skb2->data = packet;
3409 			skb_set_tail_pointer(skb2, size);
3410 
3411 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3412 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3413 
3414 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3415 			skb2->truesize = size + sizeof(struct sk_buff);
3416 
3417 			lan78xx_skb_return(dev, skb2);
3418 		}
3419 
3420 		skb_pull(skb, size);
3421 
3422 		/* padding bytes before the next frame starts */
3423 		if (skb->len)
3424 			skb_pull(skb, align_count);
3425 	}
3426 
3427 	return 1;
3428 }
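
/* Frames inside a bulk-in buffer are padded so each following command
 * word starts 4-byte aligned; RXW_PADDING (2) accounts for the
 * rx_cmd_c half-word.  Worked examples of the arithmetic above:
 *
 *	size = 60: align_count = (4 - ((60 + 2) % 4)) % 4 = 2
 *	size = 62: align_count = (4 - ((62 + 2) % 4)) % 4 = 0
 */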
3429 
3430 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3431 {
3432 	if (!lan78xx_rx(dev, skb)) {
3433 		dev->net->stats.rx_errors++;
3434 		goto done;
3435 	}
3436 
3437 	if (skb->len) {
3438 		lan78xx_skb_return(dev, skb);
3439 		return;
3440 	}
3441 
3442 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3443 	dev->net->stats.rx_errors++;
3444 done:
3445 	skb_queue_tail(&dev->done, skb);
3446 }
3447 
3448 static void rx_complete(struct urb *urb);
3449 
3450 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3451 {
3452 	struct sk_buff *skb;
3453 	struct skb_data *entry;
3454 	unsigned long lockflags;
3455 	size_t size = dev->rx_urb_size;
3456 	int ret = 0;
3457 
3458 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3459 	if (!skb) {
3460 		usb_free_urb(urb);
3461 		return -ENOMEM;
3462 	}
3463 
3464 	entry = (struct skb_data *)skb->cb;
3465 	entry->urb = urb;
3466 	entry->dev = dev;
3467 	entry->length = 0;
3468 
3469 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3470 			  skb->data, size, rx_complete, skb);
3471 
3472 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3473 
3474 	if (netif_device_present(dev->net) &&
3475 	    netif_running(dev->net) &&
3476 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3477 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3478 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3479 		switch (ret) {
3480 		case 0:
3481 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3482 			break;
3483 		case -EPIPE:
3484 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3485 			break;
3486 		case -ENODEV:
3487 		case -ENOENT:
3488 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3489 			netif_device_detach(dev->net);
3490 			break;
3491 		case -EHOSTUNREACH:
3492 			ret = -ENOLINK;
3493 			break;
3494 		default:
3495 			netif_dbg(dev, rx_err, dev->net,
3496 				  "rx submit, %d\n", ret);
3497 			tasklet_schedule(&dev->bh);
3498 		}
3499 	} else {
3500 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3501 		ret = -ENOLINK;
3502 	}
3503 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3504 	if (ret) {
3505 		dev_kfree_skb_any(skb);
3506 		usb_free_urb(urb);
3507 	}
3508 	return ret;
3509 }
3510 
3511 static void rx_complete(struct urb *urb)
3512 {
3513 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3514 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3515 	struct lan78xx_net *dev = entry->dev;
3516 	int urb_status = urb->status;
3517 	enum skb_state state;
3518 
3519 	skb_put(skb, urb->actual_length);
3520 	state = rx_done;
3521 	entry->urb = NULL;
3522 
3523 	switch (urb_status) {
3524 	case 0:
3525 		if (skb->len < dev->net->hard_header_len) {
3526 			state = rx_cleanup;
3527 			dev->net->stats.rx_errors++;
3528 			dev->net->stats.rx_length_errors++;
3529 			netif_dbg(dev, rx_err, dev->net,
3530 				  "rx length %d\n", skb->len);
3531 		}
3532 		usb_mark_last_busy(dev->udev);
3533 		break;
3534 	case -EPIPE:
3535 		dev->net->stats.rx_errors++;
3536 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3537 		fallthrough;
3538 	case -ECONNRESET:				/* async unlink */
3539 	case -ESHUTDOWN:				/* hardware gone */
3540 		netif_dbg(dev, ifdown, dev->net,
3541 			  "rx shutdown, code %d\n", urb_status);
3542 		state = rx_cleanup;
3543 		entry->urb = urb;
3544 		urb = NULL;
3545 		break;
3546 	case -EPROTO:
3547 	case -ETIME:
3548 	case -EILSEQ:
3549 		dev->net->stats.rx_errors++;
3550 		state = rx_cleanup;
3551 		entry->urb = urb;
3552 		urb = NULL;
3553 		break;
3554 
3555 	/* data overrun ... flush fifo? */
3556 	case -EOVERFLOW:
3557 		dev->net->stats.rx_over_errors++;
3558 		fallthrough;
3559 
3560 	default:
3561 		state = rx_cleanup;
3562 		dev->net->stats.rx_errors++;
3563 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3564 		break;
3565 	}
3566 
3567 	state = defer_bh(dev, skb, &dev->rxq, state);
3568 
3569 	if (urb) {
3570 		if (netif_running(dev->net) &&
3571 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3572 		    state != unlink_start) {
3573 			rx_submit(dev, urb, GFP_ATOMIC);
3574 			return;
3575 		}
3576 		usb_free_urb(urb);
3577 	}
3578 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3579 }
3580 
3581 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3582 {
3583 	int length;
3584 	struct urb *urb = NULL;
3585 	struct skb_data *entry;
3586 	unsigned long flags;
3587 	struct sk_buff_head *tqp = &dev->txq_pend;
3588 	struct sk_buff *skb, *skb2;
3589 	int ret;
3590 	int count, pos;
3591 	int skb_totallen, pkt_cnt;
3592 
3593 	skb_totallen = 0;
3594 	pkt_cnt = 0;
3595 	count = 0;
3596 	length = 0;
3597 	spin_lock_irqsave(&tqp->lock, flags);
3598 	skb_queue_walk(tqp, skb) {
3599 		if (skb_is_gso(skb)) {
3600 			if (!skb_queue_is_first(tqp, skb)) {
3601 				/* handle previous packets first */
3602 				break;
3603 			}
3604 			count = 1;
3605 			length = skb->len - TX_OVERHEAD;
3606 			__skb_unlink(skb, tqp);
3607 			spin_unlock_irqrestore(&tqp->lock, flags);
3608 			goto gso_skb;
3609 		}
3610 
3611 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3612 			break;
3613 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3614 		pkt_cnt++;
3615 	}
3616 	spin_unlock_irqrestore(&tqp->lock, flags);
3617 
3618 	/* copy to a single skb */
3619 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3620 	if (!skb)
3621 		goto drop;
3622 
3623 	skb_put(skb, skb_totallen);
3624 
3625 	for (count = pos = 0; count < pkt_cnt; count++) {
3626 		skb2 = skb_dequeue(tqp);
3627 		if (skb2) {
3628 			length += (skb2->len - TX_OVERHEAD);
3629 			memcpy(skb->data + pos, skb2->data, skb2->len);
3630 			pos += roundup(skb2->len, sizeof(u32));
3631 			dev_kfree_skb(skb2);
3632 		}
3633 	}
3634 
3635 gso_skb:
3636 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3637 	if (!urb)
3638 		goto drop;
3639 
3640 	entry = (struct skb_data *)skb->cb;
3641 	entry->urb = urb;
3642 	entry->dev = dev;
3643 	entry->length = length;
3644 	entry->num_of_packet = count;
3645 
3646 	spin_lock_irqsave(&dev->txq.lock, flags);
3647 	ret = usb_autopm_get_interface_async(dev->intf);
3648 	if (ret < 0) {
3649 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3650 		goto drop;
3651 	}
3652 
3653 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3654 			  skb->data, skb->len, tx_complete, skb);
3655 
3656 	if (length % dev->maxpacket == 0) {
3657 		/* send USB_ZERO_PACKET */
3658 		urb->transfer_flags |= URB_ZERO_PACKET;
3659 	}
3660 
3661 #ifdef CONFIG_PM
3662 	/* if this triggers, the device is still asleep */
3663 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3664 		/* transmission will be done in resume */
3665 		usb_anchor_urb(urb, &dev->deferred);
3666 		/* no point in processing more packets */
3667 		netif_stop_queue(dev->net);
3668 		usb_put_urb(urb);
3669 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3670 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3671 		return;
3672 	}
3673 #endif
3674 
3675 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3676 	switch (ret) {
3677 	case 0:
3678 		netif_trans_update(dev->net);
3679 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3680 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3681 			netif_stop_queue(dev->net);
3682 		break;
3683 	case -EPIPE:
3684 		netif_stop_queue(dev->net);
3685 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3686 		usb_autopm_put_interface_async(dev->intf);
3687 		break;
3688 	case -ENODEV:
3689 	case -ENOENT:
3690 		netif_dbg(dev, tx_err, dev->net,
3691 			  "tx: submit urb err %d (disconnected?)", ret);
3692 		netif_device_detach(dev->net);
3693 		break;
3694 	default:
3695 		usb_autopm_put_interface_async(dev->intf);
3696 		netif_dbg(dev, tx_err, dev->net,
3697 			  "tx: submit urb err %d\n", ret);
3698 		break;
3699 	}
3700 
3701 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3702 
3703 	if (ret) {
3704 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3705 drop:
3706 		dev->net->stats.tx_dropped++;
3707 		if (skb)
3708 			dev_kfree_skb_any(skb);
3709 		usb_free_urb(urb);
3710 	} else {
3711 		netif_dbg(dev, tx_queued, dev->net,
3712 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3713 	}
3714 }
3715 
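/* RX bottom half: top up the RX URB pool toward rx_qlen (at most ten
 * allocations per pass) and wake the TX queue when there is room again.
 */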
static void lan78xx_rx_bh(struct lan78xx_net *dev)
{
	struct urb *urb;
	int i;

	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
		for (i = 0; i < 10; i++) {
			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
				break;
			urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (urb)
				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
					return;
		}

		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
			tasklet_schedule(&dev->bh);
	}
	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
		netif_wake_queue(dev->net);
}

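/* Tasklet handler: reap completed RX/TX skbs from the done queue, then
 * run the TX and RX bottom halves while the interface is up.
 */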
static void lan78xx_bh(struct tasklet_struct *t)
{
	struct lan78xx_net *dev = from_tasklet(dev, t, bh);
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}

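/* Deferred-event worker: clears halted bulk endpoints and services link
 * resets and periodic statistics updates requested via dev->flags.
 */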
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);

		status = usb_clear_halt(dev->udev, dev->pipe_out);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}

	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		ret = lan78xx_link_reset(dev);
		if (ret < 0)
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}

	usb_autopm_put_interface(dev->intf);
}

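/* Completion handler for the interrupt endpoint: process the status
 * words and resubmit the URB unless the device is going away.
 */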
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ENODEV:			/* hardware gone */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case 0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}

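/* USB disconnect: tear down in roughly the reverse order of probe -
 * netdev, worker, PHY (including any fixed link), deferred URBs,
 * statistics timer and the interrupt URB.
 */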
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	usb_scuttle_anchored_urbs(&dev->deferred);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}

static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}

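/* Drop GSO for frames that would exceed the device's single-packet limit
 * once the TX command words are added, then apply the generic VLAN and
 * VXLAN feature checks.
 */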
static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
						struct net_device *netdev,
						netdev_features_t features)
{
	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
		features &= ~NETIF_F_GSO_MASK;

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}

static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};

static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}

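/* USB probe: allocate the netdev, validate the three expected endpoints
 * (bulk-in, bulk-out, interrupt-in), bind the hardware, set up the
 * interrupt URB and PHY, and register the network device.
 */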
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);
	mutex_init(&dev->dev_mutex);

	tasklet_setup(&dev->bh, lan78xx_bh);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	/* The default autosuspend delay of 2 sec adds more overhead than
	 * benefit; use 10 sec instead.
	 */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}

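/* CRC-16 (polynomial 0x8005) over a wakeup-frame pattern, in the form
 * the WUF_CFGx CRC16 field expects.
 */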
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 crc16poly = 0x8005;
	int i;
	u16 bit, crc, msb;
	u8 data;

	crc = 0xFFFF;
	for (i = 0; i < len; i++) {
		data = *buf++;
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 15;
			crc <<= 1;

			if (msb ^ (u16)(data & 1)) {
				crc ^= crc16poly;
				crc |= (u16)0x0001U;
			}
			data >>= 1;
		}
	}

	return crc;
}

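/* Configure the device for runtime (selective) suspend: stop the MAC
 * paths, arm good-frame and PHY wakeup, select suspend mode 3, then
 * restart the RX path so wake packets can be seen.
 */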
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}

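/* Configure system suspend according to the requested WoL modes: program
 * wakeup-frame filters (IPv4/IPv6 multicast, ARP), the per-mode WUCSR
 * bits and the matching PMT_CTL suspend mode, then restart the RX path.
 */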
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packet type (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}

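/* USB suspend: refuse autosuspend while TX is pending; otherwise quiesce
 * the MAC, kill outstanding URBs and arm either runtime wakeup or the
 * configured WoL. With the interface down, disable all wakeup sources.
 */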
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}

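/* Submit the TX URBs deferred while the device was asleep; called with
 * txq.lock held. Returns true if the bulk-out pipe stalled on the way.
 */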
static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
{
	bool pipe_halted = false;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		struct sk_buff *skb = urb->context;
		int ret;

		if (!netif_device_present(dev->net) ||
		    !netif_carrier_ok(dev->net) ||
		    pipe_halted) {
			usb_free_urb(urb);
			dev_kfree_skb(skb);
			continue;
		}

		ret = usb_submit_urb(urb, GFP_ATOMIC);

		if (ret == 0) {
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, skb, tx_start);
		} else {
			usb_free_urb(urb);
			dev_kfree_skb(skb);

			if (ret == -EPIPE) {
				netif_stop_queue(dev->net);
				pipe_halted = true;
			} else if (ret == -ENODEV) {
				netif_device_detach(dev->net);
			}
		}
	}

	return pipe_halted;
}

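/* USB resume: resubmit the interrupt URB and any deferred TX URBs,
 * restart the TX path and statistics timer, then clear and re-arm the
 * wakeup-source status registers.
 */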
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (skb_queue_len(&dev->txq) < dev->tx_qlen))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		tasklet_schedule(&dev->bh);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}

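/* Resume after a bus reset: reinitialize the hardware and PHY before
 * running the normal resume path.
 */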
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	int ret;

	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");

	ret = lan78xx_reset(dev);
	if (ret < 0)
		return ret;

	phy_start(dev->net->phydev);

	ret = lan78xx_resume(intf);

	return ret;
}

static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");