// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024
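
/* A worked example of the encoding (a sketch derived from the macros above,
 * not a value read back from hardware): FLOW_THRESHOLD() converts bytes to
 * 512-byte units, so FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS) is
 * (9216 / 512) | ((4096 / 512) << 8) = 0x12 | 0x0800 = 0x0812, with the
 * "on" threshold in bits 6:0 and the "off" threshold in bits 14:8 of the
 * value written to FCT_FLOW.
 */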

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID		(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)

#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Byte Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Byte Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	unsigned int		tx_pend_data_len;
	size_t			n_tx_urbs;
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		phy_mutex; /* for phy access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};

/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	if (skb_queue_empty(buf_pool))
		return NULL;

	return skb_dequeue(buf_pool);
}

static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}

static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
{
	struct skb_data *entry;
	struct sk_buff *buf;

	while (!skb_queue_empty(buf_pool)) {
		buf = skb_dequeue(buf_pool);
		if (buf) {
			entry = (struct skb_data *)buf->cb;
			usb_free_urb(entry->urb);
			dev_kfree_skb_any(buf);
		}
	}
}

static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}

static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}

static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}

static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	ret = lan78xx_write_reg(dev, reg, buf);
	if (ret < 0)
		return ret;

	return 0;
}
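
/* Illustrative usage of the read-modify-write helper above (a sketch, not a
 * call made verbatim in this file): lan78xx_update_reg(dev, MAC_RX,
 * MAC_RX_RXEN_, 0) would clear the receiver-enable bit while preserving all
 * other MAC_RX bits.
 */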

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
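
/* The hardware counters are 32-bit and wrap; a new reading smaller than the
 * saved one means the counter rolled over since the last poll, so a
 * per-counter rollover count is kept. lan78xx_update_stats() then
 * reconstructs a 64-bit value as roughly:
 *
 *	curr_stat = reading + rollover_count * (rollover_max + 1)
 *
 * which is only accurate while the poll interval is short enough that a
 * counter cannot wrap more than once between readings.
 */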

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with timeout. Called with phy_mutex
 * held.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}
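
/* For example, the hypothetical call mii_access(phy_id, MII_BMSR, MII_READ)
 * composes the PHY address, register index, read opcode and busy flag into a
 * single value ready to be written to MII_ACC.
 */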

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
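
/* The top nine bits of the Ethernet CRC select one of 512 hash bits; bit N
 * is then stored as bit (N % 32) of mchash_table[N / 32], which is the
 * layout lan78xx_set_multicast() below relies on.
 */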

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->phy_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto done;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto done;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto done;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto done;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
done:
	mutex_unlock(&dev->phy_mutex);

	return ret;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return ret;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				if (ret < 0)
					return ret;
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				if (ret < 0)
					return ret;
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq_safe(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_set_eee(net->phydev, edata);
	if (ret < 0)
		goto out;

	buf = (u32)edata->tx_lpi_timer;
	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
out:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	u32 link;

	mutex_lock(&net->phydev->lock);
	phy_read_status(net->phydev);
	link = net->phydev->link;
	mutex_unlock(&net->phydev->lock);

	return link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	if (!netdev->phydev)
		return (sizeof(lan78xx_regs));
	else
		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1922 
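/* MAC address selection order: use the address already programmed into
 * RX_ADDRL/RX_ADDRH if valid; otherwise try the Device Tree, then
 * EEPROM/OTP, and finally fall back to a random address. The chosen
 * address is also written to perfect filter slot 0 (MAF).
 */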
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	eth_hw_addr_set(dev->net, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	u32 data;
	int ret;

	ret = lan78xx_read_reg(dev, MAC_CR, &data);
	if (ret < 0)
		return;

	if (phydev->enable_tx_lpi)
		data |=  MAC_CR_EEE_EN_;
	else
		data &= ~MAC_CR_EEE_EN_;
	lan78xx_write_reg(dev, MAC_CR, data);

	phy_print_status(phydev);
}

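/* The chip reports PHY and other interrupt sources through its USB
 * interrupt endpoint. The handlers below expose those sources as a small
 * irq_domain so that phylib can bind to a genuine Linux IRQ instead of
 * polling.
 */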
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* Do the register access here, because irq_bus_lock and
	 * irq_bus_sync_unlock are the only two callbacks executed in a
	 * non-atomic context.
	 */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}

static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};

static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}

static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}

static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	lan78xx_write_reg(dev, MAC_RGMII_ID,
			  MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}

static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}

static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}

static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
				phy_device_free(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	phy_support_eee(phydev);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}

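/* Update the MAC's maximum Rx frame size. The receiver is temporarily
 * disabled around the register update, presumably because the limit must
 * not change while the MAC is receiving. The extra 4 bytes cover the FCS.
 */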
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	u32 buf;
	bool rxenabled;

	lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}

static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Take a reference on the URB so it cannot be freed
		 * during usb_unlink_urb(), which could otherwise cause
		 * a use-after-free inside usb_unlink_urb, since it is
		 * always racing with the .complete handler (including
		 * defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}

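/* Reject MTUs for which the resulting frame length is an exact multiple of
 * the bulk endpoint's max packet size: such transfers would end on a packet
 * boundary and require a second, zero-length packet to terminate the read.
 */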
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((max_frame_len % dev->maxpacket) == 0)
		return -EDOM;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
	if (!ret)
		WRITE_ONCE(netdev->mtu, new_mtu);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);

	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	/* Added to support MAC address changes */
	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	return 0;
}

/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}

static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}

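/* The VLAN filter table is a 4096-bit bitmap kept as an array of 32-bit
 * words: vid >> 5 selects the word and vid & 0x1F the bit within it. The
 * table is pushed to the hardware from a work item, since the dataport
 * write sleeps and these ndo hooks may be called in atomic context.
 */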
static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}

static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;
	vid_bit_index = vid & 0x1F;

	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}

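/* Program the USB Latency Tolerance Messaging (LTM) registers. If LTM is
 * enabled, board-specific values may be provisioned in EEPROM or OTP (a
 * 24-byte block referenced from offset 0x3F); otherwise all six registers
 * are written as zero.
 */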
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}

static int lan78xx_urb_config_init(struct lan78xx_net *dev)
{
	int result = 0;

	switch (dev->udev->speed) {
	case USB_SPEED_SUPER:
		dev->rx_urb_size = RX_SS_URB_SIZE;
		dev->tx_urb_size = TX_SS_URB_SIZE;
		dev->n_rx_urbs = RX_SS_URB_NUM;
		dev->n_tx_urbs = TX_SS_URB_NUM;
		dev->bulk_in_delay = SS_BULK_IN_DELAY;
		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		break;
	case USB_SPEED_HIGH:
		dev->rx_urb_size = RX_HS_URB_SIZE;
		dev->tx_urb_size = TX_HS_URB_SIZE;
		dev->n_rx_urbs = RX_HS_URB_NUM;
		dev->n_tx_urbs = TX_HS_URB_NUM;
		dev->bulk_in_delay = HS_BULK_IN_DELAY;
		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		break;
	case USB_SPEED_FULL:
		dev->rx_urb_size = RX_FS_URB_SIZE;
		dev->tx_urb_size = TX_FS_URB_SIZE;
		dev->n_rx_urbs = RX_FS_URB_NUM;
		dev->n_tx_urbs = TX_FS_URB_NUM;
		dev->bulk_in_delay = FS_BULK_IN_DELAY;
		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		break;
	default:
		netdev_warn(dev->net, "USB bus speed not supported\n");
		result = -EIO;
		break;
	}

	return result;
}

static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}

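/* Clear the block's enable bit, then poll its "disabled" status bit until
 * the hardware reports it has actually stopped, giving up with -ETIME
 * after HW_DISABLE_TIMEOUT.
 */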
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	ret = stopped ? 0 : -ETIME;

	return ret;
}

static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}

static int lan78xx_start_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start tx path");

	/* Start the MAC transmitter */

	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
	if (ret < 0)
		return ret;

	/* Start the Tx FIFO */

	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop tx path");

	/* Stop the Tx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
	if (ret < 0)
		return ret;

	/* Stop the MAC transmitter */

	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}

static int lan78xx_start_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start rx path");

	/* Start the Rx FIFO */

	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
	if (ret < 0)
		return ret;

	/* Start the MAC receiver */

	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop rx path");

	/* Stop the MAC receiver */

	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
	if (ret < 0)
		return ret;

	/* Stop the Rx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}

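/* Full device initialisation: issue a lite reset and wait for it to
 * complete, reprogram the MAC address, latch the chip ID/revision, and
 * set up USB, FIFO, flow control, receive filtering and checksum offload
 * defaults before resetting the PHY and restoring the max frame length.
 */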
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long timeout;
	int ret;
	u32 buf;
	u8 sig;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_LRST_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	if (ret < 0)
		return ret;

	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BIR_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* Init LTM */
	lan78xx_init_ltm(dev);

	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	if (ret < 0)
		return ret;

	buf |= HW_CFG_MEF_;
	buf |= HW_CFG_CLK125_EN_;
	buf |= HW_CFG_REFCLK25_EN_;

	ret = lan78xx_write_reg(dev, HW_CFG, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (ret < 0)
		return ret;

	buf |= USB_CFG_BCE_;

	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
	if (ret < 0)
		return ret;

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;

	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FLOW, 0);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
	if (ret < 0)
		return ret;

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
	if (ret < 0)
		return ret;

	/* Enable or disable checksum offload engines */
	ret = lan78xx_set_features(dev->net, dev->net->features);
	if (ret < 0)
		return ret;

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_PHY_RST_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			return ret;

		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			ret = -ETIMEDOUT;
			return ret;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (ret < 0)
		return ret;

	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_) {
		buf &= ~MAC_CR_GMII_EN_;
		/* Enable Auto Duplex and Auto speed */
		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	}

	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
	    dev->chipid == ID_REV_CHIP_ID_7850_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_set_rx_max_frame_length(dev,
					      RX_MAX_FRAME_LEN(dev->net->mtu));

	return ret;
}

static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20 bits wide and some are 32 bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	set_bit(EVENT_STAT_UPDATE, &dev->flags);
}

static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	if (ret < 0)
		usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}

static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}

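/* Move a completed skb from its active queue to rxq_done under the queue
 * locks, and schedule NAPI only on the empty-to-non-empty transition so
 * the poll handler is kicked exactly once per burst of completions.
 */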
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}

static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
}

static void lan78xx_queue_skb(struct sk_buff_head *list,
			      struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *)newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}

static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
{
	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
}

static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
{
	return dev->tx_pend_data_len;
}

static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    unsigned int *tx_pend_data_len)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->txq_pend.lock, flags);

	__skb_queue_tail(&dev->txq_pend, skb);

	dev->tx_pend_data_len += skb->len;
	*tx_pend_data_len = dev->tx_pend_data_len;

	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
}

static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
					 struct sk_buff *skb,
					 unsigned int *tx_pend_data_len)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->txq_pend.lock, flags);

	__skb_queue_head(&dev->txq_pend, skb);

	dev->tx_pend_data_len += skb->len;
	*tx_pend_data_len = dev->tx_pend_data_len;

	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
}

static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
				    struct sk_buff **skb,
				    unsigned int *tx_pend_data_len)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->txq_pend.lock, flags);

	*skb = __skb_dequeue(&dev->txq_pend);
	if (*skb)
		dev->tx_pend_data_len -= (*skb)->len;
	*tx_pend_data_len = dev->tx_pend_data_len;

	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
}

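/* Transmit path: skbs are queued on txq_pend and coalesced into bulk-out
 * URBs from the NAPI poll handler. The stack's queue is stopped once the
 * pending data could already fill every free Tx URB.
 */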
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}

static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	if (DEFAULT_VLAN_RX_OFFLOAD)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (DEFAULT_VLAN_FILTER_ENABLE)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	kfree(pdata);
	return ret;
}

static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_irq_domain(dev);

	lan78xx_remove_mdio(dev);

	if (pdata) {
		cancel_work_sync(&pdata->set_multicast);
		cancel_work_sync(&pdata->set_vlan);
		netif_dbg(dev, ifdown, dev->net, "free pdata");
		kfree(pdata);
		pdata = NULL;
		dev->data[0] = 0;
	}
}

static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}

static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (rx_cmd_a & RX_CMD_A_FVTG_))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       (rx_cmd_b & 0xffff));
}

static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}

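/* Each bulk-in URB may carry several Ethernet frames. Every frame is
 * preceded by a 10-byte command header (rx_cmd_a, rx_cmd_b, rx_cmd_c)
 * carrying the frame length, error flags, checksum and VLAN metadata,
 * and is followed by the FCS plus padding to the next 4-byte boundary.
 */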
lan78xx_rx(struct lan78xx_net * dev,struct sk_buff * skb,int budget,int * work_done)3558 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
3559 		      int budget, int *work_done)
3560 {
3561 	if (skb->len < RX_SKB_MIN_LEN)
3562 		return 0;
3563 
3564 	/* Extract frames from the URB buffer and pass each one to
3565 	 * the stack in a new NAPI SKB.
3566 	 */
3567 	while (skb->len > 0) {
3568 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
3569 		u16 rx_cmd_c;
3570 		unsigned char *packet;
3571 
3572 		rx_cmd_a = get_unaligned_le32(skb->data);
3573 		skb_pull(skb, sizeof(rx_cmd_a));
3574 
3575 		rx_cmd_b = get_unaligned_le32(skb->data);
3576 		skb_pull(skb, sizeof(rx_cmd_b));
3577 
3578 		rx_cmd_c = get_unaligned_le16(skb->data);
3579 		skb_pull(skb, sizeof(rx_cmd_c));
3580 
3581 		packet = skb->data;
3582 
3583 		/* get the packet length */
3584 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3585 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3586 
3587 		if (unlikely(size > skb->len)) {
3588 			netif_dbg(dev, rx_err, dev->net,
3589 				  "size err rx_cmd_a=0x%08x\n",
3590 				  rx_cmd_a);
3591 			return 0;
3592 		}
3593 
3594 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3595 			netif_dbg(dev, rx_err, dev->net,
3596 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
3597 		} else {
3598 			u32 frame_len;
3599 			struct sk_buff *skb2;
3600 
3601 			if (unlikely(size < ETH_FCS_LEN)) {
3602 				netif_dbg(dev, rx_err, dev->net,
3603 					  "size err rx_cmd_a=0x%08x\n",
3604 					  rx_cmd_a);
3605 				return 0;
3606 			}
3607 
3608 			frame_len = size - ETH_FCS_LEN;
3609 
3610 			skb2 = napi_alloc_skb(&dev->napi, frame_len);
3611 			if (!skb2)
3612 				return 0;
3613 
3614 			memcpy(skb2->data, packet, frame_len);
3615 
3616 			skb_put(skb2, frame_len);
3617 
3618 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3619 			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3620 
3621 			/* Processing of the URB buffer must complete once
3622 			 * it has started. If the NAPI work budget is exhausted
3623 			 * while frames remain they are added to the overflow
3624 			 * queue for delivery in the next NAPI polling cycle.
3625 			 */
3626 			if (*work_done < budget) {
3627 				lan78xx_skb_return(dev, skb2);
3628 				++(*work_done);
3629 			} else {
3630 				skb_queue_tail(&dev->rxq_overflow, skb2);
3631 			}
3632 		}
3633 
3634 		skb_pull(skb, size);
3635 
3636 		/* skip padding bytes before the next frame starts */
3637 		if (skb->len)
3638 			skb_pull(skb, align_count);
3639 	}
3640 
3641 	return 1;
3642 }
3643 
rx_process(struct lan78xx_net * dev,struct sk_buff * skb,int budget,int * work_done)3644 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3645 			      int budget, int *work_done)
3646 {
3647 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3648 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3649 		dev->net->stats.rx_errors++;
3650 	}
3651 }
3652 
rx_complete(struct urb * urb)3653 static void rx_complete(struct urb *urb)
3654 {
3655 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3656 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3657 	struct lan78xx_net *dev = entry->dev;
3658 	int urb_status = urb->status;
3659 	enum skb_state state;
3660 
3661 	netif_dbg(dev, rx_status, dev->net,
3662 		  "rx done: status %d", urb->status);
3663 
3664 	skb_put(skb, urb->actual_length);
3665 	state = rx_done;
3666 
3667 	if (urb != entry->urb)
3668 		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch\n");
3669 
3670 	switch (urb_status) {
3671 	case 0:
3672 		if (skb->len < RX_SKB_MIN_LEN) {
3673 			state = rx_cleanup;
3674 			dev->net->stats.rx_errors++;
3675 			dev->net->stats.rx_length_errors++;
3676 			netif_dbg(dev, rx_err, dev->net,
3677 				  "rx length %d\n", skb->len);
3678 		}
3679 		usb_mark_last_busy(dev->udev);
3680 		break;
3681 	case -EPIPE:
3682 		dev->net->stats.rx_errors++;
3683 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3684 		fallthrough;
3685 	case -ECONNRESET:				/* async unlink */
3686 	case -ESHUTDOWN:				/* hardware gone */
3687 		netif_dbg(dev, ifdown, dev->net,
3688 			  "rx shutdown, code %d\n", urb_status);
3689 		state = rx_cleanup;
3690 		break;
3691 	case -EPROTO:
3692 	case -ETIME:
3693 	case -EILSEQ:
3694 		dev->net->stats.rx_errors++;
3695 		state = rx_cleanup;
3696 		break;
3697 
3698 	/* data overrun ... flush fifo? */
3699 	case -EOVERFLOW:
3700 		dev->net->stats.rx_over_errors++;
3701 		fallthrough;
3702 
3703 	default:
3704 		state = rx_cleanup;
3705 		dev->net->stats.rx_errors++;
3706 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3707 		break;
3708 	}
3709 
3710 	state = defer_bh(dev, skb, &dev->rxq, state);
3711 }
3712 
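/* Submit one Rx URB. The rxq lock is held across the device-state
 * checks and the submission so a buffer cannot be queued once the
 * device is halted, detached or asleep; on any failure the buffer is
 * returned to the free pool.
 */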
3713 static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
3714 {
3715 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3716 	size_t size = dev->rx_urb_size;
3717 	struct urb *urb = entry->urb;
3718 	unsigned long lockflags;
3719 	int ret = 0;
3720 
3721 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3722 			  skb->data, size, rx_complete, skb);
3723 
3724 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3725 
3726 	if (netif_device_present(dev->net) &&
3727 	    netif_running(dev->net) &&
3728 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3729 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3730 		ret = usb_submit_urb(urb, flags);
3731 		switch (ret) {
3732 		case 0:
3733 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3734 			break;
3735 		case -EPIPE:
3736 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3737 			break;
3738 		case -ENODEV:
3739 		case -ENOENT:
3740 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3741 			netif_device_detach(dev->net);
3742 			break;
3743 		case -EHOSTUNREACH:
3744 			ret = -ENOLINK;
3745 			napi_schedule(&dev->napi);
3746 			break;
3747 		default:
3748 			netif_dbg(dev, rx_err, dev->net,
3749 				  "rx submit, %d\n", ret);
3750 			napi_schedule(&dev->napi);
3751 			break;
3752 		}
3753 	} else {
3754 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3755 		ret = -ENOLINK;
3756 	}
3757 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3758 
3759 	if (ret)
3760 		lan78xx_release_rx_buf(dev, skb);
3761 
3762 	return ret;
3763 }
3764 
3765 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
3766 {
3767 	struct sk_buff *rx_buf;
3768 
3769 	/* Ensure the maximum number of Rx URBs is submitted
3770 	 */
3771 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
3772 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
3773 			break;
3774 	}
3775 }
3776 
3777 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
3778 				    struct sk_buff *rx_buf)
3779 {
3780 	/* reset SKB data pointers */
3781 
3782 	rx_buf->data = rx_buf->head;
3783 	skb_reset_tail_pointer(rx_buf);
3784 	rx_buf->len = 0;
3785 	rx_buf->data_len = 0;
3786 
3787 	rx_submit(dev, rx_buf, GFP_ATOMIC);
3788 }
3789 
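/* Build the two little-endian command words that prefix each Tx frame:
 * TX_CMD_A carries the frame length plus the FCS, checksum-offload and
 * LSO flags; TX_CMD_B carries the TSO MSS and the VLAN tag to insert.
 */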
3790 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
3791 {
3792 	u32 tx_cmd_a;
3793 	u32 tx_cmd_b;
3794 
3795 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
3796 
3797 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3798 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
3799 
3800 	tx_cmd_b = 0;
3801 	if (skb_is_gso(skb)) {
3802 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
3803 
3804 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
3805 
3806 		tx_cmd_a |= TX_CMD_A_LSO_;
3807 	}
3808 
3809 	if (skb_vlan_tag_present(skb)) {
3810 		tx_cmd_a |= TX_CMD_A_IVTG_;
3811 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
3812 	}
3813 
3814 	put_unaligned_le32(tx_cmd_a, buffer);
3815 	put_unaligned_le32(tx_cmd_b, buffer + 4);
3816 }
3817 
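/* Aggregate pending SKBs into a single Tx URB buffer. Each frame is
 * aligned to TX_ALIGNMENT and prefixed with its command words; filling
 * stops when the next frame no longer fits, and that frame is pushed
 * back to the head of the pending queue.
 */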
3818 static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
3819 					    struct sk_buff *tx_buf)
3820 {
3821 	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
3822 	int remain = dev->tx_urb_size;
3823 	u8 *tx_data = tx_buf->data;
3824 	u32 urb_len = 0;
3825 
3826 	entry->num_of_packet = 0;
3827 	entry->length = 0;
3828 
3829 	/* Work through the pending SKBs and copy the data of each SKB into
3830 	 * the URB buffer if there is room for all the SKB data.
3831 	 *
3832 	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled).
3833 	 */
3834 	while (remain >= TX_SKB_MIN_LEN) {
3835 		unsigned int pending_bytes;
3836 		unsigned int align_bytes;
3837 		struct sk_buff *skb;
3838 		unsigned int len;
3839 
3840 		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);
3841 
3842 		if (!skb)
3843 			break;
3844 
3845 		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
3846 			      TX_ALIGNMENT;
3847 		len = align_bytes + TX_CMD_LEN + skb->len;
3848 		if (len > remain) {
3849 			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
3850 			break;
3851 		}
3852 
3853 		tx_data += align_bytes;
3854 
3855 		lan78xx_fill_tx_cmd_words(skb, tx_data);
3856 		tx_data += TX_CMD_LEN;
3857 
3858 		len = skb->len;
3859 		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
3860 			struct net_device_stats *stats = &dev->net->stats;
3861 
3862 			stats->tx_dropped++;
3863 			dev_kfree_skb_any(skb);
3864 			tx_data -= TX_CMD_LEN;
3865 			continue;
3866 		}
3867 
3868 		tx_data += len;
3869 		entry->length += len;
3870 		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
3871 
3872 		dev_kfree_skb_any(skb);
3873 
3874 		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);
3875 
3876 		remain = dev->tx_urb_size - urb_len;
3877 	}
3878 
3879 	skb_put(tx_buf, urb_len);
3880 
3881 	return entry;
3882 }
3883 
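/* Tx bottom half: wake the stack queue once URB space is available,
 * then drain the pending queue by filling and submitting bulk-out URBs
 * until the data or free URBs run out, or a submission fails.
 */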
3884 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3885 {
3886 	int ret;
3887 
3888 	/* Start the stack Tx queue if it was stopped
3889 	 */
3890 	netif_tx_lock(dev->net);
3891 	if (netif_queue_stopped(dev->net)) {
3892 		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
3893 			netif_wake_queue(dev->net);
3894 	}
3895 	netif_tx_unlock(dev->net);
3896 
3897 	/* Go through the Tx pending queue and set up URBs to transfer
3898 	 * the data to the device. Stop if no more pending data or URBs,
3899 	 * or if an error occurs when a URB is submitted.
3900 	 */
3901 	do {
3902 		struct skb_data *entry;
3903 		struct sk_buff *tx_buf;
3904 		unsigned long flags;
3905 
3906 		if (skb_queue_empty(&dev->txq_pend))
3907 			break;
3908 
3909 		tx_buf = lan78xx_get_tx_buf(dev);
3910 		if (!tx_buf)
3911 			break;
3912 
3913 		entry = lan78xx_tx_buf_fill(dev, tx_buf);
3914 
3915 		spin_lock_irqsave(&dev->txq.lock, flags);
3916 		ret = usb_autopm_get_interface_async(dev->intf);
3917 		if (ret < 0) {
3918 			spin_unlock_irqrestore(&dev->txq.lock, flags);
3919 			goto out;
3920 		}
3921 
3922 		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
3923 				  tx_buf->data, tx_buf->len, tx_complete,
3924 				  tx_buf);
3925 
3926 		if (tx_buf->len % dev->maxpacket == 0) {
3927 			/* send USB_ZERO_PACKET */
3928 			entry->urb->transfer_flags |= URB_ZERO_PACKET;
3929 		}
3930 
3931 #ifdef CONFIG_PM
3932 		/* if device is asleep stop outgoing packet processing */
3933 		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3934 			usb_anchor_urb(entry->urb, &dev->deferred);
3935 			netif_stop_queue(dev->net);
3936 			spin_unlock_irqrestore(&dev->txq.lock, flags);
3937 			netdev_dbg(dev->net,
3938 				   "Delaying transmission for resumption\n");
3939 			return;
3940 		}
3941 #endif
3942 		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
3943 		switch (ret) {
3944 		case 0:
3945 			netif_trans_update(dev->net);
3946 			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
3947 			break;
3948 		case -EPIPE:
3949 			netif_stop_queue(dev->net);
3950 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3951 			usb_autopm_put_interface_async(dev->intf);
3952 			break;
3953 		case -ENODEV:
3954 		case -ENOENT:
3955 			netif_dbg(dev, tx_err, dev->net,
3956 			  "tx submit urb err %d (disconnected?)\n", ret);
3957 			netif_device_detach(dev->net);
3958 			break;
3959 		default:
3960 			usb_autopm_put_interface_async(dev->intf);
3961 			netif_dbg(dev, tx_err, dev->net,
3962 				  "tx submit urb err %d\n", ret);
3963 			break;
3964 		}
3965 
3966 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3967 
3968 		if (ret) {
3969 			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
3970 out:
3971 			dev->net->stats.tx_dropped += entry->num_of_packet;
3972 			lan78xx_release_tx_buf(dev, tx_buf);
3973 		}
3974 	} while (ret == 0);
3975 }
3976 
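/* NAPI bottom half: deliver frames left over from the previous cycle,
 * snapshot the Rx done queue, process completed URBs within the budget
 * and resubmit them, then restart Rx URB submission and the Tx bottom
 * half.
 */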
3977 static int lan78xx_bh(struct lan78xx_net *dev, int budget)
3978 {
3979 	struct sk_buff_head done;
3980 	struct sk_buff *rx_buf;
3981 	struct skb_data *entry;
3982 	unsigned long flags;
3983 	int work_done = 0;
3984 
3985 	/* Pass frames received in the last NAPI cycle before
3986 	 * working on newly completed URBs.
3987 	 */
3988 	while (!skb_queue_empty(&dev->rxq_overflow)) {
3989 		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
3990 		++work_done;
3991 	}
3992 
3993 	/* Take a snapshot of the done queue and move items to a
3994 	 * temporary queue. Rx URB completions will continue to add
3995 	 * to the done queue.
3996 	 */
3997 	__skb_queue_head_init(&done);
3998 
3999 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4000 	skb_queue_splice_init(&dev->rxq_done, &done);
4001 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4002 
4003 	/* Extract receive frames from completed URBs and
4004 	 * pass them to the stack. Re-submit each completed URB.
4005 	 */
4006 	while ((work_done < budget) &&
4007 	       (rx_buf = __skb_dequeue(&done))) {
4008 		entry = (struct skb_data *)(rx_buf->cb);
4009 		switch (entry->state) {
4010 		case rx_done:
4011 			rx_process(dev, rx_buf, budget, &work_done);
4012 			break;
4013 		case rx_cleanup:
4014 			break;
4015 		default:
4016 			netdev_dbg(dev->net, "rx buf state %d\n",
4017 				   entry->state);
4018 			break;
4019 		}
4020 
4021 		lan78xx_rx_urb_resubmit(dev, rx_buf);
4022 	}
4023 
4024 	/* If budget was consumed before processing all the URBs put them
4025 	 * back on the front of the done queue. They will be first to be
4026 	 * processed in the next NAPI cycle.
4027 	 */
4028 	spin_lock_irqsave(&dev->rxq_done.lock, flags);
4029 	skb_queue_splice(&done, &dev->rxq_done);
4030 	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
4031 
4032 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
4033 		/* reset update timer delta */
4034 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
4035 			dev->delta = 1;
4036 			mod_timer(&dev->stat_monitor,
4037 				  jiffies + STAT_UPDATE_TIMER);
4038 		}
4039 
4040 		/* Submit all free Rx URBs */
4041 
4042 		if (!test_bit(EVENT_RX_HALT, &dev->flags))
4043 			lan78xx_rx_urb_submit_all(dev);
4044 
4045 		/* Submit new Tx URBs */
4046 
4047 		lan78xx_tx_bh(dev);
4048 	}
4049 
4050 	return work_done;
4051 }
4052 
4053 static int lan78xx_poll(struct napi_struct *napi, int budget)
4054 {
4055 	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
4056 	int result = budget;
4057 	int work_done;
4058 
4059 	/* Don't do any work if the device is suspended */
4060 
4061 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
4062 		napi_complete_done(napi, 0);
4063 		return 0;
4064 	}
4065 
4066 	/* Process completed URBs and submit new URBs */
4067 
4068 	work_done = lan78xx_bh(dev, budget);
4069 
4070 	if (work_done < budget) {
4071 		napi_complete_done(napi, work_done);
4072 
4073 		/* Start a new polling cycle if data was received or
4074 		 * data is waiting to be transmitted.
4075 		 */
4076 		if (!skb_queue_empty(&dev->rxq_done)) {
4077 			napi_schedule(napi);
4078 		} else if (netif_carrier_ok(dev->net)) {
4079 			if (skb_queue_empty(&dev->txq) &&
4080 			    !skb_queue_empty(&dev->txq_pend)) {
4081 				napi_schedule(napi);
4082 			} else {
4083 				netif_tx_lock(dev->net);
4084 				if (netif_queue_stopped(dev->net)) {
4085 					netif_wake_queue(dev->net);
4086 					napi_schedule(napi);
4087 				}
4088 				netif_tx_unlock(dev->net);
4089 			}
4090 		}
4091 		result = work_done;
4092 	}
4093 
4094 	return result;
4095 }
4096 
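/* Deferred event worker: clears halted bulk endpoints, performs link
 * resets and refreshes statistics. The events are raised from atomic
 * context via lan78xx_defer_kevent().
 */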
4097 static void lan78xx_delayedwork(struct work_struct *work)
4098 {
4099 	int status;
4100 	struct lan78xx_net *dev;
4101 
4102 	dev = container_of(work, struct lan78xx_net, wq.work);
4103 
4104 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4105 		return;
4106 
4107 	if (usb_autopm_get_interface(dev->intf) < 0)
4108 		return;
4109 
4110 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4111 		unlink_urbs(dev, &dev->txq);
4112 
4113 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4114 		if (status < 0 &&
4115 		    status != -EPIPE &&
4116 		    status != -ESHUTDOWN) {
4117 			if (netif_msg_tx_err(dev))
4118 				netdev_err(dev->net,
4119 					   "can't clear tx halt, status %d\n",
4120 					   status);
4121 		} else {
4122 			clear_bit(EVENT_TX_HALT, &dev->flags);
4123 			if (status != -ESHUTDOWN)
4124 				netif_wake_queue(dev->net);
4125 		}
4126 	}
4127 
4128 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4129 		unlink_urbs(dev, &dev->rxq);
4130 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4131 		if (status < 0 &&
4132 		    status != -EPIPE &&
4133 		    status != -ESHUTDOWN) {
4134 			if (netif_msg_rx_err(dev))
4135 				netdev_err(dev->net,
4136 					   "can't clear rx halt, status %d\n",
4137 					   status);
4138 		} else {
4139 			clear_bit(EVENT_RX_HALT, &dev->flags);
4140 			napi_schedule(&dev->napi);
4141 		}
4142 	}
4143 
4144 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4145 		int ret;
4146 
4147 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4148 		ret = lan78xx_link_reset(dev);
4149 		if (ret < 0)
4150 			netdev_info(dev->net,
4151 				    "link reset failed (%d)\n", ret);
4152 	}
4153 
4154 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4155 		lan78xx_update_stats(dev);
4156 
4157 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4158 
4159 		mod_timer(&dev->stat_monitor,
4160 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4161 
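		/* Exponential backoff: the refresh interval doubles each
		 * time, capped at 50 periods, and is reset to 1 by
		 * lan78xx_bh() when traffic resumes.
		 */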
4162 		dev->delta = min((dev->delta * 2), 50);
4163 	}
4164 
4165 	usb_autopm_put_interface(dev->intf);
4166 }
4167 
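/* Completion handler for the interrupt endpoint: forwards status
 * updates to lan78xx_status() and resubmits the URB unless the
 * interface is shutting down or the device has gone away.
 */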
4168 static void intr_complete(struct urb *urb)
4169 {
4170 	struct lan78xx_net *dev = urb->context;
4171 	int status = urb->status;
4172 
4173 	switch (status) {
4174 	/* success */
4175 	case 0:
4176 		lan78xx_status(dev, urb);
4177 		break;
4178 
4179 	/* software-driven interface shutdown */
4180 	case -ENOENT:			/* urb killed */
4181 	case -ENODEV:			/* hardware gone */
4182 	case -ESHUTDOWN:		/* hardware gone */
4183 		netif_dbg(dev, ifdown, dev->net,
4184 			  "intr shutdown, code %d\n", status);
4185 		return;
4186 
4187 	/* NOTE:  not throttling like RX/TX, since this endpoint
4188 	 * already polls infrequently
4189 	 */
4190 	default:
4191 		netdev_dbg(dev->net, "intr status %d\n", status);
4192 		break;
4193 	}
4194 
4195 	if (!netif_device_present(dev->net) ||
4196 	    !netif_running(dev->net)) {
4197 		netdev_warn(dev->net, "not submitting new status URB\n");
4198 		return;
4199 	}
4200 
4201 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4202 	status = usb_submit_urb(urb, GFP_ATOMIC);
4203 
4204 	switch (status) {
4205 	case  0:
4206 		break;
4207 	case -ENODEV:
4208 	case -ENOENT:
4209 		netif_dbg(dev, timer, dev->net,
4210 			  "intr resubmit %d (disconnect?)\n", status);
4211 		netif_device_detach(dev->net);
4212 		break;
4213 	default:
4214 		netif_err(dev, timer, dev->net,
4215 			  "intr resubmit --> %d\n", status);
4216 		break;
4217 	}
4218 }
4219 
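/* USB disconnect callback: unregister the netdev, stop deferred work,
 * disconnect (and, for a fixed link, free) the PHY, then release all
 * URB and buffer resources.
 */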
4220 static void lan78xx_disconnect(struct usb_interface *intf)
4221 {
4222 	struct lan78xx_net *dev;
4223 	struct usb_device *udev;
4224 	struct net_device *net;
4225 	struct phy_device *phydev;
4226 
4227 	dev = usb_get_intfdata(intf);
4228 	usb_set_intfdata(intf, NULL);
4229 	if (!dev)
4230 		return;
4231 
4232 	udev = interface_to_usbdev(intf);
4233 	net = dev->net;
4234 
4235 	unregister_netdev(net);
4236 
4237 	timer_shutdown_sync(&dev->stat_monitor);
4238 	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
4239 	cancel_delayed_work_sync(&dev->wq);
4240 
4241 	phydev = net->phydev;
4242 
4243 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
4244 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
4245 
4246 	phy_disconnect(net->phydev);
4247 
4248 	if (phy_is_pseudo_fixed_link(phydev)) {
4249 		fixed_phy_unregister(phydev);
4250 		phy_device_free(phydev);
4251 	}
4252 
4253 	usb_scuttle_anchored_urbs(&dev->deferred);
4254 
4255 	lan78xx_unbind(dev, intf);
4256 
4257 	lan78xx_free_tx_resources(dev);
4258 	lan78xx_free_rx_resources(dev);
4259 
4260 	usb_kill_urb(dev->urb_intr);
4261 	usb_free_urb(dev->urb_intr);
4262 
4263 	free_netdev(net);
4264 	usb_put_dev(udev);
4265 }
4266 
4267 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
4268 {
4269 	struct lan78xx_net *dev = netdev_priv(net);
4270 
4271 	unlink_urbs(dev, &dev->txq);
4272 	napi_schedule(&dev->napi);
4273 }
4274 
4275 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4276 						struct net_device *netdev,
4277 						netdev_features_t features)
4278 {
4279 	struct lan78xx_net *dev = netdev_priv(netdev);
4280 
4281 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4282 		features &= ~NETIF_F_GSO_MASK;
4283 
4284 	features = vlan_features_check(skb, features);
4285 	features = vxlan_features_check(skb, features);
4286 
4287 	return features;
4288 }
4289 
4290 static const struct net_device_ops lan78xx_netdev_ops = {
4291 	.ndo_open		= lan78xx_open,
4292 	.ndo_stop		= lan78xx_stop,
4293 	.ndo_start_xmit		= lan78xx_start_xmit,
4294 	.ndo_tx_timeout		= lan78xx_tx_timeout,
4295 	.ndo_change_mtu		= lan78xx_change_mtu,
4296 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
4297 	.ndo_validate_addr	= eth_validate_addr,
4298 	.ndo_eth_ioctl		= phy_do_ioctl_running,
4299 	.ndo_set_rx_mode	= lan78xx_set_multicast,
4300 	.ndo_set_features	= lan78xx_set_features,
4301 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
4302 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
4303 	.ndo_features_check	= lan78xx_features_check,
4304 };
4305 
4306 static void lan78xx_stat_monitor(struct timer_list *t)
4307 {
4308 	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
4309 
4310 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
4311 }
4312 
4313 static int lan78xx_probe(struct usb_interface *intf,
4314 			 const struct usb_device_id *id)
4315 {
4316 	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
4317 	struct lan78xx_net *dev;
4318 	struct net_device *netdev;
4319 	struct usb_device *udev;
4320 	int ret;
4321 	unsigned int maxp;
4322 	unsigned int period;
4323 	u8 *buf = NULL;
4324 
4325 	udev = interface_to_usbdev(intf);
4326 	udev = usb_get_dev(udev);
4327 
4328 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
4329 	if (!netdev) {
4330 		dev_err(&intf->dev, "Error: OOM\n");
4331 		ret = -ENOMEM;
4332 		goto out1;
4333 	}
4334 
4335 	/* netdev_printk() needs this */
4336 	SET_NETDEV_DEV(netdev, &intf->dev);
4337 
4338 	dev = netdev_priv(netdev);
4339 	dev->udev = udev;
4340 	dev->intf = intf;
4341 	dev->net = netdev;
4342 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
4343 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
4344 
4345 	skb_queue_head_init(&dev->rxq);
4346 	skb_queue_head_init(&dev->txq);
4347 	skb_queue_head_init(&dev->rxq_done);
4348 	skb_queue_head_init(&dev->txq_pend);
4349 	skb_queue_head_init(&dev->rxq_overflow);
4350 	mutex_init(&dev->phy_mutex);
4351 	mutex_init(&dev->dev_mutex);
4352 
4353 	ret = lan78xx_urb_config_init(dev);
4354 	if (ret < 0)
4355 		goto out2;
4356 
4357 	ret = lan78xx_alloc_tx_resources(dev);
4358 	if (ret < 0)
4359 		goto out2;
4360 
4361 	ret = lan78xx_alloc_rx_resources(dev);
4362 	if (ret < 0)
4363 		goto out3;
4364 
4365 	/* MTU range: 68 - 9000 */
4366 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
4367 
4368 	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));
4369 
4370 	netif_napi_add(netdev, &dev->napi, lan78xx_poll);
4371 
4372 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
4373 	init_usb_anchor(&dev->deferred);
4374 
4375 	netdev->netdev_ops = &lan78xx_netdev_ops;
4376 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
4377 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
4378 
4379 	dev->delta = 1;
4380 	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
4381 
4382 	mutex_init(&dev->stats.access_lock);
4383 
4384 	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
4385 		ret = -ENODEV;
4386 		goto out4;
4387 	}
4388 
4389 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
4390 	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
4391 	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
4392 		ret = -ENODEV;
4393 		goto out4;
4394 	}
4395 
4396 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
4397 	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
4398 	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
4399 		ret = -ENODEV;
4400 		goto out4;
4401 	}
4402 
4403 	ep_intr = &intf->cur_altsetting->endpoint[2];
4404 	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
4405 		ret = -ENODEV;
4406 		goto out4;
4407 	}
4408 
4409 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
4410 					usb_endpoint_num(&ep_intr->desc));
4411 
4412 	ret = lan78xx_bind(dev, intf);
4413 	if (ret < 0)
4414 		goto out4;
4415 
4416 	period = ep_intr->desc.bInterval;
4417 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);
4418 
4419 	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
4420 	if (!dev->urb_intr) {
4421 		ret = -ENOMEM;
4422 		goto out5;
4423 	}
4424 
4425 	buf = kmalloc(maxp, GFP_KERNEL);
4426 	if (!buf) {
4427 		ret = -ENOMEM;
4428 		goto free_urbs;
4429 	}
4430 
4431 	usb_fill_int_urb(dev->urb_intr, dev->udev,
4432 			 dev->pipe_intr, buf, maxp,
4433 			 intr_complete, dev, period);
4434 	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
4435 
4436 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);
4437 
4438 	/* Reject broken descriptors. */
4439 	if (dev->maxpacket == 0) {
4440 		ret = -ENODEV;
4441 		goto free_urbs;
4442 	}
4443 
4444 	/* driver requires remote-wakeup capability during autosuspend. */
4445 	intf->needs_remote_wakeup = 1;
4446 
4447 	ret = lan78xx_phy_init(dev);
4448 	if (ret < 0)
4449 		goto free_urbs;
4450 
4451 	ret = register_netdev(netdev);
4452 	if (ret != 0) {
4453 		netif_err(dev, probe, netdev, "couldn't register the device\n");
4454 		goto out8;
4455 	}
4456 
4457 	usb_set_intfdata(intf, dev);
4458 
4459 	ret = device_set_wakeup_enable(&udev->dev, true);
4460 
4461 	/* The default autosuspend delay of 2 seconds has more overhead
4462 	 * than benefit; use 10 seconds by default instead.
4463 	 */
4464 	pm_runtime_set_autosuspend_delay(&udev->dev,
4465 					 DEFAULT_AUTOSUSPEND_DELAY);
4466 
4467 	return 0;
4468 
4469 out8:
4470 	phy_disconnect(netdev->phydev);
4471 free_urbs:
4472 	usb_free_urb(dev->urb_intr);
4473 out5:
4474 	lan78xx_unbind(dev, intf);
4475 out4:
4476 	netif_napi_del(&dev->napi);
4477 	lan78xx_free_rx_resources(dev);
4478 out3:
4479 	lan78xx_free_tx_resources(dev);
4480 out2:
4481 	free_netdev(netdev);
4482 out1:
4483 	usb_put_dev(udev);
4484 
4485 	return ret;
4486 }
4487 
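/* A bitwise CRC-16 variant over a wakeup-frame pattern (polynomial
 * 0x8005, initial value 0xFFFF, each byte consumed LSB first), as
 * programmed into the WUF_CFGX CRC16 field below.
 */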
4488 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4489 {
4490 	const u16 crc16poly = 0x8005;
4491 	int i;
4492 	u16 bit, crc, msb;
4493 	u8 data;
4494 
4495 	crc = 0xFFFF;
4496 	for (i = 0; i < len; i++) {
4497 		data = *buf++;
4498 		for (bit = 0; bit < 8; bit++) {
4499 			msb = crc >> 15;
4500 			crc <<= 1;
4501 
4502 			if (msb ^ (u16)(data & 1)) {
4503 				crc ^= crc16poly;
4504 				crc |= (u16)0x0001U;
4505 			}
4506 			data >>= 1;
4507 		}
4508 	}
4509 
4510 	return crc;
4511 }
4512 
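/* Enter USB selective (auto) suspend: quiesce both data paths, clear
 * stale wakeup status, arm good-frame and PHY wakeup, select suspend
 * mode 3, then restart the Rx path so wake packets can be received.
 */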
4513 static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
4514 {
4515 	u32 buf;
4516 	int ret;
4517 
4518 	ret = lan78xx_stop_tx_path(dev);
4519 	if (ret < 0)
4520 		return ret;
4521 
4522 	ret = lan78xx_stop_rx_path(dev);
4523 	if (ret < 0)
4524 		return ret;
4525 
4526 	/* auto suspend (selective suspend) */
4527 
4528 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4529 	if (ret < 0)
4530 		return ret;
4531 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4532 	if (ret < 0)
4533 		return ret;
4534 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4535 	if (ret < 0)
4536 		return ret;
4537 
4538 	/* set goodframe wakeup */
4539 
4540 	ret = lan78xx_read_reg(dev, WUCSR, &buf);
4541 	if (ret < 0)
4542 		return ret;
4543 
4544 	buf |= WUCSR_RFE_WAKE_EN_;
4545 	buf |= WUCSR_STORE_WAKE_;
4546 
4547 	ret = lan78xx_write_reg(dev, WUCSR, buf);
4548 	if (ret < 0)
4549 		return ret;
4550 
4551 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4552 	if (ret < 0)
4553 		return ret;
4554 
4555 	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4556 	buf |= PMT_CTL_RES_CLR_WKP_STS_;
4557 	buf |= PMT_CTL_PHY_WAKE_EN_;
4558 	buf |= PMT_CTL_WOL_EN_;
4559 	buf &= ~PMT_CTL_SUS_MODE_MASK_;
4560 	buf |= PMT_CTL_SUS_MODE_3_;
4561 
4562 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4563 	if (ret < 0)
4564 		return ret;
4565 
4566 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4567 	if (ret < 0)
4568 		return ret;
4569 
4570 	buf |= PMT_CTL_WUPS_MASK_;
4571 
4572 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4573 	if (ret < 0)
4574 		return ret;
4575 
4576 	ret = lan78xx_start_rx_path(dev);
4577 
4578 	return ret;
4579 }
4580 
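/* Program wake-on-LAN for a system suspend: translate the requested
 * WoL flags into WUCSR enables and wakeup-frame filters (IPv4/IPv6
 * multicast, ARP), select a matching PMT suspend mode, clear stale
 * wakeup status and restart the Rx path.
 */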
4581 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4582 {
4583 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4584 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4585 	const u8 arp_type[2] = { 0x08, 0x06 };
4586 	u32 temp_pmt_ctl;
4587 	int mask_index;
4588 	u32 temp_wucsr;
4589 	u32 buf;
4590 	u16 crc;
4591 	int ret;
4592 
4593 	ret = lan78xx_stop_tx_path(dev);
4594 	if (ret < 0)
4595 		return ret;
4596 	ret = lan78xx_stop_rx_path(dev);
4597 	if (ret < 0)
4598 		return ret;
4599 
4600 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4601 	if (ret < 0)
4602 		return ret;
4603 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4604 	if (ret < 0)
4605 		return ret;
4606 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4607 	if (ret < 0)
4608 		return ret;
4609 
4610 	temp_wucsr = 0;
4611 
4612 	temp_pmt_ctl = 0;
4613 
4614 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4615 	if (ret < 0)
4616 		return ret;
4617 
4618 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4619 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4620 
4621 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4622 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4623 		if (ret < 0)
4624 			return ret;
4625 	}
4626 
4627 	mask_index = 0;
4628 	if (wol & WAKE_PHY) {
4629 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4630 
4631 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4632 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4633 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4634 	}
4635 	if (wol & WAKE_MAGIC) {
4636 		temp_wucsr |= WUCSR_MPEN_;
4637 
4638 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4639 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4640 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4641 	}
4642 	if (wol & WAKE_BCAST) {
4643 		temp_wucsr |= WUCSR_BCST_EN_;
4644 
4645 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4646 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4647 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4648 	}
4649 	if (wol & WAKE_MCAST) {
4650 		temp_wucsr |= WUCSR_WAKE_EN_;
4651 
4652 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4653 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4654 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4655 					WUF_CFGX_EN_ |
4656 					WUF_CFGX_TYPE_MCAST_ |
4657 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4658 					(crc & WUF_CFGX_CRC16_MASK_));
4659 		if (ret < 0)
4660 			return ret;
4661 
4662 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
4663 		if (ret < 0)
4664 			return ret;
4665 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4666 		if (ret < 0)
4667 			return ret;
4668 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4669 		if (ret < 0)
4670 			return ret;
4671 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4672 		if (ret < 0)
4673 			return ret;
4674 
4675 		mask_index++;
4676 
4677 		/* for IPv6 Multicast */
4678 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
4679 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4680 					WUF_CFGX_EN_ |
4681 					WUF_CFGX_TYPE_MCAST_ |
4682 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4683 					(crc & WUF_CFGX_CRC16_MASK_));
4684 		if (ret < 0)
4685 			return ret;
4686 
4687 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
4688 		if (ret < 0)
4689 			return ret;
4690 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4691 		if (ret < 0)
4692 			return ret;
4693 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4694 		if (ret < 0)
4695 			return ret;
4696 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4697 		if (ret < 0)
4698 			return ret;
4699 
4700 		mask_index++;
4701 
4702 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4703 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4704 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4705 	}
4706 	if (wol & WAKE_UCAST) {
4707 		temp_wucsr |= WUCSR_PFDA_EN_;
4708 
4709 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4710 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4711 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4712 	}
4713 	if (wol & WAKE_ARP) {
4714 		temp_wucsr |= WUCSR_WAKE_EN_;
4715 
4716 		/* set WUF_CFG & WUF_MASK
4717 		 * for packettype (offset 12,13) = ARP (0x0806)
4718 		 */
4719 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
4720 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4721 					WUF_CFGX_EN_ |
4722 					WUF_CFGX_TYPE_ALL_ |
4723 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4724 					(crc & WUF_CFGX_CRC16_MASK_));
4725 		if (ret < 0)
4726 			return ret;
4727 
4728 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
4729 		if (ret < 0)
4730 			return ret;
4731 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
4732 		if (ret < 0)
4733 			return ret;
4734 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
4735 		if (ret < 0)
4736 			return ret;
4737 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
4738 		if (ret < 0)
4739 			return ret;
4740 
4741 		mask_index++;
4742 
4743 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4744 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4745 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4746 	}
4747 
4748 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
4749 	if (ret < 0)
4750 		return ret;
4751 
4752 	/* when multiple WOL bits are set */
4753 	if (hweight_long((unsigned long)wol) > 1) {
4754 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4755 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4756 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4757 	}
4758 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
4759 	if (ret < 0)
4760 		return ret;
4761 
4762 	/* clear WUPS */
4763 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4764 	if (ret < 0)
4765 		return ret;
4766 
4767 	buf |= PMT_CTL_WUPS_MASK_;
4768 
4769 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4770 	if (ret < 0)
4771 		return ret;
4772 
4773 	ret = lan78xx_start_rx_path(dev);
4774 
4775 	return ret;
4776 }
4777 
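/* USB suspend callback. With the interface open: refuse an autosuspend
 * while Tx data is pending, quiesce both data paths, kill outstanding
 * URBs, then arm either selective-suspend wakeup or the configured WoL
 * filters. With the interface closed, suppress all wakeup sources.
 */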
4778 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
4779 {
4780 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4781 	bool dev_open;
4782 	int ret;
4783 
4784 	mutex_lock(&dev->dev_mutex);
4785 
4786 	netif_dbg(dev, ifdown, dev->net,
4787 		  "suspending: pm event %#x", message.event);
4788 
4789 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4790 
4791 	if (dev_open) {
4792 		spin_lock_irq(&dev->txq.lock);
4793 		/* don't autosuspend while transmitting */
4794 		if ((skb_queue_len(&dev->txq) ||
4795 		     skb_queue_len(&dev->txq_pend)) &&
4796 		    PMSG_IS_AUTO(message)) {
4797 			spin_unlock_irq(&dev->txq.lock);
4798 			ret = -EBUSY;
4799 			goto out;
4800 		} else {
4801 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4802 			spin_unlock_irq(&dev->txq.lock);
4803 		}
4804 
4805 		/* stop RX */
4806 		ret = lan78xx_stop_rx_path(dev);
4807 		if (ret < 0)
4808 			goto out;
4809 
4810 		ret = lan78xx_flush_rx_fifo(dev);
4811 		if (ret < 0)
4812 			goto out;
4813 
4814 		/* stop Tx */
4815 		ret = lan78xx_stop_tx_path(dev);
4816 		if (ret < 0)
4817 			goto out;
4818 
4819 		/* empty out the Rx and Tx queues */
4820 		netif_device_detach(dev->net);
4821 		lan78xx_terminate_urbs(dev);
4822 		usb_kill_urb(dev->urb_intr);
4823 
4824 		/* reattach */
4825 		netif_device_attach(dev->net);
4826 
4827 		del_timer(&dev->stat_monitor);
4828 
4829 		if (PMSG_IS_AUTO(message)) {
4830 			ret = lan78xx_set_auto_suspend(dev);
4831 			if (ret < 0)
4832 				goto out;
4833 		} else {
4834 			struct lan78xx_priv *pdata;
4835 
4836 			pdata = (struct lan78xx_priv *)(dev->data[0]);
4837 			netif_carrier_off(dev->net);
4838 			ret = lan78xx_set_suspend(dev, pdata->wol);
4839 			if (ret < 0)
4840 				goto out;
4841 		}
4842 	} else {
4843 		/* Interface is down; don't allow WOL and PHY
4844 		 * events to wake up the host
4845 		 */
4846 		u32 buf;
4847 
4848 		set_bit(EVENT_DEV_ASLEEP, &dev->flags);
4849 
4850 		ret = lan78xx_write_reg(dev, WUCSR, 0);
4851 		if (ret < 0)
4852 			goto out;
4853 		ret = lan78xx_write_reg(dev, WUCSR2, 0);
4854 		if (ret < 0)
4855 			goto out;
4856 
4857 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4858 		if (ret < 0)
4859 			goto out;
4860 
4861 		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
4862 		buf |= PMT_CTL_RES_CLR_WKP_STS_;
4863 		buf &= ~PMT_CTL_SUS_MODE_MASK_;
4864 		buf |= PMT_CTL_SUS_MODE_3_;
4865 
4866 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4867 		if (ret < 0)
4868 			goto out;
4869 
4870 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
4871 		if (ret < 0)
4872 			goto out;
4873 
4874 		buf |= PMT_CTL_WUPS_MASK_;
4875 
4876 		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
4877 		if (ret < 0)
4878 			goto out;
4879 	}
4880 
4881 	ret = 0;
4882 out:
4883 	mutex_unlock(&dev->dev_mutex);
4884 
4885 	return ret;
4886 }
4887 
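/* Resubmit Tx URBs that were anchored to dev->deferred while the
 * device was asleep. Returns true if the bulk-out pipe stalled
 * (-EPIPE), in which case the caller schedules EVENT_TX_HALT recovery.
 */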
4888 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
4889 {
4890 	bool pipe_halted = false;
4891 	struct urb *urb;
4892 
4893 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
4894 		struct sk_buff *skb = urb->context;
4895 		int ret;
4896 
4897 		if (!netif_device_present(dev->net) ||
4898 		    !netif_carrier_ok(dev->net) ||
4899 		    pipe_halted) {
4900 			lan78xx_release_tx_buf(dev, skb);
4901 			continue;
4902 		}
4903 
4904 		ret = usb_submit_urb(urb, GFP_ATOMIC);
4905 
4906 		if (ret == 0) {
4907 			netif_trans_update(dev->net);
4908 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
4909 		} else {
4910 			if (ret == -EPIPE) {
4911 				netif_stop_queue(dev->net);
4912 				pipe_halted = true;
4913 			} else if (ret == -ENODEV) {
4914 				netif_device_detach(dev->net);
4915 			}
4916 
4917 			lan78xx_release_tx_buf(dev, skb);
4918 		}
4919 	}
4920 
4921 	return pipe_halted;
4922 }
4923 
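/* USB resume callback: flush the Tx FIFO, resubmit the interrupt URB
 * and any deferred Tx URBs, restart the Tx path and statistics timer,
 * then clear the wakeup-source registers and re-arm wakeup events.
 */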
4924 static int lan78xx_resume(struct usb_interface *intf)
4925 {
4926 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4927 	bool dev_open;
4928 	int ret;
4929 
4930 	mutex_lock(&dev->dev_mutex);
4931 
4932 	netif_dbg(dev, ifup, dev->net, "resuming device");
4933 
4934 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
4935 
4936 	if (dev_open) {
4937 		bool pipe_halted = false;
4938 
4939 		ret = lan78xx_flush_tx_fifo(dev);
4940 		if (ret < 0)
4941 			goto out;
4942 
4943 		if (dev->urb_intr) {
4944 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
4945 
4946 			if (ret < 0) {
4947 				if (ret == -ENODEV)
4948 					netif_device_detach(dev->net);
4949 				netdev_warn(dev->net, "Failed to submit intr URB\n");
4950 			}
4951 		}
4952 
4953 		spin_lock_irq(&dev->txq.lock);
4954 
4955 		if (netif_device_present(dev->net)) {
4956 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
4957 
4958 			if (pipe_halted)
4959 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
4960 		}
4961 
4962 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4963 
4964 		spin_unlock_irq(&dev->txq.lock);
4965 
4966 		if (!pipe_halted &&
4967 		    netif_device_present(dev->net) &&
4968 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
4969 			netif_start_queue(dev->net);
4970 
4971 		ret = lan78xx_start_tx_path(dev);
4972 		if (ret < 0)
4973 			goto out;
4974 
4975 		napi_schedule(&dev->napi);
4976 
4977 		if (!timer_pending(&dev->stat_monitor)) {
4978 			dev->delta = 1;
4979 			mod_timer(&dev->stat_monitor,
4980 				  jiffies + STAT_UPDATE_TIMER);
4981 		}
4982 
4983 	} else {
4984 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
4985 	}
4986 
4987 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4988 	if (ret < 0)
4989 		goto out;
4990 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4991 	if (ret < 0)
4992 		goto out;
4993 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4994 	if (ret < 0)
4995 		goto out;
4996 
4997 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4998 					     WUCSR2_ARP_RCD_ |
4999 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5000 					     WUCSR2_IPV4_TCPSYN_RCD_);
5001 	if (ret < 0)
5002 		goto out;
5003 
5004 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5005 					    WUCSR_EEE_RX_WAKE_ |
5006 					    WUCSR_PFDA_FR_ |
5007 					    WUCSR_RFE_WAKE_FR_ |
5008 					    WUCSR_WUFR_ |
5009 					    WUCSR_MPR_ |
5010 					    WUCSR_BCST_FR_);
5011 	if (ret < 0)
5012 		goto out;
5013 
5014 	ret = 0;
5015 out:
5016 	mutex_unlock(&dev->dev_mutex);
5017 
5018 	return ret;
5019 }
5020 
5021 static int lan78xx_reset_resume(struct usb_interface *intf)
5022 {
5023 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5024 	int ret;
5025 
5026 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5027 
5028 	ret = lan78xx_reset(dev);
5029 	if (ret < 0)
5030 		return ret;
5031 
5032 	phy_start(dev->net->phydev);
5033 
5034 	ret = lan78xx_resume(intf);
5035 
5036 	return ret;
5037 }
5038 
5039 static const struct usb_device_id products[] = {
5040 	{
5041 	/* LAN7800 USB Gigabit Ethernet Device */
5042 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
5043 	},
5044 	{
5045 	/* LAN7850 USB Gigabit Ethernet Device */
5046 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
5047 	},
5048 	{
5049 	/* LAN7801 USB Gigabit Ethernet Device */
5050 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
5051 	},
5052 	{
5053 	/* AT29M2-AF USB Gigabit Ethernet Device */
5054 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
5055 	},
5056 	{},
5057 };
5058 MODULE_DEVICE_TABLE(usb, products);
5059 
5060 static struct usb_driver lan78xx_driver = {
5061 	.name			= DRIVER_NAME,
5062 	.id_table		= products,
5063 	.probe			= lan78xx_probe,
5064 	.disconnect		= lan78xx_disconnect,
5065 	.suspend		= lan78xx_suspend,
5066 	.resume			= lan78xx_resume,
5067 	.reset_resume		= lan78xx_reset_resume,
5068 	.supports_autosuspend	= 1,
5069 	.disable_hub_initiated_lpm = 1,
5070 };
5071 
5072 module_usb_driver(lan78xx_driver);
5073 
5074 MODULE_AUTHOR(DRIVER_AUTHOR);
5075 MODULE_DESCRIPTION(DRIVER_DESC);
5076 MODULE_LICENSE("GPL");
5077