• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
18 #include <linux/ip.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/vxlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/irq.h>
27 #include <linux/irqchip/chained_irq.h>
28 #include <linux/microchipphy.h>
29 #include <linux/phy_fixed.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include "lan78xx.h"
33 
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

/* USB bulk packet payload sizes for SuperSpeed/HighSpeed/FullSpeed links */
#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

/* USB vendor/product IDs of the supported devices */
#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID	(0x0012)

#define	MII_READ			1
#define	MII_WRITE			0

/* EEPROM layout markers: first byte must be EEPROM_INDICATOR for the
 * contents to be trusted (see lan78xx_read_eeprom()).
 */
#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
/* OTP signature bytes (see lan78xx_read_otp(): INDICATOR_2 selects the
 * second 0x100-byte image).
 */
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)
124 
/* ethtool statistics names.  The order must match the member order of
 * struct lan78xx_statstage below, since the counters are copied out
 * positionally.  These strings are visible to userspace tooling, so do
 * not rename them (note "Rx Fragment Errors" keeps its historical
 * capitalization on purpose).
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
174 
/* One snapshot of the device's 32-bit hardware statistics counters, in
 * exactly the layout returned by the USB_VENDOR_REQUEST_GET_STATS control
 * read (lan78xx_read_stats() byte-swaps it u32 by u32).  Member order must
 * match lan78xx_gstrings[] and struct lan78xx_statstage64: the stats code
 * walks all three positionally.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
224 
/* 64-bit running totals of the hardware counters.  Must mirror struct
 * lan78xx_statstage member-for-member: lan78xx_update_stats() iterates
 * both structs as flat arrays and pairs entries by index.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
274 
/* MAC register addresses dumped in this order -- presumably for the
 * ethtool get_regs interface; the consumer is outside this chunk, so
 * confirm against the driver's ethtool_ops before relying on this.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

/* room for 32 PHY registers, one u32 slot each */
#define PHY_REG_SIZE (32 * sizeof(u32))
298 
struct lan78xx_net;

/* Driver-private filtering/VLAN state; reached from struct lan78xx_net
 * via the dev->data[0] cast used throughout this file.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;	/* cached copy of the RFE_CTL register value */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};
313 
/* Lifecycle state of an skb queued on the driver's URB queues */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

/* Pairs a USB control request with the device it targets */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
336 
/* Event bit numbers (used with the dev->flags bitmap; the handlers that
 * service them are outside this chunk).
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

/* Statistics bookkeeping: last hardware snapshot, per-counter rollover
 * counts/maxima, and the accumulated 64-bit totals -- combined by
 * lan78xx_update_stats().
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

/* State for the PHY interrupt IRQ domain */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;
	struct mutex		irq_lock;		/* for irq bus access */
};
364 
/* Per-device driver state (netdev_priv() of the associated net_device --
 * see the netdev_priv() use in lan78xx_set_multicast()).
 */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;

	/* data[0] holds the struct lan78xx_priv pointer -- see the casts in
	 * lan78xx_dataport_write() and lan78xx_set_multicast()
	 */
	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;		/* e.g. ID_REV_CHIP_ID_7800_ */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;		/* see lan78xx_update_stats() */

	struct irq_domain_data	domain_data;
};
420 
/* define external phy id */
#define	PHY_LAN8835			(0x0007C130)
#define	PHY_KSZ9031RNX			(0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;	/* -1 = keep the default message level */
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
429 
/* Read the 32-bit device register at 'index' into *data via a vendor
 * control transfer.  Returns the number of bytes transferred (4) on
 * success or a negative errno; *data is untouched on failure.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	/* usb_control_msg() needs a DMA-able buffer; stack memory is not */
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (ret < 4) {
		/* A short transfer (0 <= ret < 4) would leave *buf only
		 * partially filled; report it as an error instead of
		 * handing back garbage.
		 */
		if (ret >= 0)
			ret = -EIO;
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	} else {
		le32_to_cpus(buf);	/* registers arrive little-endian */
		*data = *buf;
	}

	kfree(buf);

	return ret;
}
455 
/* Write 'data' to the 32-bit device register at 'index' via a vendor
 * control transfer.  Returns the number of bytes transferred on success
 * or a negative errno.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	/* usb_control_msg() needs a DMA-able buffer; stack memory is not */
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);	/* device expects little-endian */

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
481 
lan78xx_read_stats(struct lan78xx_net * dev,struct lan78xx_statstage * data)482 static int lan78xx_read_stats(struct lan78xx_net *dev,
483 			      struct lan78xx_statstage *data)
484 {
485 	int ret = 0;
486 	int i;
487 	struct lan78xx_statstage *stats;
488 	u32 *src;
489 	u32 *dst;
490 
491 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
492 	if (!stats)
493 		return -ENOMEM;
494 
495 	ret = usb_control_msg(dev->udev,
496 			      usb_rcvctrlpipe(dev->udev, 0),
497 			      USB_VENDOR_REQUEST_GET_STATS,
498 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
499 			      0,
500 			      0,
501 			      (void *)stats,
502 			      sizeof(*stats),
503 			      USB_CTRL_SET_TIMEOUT);
504 	if (likely(ret >= 0)) {
505 		src = (u32 *)stats;
506 		dst = (u32 *)data;
507 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
508 			le32_to_cpus(&src[i]);
509 			dst[i] = src[i];
510 		}
511 	} else {
512 		netdev_warn(dev->net,
513 			    "Failed to read stat ret = %d", ret);
514 	}
515 
516 	kfree(stats);
517 
518 	return ret;
519 }
520 
/* Detect a 32-bit hardware counter wrap: if the freshly read value is
 * smaller than the previously saved snapshot, the counter rolled over,
 * so bump its rollover count.  'struct1' is a pointer, 'dev_stats' an
 * lvalue -- both are substituted textually.
 */
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}
525 
/* Compare the fresh hardware snapshot in 'stats' against the previously
 * saved one, bumping the rollover count of every counter that wrapped,
 * then save the new snapshot for the next round.  One line per member of
 * struct lan78xx_statstage -- keep in sync with that struct.
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this snapshot for the next rollover comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
579 
/* Fetch the hardware counters and fold them into the 64-bit running
 * totals under stats.access_lock.
 *
 * The statstage structs are deliberately walked as flat u32/u64 arrays;
 * this relies on lan78xx_statstage and lan78xx_statstage64 having the
 * same members in the same order.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* total = current reading + rollovers * (counter max + 1) */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
607 
608 /* Loop until the read is completed with timeout called with phy_mutex held */
lan78xx_phy_wait_not_busy(struct lan78xx_net * dev)609 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
610 {
611 	unsigned long start_time = jiffies;
612 	u32 val;
613 	int ret;
614 
615 	do {
616 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
617 		if (unlikely(ret < 0))
618 			return -EIO;
619 
620 		if (!(val & MII_ACC_MII_BUSY_))
621 			return 0;
622 	} while (!time_after(jiffies, start_time + HZ));
623 
624 	return -EIO;
625 }
626 
mii_access(int id,int index,int read)627 static inline u32 mii_access(int id, int index, int read)
628 {
629 	u32 ret;
630 
631 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
632 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
633 	if (read)
634 		ret |= MII_ACC_MII_READ_;
635 	else
636 		ret |= MII_ACC_MII_WRITE_;
637 	ret |= MII_ACC_MII_BUSY_;
638 
639 	return ret;
640 }
641 
lan78xx_wait_eeprom(struct lan78xx_net * dev)642 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
643 {
644 	unsigned long start_time = jiffies;
645 	u32 val;
646 	int ret;
647 
648 	do {
649 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
650 		if (unlikely(ret < 0))
651 			return -EIO;
652 
653 		if (!(val & E2P_CMD_EPC_BUSY_) ||
654 		    (val & E2P_CMD_EPC_TIMEOUT_))
655 			break;
656 		usleep_range(40, 100);
657 	} while (!time_after(jiffies, start_time + HZ));
658 
659 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
660 		netdev_warn(dev->net, "EEPROM read operation timeout");
661 		return -EIO;
662 	}
663 
664 	return 0;
665 }
666 
lan78xx_eeprom_confirm_not_busy(struct lan78xx_net * dev)667 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
668 {
669 	unsigned long start_time = jiffies;
670 	u32 val;
671 	int ret;
672 
673 	do {
674 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
675 		if (unlikely(ret < 0))
676 			return -EIO;
677 
678 		if (!(val & E2P_CMD_EPC_BUSY_))
679 			return 0;
680 
681 		usleep_range(40, 100);
682 	} while (!time_after(jiffies, start_time + HZ));
683 
684 	netdev_warn(dev->net, "EEPROM is busy");
685 	return -EIO;
686 }
687 
/* Read 'length' bytes from the EEPROM starting at 'offset', one byte per
 * E2P command.  Returns 0 on success or a negative errno.
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	/* Fix: go through 'exit' so the saved HW_CFG (LED enables) is
	 * restored even when the EEPROM stays busy -- the early return
	 * here used to leave the LEDs disabled on LAN7800, unlike the
	 * matching path in lan78xx_write_raw_eeprom().
	 */
	if (retval)
		goto exit;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;	/* one byte per command */
		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved above */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
740 
/* Read from the EEPROM only after validating its indicator byte.
 * Returns 0 on success, -EINVAL when the EEPROM is absent/unprogrammed,
 * or the raw-read error code.
 */
static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if (ret != 0 || sig != EEPROM_INDICATOR)
		return -EINVAL;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}
755 
/* Write 'length' bytes to the EEPROM starting at 'offset': enable
 * write/erase once, then issue one data-load + WRITE command per byte.
 * Returns 0 on success or a negative errno; the saved HW_CFG (LED
 * enables) is always restored on the way out.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* wait for this byte's write to complete before the next */
		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the LED configuration saved above */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
822 
/* Read 'length' bytes from the OTP array at raw 'offset' (no indicator
 * handling -- see lan78xx_read_otp()).  Brings the OTP block out of
 * power-down first if needed.  Returns 0 on success, -EIO on a poll
 * timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* address is split: high bits in ADDR1, low bits in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		/* kick off a READ and poll until the OTP engine is idle */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
876 
/* Program 'length' bytes into the OTP array at raw 'offset' using BYTE
 * program mode, one program-verify command per byte.  Brings the OTP
 * block out of power-down first if needed.  Returns 0 on success, -EIO
 * on a poll timeout.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* address is split: high bits in ADDR1, low bits in ADDR2 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		/* wait for this byte's program-verify cycle to finish */
		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
929 
/* Read from the OTP array after checking its signature byte: indicator 2
 * selects the second 0x100-byte image, indicator 1 the first; anything
 * else means the OTP is not programmed (-EINVAL).
 */
static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
	if (ret)
		return ret;

	switch (sig) {
	case OTP_INDICATOR_2:
		offset += 0x100;
		break;
	case OTP_INDICATOR_1:
		break;
	default:
		return -EINVAL;
	}

	return lan78xx_read_raw_otp(dev, offset, length, data);
}
949 
lan78xx_dataport_wait_not_busy(struct lan78xx_net * dev)950 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
951 {
952 	int i, ret;
953 
954 	for (i = 0; i < 100; i++) {
955 		u32 dp_sel;
956 
957 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
958 		if (unlikely(ret < 0))
959 			return -EIO;
960 
961 		if (dp_sel & DP_SEL_DPRDY_)
962 			return 0;
963 
964 		usleep_range(40, 100);
965 	}
966 
967 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
968 
969 	return -EIO;
970 }
971 
/* Write 'length' words from 'buf' into the internal RAM bank selected by
 * 'ram_select', starting at 'addr', serialized by dataport_mutex.
 * Returns the last operation's status; 0 is returned (without doing any
 * work) when the interface cannot be resumed.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM bank, preserving the other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
	dp_sel = (dp_sel & ~DP_SEL_RSEL_MASK_) | ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		/* one address/data/command cycle per word */
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1012 
/* Pack a MAC address into perfect-filter slot 'index' of the cached
 * pfilter_table (written to hardware later by the deferred worker).
 * Slot 0 holds the device's own address and is never touched here.
 */
static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 lo, hi;

	if (!pdata || index <= 0 || index >= NUM_OF_MAF)
		return;

	/* bytes 0-3 little-endian into the LO word ... */
	lo = (u32)addr[0] | ((u32)addr[1] << 8) |
	     ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	/* ... bytes 4-5 plus the valid/type flags into the HI word */
	hi = (u32)addr[4] | ((u32)addr[5] << 8);
	hi |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;

	pdata->pfilter_table[index][1] = lo;
	pdata->pfilter_table[index][0] = hi;
}
1030 
1031 /* returns hash bit number for given MAC address */
lan78xx_hash(char addr[ETH_ALEN])1032 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1033 {
1034 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1035 }
1036 
/* Workqueue handler: push the multicast hash table, the perfect-filter
 * table and the cached RFE_CTL value prepared by lan78xx_set_multicast()
 * into the hardware.  Deferred because lan78xx_write_reg()/
 * lan78xx_dataport_write() sleep (GFP_KERNEL allocation + synchronous
 * USB control transfers).
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		/* invalidate the slot (MAF_HI = 0) before rewriting it, so
		 * the filter is never active with a half-updated address
		 */
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1061 
lan78xx_set_multicast(struct net_device * netdev)1062 static void lan78xx_set_multicast(struct net_device *netdev)
1063 {
1064 	struct lan78xx_net *dev = netdev_priv(netdev);
1065 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1066 	unsigned long flags;
1067 	int i;
1068 
1069 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1070 
1071 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1072 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1073 
1074 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1075 			pdata->mchash_table[i] = 0;
1076 	/* pfilter_table[0] has own HW address */
1077 	for (i = 1; i < NUM_OF_MAF; i++) {
1078 			pdata->pfilter_table[i][0] =
1079 			pdata->pfilter_table[i][1] = 0;
1080 	}
1081 
1082 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1083 
1084 	if (dev->net->flags & IFF_PROMISC) {
1085 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1086 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1087 	} else {
1088 		if (dev->net->flags & IFF_ALLMULTI) {
1089 			netif_dbg(dev, drv, dev->net,
1090 				  "receive all multicast enabled");
1091 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1092 		}
1093 	}
1094 
1095 	if (netdev_mc_count(dev->net)) {
1096 		struct netdev_hw_addr *ha;
1097 		int i;
1098 
1099 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1100 
1101 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1102 
1103 		i = 1;
1104 		netdev_for_each_mc_addr(ha, netdev) {
1105 			/* set first 32 into Perfect Filter */
1106 			if (i < 33) {
1107 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1108 			} else {
1109 				u32 bitnum = lan78xx_hash(ha->addr);
1110 
1111 				pdata->mchash_table[bitnum / 32] |=
1112 							(1 << (bitnum % 32));
1113 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1114 			}
1115 			i++;
1116 		}
1117 	}
1118 
1119 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1120 
1121 	/* defer register writes to a sleepable context */
1122 	schedule_work(&pdata->set_multicast);
1123 }
1124 
/* Program MAC flow control (FLOW) and FIFO flow thresholds (FCT_FLOW)
 * after a link change.  Pause capability comes from autoneg resolution of
 * @lcladv/@rmtadv when dev->fc_autoneg is set, otherwise from the user's
 * requested control.  @duplex is accepted for symmetry with the caller
 * but not used here.  Always returns 0; register-write errors are not
 * propagated.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* 0xFFFF is presumably the maximum pause-time field of FLOW —
	 * confirm against the FLOW register layout in the datasheet
	 */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* speed-dependent FIFO threshold constants; meaning of 0x817/0x211
	 * is not derivable from here — see the FCT_FLOW datasheet section
	 */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
1159 
/* Handle a PHY interrupt event: acknowledge it, re-read the link state
 * and reconcile MAC/USB settings with it.  On link-down the MAC is reset
 * and the stats timer stopped; on link-up the USB LPM (U1/U2) states are
 * tuned for the negotiated speed, flow control is reprogrammed and the
 * stats timer restarted.  Returns 0/positive on success, negative errno
 * on failure.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	/* hold phydev->lock while sampling the link state */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		/* USB3 link power management: at gigabit only U1 is enabled,
		 * otherwise both U1 and U2 are allowed
		 */
		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* restart the periodic stats poll with an immediate delta */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1240 
1241 /* some work can't be done in tasklets, so we use keventd
1242  *
1243  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1244  * but tasklet_schedule() doesn't.	hope the failure is rare.
1245  */
lan78xx_defer_kevent(struct lan78xx_net * dev,int work)1246 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1247 {
1248 	set_bit(work, &dev->flags);
1249 	if (!schedule_delayed_work(&dev->wq, 0))
1250 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1251 }
1252 
lan78xx_status(struct lan78xx_net * dev,struct urb * urb)1253 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1254 {
1255 	u32 intdata;
1256 
1257 	if (urb->actual_length != 4) {
1258 		netdev_warn(dev->net,
1259 			    "unexpected urb length %d", urb->actual_length);
1260 		return;
1261 	}
1262 
1263 	intdata = get_unaligned_le32(urb->transfer_buffer);
1264 
1265 	if (intdata & INT_ENP_PHY_INT) {
1266 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1267 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1268 
1269 		if (dev->domain_data.phyirq > 0) {
1270 			local_irq_disable();
1271 			generic_handle_irq(dev->domain_data.phyirq);
1272 			local_irq_enable();
1273 		}
1274 	} else
1275 		netdev_warn(dev->net,
1276 			    "unexpected interrupt: 0x%08x\n", intdata);
1277 }
1278 
/* ethtool get_eeprom_len: report MAX_EEPROM_SIZE as the EEPROM size
 * exposed through the ethtool EEPROM interface.
 */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1283 
/* ethtool get_eeprom: wake the device, stamp the driver magic and read
 * the requested raw EEPROM span into @data.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int err = usb_autopm_get_interface(dev->intf);

	if (err)
		return err;

	ee->magic = LAN78XX_EEPROM_MAGIC;
	err = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);
	return err;
}
1302 
/* ethtool set_eeprom: write raw EEPROM for LAN78XX_EEPROM_MAGIC, or the
 * OTP area for LAN78XX_OTP_MAGIC (whole-area writes only, with a valid
 * OTP indicator byte).
 *
 * Fix: previously a request whose magic matched neither case fell
 * through with ret still holding the 0 from usb_autopm_get_interface(),
 * so an unrecognized/invalid request was reported as success.  Such
 * requests now return -EINVAL.
 */
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
	else
		ret = -EINVAL;

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1328 
/* ethtool get_strings: only the statistics string set is supported */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
1335 
lan78xx_get_sset_count(struct net_device * netdev,int sset)1336 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1337 {
1338 	if (sset == ETH_SS_STATS)
1339 		return ARRAY_SIZE(lan78xx_gstrings);
1340 	else
1341 		return -EOPNOTSUPP;
1342 }
1343 
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy the
 * cached snapshot out under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	/* access_lock guards curr_stat against concurrent updates */
	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1355 
lan78xx_get_wol(struct net_device * netdev,struct ethtool_wolinfo * wol)1356 static void lan78xx_get_wol(struct net_device *netdev,
1357 			    struct ethtool_wolinfo *wol)
1358 {
1359 	struct lan78xx_net *dev = netdev_priv(netdev);
1360 	int ret;
1361 	u32 buf;
1362 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1363 
1364 	if (usb_autopm_get_interface(dev->intf) < 0)
1365 			return;
1366 
1367 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1368 	if (unlikely(ret < 0)) {
1369 		wol->supported = 0;
1370 		wol->wolopts = 0;
1371 	} else {
1372 		if (buf & USB_CFG_RMT_WKP_) {
1373 			wol->supported = WAKE_ALL;
1374 			wol->wolopts = pdata->wol;
1375 		} else {
1376 			wol->supported = 0;
1377 			wol->wolopts = 0;
1378 		}
1379 	}
1380 
1381 	usb_autopm_put_interface(dev->intf);
1382 }
1383 
lan78xx_set_wol(struct net_device * netdev,struct ethtool_wolinfo * wol)1384 static int lan78xx_set_wol(struct net_device *netdev,
1385 			   struct ethtool_wolinfo *wol)
1386 {
1387 	struct lan78xx_net *dev = netdev_priv(netdev);
1388 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1389 	int ret;
1390 
1391 	ret = usb_autopm_get_interface(dev->intf);
1392 	if (ret < 0)
1393 		return ret;
1394 
1395 	if (wol->wolopts & ~WAKE_ALL)
1396 		return -EINVAL;
1397 
1398 	pdata->wol = wol->wolopts;
1399 
1400 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1401 
1402 	phy_ethtool_set_wol(netdev->phydev, wol);
1403 
1404 	usb_autopm_put_interface(dev->intf);
1405 
1406 	return ret;
1407 }
1408 
/* ethtool get_eee: combine the PHY's EEE view with the MAC enable bit.
 * EEE is reported active only when both link partners advertise it and
 * MAC_CR_EEE_EN_ is set; the LPI request delay register doubles as the
 * tx_lpi_timer value.  Returns 0 on success or a negative errno.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* active only if both sides advertise a common EEE mode */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1446 
/* ethtool set_eee: toggle MAC_CR_EEE_EN_ and, when enabling, push the
 * advertisement to the PHY and program the LPI request delay from
 * tx_lpi_timer (same microsecond unit as EEE_TX_LPI_REQ_DLY, per
 * lan78xx_get_eee()).  Always returns 0 after a successful PM wakeup;
 * register-access errors are not propagated.
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1476 
lan78xx_get_link(struct net_device * net)1477 static u32 lan78xx_get_link(struct net_device *net)
1478 {
1479 	u32 link;
1480 
1481 	mutex_lock(&net->phydev->lock);
1482 	phy_read_status(net->phydev);
1483 	link = net->phydev->link;
1484 	mutex_unlock(&net->phydev->lock);
1485 
1486 	return link;
1487 }
1488 
lan78xx_get_drvinfo(struct net_device * net,struct ethtool_drvinfo * info)1489 static void lan78xx_get_drvinfo(struct net_device *net,
1490 				struct ethtool_drvinfo *info)
1491 {
1492 	struct lan78xx_net *dev = netdev_priv(net);
1493 
1494 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1495 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1496 }
1497 
/* ethtool get_msglevel: current netif message-enable bitmap */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1504 
/* ethtool set_msglevel: replace the netif message-enable bitmap */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1511 
lan78xx_get_link_ksettings(struct net_device * net,struct ethtool_link_ksettings * cmd)1512 static int lan78xx_get_link_ksettings(struct net_device *net,
1513 				      struct ethtool_link_ksettings *cmd)
1514 {
1515 	struct lan78xx_net *dev = netdev_priv(net);
1516 	struct phy_device *phydev = net->phydev;
1517 	int ret;
1518 
1519 	ret = usb_autopm_get_interface(dev->intf);
1520 	if (ret < 0)
1521 		return ret;
1522 
1523 	phy_ethtool_ksettings_get(phydev, cmd);
1524 
1525 	usb_autopm_put_interface(dev->intf);
1526 
1527 	return ret;
1528 }
1529 
/* ethtool set_link_ksettings: apply the new speed/duplex/autoneg via the
 * PHY layer.  When autoneg is off the link is bounced by briefly setting
 * BMCR_LOOPBACK — presumably to force the partner to resync to the forced
 * mode; confirm against the PHY datasheet.  Returns 0 or negative errno.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1557 
lan78xx_get_pause(struct net_device * net,struct ethtool_pauseparam * pause)1558 static void lan78xx_get_pause(struct net_device *net,
1559 			      struct ethtool_pauseparam *pause)
1560 {
1561 	struct lan78xx_net *dev = netdev_priv(net);
1562 	struct phy_device *phydev = net->phydev;
1563 	struct ethtool_link_ksettings ecmd;
1564 
1565 	phy_ethtool_ksettings_get(phydev, &ecmd);
1566 
1567 	pause->autoneg = dev->fc_autoneg;
1568 
1569 	if (dev->fc_request_control & FLOW_CTRL_TX)
1570 		pause->tx_pause = 1;
1571 
1572 	if (dev->fc_request_control & FLOW_CTRL_RX)
1573 		pause->rx_pause = 1;
1574 }
1575 
/* ethtool set_pauseparam: record the requested pause configuration and,
 * when link autoneg is active, rebuild the advertised Pause/Asym_Pause
 * bits and push the new settings to the PHY.  Pause autoneg with link
 * autoneg disabled is rejected with -EINVAL.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg is meaningless without link autoneg */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		/* clear the stale pause bits, then OR in the link modes
		 * corresponding to the new request
		 */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1620 
lan78xx_get_regs_len(struct net_device * netdev)1621 static int lan78xx_get_regs_len(struct net_device *netdev)
1622 {
1623 	if (!netdev->phydev)
1624 		return (sizeof(lan78xx_regs));
1625 	else
1626 		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1627 }
1628 
1629 static void
lan78xx_get_regs(struct net_device * netdev,struct ethtool_regs * regs,void * buf)1630 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
1631 		 void *buf)
1632 {
1633 	u32 *data = buf;
1634 	int i, j;
1635 	struct lan78xx_net *dev = netdev_priv(netdev);
1636 
1637 	/* Read Device/MAC registers */
1638 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
1639 		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
1640 
1641 	if (!netdev->phydev)
1642 		return;
1643 
1644 	/* Read PHY registers */
1645 	for (j = 0; j < 32; i++, j++)
1646 		data[i] = phy_read(netdev->phydev, j);
1647 }
1648 
/* ethtool entry points for lan78xx network devices */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1672 
/* Establish the interface MAC address: keep what the chip already holds
 * in RX_ADDRL/RX_ADDRH if valid, otherwise try platform/DT data, then
 * EEPROM/OTP, and finally fall back to a random address.  The result is
 * programmed into the RX address registers, perfect-filter slot 0
 * (MAF(0)), and the net_device.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* unpack the register pair into byte order */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* own address also goes into perfect-filter slot 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1722 
/* MDIO read and write wrappers for phylib */

/* mii_bus .read callback: issue a PHY register read through the MII_ACC
 * interface and return the 16-bit value, or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* NOTE(review): if this read fails, val may be left unset and the
	 * masked result below is indeterminate — confirm whether
	 * lan78xx_read_reg leaves *val untouched on error.
	 */
	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1759 
/* mii_bus .write callback: write a 16-bit PHY register through the
 * MII_ACC interface.  Returns 0 on success or a negative errno.
 *
 * Fix: the function previously returned 0 unconditionally, hiding
 * PHY-busy timeouts and PM failures from phylib; errors are now
 * propagated like in lan78xx_mdiobus_read().
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	/* report only failure or success, never a positive wait result */
	return ret < 0 ? ret : 0;
}
1794 
/* Allocate and register the MDIO bus backing the chip's MII interface.
 * The phy_mask restricts scanning: 7800/7850 expose only the internal
 * PHY at address 1, while 7801 scans external PHY addresses 0..7.
 * Returns 0 on success or a negative errno (the bus is freed on
 * registration failure).
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* bus id derived from the USB topology so it is unique per device */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1841 
/* Tear down the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1847 
/* phylib link-change callback carrying a chip workaround: at forced
 * 100 F/H the PHY may latch the wrong mode when the cable is swapped
 * between long (~50+m) and short runs, so speed is bounced through 10
 * before re-forcing 100.  PHY interrupts are masked for the duration and
 * any interrupt raised by the bounce is acked before re-enabling them.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1879 
/* irq_domain .map: wire a newly mapped virq to the lan78xx irqchip and
 * its flow handler, and mark it not autoprobe-capable.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
1891 
/* irq_domain .unmap: detach chip, handler and chip data from the virq */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1897 
/* domain ops for the chip's internal interrupt sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1902 
/* Clear the enable bit for this hw irq in the cached mask only; the
 * INT_EP_CTL register is written later in irq_bus_sync_unlock where
 * sleeping USB access is allowed.
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
1909 
/* Set the enable bit for this hw irq in the cached mask only; flushed to
 * INT_EP_CTL in irq_bus_sync_unlock.
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
1916 
/* irq_bus_lock: serialize cached-mask updates until the matching
 * irq_bus_sync_unlock flushes them to hardware.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1923 
/* irq_bus_sync_unlock: flush the cached interrupt-enable mask to the
 * INT_EP_CTL register, then release the lock taken in irq_bus_lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in non-atomic context.
	 * Only write when the cached mask differs from the hardware value.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1941 
/* irqchip for the chip's interrupt-status bits; mask/unmask only touch
 * the cached enable word, flushed in irq_bus_sync_unlock.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1949 
/* Create an irq_domain covering the device's MAX_INT_EP interrupt
 * sources and pre-map the PHY interrupt (INT_EP_PHY).  The current
 * INT_EP_CTL value seeds the cached enable mask.  Returns 0 on success
 * or -EINVAL when the domain or the PHY mapping cannot be created.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached mask from the hardware's current state */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
1988 
lan78xx_remove_irq_domain(struct lan78xx_net * dev)1989 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1990 {
1991 	if (dev->domain_data.phyirq > 0) {
1992 		irq_dispose_mapping(dev->domain_data.phyirq);
1993 
1994 		if (dev->domain_data.irqdomain)
1995 			irq_domain_remove(dev->domain_data.irqdomain);
1996 	}
1997 	dev->domain_data.phyirq = 0;
1998 	dev->domain_data.irqdomain = NULL;
1999 }
2000 
/* PHY fixup for an external LAN8835: route the shared
 * LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode via a vendor MMD register
 * (0x8010 in MDIO_MMD_PCS — meaning per PHY datasheet, not derivable
 * here), enable the MAC-side RGMII TXC delay, tune the TX DLL, and
 * record the resulting RGMII_TXID interface mode.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2024 
/* PHY fixup for an external KSZ9031RNX: program RGMII pad-skew values
 * through MMD registers (specific skew meanings per the Micrel
 * datasheet) and record the RGMII_RXID interface mode.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2041 
/* LAN7801-specific PHY discovery.
 *
 * The LAN7801 has no internal PHY. If an external PHY is found on the
 * MDIO bus, fixups are registered for the two supported external parts
 * (KSZ9031RNX, LAN8835) and the PHY is marked external. Otherwise a
 * 1 Gbps full-duplex fixed PHY is registered and the MAC is programmed
 * for RGMII with TX clock delay and clock outputs enabled.
 *
 * Returns the PHY device to attach, or NULL on failure.
 * NOTE(review): lan78xx register-access return codes on the fixed-PHY
 * path are stored in ret but never checked.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* MAC-side TXC delay, TX DLL tune, and clock enables */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	}
	return phydev;
}
2096 
/* Locate, attach and configure the PHY for this adapter.
 *
 * Chip-specific discovery: LAN7801 goes through lan7801_phy_init()
 * (external or fixed PHY over RGMII); LAN7800/LAN7850 use the first —
 * internal — PHY on the MDIO bus in GMII mode. Any other chip ID is
 * rejected.
 *
 * After attaching: 1000T-half is removed (the MAC cannot do it), pause
 * advertisement is rebuilt from fc_request_control, an optional
 * "microchip,led-modes" DT property gates the four LED enables in
 * HW_CFG, and autonegotiation is (re)started.
 *
 * Returns 0 on success, -EIO on discovery or attach failure.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* Undo what lan7801_phy_init() registered: either the
		 * fixed PHY or the two external-PHY fixups.
		 */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			/* Enable LED i iff the DT property lists a mode
			 * for it; len is the number of listed modes.
			 */
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2199 
lan78xx_set_rx_max_frame_length(struct lan78xx_net * dev,int size)2200 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2201 {
2202 	int ret = 0;
2203 	u32 buf;
2204 	bool rxenabled;
2205 
2206 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2207 
2208 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2209 
2210 	if (rxenabled) {
2211 		buf &= ~MAC_RX_RXEN_;
2212 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2213 	}
2214 
2215 	/* add 4 to size for FCS */
2216 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2217 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2218 
2219 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2220 
2221 	if (rxenabled) {
2222 		buf |= MAC_RX_RXEN_;
2223 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2224 	}
2225 
2226 	return 0;
2227 }
2228 
/* Asynchronously unlink every URB queued on @q.
 *
 * Entries already in unlink_start state are skipped, so each URB is
 * unlinked at most once even though the queue is rescanned after every
 * lock drop. The queue lock must be released around usb_unlink_urb()
 * because completion handlers take it again; a reference is held
 * across the call (see comment below) so the URB cannot be freed
 * underneath us by a racing completion.
 *
 * Returns the number of URBs successfully scheduled for unlinking.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet being unlinked */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2273 
lan78xx_change_mtu(struct net_device * netdev,int new_mtu)2274 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2275 {
2276 	struct lan78xx_net *dev = netdev_priv(netdev);
2277 	int ll_mtu = new_mtu + netdev->hard_header_len;
2278 	int old_hard_mtu = dev->hard_mtu;
2279 	int old_rx_urb_size = dev->rx_urb_size;
2280 	int ret;
2281 
2282 	/* no second zero-length packet read wanted after mtu-sized packets */
2283 	if ((ll_mtu % dev->maxpacket) == 0)
2284 		return -EDOM;
2285 
2286 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2287 
2288 	netdev->mtu = new_mtu;
2289 
2290 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2291 	if (dev->rx_urb_size == old_hard_mtu) {
2292 		dev->rx_urb_size = dev->hard_mtu;
2293 		if (dev->rx_urb_size > old_rx_urb_size) {
2294 			if (netif_running(dev->net)) {
2295 				unlink_urbs(dev, &dev->rxq);
2296 				tasklet_schedule(&dev->bh);
2297 			}
2298 		}
2299 	}
2300 
2301 	return 0;
2302 }
2303 
lan78xx_set_mac_addr(struct net_device * netdev,void * p)2304 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2305 {
2306 	struct lan78xx_net *dev = netdev_priv(netdev);
2307 	struct sockaddr *addr = p;
2308 	u32 addr_lo, addr_hi;
2309 	int ret;
2310 
2311 	if (netif_running(netdev))
2312 		return -EBUSY;
2313 
2314 	if (!is_valid_ether_addr(addr->sa_data))
2315 		return -EADDRNOTAVAIL;
2316 
2317 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2318 
2319 	addr_lo = netdev->dev_addr[0] |
2320 		  netdev->dev_addr[1] << 8 |
2321 		  netdev->dev_addr[2] << 16 |
2322 		  netdev->dev_addr[3] << 24;
2323 	addr_hi = netdev->dev_addr[4] |
2324 		  netdev->dev_addr[5] << 8;
2325 
2326 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2327 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2328 
2329 	/* Added to support MAC address changes */
2330 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2331 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2332 
2333 	return 0;
2334 }
2335 
2336 /* Enable or disable Rx checksum offload engine */
/* ndo_set_features handler: translate netdev feature flags into the
 * RFE_CTL checksum- and VLAN-offload bits.
 *
 * The cached pdata->rfe_ctl is updated under rfe_ctl_lock (shared with
 * the multicast path) and the register is written afterwards, outside
 * the spinlock, since the USB register write can sleep.
 *
 * Always returns 0.
 * NOTE(review): the write's return value is stored in ret but ignored,
 * so a failed register update is reported as success.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2371 
lan78xx_deferred_vlan_write(struct work_struct * param)2372 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2373 {
2374 	struct lan78xx_priv *pdata =
2375 			container_of(param, struct lan78xx_priv, set_vlan);
2376 	struct lan78xx_net *dev = pdata->dev;
2377 
2378 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2379 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2380 }
2381 
/* ndo_vlan_rx_add_vid: set the bit for @vid in the shadow VLAN filter
 * bitmap and schedule the deferred hardware update.
 */
static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 word = (vid >> 5) & 0x7F;	/* 32 VIDs per table word */
	u16 bit = vid & 0x1F;

	pdata->vlan_table[word] |= 1 << bit;

	/* the actual register writes need a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
2400 
/* ndo_vlan_rx_kill_vid: clear the bit for @vid in the shadow VLAN
 * filter bitmap and schedule the deferred hardware update.
 */
static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 word = (vid >> 5) & 0x7F;	/* 32 VIDs per table word */
	u16 bit = vid & 0x1F;

	pdata->vlan_table[word] &= ~(1 << bit);

	/* the actual register writes need a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
2419 
/* Program the USB Latency Tolerance Messaging (LTM) BELT registers.
 *
 * When LTM is enabled in USB_CFG1, a 24-byte table of six register
 * values may be supplied via EEPROM or OTP: a 2-byte descriptor at
 * offset 0x3F gives the table length (must be 24) and its word offset.
 * If no valid table is found — or LTM is disabled — all six registers
 * are written as zero. A failed raw read aborts without writing.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			/* fall back to OTP when there is no EEPROM */
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
2458 
/* Full hardware (re)initialisation.
 *
 * Issues a LiteReset and waits up to 1 s for it to self-clear, then
 * restores the MAC address, latches chip ID/revision, configures USB
 * burst/bulk-in behaviour, LTM, FIFO sizes and receive filtering,
 * applies the current feature/multicast state, resets the PHY (again
 * polling up to 1 s for ready), and finally enables the TX and RX
 * datapaths.
 *
 * Returns 0 on success or -EIO on a reset/PHY-ready timeout.
 * NOTE(review): individual register-access return codes are stored in
 * ret but never checked.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* issue a lite reset and wait for the bit to self-clear */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	/* size the burst cap and URB queue depths by USB link speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable MEF */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear any stale interrupts; flow control off for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* Implies there is no external eeprom. Set mac speed */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable MAC TX and the TX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	/* enable MAC RX and the RX FIFO controller */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2597 
/* Prime the rollover thresholds used by the statistics updater.
 *
 * Every counter's threshold defaults to the 20-bit maximum; the byte
 * counters and EEE LPI counters are then overridden to the full 32-bit
 * maximum, and a first stats update is requested.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	/* these hardware counters are full 32-bit */
	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	set_bit(EVENT_STAT_UPDATE, &dev->flags);
}
2623 
/* ndo_open: bring the interface up.
 *
 * Takes a PM reference for the duration of the call, starts the PHY
 * state machine, submits the interrupt URB used for link-change
 * notification, primes the statistics machinery and starts the TX
 * queue. Actual link bring-up is deferred to the EVENT_LINK_RESET
 * worker.
 *
 * Returns 0 or a negative errno.
 * NOTE(review): when the interrupt-URB submission fails we jump to
 * "done" and return the error, but the PHY started above is not
 * stopped on that path — confirm whether a phy_stop() is missing.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	/* release the autopm reference taken at entry; lan78xx_stop()
	 * drops the long-lived one
	 */
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2662 
/* Cancel all in-flight RX/TX URBs and wait for their completions.
 *
 * Publishes dev->wait so completion handlers can wake us as URBs
 * drain, unlinks everything on txq and rxq, then sleeps in short
 * UNLINK_TIMEOUT_MS intervals while queues empty out.
 *
 * NOTE(review): the wait loop exits as soon as ANY of rxq/txq/done is
 * empty (&&); waiting until all three are empty would require ||.
 * Left as-is — confirm intent before changing.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2688 
/* ndo_stop: quiesce the device.
 *
 * Stops the stats timer and PHY state machine, marks the device
 * closed, drains in-flight URBs, kills the interrupt URB, purges
 * paused RX frames, neutralises deferred work (see comment below),
 * and drops the PM reference held while the interface was open.
 *
 * Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
2725 
/* Prepend the 8-byte TX command header (TX_CMD_A/TX_CMD_B) to @skb.
 *
 * Ensures writable linear headroom, then encodes frame length, FCS
 * insertion, checksum-offload request (CHECKSUM_PARTIAL), TSO MSS and
 * an inline VLAN tag into two little-endian command words pushed in
 * front of the payload.
 *
 * Returns the prepared skb, or NULL — with @skb already freed — if
 * headroom expansion or linearisation fails. @flags is currently
 * unused.
 */
static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
				       struct sk_buff *skb, gfp_t flags)
{
	u32 tx_cmd_a, tx_cmd_b;
	void *ptr;

	if (skb_cow_head(skb, TX_OVERHEAD)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (skb_linearize(skb)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		/* clamp the MSS to the hardware minimum */
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	/* 8-byte command header goes directly in front of the frame */
	ptr = skb_push(skb, 8);
	put_unaligned_le32(tx_cmd_a, ptr);
	put_unaligned_le32(tx_cmd_b, ptr + 4);

	return skb;
}
2767 
/* Move @skb from its active queue to dev->done and kick the bh tasklet.
 *
 * Records @state in the skb's control block and returns the previous
 * state so callers can detect races with unlink_urbs().
 *
 * Locking is deliberately asymmetric: list->lock is taken with
 * irqsave, released plain, and the saved flags are only restored when
 * done.lock is dropped — the IRQ-disabled section is handed from one
 * lock to the other. The tasklet is scheduled only on the 0->1
 * transition of the done queue, so a burst of completions triggers a
 * single bh run.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2790 
/* Completion callback for bulk-out (TX) URBs.
 *
 * On success, credits the packet/byte counters that were aggregated
 * into the skb_data when the URB was built. On error, bumps tx_errors
 * and reacts per status: -EPIPE schedules an endpoint-halt recovery
 * (EVENT_TX_HALT); shutdown-style statuses are ignored; link-level
 * errors stop the TX queue. Finally releases the async PM reference
 * taken at submit time and moves the skb to the done queue.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* stalled endpoint: defer a halt-clear to the worker */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
2829 
lan78xx_queue_skb(struct sk_buff_head * list,struct sk_buff * newsk,enum skb_state state)2830 static void lan78xx_queue_skb(struct sk_buff_head *list,
2831 			      struct sk_buff *newsk, enum skb_state state)
2832 {
2833 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2834 
2835 	__skb_queue_tail(list, newsk);
2836 	entry->state = state;
2837 }
2838 
/* ndo_start_xmit: prepend the TX command header and stage the frame.
 *
 * Frames go onto txq_pend; the bh tasklet later builds and submits the
 * actual URBs. Below SuperSpeed, the netdev queue is throttled once
 * more than 10 frames are pending. Always returns NETDEV_TX_OK:
 * preparation failures drop the frame and count it as an error.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		/* consumes skb on failure and returns NULL */
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
2868 
/* Driver bind: allocate per-device private data, choose the default
 * netdev feature set, set up the PHY interrupt domain, reset the chip
 * and bring up the MDIO bus.
 *
 * Returns 0 on success; on failure everything acquired so far is torn
 * down (irq domain, deferred works, pdata).
 */
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = NULL;
	int ret;
	int i;

	/* pdata is reachable only through the opaque data[0] slot */
	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);

	pdata = (struct lan78xx_priv *)(dev->data[0]);
	if (!pdata) {
		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
		return -ENOMEM;
	}

	pdata->dev = dev;

	spin_lock_init(&pdata->rfe_ctl_lock);
	mutex_init(&pdata->dataport_mutex);

	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);

	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
		pdata->vlan_table[i] = 0;

	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);

	/* build the default feature set from the compile-time knobs */
	dev->net->features = 0;

	if (DEFAULT_TX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_HW_CSUM;

	if (DEFAULT_RX_CSUM_ENABLE)
		dev->net->features |= NETIF_F_RXCSUM;

	if (DEFAULT_TSO_CSUM_ENABLE)
		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;

	if (DEFAULT_VLAN_RX_OFFLOAD)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (DEFAULT_VLAN_FILTER_ENABLE)
		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->net->hw_features = dev->net->features;

	ret = lan78xx_setup_irq_domain(dev);
	if (ret < 0) {
		netdev_warn(dev->net,
			    "lan78xx_setup_irq_domain() failed : %d", ret);
		goto out1;
	}

	/* room for the 8-byte TX command header in front of each frame */
	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* Init all registers */
	ret = lan78xx_reset(dev);
	if (ret) {
		netdev_warn(dev->net, "Registers INIT FAILED....");
		goto out2;
	}

	ret = lan78xx_mdio_init(dev);
	if (ret) {
		netdev_warn(dev->net, "MDIO INIT FAILED.....");
		goto out2;
	}

	dev->net->flags |= IFF_MULTICAST;

	pdata->wol = WAKE_MAGIC;

	return ret;

out2:
	lan78xx_remove_irq_domain(dev);

out1:
	netdev_warn(dev->net, "Bind routine FAILED");
	cancel_work_sync(&pdata->set_multicast);
	cancel_work_sync(&pdata->set_vlan);
	kfree(pdata);
	return ret;
}
2953 
lan78xx_unbind(struct lan78xx_net * dev,struct usb_interface * intf)2954 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2955 {
2956 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2957 
2958 	lan78xx_remove_irq_domain(dev);
2959 
2960 	lan78xx_remove_mdio(dev);
2961 
2962 	if (pdata) {
2963 		cancel_work_sync(&pdata->set_multicast);
2964 		cancel_work_sync(&pdata->set_vlan);
2965 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2966 		kfree(pdata);
2967 		pdata = NULL;
2968 		dev->data[0] = 0;
2969 	}
2970 }
2971 
/* Apply hardware RX checksum results to @skb.
 *
 * Falls back to software checksumming (CHECKSUM_NONE) when RXCSUM is
 * off, the hardware flagged a checksum error (RX_CMD_A_ICSM_), or the
 * frame carries a VLAN tag while tag stripping is disabled — see the
 * comment below. Otherwise the 16-bit checksum from RX_CMD_B is handed
 * to the stack as CHECKSUM_COMPLETE.
 */
static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
2989 
/* Attach the hardware-extracted VLAN tag to @skb when tag stripping is
 * enabled and the frame actually carried a tag (RX_CMD_A_FVTG_).
 */
static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	if (!(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))
		return;
	if (!(rx_cmd_a & RX_CMD_A_FVTG_))
		return;

	/* low 16 bits of RX_CMD_B hold the stripped tag */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rx_cmd_b & 0xffff);
}
2999 
/* Hand a received frame to the network stack.
 *
 * While EVENT_RX_PAUSED is set, frames are parked on rxq_pause instead
 * of being delivered. Otherwise updates RX stats, resolves the
 * protocol, clears the skb_data control block, and passes the skb to
 * netif_rx() — unless a deferred RX hardware timestamp consumed it
 * first.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* the skb_data state is meaningless once the skb leaves the driver */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
3026 
/* Parse a bulk-in buffer that may carry several concatenated frames.
 *
 * Each frame is preceded by a 10-byte little-endian command header:
 * rx_cmd_a (4 bytes: status flags + length), rx_cmd_b (4 bytes: csum /
 * VLAN TCI) and rx_cmd_c (2 bytes).  Frames are padded so that, with
 * the RXW_PADDING offset accounted for, the next header starts on a
 * 4-byte boundary.
 *
 * Returns 1 on success — the last frame is left in @skb for the caller
 * to deliver — or 0 on a short buffer or clone failure (the caller
 * counts an rx error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error; skip the frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			/* not the last frame: clone the header and share
			 * the data so this frame can be delivered while
			 * parsing continues on the original skb
			 */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3098 
rx_process(struct lan78xx_net * dev,struct sk_buff * skb)3099 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3100 {
3101 	if (!lan78xx_rx(dev, skb)) {
3102 		dev->net->stats.rx_errors++;
3103 		goto done;
3104 	}
3105 
3106 	if (skb->len) {
3107 		lan78xx_skb_return(dev, skb);
3108 		return;
3109 	}
3110 
3111 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3112 	dev->net->stats.rx_errors++;
3113 done:
3114 	skb_queue_tail(&dev->done, skb);
3115 }
3116 
3117 static void rx_complete(struct urb *urb);
3118 
/* Allocate a receive buffer and submit @urb on the bulk-in pipe.
 *
 * Takes ownership of @urb: on any failure both the urb and the freshly
 * allocated skb are freed before returning.  Returns 0 on successful
 * submission, -ENOMEM if no skb could be allocated, -ENOLINK if the
 * device is not in a state to receive, or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	/* driver bookkeeping lives in skb->cb for the life of the urb */
	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, running, and neither
	 * halted nor asleep
	 */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* endpoint stalled: clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the tasklet retry the refill later */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3178 
/* Bulk-in URB completion handler.
 *
 * Classifies the completion status, hands the skb to the bottom half
 * via defer_bh(), and — when the urb is still usable — resubmits it
 * immediately.  For fatal statuses the urb is parked back in the
 * skb_data entry (urb set to NULL locally) so the cleanup path in the
 * tasklet frees it instead.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* runt buffer: count it and recycle */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* give the urb back to the entry; tasklet will free it */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			/* rx_submit() takes ownership of the urb */
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3248 
/* Transmit bottom half: drain txq_pend and push one bulk-out URB.
 *
 * Non-GSO packets queued in txq_pend are coalesced, 4-byte aligned,
 * into a single buffer up to MAX_SINGLE_PACKET_SIZE; a GSO skb at the
 * head of the queue is sent on its own.  Handles autosuspend deferral
 * (the urb is anchored on dev->deferred and submitted at resume) and
 * flow control (queue stopped when txq reaches tx_qlen).
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	skb_queue_walk(tqp, skb) {
		if (skb_is_gso(skb)) {
			if (!skb_queue_is_first(tqp, skb)) {
				/* handle previous packets first */
				break;
			}
			/* GSO skb goes out alone, bypassing the copy loop */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each packet starts on a 4-byte boundary in the batch */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			/* length excludes the per-packet TX command words */
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3376 
lan78xx_rx_bh(struct lan78xx_net * dev)3377 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3378 {
3379 	struct urb *urb;
3380 	int i;
3381 
3382 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3383 		for (i = 0; i < 10; i++) {
3384 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3385 				break;
3386 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3387 			if (urb)
3388 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3389 					return;
3390 		}
3391 
3392 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3393 			tasklet_schedule(&dev->bh);
3394 	}
3395 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3396 		netif_wake_queue(dev->net);
3397 }
3398 
/* Driver tasklet: recycle completed skbs from dev->done, then keep the
 * rx pipeline full and the tx pending queue drained.
 *
 * @param: the lan78xx_net pointer cast to unsigned long (tasklet API).
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			/* pre-mark for cleanup in case rx_process requeues */
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop processing this pass */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3442 
lan78xx_delayedwork(struct work_struct * work)3443 static void lan78xx_delayedwork(struct work_struct *work)
3444 {
3445 	int status;
3446 	struct lan78xx_net *dev;
3447 
3448 	dev = container_of(work, struct lan78xx_net, wq.work);
3449 
3450 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3451 		unlink_urbs(dev, &dev->txq);
3452 		status = usb_autopm_get_interface(dev->intf);
3453 		if (status < 0)
3454 			goto fail_pipe;
3455 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3456 		usb_autopm_put_interface(dev->intf);
3457 		if (status < 0 &&
3458 		    status != -EPIPE &&
3459 		    status != -ESHUTDOWN) {
3460 			if (netif_msg_tx_err(dev))
3461 fail_pipe:
3462 				netdev_err(dev->net,
3463 					   "can't clear tx halt, status %d\n",
3464 					   status);
3465 		} else {
3466 			clear_bit(EVENT_TX_HALT, &dev->flags);
3467 			if (status != -ESHUTDOWN)
3468 				netif_wake_queue(dev->net);
3469 		}
3470 	}
3471 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3472 		unlink_urbs(dev, &dev->rxq);
3473 		status = usb_autopm_get_interface(dev->intf);
3474 		if (status < 0)
3475 				goto fail_halt;
3476 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3477 		usb_autopm_put_interface(dev->intf);
3478 		if (status < 0 &&
3479 		    status != -EPIPE &&
3480 		    status != -ESHUTDOWN) {
3481 			if (netif_msg_rx_err(dev))
3482 fail_halt:
3483 				netdev_err(dev->net,
3484 					   "can't clear rx halt, status %d\n",
3485 					   status);
3486 		} else {
3487 			clear_bit(EVENT_RX_HALT, &dev->flags);
3488 			tasklet_schedule(&dev->bh);
3489 		}
3490 	}
3491 
3492 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3493 		int ret = 0;
3494 
3495 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3496 		status = usb_autopm_get_interface(dev->intf);
3497 		if (status < 0)
3498 			goto skip_reset;
3499 		if (lan78xx_link_reset(dev) < 0) {
3500 			usb_autopm_put_interface(dev->intf);
3501 skip_reset:
3502 			netdev_info(dev->net, "link reset failed (%d)\n",
3503 				    ret);
3504 		} else {
3505 			usb_autopm_put_interface(dev->intf);
3506 		}
3507 	}
3508 
3509 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3510 		lan78xx_update_stats(dev);
3511 
3512 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3513 
3514 		mod_timer(&dev->stat_monitor,
3515 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3516 
3517 		dev->delta = min((dev->delta * 2), 50);
3518 	}
3519 }
3520 
/* Interrupt-endpoint URB completion: dispatch the status payload and
 * resubmit the urb while the interface is running.  Unlike rx/tx, no
 * throttling is applied since this endpoint polls infrequently.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE:  not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running(dev->net))
		return;

	/* clear stale status bytes before the next poll fills them */
	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}
3556 
/* USB disconnect callback: tear down the network device.
 *
 * Order matters here: PHY fixups are unregistered and the PHY
 * disconnected before unregister_netdev(); the phydev pointer is saved
 * first because net->phydev is not valid after phy_disconnect().
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	phydev = net->phydev;

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* fixed-link PHYs were registered by this driver; drop them */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any tx urbs deferred while suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3595 
/* ndo_tx_timeout handler: cancel all in-flight tx URBs and kick the
 * bottom half so transmission restarts from txq_pend.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
3603 
/* ndo_features_check: trim the offload feature set for one skb.
 *
 * GSO is disabled for frames that, with the TX command overhead added,
 * would exceed the device's single-packet limit; VLAN and VXLAN
 * constraints are then applied by the generic helpers.
 */
static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
						struct net_device *netdev,
						netdev_features_t features)
{
	netdev_features_t out = features;

	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
		out &= ~NETIF_F_GSO_MASK;

	out = vlan_features_check(skb, out);

	return vxlan_features_check(skb, out);
}
3616 
/* net_device callbacks for the LAN78xx USB network interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
3632 
/* Statistics timer callback: defer the actual stats read to the
 * delayed-work handler (register access over USB presumably sleeps,
 * which is not allowed in timer context — handled via EVENT_STAT_UPDATE).
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
3639 
/* USB probe: allocate the netdev, validate endpoints, bind the device,
 * set up the interrupt URB and PHY, and register with the net core.
 *
 * Error unwinding is ordered out5..out1: phy_disconnect, free the intr
 * urb (URB_FREE_BUFFER releases its buffer), unbind, free_netdev, then
 * drop the usb_device reference.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned maxp;
	unsigned period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	skb_queue_head_init(&dev->txq_pend);
	mutex_init(&dev->phy_mutex);

	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* reject interfaces without the expected bulk-in/out + intr set */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out2;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out2;

	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
	buf = kmalloc(maxp, GFP_KERNEL);
	if (buf) {
		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_intr) {
			ret = -ENOMEM;
			kfree(buf);
			goto out3;
		} else {
			usb_fill_int_urb(dev->urb_intr, dev->udev,
					 dev->pipe_intr, buf, maxp,
					 intr_complete, dev, period);
			/* buf is now owned and freed with the urb */
			dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
		}
	}

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto out4;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto out4;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out5;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out5:
	phy_disconnect(netdev->phydev);
out4:
	usb_free_urb(dev->urb_intr);
out3:
	lan78xx_unbind(dev, intf);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
3794 
/* Compute the CRC16 the LAN78xx wake-frame filter hardware expects.
 *
 * Polynomial 0x8005, initial value 0xFFFF, data consumed LSB first.
 * Note the hardware-specific quirk of forcing the low bit after each
 * feedback step (crc |= 1), so this is not a textbook CRC16-IBM.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 poly = 0x8005;
	u16 crc = 0xFFFF;
	u16 bit;
	int i;

	for (i = 0; i < len; i++) {
		u8 byte = buf[i];

		for (bit = 0; bit < 8; bit++) {
			/* feedback = old MSB xor next input bit (LSB first) */
			u16 feedback = (crc >> 15) ^ (u16)(byte & 1);

			crc <<= 1;
			if (feedback) {
				crc ^= poly;
				crc |= (u16)0x0001U;
			}
			byte >>= 1;
		}
	}

	return crc;
}
3819 
/* Program the wake-on-LAN hardware for system suspend.
 *
 * Disables the MAC tx/rx paths, clears previous wake state, then builds
 * WUCSR/PMT_CTL and wake-frame filter (WUF_CFG/WUF_MASK) settings from
 * the requested @wol bits (WAKE_PHY/MAGIC/BCAST/MCAST/UCAST/ARP).
 * Finally clears pending wake status and re-enables the receiver so
 * wake frames can be seen.
 *
 * NOTE(review): register write return codes are collected in 'ret' but
 * never checked; the function always returns 0.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop MAC tx and rx while reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear any stale wake configuration and wake-source status */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* start from a clean set of wake-frame filters */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 7 = match first 3 bytes of destination address */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask 3 = match first 2 bytes (33:33 IPv6 mcast prefix) */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		/* mask bits 12,13 = EtherType field */
		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so wake frames are detected */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
3962 
/* USB suspend callback (both autosuspend and system suspend).
 *
 * On the first suspend (suspend_count transition 0->1) the tx/rx paths
 * are stopped and all URBs terminated; autosuspend is refused (-EBUSY)
 * while transmissions are pending.  Then, depending on the suspend
 * type, either good-frame wake (autosuspend) or the user's WoL settings
 * (system suspend, via lan78xx_set_suspend) are programmed.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
			PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			/* clear pending wake-up status bits */
			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* rx stays enabled so wake frames are seen */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
4055 
/* USB resume callback.
 *
 * On the last resume (suspend_count transition 1->0) the interrupt URB
 * is restarted, any tx URBs deferred while asleep are submitted, and
 * the queues are woken.  Wake-on-LAN state is then cleared and the MAC
 * transmitter re-enabled.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
				usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* flush tx urbs that lan78xx_tx_bh anchored while asleep */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* clear wake configuration and acknowledge wake reasons */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the MAC transmitter */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4122 
lan78xx_reset_resume(struct usb_interface * intf)4123 static int lan78xx_reset_resume(struct usb_interface *intf)
4124 {
4125 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4126 
4127 	lan78xx_reset(dev);
4128 
4129 	phy_start(dev->net->phydev);
4130 
4131 	return lan78xx_resume(intf);
4132 }
4133 
4134 static const struct usb_device_id products[] = {
4135 	{
4136 	/* LAN7800 USB Gigabit Ethernet Device */
4137 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4138 	},
4139 	{
4140 	/* LAN7850 USB Gigabit Ethernet Device */
4141 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4142 	},
4143 	{
4144 	/* LAN7801 USB Gigabit Ethernet Device */
4145 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4146 	},
4147 	{
4148 	/* ATM2-AF USB Gigabit Ethernet Device */
4149 	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
4150 	},
4151 	{},
4152 };
4153 MODULE_DEVICE_TABLE(usb, products);
4154 
/* USB driver glue: binds the probe/disconnect and power-management
 * callbacks defined above to the device IDs in the products[] table.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,	/* allow USB runtime-PM autosuspend */
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
4172