1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <net/vxlan.h>
35 #include <linux/interrupt.h>
36 #include <linux/irqdomain.h>
37 #include <linux/irq.h>
38 #include <linux/irqchip/chained_irq.h>
39 #include <linux/microchipphy.h>
40 #include <linux/phy.h>
41 #include <linux/of_net.h>
42 #include "lan78xx.h"
43 
44 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
45 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
46 #define DRIVER_NAME	"lan78xx"
47 #define DRIVER_VERSION	"1.0.6"
48 
49 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
50 #define THROTTLE_JIFFIES		(HZ / 8)
51 #define UNLINK_TIMEOUT_MS		3
52 
53 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
54 
55 #define SS_USB_PKT_SIZE			(1024)
56 #define HS_USB_PKT_SIZE			(512)
57 #define FS_USB_PKT_SIZE			(64)
58 
59 #define MAX_RX_FIFO_SIZE		(12 * 1024)
60 #define MAX_TX_FIFO_SIZE		(12 * 1024)
61 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
62 #define DEFAULT_BULK_IN_DELAY		(0x0800)
63 #define MAX_SINGLE_PACKET_SIZE		(9000)
64 #define DEFAULT_TX_CSUM_ENABLE		(true)
65 #define DEFAULT_RX_CSUM_ENABLE		(true)
66 #define DEFAULT_TSO_CSUM_ENABLE		(true)
67 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
68 #define TX_OVERHEAD			(8)
69 #define RXW_PADDING			2
70 
71 #define LAN78XX_USB_VENDOR_ID		(0x0424)
72 #define LAN7800_USB_PRODUCT_ID		(0x7800)
73 #define LAN7850_USB_PRODUCT_ID		(0x7850)
74 #define LAN7801_USB_PRODUCT_ID		(0x7801)
75 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
76 #define LAN78XX_OTP_MAGIC		(0x78F3)
77 
78 #define	MII_READ			1
79 #define	MII_WRITE			0
80 
81 #define EEPROM_INDICATOR		(0xA5)
82 #define EEPROM_MAC_OFFSET		(0x01)
83 #define MAX_EEPROM_SIZE			512
84 #define OTP_INDICATOR_1			(0xF3)
85 #define OTP_INDICATOR_2			(0xF7)
86 
87 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
88 					 WAKE_MCAST | WAKE_BCAST | \
89 					 WAKE_ARP | WAKE_MAGIC)
90 
91 /* USB related defines */
92 #define BULK_IN_PIPE			1
93 #define BULK_OUT_PIPE			2
94 
95 /* default autosuspend delay (mSec) */
96 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
97 
98 /* statistic update interval (mSec) */
99 #define STAT_UPDATE_TIMER		(1 * 1000)
100 
101 /* interrupt status bits reported by the interrupt endpoint */
102 #define MAX_INT_EP			(32)
103 #define INT_EP_INTEP			(31)
104 #define INT_EP_OTP_WR_DONE		(28)
105 #define INT_EP_EEE_TX_LPI_START		(26)
106 #define INT_EP_EEE_TX_LPI_STOP		(25)
107 #define INT_EP_EEE_RX_LPI		(24)
108 #define INT_EP_MAC_RESET_TIMEOUT	(23)
109 #define INT_EP_RDFO			(22)
110 #define INT_EP_TXE			(21)
111 #define INT_EP_USB_STATUS		(20)
112 #define INT_EP_TX_DIS			(19)
113 #define INT_EP_RX_DIS			(18)
114 #define INT_EP_PHY			(17)
115 #define INT_EP_DP			(16)
116 #define INT_EP_MAC_ERR			(15)
117 #define INT_EP_TDFU			(14)
118 #define INT_EP_TDFO			(13)
119 #define INT_EP_UTX			(12)
120 #define INT_EP_GPIO_11			(11)
121 #define INT_EP_GPIO_10			(10)
122 #define INT_EP_GPIO_9			(9)
123 #define INT_EP_GPIO_8			(8)
124 #define INT_EP_GPIO_7			(7)
125 #define INT_EP_GPIO_6			(6)
126 #define INT_EP_GPIO_5			(5)
127 #define INT_EP_GPIO_4			(4)
128 #define INT_EP_GPIO_3			(3)
129 #define INT_EP_GPIO_2			(2)
130 #define INT_EP_GPIO_1			(1)
131 #define INT_EP_GPIO_0			(0)
132 
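/* Example (illustrative only, not part of the driver): each INT_EP_*
 * value above is a bit position inside the 32-bit status word delivered
 * by the interrupt endpoint. A hypothetical decoder for the GPIO lines
 * could test the bits directly:
 *
 *	u32 intdata;			// status word from the interrupt URB
 *	unsigned int gpio;
 *
 *	for (gpio = INT_EP_GPIO_0; gpio <= INT_EP_GPIO_11; gpio++)
 *		if (intdata & BIT(gpio))
 *			handle_gpio(gpio);	// hypothetical helper
 *
 * The driver itself only acts on the PHY bit; see lan78xx_status()
 * below, which tests the INT_ENP_PHY_INT mask from lan78xx.h.
 */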
133 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
134 	"RX FCS Errors",
135 	"RX Alignment Errors",
136 	"RX Fragment Errors",
137 	"RX Jabber Errors",
138 	"RX Undersize Frame Errors",
139 	"RX Oversize Frame Errors",
140 	"RX Dropped Frames",
141 	"RX Unicast Byte Count",
142 	"RX Broadcast Byte Count",
143 	"RX Multicast Byte Count",
144 	"RX Unicast Frames",
145 	"RX Broadcast Frames",
146 	"RX Multicast Frames",
147 	"RX Pause Frames",
148 	"RX 64 Byte Frames",
149 	"RX 65 - 127 Byte Frames",
150 	"RX 128 - 255 Byte Frames",
151 	"RX 256 - 511 Bytes Frames",
152 	"RX 512 - 1023 Byte Frames",
153 	"RX 1024 - 1518 Byte Frames",
154 	"RX Greater 1518 Byte Frames",
155 	"EEE RX LPI Transitions",
156 	"EEE RX LPI Time",
157 	"TX FCS Errors",
158 	"TX Excess Deferral Errors",
159 	"TX Carrier Errors",
160 	"TX Bad Byte Count",
161 	"TX Single Collisions",
162 	"TX Multiple Collisions",
163 	"TX Excessive Collision",
164 	"TX Late Collisions",
165 	"TX Unicast Byte Count",
166 	"TX Broadcast Byte Count",
167 	"TX Multicast Byte Count",
168 	"TX Unicast Frames",
169 	"TX Broadcast Frames",
170 	"TX Multicast Frames",
171 	"TX Pause Frames",
172 	"TX 64 Byte Frames",
173 	"TX 65 - 127 Byte Frames",
174 	"TX 128 - 255 Byte Frames",
175 	"TX 256 - 511 Bytes Frames",
176 	"TX 512 - 1023 Byte Frames",
177 	"TX 1024 - 1518 Byte Frames",
178 	"TX Greater 1518 Byte Frames",
179 	"EEE TX LPI Transitions",
180 	"EEE TX LPI Time",
181 };
182 
183 struct lan78xx_statstage {
184 	u32 rx_fcs_errors;
185 	u32 rx_alignment_errors;
186 	u32 rx_fragment_errors;
187 	u32 rx_jabber_errors;
188 	u32 rx_undersize_frame_errors;
189 	u32 rx_oversize_frame_errors;
190 	u32 rx_dropped_frames;
191 	u32 rx_unicast_byte_count;
192 	u32 rx_broadcast_byte_count;
193 	u32 rx_multicast_byte_count;
194 	u32 rx_unicast_frames;
195 	u32 rx_broadcast_frames;
196 	u32 rx_multicast_frames;
197 	u32 rx_pause_frames;
198 	u32 rx_64_byte_frames;
199 	u32 rx_65_127_byte_frames;
200 	u32 rx_128_255_byte_frames;
201 	u32 rx_256_511_bytes_frames;
202 	u32 rx_512_1023_byte_frames;
203 	u32 rx_1024_1518_byte_frames;
204 	u32 rx_greater_1518_byte_frames;
205 	u32 eee_rx_lpi_transitions;
206 	u32 eee_rx_lpi_time;
207 	u32 tx_fcs_errors;
208 	u32 tx_excess_deferral_errors;
209 	u32 tx_carrier_errors;
210 	u32 tx_bad_byte_count;
211 	u32 tx_single_collisions;
212 	u32 tx_multiple_collisions;
213 	u32 tx_excessive_collision;
214 	u32 tx_late_collisions;
215 	u32 tx_unicast_byte_count;
216 	u32 tx_broadcast_byte_count;
217 	u32 tx_multicast_byte_count;
218 	u32 tx_unicast_frames;
219 	u32 tx_broadcast_frames;
220 	u32 tx_multicast_frames;
221 	u32 tx_pause_frames;
222 	u32 tx_64_byte_frames;
223 	u32 tx_65_127_byte_frames;
224 	u32 tx_128_255_byte_frames;
225 	u32 tx_256_511_bytes_frames;
226 	u32 tx_512_1023_byte_frames;
227 	u32 tx_1024_1518_byte_frames;
228 	u32 tx_greater_1518_byte_frames;
229 	u32 eee_tx_lpi_transitions;
230 	u32 eee_tx_lpi_time;
231 };
232 
233 struct lan78xx_statstage64 {
234 	u64 rx_fcs_errors;
235 	u64 rx_alignment_errors;
236 	u64 rx_fragment_errors;
237 	u64 rx_jabber_errors;
238 	u64 rx_undersize_frame_errors;
239 	u64 rx_oversize_frame_errors;
240 	u64 rx_dropped_frames;
241 	u64 rx_unicast_byte_count;
242 	u64 rx_broadcast_byte_count;
243 	u64 rx_multicast_byte_count;
244 	u64 rx_unicast_frames;
245 	u64 rx_broadcast_frames;
246 	u64 rx_multicast_frames;
247 	u64 rx_pause_frames;
248 	u64 rx_64_byte_frames;
249 	u64 rx_65_127_byte_frames;
250 	u64 rx_128_255_byte_frames;
251 	u64 rx_256_511_bytes_frames;
252 	u64 rx_512_1023_byte_frames;
253 	u64 rx_1024_1518_byte_frames;
254 	u64 rx_greater_1518_byte_frames;
255 	u64 eee_rx_lpi_transitions;
256 	u64 eee_rx_lpi_time;
257 	u64 tx_fcs_errors;
258 	u64 tx_excess_deferral_errors;
259 	u64 tx_carrier_errors;
260 	u64 tx_bad_byte_count;
261 	u64 tx_single_collisions;
262 	u64 tx_multiple_collisions;
263 	u64 tx_excessive_collision;
264 	u64 tx_late_collisions;
265 	u64 tx_unicast_byte_count;
266 	u64 tx_broadcast_byte_count;
267 	u64 tx_multicast_byte_count;
268 	u64 tx_unicast_frames;
269 	u64 tx_broadcast_frames;
270 	u64 tx_multicast_frames;
271 	u64 tx_pause_frames;
272 	u64 tx_64_byte_frames;
273 	u64 tx_65_127_byte_frames;
274 	u64 tx_128_255_byte_frames;
275 	u64 tx_256_511_bytes_frames;
276 	u64 tx_512_1023_byte_frames;
277 	u64 tx_1024_1518_byte_frames;
278 	u64 tx_greater_1518_byte_frames;
279 	u64 eee_tx_lpi_transitions;
280 	u64 eee_tx_lpi_time;
281 };
282 
283 struct lan78xx_net;
284 
285 struct lan78xx_priv {
286 	struct lan78xx_net *dev;
287 	u32 rfe_ctl;
288 	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
289 	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
290 	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
291 	struct mutex dataport_mutex; /* for dataport access */
292 	spinlock_t rfe_ctl_lock; /* for rfe register access */
293 	struct work_struct set_multicast;
294 	struct work_struct set_vlan;
295 	u32 wol;
296 };
297 
298 enum skb_state {
299 	illegal = 0,
300 	tx_start,
301 	tx_done,
302 	rx_start,
303 	rx_done,
304 	rx_cleanup,
305 	unlink_start
306 };
307 
308 struct skb_data {		/* skb->cb is one of these */
309 	struct urb *urb;
310 	struct lan78xx_net *dev;
311 	enum skb_state state;
312 	size_t length;
313 	int num_of_packet;
314 };
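/* Note: skb->cb is a fixed 48-byte scratch area in struct sk_buff, so
 * struct skb_data (two pointers, an enum, a size_t and an int) must
 * stay within that limit; comparable USB network drivers assert this
 * with a BUILD_BUG_ON(sizeof(struct skb_data) > sizeof(skb->cb)).
 */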
315 
316 struct usb_context {
317 	struct usb_ctrlrequest req;
318 	struct lan78xx_net *dev;
319 };
320 
321 #define EVENT_TX_HALT			0
322 #define EVENT_RX_HALT			1
323 #define EVENT_RX_MEMORY			2
324 #define EVENT_STS_SPLIT			3
325 #define EVENT_LINK_RESET		4
326 #define EVENT_RX_PAUSED			5
327 #define EVENT_DEV_WAKING		6
328 #define EVENT_DEV_ASLEEP		7
329 #define EVENT_DEV_OPEN			8
330 #define EVENT_STAT_UPDATE		9
331 
332 struct statstage {
333 	struct mutex			access_lock;	/* for stats access */
334 	struct lan78xx_statstage	saved;
335 	struct lan78xx_statstage	rollover_count;
336 	struct lan78xx_statstage	rollover_max;
337 	struct lan78xx_statstage64	curr_stat;
338 };
339 
340 struct irq_domain_data {
341 	struct irq_domain	*irqdomain;
342 	unsigned int		phyirq;
343 	struct irq_chip		*irqchip;
344 	irq_flow_handler_t	irq_handler;
345 	u32			irqenable;
346 	struct mutex		irq_lock;		/* for irq bus access */
347 };
348 
349 struct lan78xx_net {
350 	struct net_device	*net;
351 	struct usb_device	*udev;
352 	struct usb_interface	*intf;
353 	void			*driver_priv;
354 
355 	int			rx_qlen;
356 	int			tx_qlen;
357 	struct sk_buff_head	rxq;
358 	struct sk_buff_head	txq;
359 	struct sk_buff_head	done;
360 	struct sk_buff_head	rxq_pause;
361 	struct sk_buff_head	txq_pend;
362 
363 	struct tasklet_struct	bh;
364 	struct delayed_work	wq;
365 
366 	struct usb_host_endpoint *ep_blkin;
367 	struct usb_host_endpoint *ep_blkout;
368 	struct usb_host_endpoint *ep_intr;
369 
370 	int			msg_enable;
371 
372 	struct urb		*urb_intr;
373 	struct usb_anchor	deferred;
374 
375 	struct mutex		phy_mutex; /* for phy access */
376 	unsigned		pipe_in, pipe_out, pipe_intr;
377 
378 	u32			hard_mtu;	/* count any extra framing */
379 	size_t			rx_urb_size;	/* size for rx urbs */
380 
381 	unsigned long		flags;
382 
383 	wait_queue_head_t	*wait;
384 	unsigned char		suspend_count;
385 
386 	unsigned		maxpacket;
387 	struct timer_list	delay;
388 	struct timer_list	stat_monitor;
389 
390 	unsigned long		data[5];
391 
392 	int			link_on;
393 	u8			mdix_ctrl;
394 
395 	u32			chipid;
396 	u32			chiprev;
397 	struct mii_bus		*mdiobus;
398 	phy_interface_t		interface;
399 
400 	int			fc_autoneg;
401 	u8			fc_request_control;
402 
403 	int			delta;
404 	struct statstage	stats;
405 
406 	struct irq_domain_data	domain_data;
407 };
408 
409 /* define external phy id */
410 #define	PHY_LAN8835			(0x0007C130)
411 #define	PHY_KSZ9031RNX			(0x00221620)
412 
413 /* use ethtool to change the level for any given device */
414 static int msg_level = -1;
415 module_param(msg_level, int, 0);
416 MODULE_PARM_DESC(msg_level, "Override default message level");
417 
418 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
419 {
420 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
421 	int ret;
422 
423 	if (!buf)
424 		return -ENOMEM;
425 
426 	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
427 			      USB_VENDOR_REQUEST_READ_REGISTER,
428 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
429 			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
430 	if (likely(ret >= 0)) {
431 		le32_to_cpus(buf);
432 		*data = *buf;
433 	} else {
434 		netdev_warn(dev->net,
435 			    "Failed to read register index 0x%08x. ret = %d",
436 			    index, ret);
437 	}
438 
439 	kfree(buf);
440 
441 	return ret;
442 }
443 
444 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
445 {
446 	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
447 	int ret;
448 
449 	if (!buf)
450 		return -ENOMEM;
451 
452 	*buf = data;
453 	cpu_to_le32s(buf);
454 
455 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
456 			      USB_VENDOR_REQUEST_WRITE_REGISTER,
457 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
458 			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
459 	if (unlikely(ret < 0)) {
460 		netdev_warn(dev->net,
461 			    "Failed to write register index 0x%08x. ret = %d",
462 			    index, ret);
463 	}
464 
465 	kfree(buf);
466 
467 	return ret;
468 }
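/* Example (illustrative sketch, not part of the driver): callers
 * combine the two helpers above into read-modify-write sequences, as
 * the MAC_CR updates in lan78xx_set_eee() below do. A hypothetical
 * helper could look like:
 *
 *	static int example_set_reg_bits(struct lan78xx_net *dev,
 *					u32 index, u32 bits)
 *	{
 *		u32 val;
 *		int ret;
 *
 *		ret = lan78xx_read_reg(dev, index, &val);
 *		if (ret < 0)
 *			return ret;
 *		return lan78xx_write_reg(dev, index, val | bits);
 *	}
 *
 * Both helpers bounce the value through a kmalloc'ed buffer because
 * usb_control_msg() requires a DMA-able transfer buffer, which stack
 * memory is not guaranteed to be.
 */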
469 
470 static int lan78xx_read_stats(struct lan78xx_net *dev,
471 			      struct lan78xx_statstage *data)
472 {
473 	int ret = 0;
474 	int i;
475 	struct lan78xx_statstage *stats;
476 	u32 *src;
477 	u32 *dst;
478 
479 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
480 	if (!stats)
481 		return -ENOMEM;
482 
483 	ret = usb_control_msg(dev->udev,
484 			      usb_rcvctrlpipe(dev->udev, 0),
485 			      USB_VENDOR_REQUEST_GET_STATS,
486 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
487 			      0,
488 			      0,
489 			      (void *)stats,
490 			      sizeof(*stats),
491 			      USB_CTRL_SET_TIMEOUT);
492 	if (likely(ret >= 0)) {
493 		src = (u32 *)stats;
494 		dst = (u32 *)data;
495 		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
496 			le32_to_cpus(&src[i]);
497 			dst[i] = src[i];
498 		}
499 	} else {
500 		netdev_warn(dev->net,
501 			    "Failed to read stat ret = %d", ret);
502 	}
503 
504 	kfree(stats);
505 
506 	return ret;
507 }
508 
509 #define check_counter_rollover(struct1, dev_stats, member) {	\
510 	if (struct1->member < dev_stats.saved.member)		\
511 		dev_stats.rollover_count.member++;		\
512 	}
513 
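/* For reference, with stats and dev->stats as arguments,
 * check_counter_rollover(stats, dev->stats, rx_fcs_errors) expands to
 *
 *	if (stats->rx_fcs_errors < dev->stats.saved.rx_fcs_errors)
 *		dev->stats.rollover_count.rx_fcs_errors++;
 *
 * i.e. a hardware counter that reads lower than its previously saved
 * value is assumed to have wrapped since the last read.
 */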
514 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
515 					struct lan78xx_statstage *stats)
516 {
517 	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
518 	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
519 	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
520 	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
521 	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
522 	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
523 	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
524 	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
525 	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
526 	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
527 	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
528 	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
529 	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
530 	check_counter_rollover(stats, dev->stats, rx_pause_frames);
531 	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
532 	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
533 	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
534 	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
535 	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
536 	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
537 	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
538 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
539 	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
540 	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
541 	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
542 	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
543 	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
544 	check_counter_rollover(stats, dev->stats, tx_single_collisions);
545 	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
546 	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
547 	check_counter_rollover(stats, dev->stats, tx_late_collisions);
548 	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
549 	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
550 	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
551 	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
552 	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
553 	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
554 	check_counter_rollover(stats, dev->stats, tx_pause_frames);
555 	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
556 	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
557 	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
558 	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
559 	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
560 	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
561 	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
562 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
563 	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
564 
565 	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
566 }
567 
568 static void lan78xx_update_stats(struct lan78xx_net *dev)
569 {
570 	u32 *p, *count, *max;
571 	u64 *data;
572 	int i;
573 	struct lan78xx_statstage lan78xx_stats;
574 
575 	if (usb_autopm_get_interface(dev->intf) < 0)
576 		return;
577 
578 	p = (u32 *)&lan78xx_stats;
579 	count = (u32 *)&dev->stats.rollover_count;
580 	max = (u32 *)&dev->stats.rollover_max;
581 	data = (u64 *)&dev->stats.curr_stat;
582 
583 	mutex_lock(&dev->stats.access_lock);
584 
585 	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
586 		lan78xx_check_stat_rollover(dev, &lan78xx_stats);
587 
588 	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
589 		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
590 
591 	mutex_unlock(&dev->stats.access_lock);
592 
593 	usb_autopm_put_interface(dev->intf);
594 }
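/* Worked example of the extrapolation above: if a hardware counter
 * currently reads p[i] = 100, has wrapped count[i] = 2 times, and its
 * maximum value is max[i] = 0xFFFFFFFF, the accumulated 64-bit value
 * becomes
 *
 *	data[i] = 100 + 2 * (0xFFFFFFFF + 1) = 100 + 2 * 2^32
 *
 * (rollover_max is tracked per counter; whether a given counter really
 * wraps at 2^32 - 1 depends on the hardware register width.)
 */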
595 
596 /* Loop until the read is completed, with timeout. Called with phy_mutex held. */
597 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
598 {
599 	unsigned long start_time = jiffies;
600 	u32 val;
601 	int ret;
602 
603 	do {
604 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
605 		if (unlikely(ret < 0))
606 			return -EIO;
607 
608 		if (!(val & MII_ACC_MII_BUSY_))
609 			return 0;
610 	} while (!time_after(jiffies, start_time + HZ));
611 
612 	return -EIO;
613 }
614 
615 static inline u32 mii_access(int id, int index, int read)
616 {
617 	u32 ret;
618 
619 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
620 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
621 	if (read)
622 		ret |= MII_ACC_MII_READ_;
623 	else
624 		ret |= MII_ACC_MII_WRITE_;
625 	ret |= MII_ACC_MII_BUSY_;
626 
627 	return ret;
628 }
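/* Example (illustrative): a read of BMSR (register 1) on the internal
 * PHY at address 1 composes to
 *
 *	mii_access(1, MII_BMSR, MII_READ)
 *		== ((1 << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_)
 *		 | ((1 << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_)
 *		 | MII_ACC_MII_READ_ | MII_ACC_MII_BUSY_
 *
 * The BUSY bit starts the transaction; lan78xx_phy_wait_not_busy()
 * above polls the same bit to detect completion.
 */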
629 
630 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
631 {
632 	unsigned long start_time = jiffies;
633 	u32 val;
634 	int ret;
635 
636 	do {
637 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
638 		if (unlikely(ret < 0))
639 			return -EIO;
640 
641 		if (!(val & E2P_CMD_EPC_BUSY_) ||
642 		    (val & E2P_CMD_EPC_TIMEOUT_))
643 			break;
644 		usleep_range(40, 100);
645 	} while (!time_after(jiffies, start_time + HZ));
646 
647 	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
648 		netdev_warn(dev->net, "EEPROM read operation timeout");
649 		return -EIO;
650 	}
651 
652 	return 0;
653 }
654 
655 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
656 {
657 	unsigned long start_time = jiffies;
658 	u32 val;
659 	int ret;
660 
661 	do {
662 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
663 		if (unlikely(ret < 0))
664 			return -EIO;
665 
666 		if (!(val & E2P_CMD_EPC_BUSY_))
667 			return 0;
668 
669 		usleep_range(40, 100);
670 	} while (!time_after(jiffies, start_time + HZ));
671 
672 	netdev_warn(dev->net, "EEPROM is busy");
673 	return -EIO;
674 }
675 
676 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
677 				   u32 length, u8 *data)
678 {
679 	u32 val;
680 	u32 saved;
681 	int i, ret;
682 	int retval;
683 
684 	/* Depending on the chip, some EEPROM pins are muxed with the LED
685 	 * function; disable and restore the LED function to access the EEPROM.
686 	 */
687 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
688 	saved = val;
689 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
690 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
691 		ret = lan78xx_write_reg(dev, HW_CFG, val);
692 	}
693 
694 	retval = lan78xx_eeprom_confirm_not_busy(dev);
695 	if (retval)
696 		return retval;
697 
698 	for (i = 0; i < length; i++) {
699 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
700 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
701 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
702 		if (unlikely(ret < 0)) {
703 			retval = -EIO;
704 			goto exit;
705 		}
706 
707 		retval = lan78xx_wait_eeprom(dev);
708 		if (retval < 0)
709 			goto exit;
710 
711 		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
712 		if (unlikely(ret < 0)) {
713 			retval = -EIO;
714 			goto exit;
715 		}
716 
717 		data[i] = val & 0xFF;
718 		offset++;
719 	}
720 
721 	retval = 0;
722 exit:
723 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
724 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
725 
726 	return retval;
727 }
728 
729 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
730 			       u32 length, u8 *data)
731 {
732 	u8 sig;
733 	int ret;
734 
735 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
736 	if ((ret == 0) && (sig == EEPROM_INDICATOR))
737 		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
738 	else
739 		ret = -EINVAL;
740 
741 	return ret;
742 }
743 
744 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
745 				    u32 length, u8 *data)
746 {
747 	u32 val;
748 	u32 saved;
749 	int i, ret;
750 	int retval;
751 
752 	/* Depending on the chip, some EEPROM pins are muxed with the LED
753 	 * function; disable and restore the LED function to access the EEPROM.
754 	 */
755 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
756 	saved = val;
757 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
758 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
759 		ret = lan78xx_write_reg(dev, HW_CFG, val);
760 	}
761 
762 	retval = lan78xx_eeprom_confirm_not_busy(dev);
763 	if (retval)
764 		goto exit;
765 
766 	/* Issue write/erase enable command */
767 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
768 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
769 	if (unlikely(ret < 0)) {
770 		retval = -EIO;
771 		goto exit;
772 	}
773 
774 	retval = lan78xx_wait_eeprom(dev);
775 	if (retval < 0)
776 		goto exit;
777 
778 	for (i = 0; i < length; i++) {
779 		/* Fill data register */
780 		val = data[i];
781 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
782 		if (ret < 0) {
783 			retval = -EIO;
784 			goto exit;
785 		}
786 
787 		/* Send "write" command */
788 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
789 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
790 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
791 		if (ret < 0) {
792 			retval = -EIO;
793 			goto exit;
794 		}
795 
796 		retval = lan78xx_wait_eeprom(dev);
797 		if (retval < 0)
798 			goto exit;
799 
800 		offset++;
801 	}
802 
803 	retval = 0;
804 exit:
805 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
806 		ret = lan78xx_write_reg(dev, HW_CFG, saved);
807 
808 	return retval;
809 }
810 
811 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
812 				u32 length, u8 *data)
813 {
814 	int i;
815 	int ret;
816 	u32 buf;
817 	unsigned long timeout;
818 
819 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
820 
821 	if (buf & OTP_PWR_DN_PWRDN_N_) {
822 		/* clear it and wait to be cleared */
823 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
824 
825 		timeout = jiffies + HZ;
826 		do {
827 			usleep_range(1, 10);
828 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
829 			if (time_after(jiffies, timeout)) {
830 				netdev_warn(dev->net,
831 					    "timeout on OTP_PWR_DN");
832 				return -EIO;
833 			}
834 		} while (buf & OTP_PWR_DN_PWRDN_N_);
835 	}
836 
837 	for (i = 0; i < length; i++) {
838 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
839 					((offset + i) >> 8) & OTP_ADDR1_15_11);
840 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
841 					((offset + i) & OTP_ADDR2_10_3));
842 
843 		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
844 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
845 
846 		timeout = jiffies + HZ;
847 		do {
848 			udelay(1);
849 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
850 			if (time_after(jiffies, timeout)) {
851 				netdev_warn(dev->net,
852 					    "timeout on OTP_STATUS");
853 				return -EIO;
854 			}
855 		} while (buf & OTP_STATUS_BUSY_);
856 
857 		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
858 
859 		data[i] = (u8)(buf & 0xFF);
860 	}
861 
862 	return 0;
863 }
864 
865 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
866 				 u32 length, u8 *data)
867 {
868 	int i;
869 	int ret;
870 	u32 buf;
871 	unsigned long timeout;
872 
873 	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
874 
875 	if (buf & OTP_PWR_DN_PWRDN_N_) {
876 		/* clear it and wait to be cleared */
877 		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
878 
879 		timeout = jiffies + HZ;
880 		do {
881 			udelay(1);
882 			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
883 			if (time_after(jiffies, timeout)) {
884 				netdev_warn(dev->net,
885 					    "timeout on OTP_PWR_DN completion");
886 				return -EIO;
887 			}
888 		} while (buf & OTP_PWR_DN_PWRDN_N_);
889 	}
890 
891 	/* set to BYTE program mode */
892 	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
893 
894 	for (i = 0; i < length; i++) {
895 		ret = lan78xx_write_reg(dev, OTP_ADDR1,
896 					((offset + i) >> 8) & OTP_ADDR1_15_11);
897 		ret = lan78xx_write_reg(dev, OTP_ADDR2,
898 					((offset + i) & OTP_ADDR2_10_3));
899 		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
900 		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
901 		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
902 
903 		timeout = jiffies + HZ;
904 		do {
905 			udelay(1);
906 			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
907 			if (time_after(jiffies, timeout)) {
908 				netdev_warn(dev->net,
909 					    "Timeout on OTP_STATUS completion");
910 				return -EIO;
911 			}
912 		} while (buf & OTP_STATUS_BUSY_);
913 	}
914 
915 	return 0;
916 }
917 
918 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
919 			    u32 length, u8 *data)
920 {
921 	u8 sig;
922 	int ret;
923 
924 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
925 
926 	if (ret == 0) {
927 		/* OTP_INDICATOR_2 means the data starts in the second block */
928 		if (sig == OTP_INDICATOR_2)
929 			offset += 0x100;
930 		else if (sig != OTP_INDICATOR_1)
931 			ret = -EINVAL;
932 
933 		if (!ret)
934 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
935 	}
936 
937 	return ret;
938 }
939 
940 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
941 {
942 	int i, ret;
943 
944 	for (i = 0; i < 100; i++) {
945 		u32 dp_sel;
946 
947 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
948 		if (unlikely(ret < 0))
949 			return -EIO;
950 
951 		if (dp_sel & DP_SEL_DPRDY_)
952 			return 0;
953 
954 		usleep_range(40, 100);
955 	}
956 
957 	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
958 
959 	return -EIO;
960 }
961 
962 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
963 				  u32 addr, u32 length, u32 *buf)
964 {
965 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
966 	u32 dp_sel;
967 	int i, ret;
968 
969 	if (usb_autopm_get_interface(dev->intf) < 0)
970 		return 0;
971 
972 	mutex_lock(&pdata->dataport_mutex);
973 
974 	ret = lan78xx_dataport_wait_not_busy(dev);
975 	if (ret < 0)
976 		goto done;
977 
978 	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
979 
980 	dp_sel &= ~DP_SEL_RSEL_MASK_;
981 	dp_sel |= ram_select;
982 	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
983 
984 	for (i = 0; i < length; i++) {
985 		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
986 
987 		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
988 
989 		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
990 
991 		ret = lan78xx_dataport_wait_not_busy(dev);
992 		if (ret < 0)
993 			goto done;
994 	}
995 
996 done:
997 	mutex_unlock(&pdata->dataport_mutex);
998 	usb_autopm_put_interface(dev->intf);
999 
1000 	return ret;
1001 }
1002 
1003 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1004 				    int index, u8 addr[ETH_ALEN])
1005 {
1006 	u32	temp;
1007 
1008 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1009 		temp = addr[3];
1010 		temp = addr[2] | (temp << 8);
1011 		temp = addr[1] | (temp << 8);
1012 		temp = addr[0] | (temp << 8);
1013 		pdata->pfilter_table[index][1] = temp;
1014 		temp = addr[5];
1015 		temp = addr[4] | (temp << 8);
1016 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1017 		pdata->pfilter_table[index][0] = temp;
1018 	}
1019 }
1020 
1021 /* returns hash bit number for given MAC address */
1022 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1023 {
1024 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1025 }
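/* Example (illustrative): the 9-bit result indexes one bit of the
 * 512-bit multicast hash table. For bitnum = 200 this is
 *
 *	mchash_table[200 / 32] |= 1 << (200 % 32);	// word 6, bit 8
 *
 * which is exactly how lan78xx_set_multicast() below records multicast
 * addresses that do not fit into a perfect-filter slot.
 */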
1026 
1027 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1028 {
1029 	struct lan78xx_priv *pdata =
1030 			container_of(param, struct lan78xx_priv, set_multicast);
1031 	struct lan78xx_net *dev = pdata->dev;
1032 	int i;
1033 	int ret;
1034 
1035 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1036 		  pdata->rfe_ctl);
1037 
1038 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1039 			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1040 
1041 	for (i = 1; i < NUM_OF_MAF; i++) {
1042 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1043 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1044 					pdata->pfilter_table[i][1]);
1045 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1046 					pdata->pfilter_table[i][0]);
1047 	}
1048 
1049 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1050 }
1051 
1052 static void lan78xx_set_multicast(struct net_device *netdev)
1053 {
1054 	struct lan78xx_net *dev = netdev_priv(netdev);
1055 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1056 	unsigned long flags;
1057 	int i;
1058 
1059 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1060 
1061 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1062 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1063 
1064 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1065 		pdata->mchash_table[i] = 0;
1066 	/* pfilter_table[0] holds the device's own HW address */
1067 	for (i = 1; i < NUM_OF_MAF; i++) {
1068 		pdata->pfilter_table[i][0] =
1069 		pdata->pfilter_table[i][1] = 0;
1070 	}
1071 
1072 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1073 
1074 	if (dev->net->flags & IFF_PROMISC) {
1075 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1076 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1077 	} else {
1078 		if (dev->net->flags & IFF_ALLMULTI) {
1079 			netif_dbg(dev, drv, dev->net,
1080 				  "receive all multicast enabled");
1081 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1082 		}
1083 	}
1084 
1085 	if (netdev_mc_count(dev->net)) {
1086 		struct netdev_hw_addr *ha;
1087 		int i;
1088 
1089 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1090 
1091 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1092 
1093 		i = 1;
1094 		netdev_for_each_mc_addr(ha, netdev) {
1095 			/* set first 32 into Perfect Filter */
1096 			if (i < 33) {
1097 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1098 			} else {
1099 				u32 bitnum = lan78xx_hash(ha->addr);
1100 
1101 				pdata->mchash_table[bitnum / 32] |=
1102 							(1 << (bitnum % 32));
1103 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1104 			}
1105 			i++;
1106 		}
1107 	}
1108 
1109 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1110 
1111 	/* defer register writes to a sleepable context */
1112 	schedule_work(&pdata->set_multicast);
1113 }
1114 
1115 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1116 				      u16 lcladv, u16 rmtadv)
1117 {
1118 	u32 flow = 0, fct_flow = 0;
1119 	int ret;
1120 	u8 cap;
1121 
1122 	if (dev->fc_autoneg)
1123 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1124 	else
1125 		cap = dev->fc_request_control;
1126 
1127 	if (cap & FLOW_CTRL_TX)
1128 		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1129 
1130 	if (cap & FLOW_CTRL_RX)
1131 		flow |= FLOW_CR_RX_FCEN_;
1132 
1133 	if (dev->udev->speed == USB_SPEED_SUPER)
1134 		fct_flow = 0x817;
1135 	else if (dev->udev->speed == USB_SPEED_HIGH)
1136 		fct_flow = 0x211;
1137 
1138 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1139 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1140 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1141 
1142 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1143 
1144 	/* threshold value should be set before enabling flow */
1145 	ret = lan78xx_write_reg(dev, FLOW, flow);
1146 
1147 	return 0;
1148 }
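/* Note (illustrative): in the FLOW value above, FLOW_CR_TX_FCEN_
 * enables transmit pause frames and the OR'ed 0xFFFF appears to be the
 * pause time, in quanta, in the low 16 bits. The FCT_FLOW values
 * (0x817 for SuperSpeed, 0x211 for high speed) look like per-speed
 * FIFO on/off thresholds; the datasheet is the authority on their
 * exact encoding.
 */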
1149 
1150 static int lan78xx_link_reset(struct lan78xx_net *dev)
1151 {
1152 	struct phy_device *phydev = dev->net->phydev;
1153 	struct ethtool_link_ksettings ecmd;
1154 	int ladv, radv, ret;
1155 	u32 buf;
1156 
1157 	/* clear LAN78xx interrupt status */
1158 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1159 	if (unlikely(ret < 0))
1160 		return -EIO;
1161 
1162 	phy_read_status(phydev);
1163 
1164 	if (!phydev->link && dev->link_on) {
1165 		dev->link_on = false;
1166 
1167 		/* reset MAC */
1168 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1169 		if (unlikely(ret < 0))
1170 			return -EIO;
1171 		buf |= MAC_CR_RST_;
1172 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1173 		if (unlikely(ret < 0))
1174 			return -EIO;
1175 
1176 		del_timer(&dev->stat_monitor);
1177 	} else if (phydev->link && !dev->link_on) {
1178 		dev->link_on = true;
1179 
1180 		phy_ethtool_ksettings_get(phydev, &ecmd);
1181 
1182 		if (dev->udev->speed == USB_SPEED_SUPER) {
1183 			if (ecmd.base.speed == 1000) {
1184 				/* disable U2 */
1185 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1186 				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1187 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1188 				/* enable U1 */
1189 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1190 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1191 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1192 			} else {
1193 				/* enable U1 & U2 */
1194 				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1195 				buf |= USB_CFG1_DEV_U2_INIT_EN_;
1196 				buf |= USB_CFG1_DEV_U1_INIT_EN_;
1197 				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1198 			}
1199 		}
1200 
1201 		ladv = phy_read(phydev, MII_ADVERTISE);
1202 		if (ladv < 0)
1203 			return ladv;
1204 
1205 		radv = phy_read(phydev, MII_LPA);
1206 		if (radv < 0)
1207 			return radv;
1208 
1209 		netif_dbg(dev, link, dev->net,
1210 			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1211 			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1212 
1213 		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1214 						 radv);
1215 
1216 		if (!timer_pending(&dev->stat_monitor)) {
1217 			dev->delta = 1;
1218 			mod_timer(&dev->stat_monitor,
1219 				  jiffies + STAT_UPDATE_TIMER);
1220 		}
1221 
1222 		tasklet_schedule(&dev->bh);
1223 	}
1224 
1225 	return ret;
1226 }
1227 
1228 /* some work can't be done in tasklets, so we use keventd
1229  *
1230  * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
1231  * but tasklet_schedule() doesn't. Hope the failure is rare.
1232  */
1233 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1234 {
1235 	set_bit(work, &dev->flags);
1236 	if (!schedule_delayed_work(&dev->wq, 0))
1237 		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1238 }
1239 
1240 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1241 {
1242 	u32 intdata;
1243 
1244 	if (urb->actual_length != 4) {
1245 		netdev_warn(dev->net,
1246 			    "unexpected urb length %d", urb->actual_length);
1247 		return;
1248 	}
1249 
1250 	memcpy(&intdata, urb->transfer_buffer, 4);
1251 	le32_to_cpus(&intdata);
1252 
1253 	if (intdata & INT_ENP_PHY_INT) {
1254 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1255 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1256 
1257 		if (dev->domain_data.phyirq > 0)
1258 			generic_handle_irq(dev->domain_data.phyirq);
1259 	} else
1260 		netdev_warn(dev->net,
1261 			    "unexpected interrupt: 0x%08x\n", intdata);
1262 }
1263 
1264 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1265 {
1266 	return MAX_EEPROM_SIZE;
1267 }
1268 
1269 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1270 				      struct ethtool_eeprom *ee, u8 *data)
1271 {
1272 	struct lan78xx_net *dev = netdev_priv(netdev);
1273 	int ret;
1274 
1275 	ret = usb_autopm_get_interface(dev->intf);
1276 	if (ret)
1277 		return ret;
1278 
1279 	ee->magic = LAN78XX_EEPROM_MAGIC;
1280 
1281 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1282 
1283 	usb_autopm_put_interface(dev->intf);
1284 
1285 	return ret;
1286 }
1287 
1288 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1289 				      struct ethtool_eeprom *ee, u8 *data)
1290 {
1291 	struct lan78xx_net *dev = netdev_priv(netdev);
1292 	int ret;
1293 
1294 	ret = usb_autopm_get_interface(dev->intf);
1295 	if (ret)
1296 		return ret;
1297 
1298 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1299 	 * to load data from EEPROM
1300 	 */
1301 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1302 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1303 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1304 		 (ee->offset == 0) &&
1305 		 (ee->len == 512) &&
1306 		 (data[0] == OTP_INDICATOR_1))
1307 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1308 
1309 	usb_autopm_put_interface(dev->intf);
1310 
1311 	return ret;
1312 }
1313 
1314 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1315 				u8 *data)
1316 {
1317 	if (stringset == ETH_SS_STATS)
1318 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1319 }
1320 
1321 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1322 {
1323 	if (sset == ETH_SS_STATS)
1324 		return ARRAY_SIZE(lan78xx_gstrings);
1325 	else
1326 		return -EOPNOTSUPP;
1327 }
1328 
1329 static void lan78xx_get_stats(struct net_device *netdev,
1330 			      struct ethtool_stats *stats, u64 *data)
1331 {
1332 	struct lan78xx_net *dev = netdev_priv(netdev);
1333 
1334 	lan78xx_update_stats(dev);
1335 
1336 	mutex_lock(&dev->stats.access_lock);
1337 	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1338 	mutex_unlock(&dev->stats.access_lock);
1339 }
1340 
1341 static void lan78xx_get_wol(struct net_device *netdev,
1342 			    struct ethtool_wolinfo *wol)
1343 {
1344 	struct lan78xx_net *dev = netdev_priv(netdev);
1345 	int ret;
1346 	u32 buf;
1347 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1348 
1349 	if (usb_autopm_get_interface(dev->intf) < 0)
1350 		return;
1351 
1352 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1353 	if (unlikely(ret < 0)) {
1354 		wol->supported = 0;
1355 		wol->wolopts = 0;
1356 	} else {
1357 		if (buf & USB_CFG_RMT_WKP_) {
1358 			wol->supported = WAKE_ALL;
1359 			wol->wolopts = pdata->wol;
1360 		} else {
1361 			wol->supported = 0;
1362 			wol->wolopts = 0;
1363 		}
1364 	}
1365 
1366 	usb_autopm_put_interface(dev->intf);
1367 }
1368 
1369 static int lan78xx_set_wol(struct net_device *netdev,
1370 			   struct ethtool_wolinfo *wol)
1371 {
1372 	struct lan78xx_net *dev = netdev_priv(netdev);
1373 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1374 	int ret;
1375 
1376 	if (wol->wolopts & ~WAKE_ALL)
1377 		return -EINVAL;
1378 
1379 	ret = usb_autopm_get_interface(dev->intf);
1380 	if (ret < 0)
1381 		return ret;
1382 
1383 	pdata->wol = wol->wolopts;
1384 
1385 	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1386 
1387 	phy_ethtool_set_wol(netdev->phydev, wol);
1388 
1389 	usb_autopm_put_interface(dev->intf);
1390 
1391 	return ret;
1392 }
1393 
1394 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1395 {
1396 	struct lan78xx_net *dev = netdev_priv(net);
1397 	struct phy_device *phydev = net->phydev;
1398 	int ret;
1399 	u32 buf;
1400 
1401 	ret = usb_autopm_get_interface(dev->intf);
1402 	if (ret < 0)
1403 		return ret;
1404 
1405 	ret = phy_ethtool_get_eee(phydev, edata);
1406 	if (ret < 0)
1407 		goto exit;
1408 
1409 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1410 	if (buf & MAC_CR_EEE_EN_) {
1411 		edata->eee_enabled = true;
1412 		edata->eee_active = !!(edata->advertised &
1413 				       edata->lp_advertised);
1414 		edata->tx_lpi_enabled = true;
1415 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer use the same microsecond unit */
1416 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1417 		edata->tx_lpi_timer = buf;
1418 	} else {
1419 		edata->eee_enabled = false;
1420 		edata->eee_active = false;
1421 		edata->tx_lpi_enabled = false;
1422 		edata->tx_lpi_timer = 0;
1423 	}
1424 
1425 	ret = 0;
1426 exit:
1427 	usb_autopm_put_interface(dev->intf);
1428 
1429 	return ret;
1430 }
1431 
1432 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1433 {
1434 	struct lan78xx_net *dev = netdev_priv(net);
1435 	int ret;
1436 	u32 buf;
1437 
1438 	ret = usb_autopm_get_interface(dev->intf);
1439 	if (ret < 0)
1440 		return ret;
1441 
1442 	if (edata->eee_enabled) {
1443 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1444 		buf |= MAC_CR_EEE_EN_;
1445 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1446 
1447 		phy_ethtool_set_eee(net->phydev, edata);
1448 
1449 		buf = (u32)edata->tx_lpi_timer;
1450 		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1451 	} else {
1452 		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1453 		buf &= ~MAC_CR_EEE_EN_;
1454 		ret = lan78xx_write_reg(dev, MAC_CR, buf);
1455 	}
1456 
1457 	usb_autopm_put_interface(dev->intf);
1458 
1459 	return 0;
1460 }
1461 
1462 static u32 lan78xx_get_link(struct net_device *net)
1463 {
1464 	phy_read_status(net->phydev);
1465 
1466 	return net->phydev->link;
1467 }
1468 
1469 static void lan78xx_get_drvinfo(struct net_device *net,
1470 				struct ethtool_drvinfo *info)
1471 {
1472 	struct lan78xx_net *dev = netdev_priv(net);
1473 
1474 	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1475 	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1476 	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1477 }
1478 
1479 static u32 lan78xx_get_msglevel(struct net_device *net)
1480 {
1481 	struct lan78xx_net *dev = netdev_priv(net);
1482 
1483 	return dev->msg_enable;
1484 }
1485 
1486 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1487 {
1488 	struct lan78xx_net *dev = netdev_priv(net);
1489 
1490 	dev->msg_enable = level;
1491 }
1492 
1493 static int lan78xx_get_link_ksettings(struct net_device *net,
1494 				      struct ethtool_link_ksettings *cmd)
1495 {
1496 	struct lan78xx_net *dev = netdev_priv(net);
1497 	struct phy_device *phydev = net->phydev;
1498 	int ret;
1499 
1500 	ret = usb_autopm_get_interface(dev->intf);
1501 	if (ret < 0)
1502 		return ret;
1503 
1504 	phy_ethtool_ksettings_get(phydev, cmd);
1505 
1506 	usb_autopm_put_interface(dev->intf);
1507 
1508 	return ret;
1509 }
1510 
1511 static int lan78xx_set_link_ksettings(struct net_device *net,
1512 				      const struct ethtool_link_ksettings *cmd)
1513 {
1514 	struct lan78xx_net *dev = netdev_priv(net);
1515 	struct phy_device *phydev = net->phydev;
1516 	int ret = 0;
1517 	int temp;
1518 
1519 	ret = usb_autopm_get_interface(dev->intf);
1520 	if (ret < 0)
1521 		return ret;
1522 
1523 	/* change speed & duplex */
1524 	ret = phy_ethtool_ksettings_set(phydev, cmd);
1525 
1526 	if (!cmd->base.autoneg) {
1527 		/* force link down */
1528 		temp = phy_read(phydev, MII_BMCR);
1529 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1530 		mdelay(1);
1531 		phy_write(phydev, MII_BMCR, temp);
1532 	}
1533 
1534 	usb_autopm_put_interface(dev->intf);
1535 
1536 	return ret;
1537 }
1538 
1539 static void lan78xx_get_pause(struct net_device *net,
1540 			      struct ethtool_pauseparam *pause)
1541 {
1542 	struct lan78xx_net *dev = netdev_priv(net);
1543 	struct phy_device *phydev = net->phydev;
1544 	struct ethtool_link_ksettings ecmd;
1545 
1546 	phy_ethtool_ksettings_get(phydev, &ecmd);
1547 
1548 	pause->autoneg = dev->fc_autoneg;
1549 
1550 	if (dev->fc_request_control & FLOW_CTRL_TX)
1551 		pause->tx_pause = 1;
1552 
1553 	if (dev->fc_request_control & FLOW_CTRL_RX)
1554 		pause->rx_pause = 1;
1555 }
1556 
1557 static int lan78xx_set_pause(struct net_device *net,
1558 			     struct ethtool_pauseparam *pause)
1559 {
1560 	struct lan78xx_net *dev = netdev_priv(net);
1561 	struct phy_device *phydev = net->phydev;
1562 	struct ethtool_link_ksettings ecmd;
1563 	int ret;
1564 
1565 	phy_ethtool_ksettings_get(phydev, &ecmd);
1566 
1567 	if (pause->autoneg && !ecmd.base.autoneg) {
1568 		ret = -EINVAL;
1569 		goto exit;
1570 	}
1571 
1572 	dev->fc_request_control = 0;
1573 	if (pause->rx_pause)
1574 		dev->fc_request_control |= FLOW_CTRL_RX;
1575 
1576 	if (pause->tx_pause)
1577 		dev->fc_request_control |= FLOW_CTRL_TX;
1578 
1579 	if (ecmd.base.autoneg) {
1580 		u32 mii_adv;
1581 		u32 advertising;
1582 
1583 		ethtool_convert_link_mode_to_legacy_u32(
1584 			&advertising, ecmd.link_modes.advertising);
1585 
1586 		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1587 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1588 		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1589 
1590 		ethtool_convert_legacy_u32_to_link_mode(
1591 			ecmd.link_modes.advertising, advertising);
1592 
1593 		phy_ethtool_ksettings_set(phydev, &ecmd);
1594 	}
1595 
1596 	dev->fc_autoneg = pause->autoneg;
1597 
1598 	ret = 0;
1599 exit:
1600 	return ret;
1601 }
1602 
1603 static const struct ethtool_ops lan78xx_ethtool_ops = {
1604 	.get_link	= lan78xx_get_link,
1605 	.nway_reset	= phy_ethtool_nway_reset,
1606 	.get_drvinfo	= lan78xx_get_drvinfo,
1607 	.get_msglevel	= lan78xx_get_msglevel,
1608 	.set_msglevel	= lan78xx_set_msglevel,
1609 	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1610 	.get_eeprom	= lan78xx_ethtool_get_eeprom,
1611 	.set_eeprom	= lan78xx_ethtool_set_eeprom,
1612 	.get_ethtool_stats = lan78xx_get_stats,
1613 	.get_sset_count = lan78xx_get_sset_count,
1614 	.get_strings	= lan78xx_get_strings,
1615 	.get_wol	= lan78xx_get_wol,
1616 	.set_wol	= lan78xx_set_wol,
1617 	.get_eee	= lan78xx_get_eee,
1618 	.set_eee	= lan78xx_set_eee,
1619 	.get_pauseparam	= lan78xx_get_pause,
1620 	.set_pauseparam	= lan78xx_set_pause,
1621 	.get_link_ksettings = lan78xx_get_link_ksettings,
1622 	.set_link_ksettings = lan78xx_set_link_ksettings,
1623 };
1624 
1625 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1626 {
1627 	if (!netif_running(netdev))
1628 		return -EINVAL;
1629 
1630 	return phy_mii_ioctl(netdev->phydev, rq, cmd);
1631 }
1632 
1633 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1634 {
1635 	u32 addr_lo, addr_hi;
1636 	int ret;
1637 	u8 addr[6];
1638 
1639 	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1640 	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1641 
1642 	addr[0] = addr_lo & 0xFF;
1643 	addr[1] = (addr_lo >> 8) & 0xFF;
1644 	addr[2] = (addr_lo >> 16) & 0xFF;
1645 	addr[3] = (addr_lo >> 24) & 0xFF;
1646 	addr[4] = addr_hi & 0xFF;
1647 	addr[5] = (addr_hi >> 8) & 0xFF;
1648 
1649 	if (!is_valid_ether_addr(addr)) {
1650 		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1651 			/* valid address present in Device Tree */
1652 			netif_dbg(dev, ifup, dev->net,
1653 				  "MAC address read from Device Tree");
1654 		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1655 						 ETH_ALEN, addr) == 0) ||
1656 			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1657 					      ETH_ALEN, addr) == 0)) &&
1658 			   is_valid_ether_addr(addr)) {
1659 			/* eeprom values are valid so use them */
1660 			netif_dbg(dev, ifup, dev->net,
1661 				  "MAC address read from EEPROM");
1662 		} else {
1663 			/* generate random MAC */
1664 			random_ether_addr(addr);
1665 			netif_dbg(dev, ifup, dev->net,
1666 				  "MAC address set to random addr");
1667 		}
1668 
1669 		addr_lo = addr[0] | (addr[1] << 8) |
1670 			  (addr[2] << 16) | (addr[3] << 24);
1671 		addr_hi = addr[4] | (addr[5] << 8);
1672 
1673 		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1674 		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1675 	}
1676 
1677 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1678 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1679 
1680 	ether_addr_copy(dev->net->dev_addr, addr);
1681 }
1682 
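/* Example (illustrative): the address registers are packed
 * little-endian first. For the MAC 00:11:22:33:44:55 the code above
 * produces
 *
 *	addr_lo = 0x33221100	(RX_ADDRL)
 *	addr_hi = 0x00005544	(RX_ADDRH)
 *
 * and the same pair, with MAF_HI_VALID_ set, lands in perfect-filter
 * slot 0, which holds the device's own address.
 */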
1683 /* MDIO read and write wrappers for phylib */
1684 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1685 {
1686 	struct lan78xx_net *dev = bus->priv;
1687 	u32 val, addr;
1688 	int ret;
1689 
1690 	ret = usb_autopm_get_interface(dev->intf);
1691 	if (ret < 0)
1692 		return ret;
1693 
1694 	mutex_lock(&dev->phy_mutex);
1695 
1696 	/* confirm MII not busy */
1697 	ret = lan78xx_phy_wait_not_busy(dev);
1698 	if (ret < 0)
1699 		goto done;
1700 
1701 	/* set the address, index & direction (read from PHY) */
1702 	addr = mii_access(phy_id, idx, MII_READ);
1703 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1704 
1705 	ret = lan78xx_phy_wait_not_busy(dev);
1706 	if (ret < 0)
1707 		goto done;
1708 
1709 	ret = lan78xx_read_reg(dev, MII_DATA, &val);
1710 
1711 	ret = (int)(val & 0xFFFF);
1712 
1713 done:
1714 	mutex_unlock(&dev->phy_mutex);
1715 	usb_autopm_put_interface(dev->intf);
1716 
1717 	return ret;
1718 }
1719 
1720 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1721 				 u16 regval)
1722 {
1723 	struct lan78xx_net *dev = bus->priv;
1724 	u32 val, addr;
1725 	int ret;
1726 
1727 	ret = usb_autopm_get_interface(dev->intf);
1728 	if (ret < 0)
1729 		return ret;
1730 
1731 	mutex_lock(&dev->phy_mutex);
1732 
1733 	/* confirm MII not busy */
1734 	ret = lan78xx_phy_wait_not_busy(dev);
1735 	if (ret < 0)
1736 		goto done;
1737 
1738 	val = (u32)regval;
1739 	ret = lan78xx_write_reg(dev, MII_DATA, val);
1740 
1741 	/* set the address, index & direction (write to PHY) */
1742 	addr = mii_access(phy_id, idx, MII_WRITE);
1743 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
1744 
1745 	ret = lan78xx_phy_wait_not_busy(dev);
1746 	if (ret < 0)
1747 		goto done;
1748 
1749 done:
1750 	mutex_unlock(&dev->phy_mutex);
1751 	usb_autopm_put_interface(dev->intf);
1752 	return ret;
1753 }
1754 
1755 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1756 {
1757 	int ret;
1758 
1759 	dev->mdiobus = mdiobus_alloc();
1760 	if (!dev->mdiobus) {
1761 		netdev_err(dev->net, "can't allocate MDIO bus\n");
1762 		return -ENOMEM;
1763 	}
1764 
1765 	dev->mdiobus->priv = (void *)dev;
1766 	dev->mdiobus->read = lan78xx_mdiobus_read;
1767 	dev->mdiobus->write = lan78xx_mdiobus_write;
1768 	dev->mdiobus->name = "lan78xx-mdiobus";
1769 	dev->mdiobus->parent = &dev->udev->dev;
1770 
1771 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1772 		 dev->udev->bus->busnum, dev->udev->devnum);
1773 
1774 	switch (dev->chipid) {
1775 	case ID_REV_CHIP_ID_7800_:
1776 	case ID_REV_CHIP_ID_7850_:
1777 		/* set to internal PHY id */
1778 		dev->mdiobus->phy_mask = ~(1 << 1);
1779 		break;
1780 	case ID_REV_CHIP_ID_7801_:
1781 		/* scan thru PHYAD[2..0] */
1782 		dev->mdiobus->phy_mask = ~(0xFF);
1783 		break;
1784 	}
1785 
1786 	ret = mdiobus_register(dev->mdiobus);
1787 	if (ret) {
1788 		netdev_err(dev->net, "can't register MDIO bus\n");
1789 		goto exit1;
1790 	}
1791 
1792 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1793 	return 0;
1794 exit1:
1795 	mdiobus_free(dev->mdiobus);
1796 	return ret;
1797 }
1798 
1799 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1800 {
1801 	mdiobus_unregister(dev->mdiobus);
1802 	mdiobus_free(dev->mdiobus);
1803 }
1804 
1805 static void lan78xx_link_status_change(struct net_device *net)
1806 {
1807 	struct phy_device *phydev = net->phydev;
1808 	int ret, temp;
1809 
1810 	/* In forced 100 full/half duplex mode, the chip may fail to
1811 	 * set the mode correctly when the cable is switched between
1812 	 * a long (~50 m or more) and a short one. As a workaround,
1813 	 * drop the speed to 10 before setting it back to 100.
1814 	 */
1815 	if (!phydev->autoneg && (phydev->speed == 100)) {
1816 		/* disable phy interrupt */
1817 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1818 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1819 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1820 
1821 		temp = phy_read(phydev, MII_BMCR);
1822 		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1823 		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1824 		temp |= BMCR_SPEED100;
1825 		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1826 
1827 		/* clear any interrupt raised while applying the workaround */
1828 		temp = phy_read(phydev, LAN88XX_INT_STS);
1829 
1830 		/* re-enable the PHY interrupt */
1831 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1832 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1833 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1834 	}
1835 }
1836 
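/* The chip reports PHY and other events through bits of the
 * interrupt-endpoint status word. The irq_domain below exposes those
 * bits as ordinary Linux interrupts so that phylib can request
 * INT_EP_PHY instead of polling the link.
 */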
1837 static int irq_map(struct irq_domain *d, unsigned int irq,
1838 		   irq_hw_number_t hwirq)
1839 {
1840 	struct irq_domain_data *data = d->host_data;
1841 
1842 	irq_set_chip_data(irq, data);
1843 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1844 	irq_set_noprobe(irq);
1845 
1846 	return 0;
1847 }
1848 
1849 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1850 {
1851 	irq_set_chip_and_handler(irq, NULL, NULL);
1852 	irq_set_chip_data(irq, NULL);
1853 }
1854 
1855 static const struct irq_domain_ops chip_domain_ops = {
1856 	.map	= irq_map,
1857 	.unmap	= irq_unmap,
1858 };
1859 
1860 static void lan78xx_irq_mask(struct irq_data *irqd)
1861 {
1862 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1863 
1864 	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1865 }
1866 
1867 static void lan78xx_irq_unmask(struct irq_data *irqd)
1868 {
1869 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1870 
1871 	data->irqenable |= BIT(irqd_to_hwirq(irqd));
1872 }
1873 
1874 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1875 {
1876 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1877 
1878 	mutex_lock(&data->irq_lock);
1879 }
1880 
1881 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1882 {
1883 	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1884 	struct lan78xx_net *dev =
1885 			container_of(data, struct lan78xx_net, domain_data);
1886 	u32 buf;
1887 	int ret;
1888 
1889 	/* Do the register access here: irq_bus_lock and irq_bus_sync_unlock
1890 	 * are the only two callbacks executed in a non-atomic context.
1891 	 */
1892 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1893 	if (buf != data->irqenable)
1894 		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1895 
1896 	mutex_unlock(&data->irq_lock);
1897 }
1898 
1899 static struct irq_chip lan78xx_irqchip = {
1900 	.name			= "lan78xx-irqs",
1901 	.irq_mask		= lan78xx_irq_mask,
1902 	.irq_unmask		= lan78xx_irq_unmask,
1903 	.irq_bus_lock		= lan78xx_irq_bus_lock,
1904 	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
1905 };
1906 
1907 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1908 {
1909 	struct device_node *of_node;
1910 	struct irq_domain *irqdomain;
1911 	unsigned int irqmap = 0;
1912 	u32 buf;
1913 	int ret = 0;
1914 
1915 	of_node = dev->udev->dev.parent->of_node;
1916 
1917 	mutex_init(&dev->domain_data.irq_lock);
1918 
1919 	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1920 	dev->domain_data.irqenable = buf;
1921 
1922 	dev->domain_data.irqchip = &lan78xx_irqchip;
1923 	dev->domain_data.irq_handler = handle_simple_irq;
1924 
1925 	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1926 					  &chip_domain_ops, &dev->domain_data);
1927 	if (irqdomain) {
1928 		/* create mapping for PHY interrupt */
1929 		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1930 		if (!irqmap) {
1931 			irq_domain_remove(irqdomain);
1932 
1933 			irqdomain = NULL;
1934 			ret = -EINVAL;
1935 		}
1936 	} else {
1937 		ret = -EINVAL;
1938 	}
1939 
1940 	dev->domain_data.irqdomain = irqdomain;
1941 	dev->domain_data.phyirq = irqmap;
1942 
1943 	return ret;
1944 }
1945 
1946 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1947 {
1948 	if (dev->domain_data.phyirq > 0) {
1949 		irq_dispose_mapping(dev->domain_data.phyirq);
1950 
1951 		if (dev->domain_data.irqdomain)
1952 			irq_domain_remove(dev->domain_data.irqdomain);
1953 	}
1954 	dev->domain_data.phyirq = 0;
1955 	dev->domain_data.irqdomain = NULL;
1956 }
1957 
1958 static int lan8835_fixup(struct phy_device *phydev)
1959 {
1960 	int buf;
1961 	int ret;
1962 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1963 
1964 	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1965 	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1966 	buf &= ~0x1800;
1967 	buf |= 0x0800;
1968 	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1969 
1970 	/* RGMII MAC TXC Delay Enable */
1971 	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1972 				MAC_RGMII_ID_TXC_DELAY_EN_);
1973 
1974 	/* RGMII TX DLL Tune Adjust */
1975 	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1976 
1977 	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1978 
1979 	return 1;
1980 }
1981 
1982 static int ksz9031rnx_fixup(struct phy_device *phydev)
1983 {
1984 	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1985 
1986 	/* Micrel KSZ9031RNX PHY configuration */
1987 	/* RGMII Control Signal Pad Skew */
1988 	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
1989 	/* RGMII RX Data Pad Skew */
1990 	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
1991 	/* RGMII RX Clock Pad Skew */
1992 	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
1993 
1994 	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
1995 
1996 	return 1;
1997 }
1998 
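/* Find the PHY on the MDIO bus, register chip-specific fixups
 * (KSZ9031RNX and LAN8835 for the RGMII-attached LAN7801), attach it
 * with lan78xx_link_status_change() as the link handler and
 * advertise symmetric flow control.
 */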
1999 static int lan78xx_phy_init(struct lan78xx_net *dev)
2000 {
2001 	int ret;
2002 	u32 mii_adv;
2003 	struct phy_device *phydev = dev->net->phydev;
2004 
2005 	phydev = phy_find_first(dev->mdiobus);
2006 	if (!phydev) {
2007 		netdev_err(dev->net, "no PHY found\n");
2008 		return -EIO;
2009 	}
2010 
2011 	if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2012 	    (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2013 		phydev->is_internal = true;
2014 		dev->interface = PHY_INTERFACE_MODE_GMII;
2015 
2016 	} else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2017 		if (!phydev->drv) {
2018 			netdev_err(dev->net, "no PHY driver found\n");
2019 			return -EIO;
2020 		}
2021 
2022 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2023 
2024 		/* external PHY fixup for KSZ9031RNX */
2025 		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2026 						 ksz9031rnx_fixup);
2027 		if (ret < 0) {
2028 			netdev_err(dev->net, "fail to register fixup\n");
2029 			return ret;
2030 		}
2031 		/* external PHY fixup for LAN8835 */
2032 		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2033 						 lan8835_fixup);
2034 		if (ret < 0) {
2035 			netdev_err(dev->net, "fail to register fixup\n");
2036 			return ret;
2037 		}
2038 		/* add more external PHY fixup here if needed */
2039 
2040 		phydev->is_internal = false;
2041 	} else {
2042 		netdev_err(dev->net, "unknown ID found\n");
2043 		ret = -EIO;
2044 		goto error;
2045 	}
2046 
2047 	/* if phyirq is not set, use polling mode in phylib */
2048 	if (dev->domain_data.phyirq > 0)
2049 		phydev->irq = dev->domain_data.phyirq;
2050 	else
2051 		phydev->irq = 0;
2052 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2053 
2054 	/* set to AUTOMDIX */
2055 	phydev->mdix = ETH_TP_MDI_AUTO;
2056 
2057 	ret = phy_connect_direct(dev->net, phydev,
2058 				 lan78xx_link_status_change,
2059 				 dev->interface);
2060 	if (ret) {
2061 		netdev_err(dev->net, "can't attach PHY to %s\n",
2062 			   dev->mdiobus->id);
2063 		return -EIO;
2064 	}
2065 
2066 	/* MAC doesn't support 1000T Half */
2067 	phydev->supported &= ~SUPPORTED_1000baseT_Half;
2068 
2069 	/* support both flow controls */
2070 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2071 	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2072 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2073 	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2074 
2075 	genphy_config_aneg(phydev);
2076 
2077 	dev->fc_autoneg = phydev->autoneg;
2078 
2079 	return 0;
2080 
2081 error:
2082 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2083 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2084 
2085 	return ret;
2086 }
2087 
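/* Program the MAC's maximum RX frame size (@size plus 4 bytes of
 * FCS), pausing the receiver around the update while it is enabled.
 */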
2088 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2089 {
2090 	int ret = 0;
2091 	u32 buf;
2092 	bool rxenabled;
2093 
2094 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2095 
2096 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2097 
2098 	if (rxenabled) {
2099 		buf &= ~MAC_RX_RXEN_;
2100 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2101 	}
2102 
2103 	/* add 4 to size for FCS */
2104 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2105 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2106 
2107 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2108 
2109 	if (rxenabled) {
2110 		buf |= MAC_RX_RXEN_;
2111 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2112 	}
2113 
2114 	return 0;
2115 }
2116 
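/* Unlink every URB still queued on @q; returns the number of URBs
 * whose unlink was issued successfully.
 */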
2117 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2118 {
2119 	struct sk_buff *skb;
2120 	unsigned long flags;
2121 	int count = 0;
2122 
2123 	spin_lock_irqsave(&q->lock, flags);
2124 	while (!skb_queue_empty(q)) {
2125 		struct skb_data	*entry;
2126 		struct urb *urb;
2127 		int ret;
2128 
2129 		skb_queue_walk(q, skb) {
2130 			entry = (struct skb_data *)skb->cb;
2131 			if (entry->state != unlink_start)
2132 				goto found;
2133 		}
2134 		break;
2135 found:
2136 		entry->state = unlink_start;
2137 		urb = entry->urb;
2138 
2139 		/* Take a reference on the URB so it cannot be freed
2140 		 * during usb_unlink_urb(), which could otherwise
2141 		 * trigger a use-after-free: usb_unlink_urb() always
2142 		 * races with the .complete handler (including
2143 		 * defer_bh).
2144 		 */
2145 		usb_get_urb(urb);
2146 		spin_unlock_irqrestore(&q->lock, flags);
2147 		/* during some PM-driven resume scenarios,
2148 		 * these (async) unlinks complete immediately
2149 		 */
2150 		ret = usb_unlink_urb(urb);
2151 		if (ret != -EINPROGRESS && ret != 0)
2152 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2153 		else
2154 			count++;
2155 		usb_put_urb(urb);
2156 		spin_lock_irqsave(&q->lock, flags);
2157 	}
2158 	spin_unlock_irqrestore(&q->lock, flags);
2159 	return count;
2160 }
2161 
2162 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2163 {
2164 	struct lan78xx_net *dev = netdev_priv(netdev);
2165 	int ll_mtu = new_mtu + netdev->hard_header_len;
2166 	int old_hard_mtu = dev->hard_mtu;
2167 	int old_rx_urb_size = dev->rx_urb_size;
2168 	int ret;
2169 
2170 	/* no second zero-length packet read wanted after mtu-sized packets */
2171 	if ((ll_mtu % dev->maxpacket) == 0)
2172 		return -EDOM;
2173 
2174 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2175 
2176 	netdev->mtu = new_mtu;
2177 
2178 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2179 	if (dev->rx_urb_size == old_hard_mtu) {
2180 		dev->rx_urb_size = dev->hard_mtu;
2181 		if (dev->rx_urb_size > old_rx_urb_size) {
2182 			if (netif_running(dev->net)) {
2183 				unlink_urbs(dev, &dev->rxq);
2184 				tasklet_schedule(&dev->bh);
2185 			}
2186 		}
2187 	}
2188 
2189 	return 0;
2190 }
2191 
2192 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2193 {
2194 	struct lan78xx_net *dev = netdev_priv(netdev);
2195 	struct sockaddr *addr = p;
2196 	u32 addr_lo, addr_hi;
2197 	int ret;
2198 
2199 	if (netif_running(netdev))
2200 		return -EBUSY;
2201 
2202 	if (!is_valid_ether_addr(addr->sa_data))
2203 		return -EADDRNOTAVAIL;
2204 
2205 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2206 
2207 	addr_lo = netdev->dev_addr[0] |
2208 		  netdev->dev_addr[1] << 8 |
2209 		  netdev->dev_addr[2] << 16 |
2210 		  netdev->dev_addr[3] << 24;
2211 	addr_hi = netdev->dev_addr[4] |
2212 		  netdev->dev_addr[5] << 8;
2213 
2214 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2215 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2216 
2217 	/* Added to support MAC address changes */
2218 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2219 	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2220 
2221 	return 0;
2222 }
2223 
2224 /* Enable or disable Rx checksum offload engine */
2225 static int lan78xx_set_features(struct net_device *netdev,
2226 				netdev_features_t features)
2227 {
2228 	struct lan78xx_net *dev = netdev_priv(netdev);
2229 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2230 	unsigned long flags;
2231 	int ret;
2232 
2233 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2234 
2235 	if (features & NETIF_F_RXCSUM) {
2236 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2237 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2238 	} else {
2239 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2240 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2241 	}
2242 
2243 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2244 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2245 	else
2246 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2247 
2248 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2249 
2250 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2251 
2252 	return 0;
2253 }
2254 
2255 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2256 {
2257 	struct lan78xx_priv *pdata =
2258 			container_of(param, struct lan78xx_priv, set_vlan);
2259 	struct lan78xx_net *dev = pdata->dev;
2260 
2261 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2262 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2263 }
2264 
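/* The hardware VLAN filter is a 4096-bit table stored as
 * DP_SEL_VHF_VLAN_LEN 32-bit words: vid >> 5 selects the word and
 * vid & 0x1F the bit. Dataport writes sleep, so updates are deferred
 * to the set_vlan work item.
 */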
2265 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2266 				   __be16 proto, u16 vid)
2267 {
2268 	struct lan78xx_net *dev = netdev_priv(netdev);
2269 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2270 	u16 vid_bit_index;
2271 	u16 vid_dword_index;
2272 
2273 	vid_dword_index = (vid >> 5) & 0x7F;
2274 	vid_bit_index = vid & 0x1F;
2275 
2276 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2277 
2278 	/* defer register writes to a sleepable context */
2279 	schedule_work(&pdata->set_vlan);
2280 
2281 	return 0;
2282 }
2283 
2284 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2285 				    __be16 proto, u16 vid)
2286 {
2287 	struct lan78xx_net *dev = netdev_priv(netdev);
2288 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2289 	u16 vid_bit_index;
2290 	u16 vid_dword_index;
2291 
2292 	vid_dword_index = (vid >> 5) & 0x7F;
2293 	vid_bit_index = vid & 0x1F;
2294 
2295 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2296 
2297 	/* defer register writes to a sleepable context */
2298 	schedule_work(&pdata->set_vlan);
2299 
2300 	return 0;
2301 }
2302 
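/* Initialise the USB LTM (Latency Tolerance Messaging) BELT
 * registers. When LTM is enabled, board-specific values may be
 * loaded from EEPROM or OTP (24 bytes at the offset stored at word
 * 0x3F); otherwise the registers are cleared.
 */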
2303 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2304 {
2305 	int ret;
2306 	u32 buf;
2307 	u32 regs[6] = { 0 };
2308 
2309 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2310 	if (buf & USB_CFG1_LTM_ENABLE_) {
2311 		u8 temp[2];
2312 		/* Get values from EEPROM first */
2313 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2314 			if (temp[0] == 24) {
2315 				ret = lan78xx_read_raw_eeprom(dev,
2316 							      temp[1] * 2,
2317 							      24,
2318 							      (u8 *)regs);
2319 				if (ret < 0)
2320 					return;
2321 			}
2322 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2323 			if (temp[0] == 24) {
2324 				ret = lan78xx_read_raw_otp(dev,
2325 							   temp[1] * 2,
2326 							   24,
2327 							   (u8 *)regs);
2328 				if (ret < 0)
2329 					return;
2330 			}
2331 		}
2332 	}
2333 
2334 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2335 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2336 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2337 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2338 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2339 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2340 }
2341 
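/* Bring the chip to a known state: issue a LiteReset, restore the
 * MAC address, size bulk-in bursts for the connection speed, set the
 * FIFO levels, reset the PHY and enable the MAC and FCT data paths.
 */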
2342 static int lan78xx_reset(struct lan78xx_net *dev)
2343 {
2344 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2345 	u32 buf;
2346 	int ret = 0;
2347 	unsigned long timeout;
2348 	u8 sig;
2349 
2350 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2351 	buf |= HW_CFG_LRST_;
2352 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2353 
2354 	timeout = jiffies + HZ;
2355 	do {
2356 		mdelay(1);
2357 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2358 		if (time_after(jiffies, timeout)) {
2359 			netdev_warn(dev->net,
2360 				    "timeout on completion of LiteReset");
2361 			return -EIO;
2362 		}
2363 	} while (buf & HW_CFG_LRST_);
2364 
2365 	lan78xx_init_mac_address(dev);
2366 
2367 	/* save DEVID for later usage */
2368 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2369 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2370 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2371 
2372 	/* Respond to the IN token with a NAK */
2373 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2374 	buf |= USB_CFG_BIR_;
2375 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2376 
2377 	/* Init LTM */
2378 	lan78xx_init_ltm(dev);
2379 
2380 	if (dev->udev->speed == USB_SPEED_SUPER) {
2381 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2382 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2383 		dev->rx_qlen = 4;
2384 		dev->tx_qlen = 4;
2385 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2386 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2387 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2388 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2389 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2390 	} else {
2391 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2392 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2393 		dev->rx_qlen = 4;
2394 		dev->tx_qlen = 4;
2395 	}
2396 
2397 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2398 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2399 
2400 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2401 	buf |= HW_CFG_MEF_;
2402 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2403 
2404 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2405 	buf |= USB_CFG_BCE_;
2406 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2407 
2408 	/* set FIFO sizes */
2409 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2410 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2411 
2412 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2413 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2414 
2415 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2416 	ret = lan78xx_write_reg(dev, FLOW, 0);
2417 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2418 
2419 	/* Don't need rfe_ctl_lock during initialisation */
2420 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2421 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2422 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2423 
2424 	/* Enable or disable checksum offload engines */
2425 	lan78xx_set_features(dev->net, dev->net->features);
2426 
2427 	lan78xx_set_multicast(dev->net);
2428 
2429 	/* reset PHY */
2430 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2431 	buf |= PMT_CTL_PHY_RST_;
2432 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2433 
2434 	timeout = jiffies + HZ;
2435 	do {
2436 		mdelay(1);
2437 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2438 		if (time_after(jiffies, timeout)) {
2439 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
2440 			return -EIO;
2441 		}
2442 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2443 
2444 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2445 	/* LAN7801 only has RGMII mode */
2446 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
2447 		buf &= ~MAC_CR_GMII_EN_;
2448 
2449 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2450 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2451 		if (!ret && sig != EEPROM_INDICATOR) {
2452 			/* Implies there is no external EEPROM; use MAC auto speed/duplex */
2453 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2454 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2455 		}
2456 	}
2457 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2458 
2459 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2460 	buf |= MAC_TX_TXEN_;
2461 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2462 
2463 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2464 	buf |= FCT_TX_CTL_EN_;
2465 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2466 
2467 	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2468 
2469 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2470 	buf |= MAC_RX_RXEN_;
2471 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2472 
2473 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2474 	buf |= FCT_RX_CTL_EN_;
2475 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2476 
2477 	return 0;
2478 }
2479 
2480 static void lan78xx_init_stats(struct lan78xx_net *dev)
2481 {
2482 	u32 *p;
2483 	int i;
2484 
2485 	/* initialize rollover limits for the stats update;
2486 	 * some counters are 20 bits wide and some are 32 bits
2487 	 */
2488 	p = (u32 *)&dev->stats.rollover_max;
2489 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2490 		p[i] = 0xFFFFF;
2491 
2492 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2493 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2494 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2495 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2496 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2497 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2498 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2499 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2500 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2501 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2502 
2503 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2504 }
2505 
2506 static int lan78xx_open(struct net_device *net)
2507 {
2508 	struct lan78xx_net *dev = netdev_priv(net);
2509 	int ret;
2510 
2511 	ret = usb_autopm_get_interface(dev->intf);
2512 	if (ret < 0)
2513 		goto out;
2514 
2515 	phy_start(net->phydev);
2516 
2517 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2518 
2519 	/* for Link Check */
2520 	if (dev->urb_intr) {
2521 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2522 		if (ret < 0) {
2523 			netif_err(dev, ifup, dev->net,
2524 				  "intr submit %d\n", ret);
2525 			goto done;
2526 		}
2527 	}
2528 
2529 	lan78xx_init_stats(dev);
2530 
2531 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2532 
2533 	netif_start_queue(net);
2534 
2535 	dev->link_on = false;
2536 
2537 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2538 done:
2539 	usb_autopm_put_interface(dev->intf);
2540 
2541 out:
2542 	return ret;
2543 }
2544 
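/* Unlink all outstanding RX/TX URBs and wait, via dev->wait, for
 * their completions to drain into the done queue.
 */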
2545 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2546 {
2547 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2548 	DECLARE_WAITQUEUE(wait, current);
2549 	int temp;
2550 
2551 	/* ensure there are no more active urbs */
2552 	add_wait_queue(&unlink_wakeup, &wait);
2553 	set_current_state(TASK_UNINTERRUPTIBLE);
2554 	dev->wait = &unlink_wakeup;
2555 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2556 
2557 	/* maybe wait for deletions to finish. */
2558 	while (!skb_queue_empty(&dev->rxq) &&
2559 	       !skb_queue_empty(&dev->txq) &&
2560 	       !skb_queue_empty(&dev->done)) {
2561 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2562 		set_current_state(TASK_UNINTERRUPTIBLE);
2563 		netif_dbg(dev, ifdown, dev->net,
2564 			  "waited for %d urb completions\n", temp);
2565 	}
2566 	set_current_state(TASK_RUNNING);
2567 	dev->wait = NULL;
2568 	remove_wait_queue(&unlink_wakeup, &wait);
2569 }
2570 
2571 static int lan78xx_stop(struct net_device *net)
2572 {
2573 	struct lan78xx_net		*dev = netdev_priv(net);
2574 
2575 	if (timer_pending(&dev->stat_monitor))
2576 		del_timer_sync(&dev->stat_monitor);
2577 
2578 	if (net->phydev)
2579 		phy_stop(net->phydev);
2580 
2581 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2582 	netif_stop_queue(net);
2583 
2584 	netif_info(dev, ifdown, dev->net,
2585 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2586 		   net->stats.rx_packets, net->stats.tx_packets,
2587 		   net->stats.rx_errors, net->stats.tx_errors);
2588 
2589 	lan78xx_terminate_urbs(dev);
2590 
2591 	usb_kill_urb(dev->urb_intr);
2592 
2593 	skb_queue_purge(&dev->rxq_pause);
2594 
2595 	/* deferred work (task, timer, softirq) must also stop.
2596 	 * can't flush_scheduled_work() until we drop rtnl (later),
2597 	 * else workers could deadlock; so make workers a NOP.
2598 	 */
2599 	dev->flags = 0;
2600 	cancel_delayed_work_sync(&dev->wq);
2601 	tasklet_kill(&dev->bh);
2602 
2603 	usb_autopm_put_interface(dev->intf);
2604 
2605 	return 0;
2606 }
2607 
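/* Prepend the 8-byte TX command header to @skb: TX_CMD_A carries the
 * frame length plus FCS, checksum-offload and LSO flags, TX_CMD_B
 * the MSS and VLAN tag. Both words are stored little-endian.
 */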
2608 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2609 				       struct sk_buff *skb, gfp_t flags)
2610 {
2611 	u32 tx_cmd_a, tx_cmd_b;
2612 
2613 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2614 		dev_kfree_skb_any(skb);
2615 		return NULL;
2616 	}
2617 
2618 	if (skb_linearize(skb)) {
2619 		dev_kfree_skb_any(skb);
2620 		return NULL;
2621 	}
2622 
2623 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2624 
2625 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2626 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2627 
2628 	tx_cmd_b = 0;
2629 	if (skb_is_gso(skb)) {
2630 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2631 
2632 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2633 
2634 		tx_cmd_a |= TX_CMD_A_LSO_;
2635 	}
2636 
2637 	if (skb_vlan_tag_present(skb)) {
2638 		tx_cmd_a |= TX_CMD_A_IVTG_;
2639 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2640 	}
2641 
2642 	skb_push(skb, 4);
2643 	cpu_to_le32s(&tx_cmd_b);
2644 	memcpy(skb->data, &tx_cmd_b, 4);
2645 
2646 	skb_push(skb, 4);
2647 	cpu_to_le32s(&tx_cmd_a);
2648 	memcpy(skb->data, &tx_cmd_a, 4);
2649 
2650 	return skb;
2651 }
2652 
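/* Move @skb from @list to the done queue and kick the bottom half
 * when the queue was previously empty; returns the previous skb
 * state so callers can detect an in-flight unlink.
 */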
2653 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2654 			       struct sk_buff_head *list, enum skb_state state)
2655 {
2656 	unsigned long flags;
2657 	enum skb_state old_state;
2658 	struct skb_data *entry = (struct skb_data *)skb->cb;
2659 
2660 	spin_lock_irqsave(&list->lock, flags);
2661 	old_state = entry->state;
2662 	entry->state = state;
2663 
2664 	__skb_unlink(skb, list);
2665 	spin_unlock(&list->lock);
2666 	spin_lock(&dev->done.lock);
2667 
2668 	__skb_queue_tail(&dev->done, skb);
2669 	if (skb_queue_len(&dev->done) == 1)
2670 		tasklet_schedule(&dev->bh);
2671 	spin_unlock_irqrestore(&dev->done.lock, flags);
2672 
2673 	return old_state;
2674 }
2675 
2676 static void tx_complete(struct urb *urb)
2677 {
2678 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2679 	struct skb_data *entry = (struct skb_data *)skb->cb;
2680 	struct lan78xx_net *dev = entry->dev;
2681 
2682 	if (urb->status == 0) {
2683 		dev->net->stats.tx_packets += entry->num_of_packet;
2684 		dev->net->stats.tx_bytes += entry->length;
2685 	} else {
2686 		dev->net->stats.tx_errors++;
2687 
2688 		switch (urb->status) {
2689 		case -EPIPE:
2690 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2691 			break;
2692 
2693 		/* software-driven interface shutdown */
2694 		case -ECONNRESET:
2695 		case -ESHUTDOWN:
2696 			break;
2697 
2698 		case -EPROTO:
2699 		case -ETIME:
2700 		case -EILSEQ:
2701 			netif_stop_queue(dev->net);
2702 			break;
2703 		default:
2704 			netif_dbg(dev, tx_err, dev->net,
2705 				  "tx err %d\n", entry->urb->status);
2706 			break;
2707 		}
2708 	}
2709 
2710 	usb_autopm_put_interface_async(dev->intf);
2711 
2712 	defer_bh(dev, skb, &dev->txq, tx_done);
2713 }
2714 
2715 static void lan78xx_queue_skb(struct sk_buff_head *list,
2716 			      struct sk_buff *newsk, enum skb_state state)
2717 {
2718 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2719 
2720 	__skb_queue_tail(list, newsk);
2721 	entry->state = state;
2722 }
2723 
2724 static netdev_tx_t
2725 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2726 {
2727 	struct lan78xx_net *dev = netdev_priv(net);
2728 	struct sk_buff *skb2 = NULL;
2729 
2730 	if (skb) {
2731 		skb_tx_timestamp(skb);
2732 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2733 	}
2734 
2735 	if (skb2) {
2736 		skb_queue_tail(&dev->txq_pend, skb2);
2737 
2738 		/* throttle the TX path at slower than SuperSpeed USB */
2739 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2740 		    (skb_queue_len(&dev->txq_pend) > 10))
2741 			netif_stop_queue(net);
2742 	} else {
2743 		netif_dbg(dev, tx_err, dev->net,
2744 			  "lan78xx_tx_prep return NULL\n");
2745 		dev->net->stats.tx_errors++;
2746 		dev->net->stats.tx_dropped++;
2747 	}
2748 
2749 	tasklet_schedule(&dev->bh);
2750 
2751 	return NETDEV_TX_OK;
2752 }
2753 
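/* Scan the interface altsettings for the bulk-in, bulk-out and
 * interrupt-in (status) endpoints used by the driver.
 */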
2754 static int
2755 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2756 {
2757 	int tmp;
2758 	struct usb_host_interface *alt = NULL;
2759 	struct usb_host_endpoint *in = NULL, *out = NULL;
2760 	struct usb_host_endpoint *status = NULL;
2761 
2762 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2763 		unsigned ep;
2764 
2765 		in = NULL;
2766 		out = NULL;
2767 		status = NULL;
2768 		alt = intf->altsetting + tmp;
2769 
2770 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2771 			struct usb_host_endpoint *e;
2772 			int intr = 0;
2773 
2774 			e = alt->endpoint + ep;
2775 			switch (e->desc.bmAttributes) {
2776 			case USB_ENDPOINT_XFER_INT:
2777 				if (!usb_endpoint_dir_in(&e->desc))
2778 					continue;
2779 				intr = 1;
2780 				/* FALLTHROUGH */
2781 			case USB_ENDPOINT_XFER_BULK:
2782 				break;
2783 			default:
2784 				continue;
2785 			}
2786 			if (usb_endpoint_dir_in(&e->desc)) {
2787 				if (!intr && !in)
2788 					in = e;
2789 				else if (intr && !status)
2790 					status = e;
2791 			} else {
2792 				if (!out)
2793 					out = e;
2794 			}
2795 		}
2796 		if (in && out)
2797 			break;
2798 	}
2799 	if (!alt || !in || !out)
2800 		return -EINVAL;
2801 
2802 	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2803 				       in->desc.bEndpointAddress &
2804 				       USB_ENDPOINT_NUMBER_MASK);
2805 	dev->pipe_out = usb_sndbulkpipe(dev->udev,
2806 					out->desc.bEndpointAddress &
2807 					USB_ENDPOINT_NUMBER_MASK);
2808 	dev->ep_intr = status;
2809 
2810 	return 0;
2811 }
2812 
2813 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2814 {
2815 	struct lan78xx_priv *pdata = NULL;
2816 	int ret;
2817 	int i;
2818 
2819 	ret = lan78xx_get_endpoints(dev, intf);
2820 	if (ret) {
2821 		netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
2822 			    ret);
2823 		return ret;
2824 	}
2825 
2826 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2827 
2828 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2829 	if (!pdata) {
2830 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2831 		return -ENOMEM;
2832 	}
2833 
2834 	pdata->dev = dev;
2835 
2836 	spin_lock_init(&pdata->rfe_ctl_lock);
2837 	mutex_init(&pdata->dataport_mutex);
2838 
2839 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2840 
2841 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2842 		pdata->vlan_table[i] = 0;
2843 
2844 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2845 
2846 	dev->net->features = 0;
2847 
2848 	if (DEFAULT_TX_CSUM_ENABLE)
2849 		dev->net->features |= NETIF_F_HW_CSUM;
2850 
2851 	if (DEFAULT_RX_CSUM_ENABLE)
2852 		dev->net->features |= NETIF_F_RXCSUM;
2853 
2854 	if (DEFAULT_TSO_CSUM_ENABLE)
2855 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2856 
2857 	dev->net->hw_features = dev->net->features;
2858 
2859 	ret = lan78xx_setup_irq_domain(dev);
2860 	if (ret < 0) {
2861 		netdev_warn(dev->net,
2862 			    "lan78xx_setup_irq_domain() failed : %d", ret);
2863 		goto out1;
2864 	}
2865 
2866 	dev->net->hard_header_len += TX_OVERHEAD;
2867 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2868 
2869 	/* Init all registers */
2870 	ret = lan78xx_reset(dev);
2871 	if (ret) {
2872 		netdev_warn(dev->net, "Registers INIT FAILED....");
2873 		goto out2;
2874 	}
2875 
2876 	ret = lan78xx_mdio_init(dev);
2877 	if (ret) {
2878 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
2879 		goto out2;
2880 	}
2881 
2882 	dev->net->flags |= IFF_MULTICAST;
2883 
2884 	pdata->wol = WAKE_MAGIC;
2885 
2886 	return ret;
2887 
2888 out2:
2889 	lan78xx_remove_irq_domain(dev);
2890 
2891 out1:
2892 	netdev_warn(dev->net, "Bind routine FAILED");
2893 	cancel_work_sync(&pdata->set_multicast);
2894 	cancel_work_sync(&pdata->set_vlan);
2895 	kfree(pdata);
2896 	return ret;
2897 }
2898 
2899 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2900 {
2901 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2902 
2903 	lan78xx_remove_irq_domain(dev);
2904 
2905 	lan78xx_remove_mdio(dev);
2906 
2907 	if (pdata) {
2908 		cancel_work_sync(&pdata->set_multicast);
2909 		cancel_work_sync(&pdata->set_vlan);
2910 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2911 		kfree(pdata);
2912 		pdata = NULL;
2913 		dev->data[0] = 0;
2914 	}
2915 }
2916 
2917 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2918 				    struct sk_buff *skb,
2919 				    u32 rx_cmd_a, u32 rx_cmd_b)
2920 {
2921 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2922 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2923 		skb->ip_summed = CHECKSUM_NONE;
2924 	} else {
2925 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2926 		skb->ip_summed = CHECKSUM_COMPLETE;
2927 	}
2928 }
2929 
2930 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2931 {
2932 	int		status;
2933 
2934 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2935 		skb_queue_tail(&dev->rxq_pause, skb);
2936 		return;
2937 	}
2938 
2939 	dev->net->stats.rx_packets++;
2940 	dev->net->stats.rx_bytes += skb->len;
2941 
2942 	skb->protocol = eth_type_trans(skb, dev->net);
2943 
2944 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2945 		  skb->len + sizeof(struct ethhdr), skb->protocol);
2946 	memset(skb->cb, 0, sizeof(struct skb_data));
2947 
2948 	if (skb_defer_rx_timestamp(skb))
2949 		return;
2950 
2951 	status = netif_rx(skb);
2952 	if (status != NET_RX_SUCCESS)
2953 		netif_dbg(dev, rx_err, dev->net,
2954 			  "netif_rx status %d\n", status);
2955 }
2956 
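/* Split one bulk-in buffer into Ethernet frames. Each frame is
 * preceded by little-endian command words rx_cmd_a/b/c carrying
 * length, error flags and checksum status; frames are aligned to 4
 * bytes after RXW_PADDING. Returns 0 on failure.
 */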
2957 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2958 {
2959 	if (skb->len < dev->net->hard_header_len)
2960 		return 0;
2961 
2962 	while (skb->len > 0) {
2963 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
2964 		u16 rx_cmd_c;
2965 		struct sk_buff *skb2;
2966 		unsigned char *packet;
2967 
2968 		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2969 		le32_to_cpus(&rx_cmd_a);
2970 		skb_pull(skb, sizeof(rx_cmd_a));
2971 
2972 		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2973 		le32_to_cpus(&rx_cmd_b);
2974 		skb_pull(skb, sizeof(rx_cmd_b));
2975 
2976 		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2977 		le16_to_cpus(&rx_cmd_c);
2978 		skb_pull(skb, sizeof(rx_cmd_c));
2979 
2980 		packet = skb->data;
2981 
2982 		/* get the packet length */
2983 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2984 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2985 
2986 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2987 			netif_dbg(dev, rx_err, dev->net,
2988 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
2989 		} else {
2990 			/* last frame in this batch */
2991 			if (skb->len == size) {
2992 				lan78xx_rx_csum_offload(dev, skb,
2993 							rx_cmd_a, rx_cmd_b);
2994 
2995 				skb_trim(skb, skb->len - 4); /* remove fcs */
2996 				skb->truesize = size + sizeof(struct sk_buff);
2997 
2998 				return 1;
2999 			}
3000 
3001 			skb2 = skb_clone(skb, GFP_ATOMIC);
3002 			if (unlikely(!skb2)) {
3003 				netdev_warn(dev->net, "Error allocating skb");
3004 				return 0;
3005 			}
3006 
3007 			skb2->len = size;
3008 			skb2->data = packet;
3009 			skb_set_tail_pointer(skb2, size);
3010 
3011 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3012 
3013 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
3014 			skb2->truesize = size + sizeof(struct sk_buff);
3015 
3016 			lan78xx_skb_return(dev, skb2);
3017 		}
3018 
3019 		skb_pull(skb, size);
3020 
3021 		/* padding bytes before the next frame starts */
3022 		if (skb->len)
3023 			skb_pull(skb, align_count);
3024 	}
3025 
3026 	return 1;
3027 }
3028 
3029 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3030 {
3031 	if (!lan78xx_rx(dev, skb)) {
3032 		dev->net->stats.rx_errors++;
3033 		goto done;
3034 	}
3035 
3036 	if (skb->len) {
3037 		lan78xx_skb_return(dev, skb);
3038 		return;
3039 	}
3040 
3041 	netif_dbg(dev, rx_err, dev->net, "drop\n");
3042 	dev->net->stats.rx_errors++;
3043 done:
3044 	skb_queue_tail(&dev->done, skb);
3045 }
3046 
3047 static void rx_complete(struct urb *urb);
3048 
3049 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3050 {
3051 	struct sk_buff *skb;
3052 	struct skb_data *entry;
3053 	unsigned long lockflags;
3054 	size_t size = dev->rx_urb_size;
3055 	int ret = 0;
3056 
3057 	skb = netdev_alloc_skb_ip_align(dev->net, size);
3058 	if (!skb) {
3059 		usb_free_urb(urb);
3060 		return -ENOMEM;
3061 	}
3062 
3063 	entry = (struct skb_data *)skb->cb;
3064 	entry->urb = urb;
3065 	entry->dev = dev;
3066 	entry->length = 0;
3067 
3068 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3069 			  skb->data, size, rx_complete, skb);
3070 
3071 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
3072 
3073 	if (netif_device_present(dev->net) &&
3074 	    netif_running(dev->net) &&
3075 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3076 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3077 		ret = usb_submit_urb(urb, GFP_ATOMIC);
3078 		switch (ret) {
3079 		case 0:
3080 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3081 			break;
3082 		case -EPIPE:
3083 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3084 			break;
3085 		case -ENODEV:
3086 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
3087 			netif_device_detach(dev->net);
3088 			break;
3089 		case -EHOSTUNREACH:
3090 			ret = -ENOLINK;
3091 			break;
3092 		default:
3093 			netif_dbg(dev, rx_err, dev->net,
3094 				  "rx submit, %d\n", ret);
3095 			tasklet_schedule(&dev->bh);
3096 		}
3097 	} else {
3098 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3099 		ret = -ENOLINK;
3100 	}
3101 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3102 	if (ret) {
3103 		dev_kfree_skb_any(skb);
3104 		usb_free_urb(urb);
3105 	}
3106 	return ret;
3107 }
3108 
3109 static void rx_complete(struct urb *urb)
3110 {
3111 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
3112 	struct skb_data	*entry = (struct skb_data *)skb->cb;
3113 	struct lan78xx_net *dev = entry->dev;
3114 	int urb_status = urb->status;
3115 	enum skb_state state;
3116 
3117 	skb_put(skb, urb->actual_length);
3118 	state = rx_done;
3119 	entry->urb = NULL;
3120 
3121 	switch (urb_status) {
3122 	case 0:
3123 		if (skb->len < dev->net->hard_header_len) {
3124 			state = rx_cleanup;
3125 			dev->net->stats.rx_errors++;
3126 			dev->net->stats.rx_length_errors++;
3127 			netif_dbg(dev, rx_err, dev->net,
3128 				  "rx length %d\n", skb->len);
3129 		}
3130 		usb_mark_last_busy(dev->udev);
3131 		break;
3132 	case -EPIPE:
3133 		dev->net->stats.rx_errors++;
3134 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3135 		/* FALLTHROUGH */
3136 	case -ECONNRESET:				/* async unlink */
3137 	case -ESHUTDOWN:				/* hardware gone */
3138 		netif_dbg(dev, ifdown, dev->net,
3139 			  "rx shutdown, code %d\n", urb_status);
3140 		state = rx_cleanup;
3141 		entry->urb = urb;
3142 		urb = NULL;
3143 		break;
3144 	case -EPROTO:
3145 	case -ETIME:
3146 	case -EILSEQ:
3147 		dev->net->stats.rx_errors++;
3148 		state = rx_cleanup;
3149 		entry->urb = urb;
3150 		urb = NULL;
3151 		break;
3152 
3153 	/* data overrun ... flush fifo? */
3154 	case -EOVERFLOW:
3155 		dev->net->stats.rx_over_errors++;
3156 		/* FALLTHROUGH */
3157 
3158 	default:
3159 		state = rx_cleanup;
3160 		dev->net->stats.rx_errors++;
3161 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3162 		break;
3163 	}
3164 
3165 	state = defer_bh(dev, skb, &dev->rxq, state);
3166 
3167 	if (urb) {
3168 		if (netif_running(dev->net) &&
3169 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
3170 		    state != unlink_start) {
3171 			rx_submit(dev, urb, GFP_ATOMIC);
3172 			return;
3173 		}
3174 		usb_free_urb(urb);
3175 	}
3176 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3177 }
3178 
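/* TX bottom half: either send a GSO skb on its own or coalesce the
 * pending skbs (copied at 4-byte aligned offsets, up to
 * MAX_SINGLE_PACKET_SIZE) into a single bulk-out URB and submit it.
 * Submission is deferred to resume while the device is autosuspended.
 */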
3179 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3180 {
3181 	int length;
3182 	struct urb *urb = NULL;
3183 	struct skb_data *entry;
3184 	unsigned long flags;
3185 	struct sk_buff_head *tqp = &dev->txq_pend;
3186 	struct sk_buff *skb, *skb2;
3187 	int ret;
3188 	int count, pos;
3189 	int skb_totallen, pkt_cnt;
3190 
3191 	skb_totallen = 0;
3192 	pkt_cnt = 0;
3193 	count = 0;
3194 	length = 0;
3195 	spin_lock_irqsave(&tqp->lock, flags);
3196 	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3197 		if (skb_is_gso(skb)) {
3198 			if (pkt_cnt) {
3199 				/* handle previous packets first */
3200 				break;
3201 			}
3202 			count = 1;
3203 			length = skb->len - TX_OVERHEAD;
3204 			__skb_unlink(skb, tqp);
3205 			spin_unlock_irqrestore(&tqp->lock, flags);
3206 			goto gso_skb;
3207 		}
3208 
3209 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3210 			break;
3211 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3212 		pkt_cnt++;
3213 	}
3214 	spin_unlock_irqrestore(&tqp->lock, flags);
3215 
3216 	/* copy to a single skb */
3217 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3218 	if (!skb)
3219 		goto drop;
3220 
3221 	skb_put(skb, skb_totallen);
3222 
3223 	for (count = pos = 0; count < pkt_cnt; count++) {
3224 		skb2 = skb_dequeue(tqp);
3225 		if (skb2) {
3226 			length += (skb2->len - TX_OVERHEAD);
3227 			memcpy(skb->data + pos, skb2->data, skb2->len);
3228 			pos += roundup(skb2->len, sizeof(u32));
3229 			dev_kfree_skb(skb2);
3230 		}
3231 	}
3232 
3233 gso_skb:
3234 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3235 	if (!urb)
3236 		goto drop;
3237 
3238 	entry = (struct skb_data *)skb->cb;
3239 	entry->urb = urb;
3240 	entry->dev = dev;
3241 	entry->length = length;
3242 	entry->num_of_packet = count;
3243 
3244 	spin_lock_irqsave(&dev->txq.lock, flags);
3245 	ret = usb_autopm_get_interface_async(dev->intf);
3246 	if (ret < 0) {
3247 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3248 		goto drop;
3249 	}
3250 
3251 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3252 			  skb->data, skb->len, tx_complete, skb);
3253 
3254 	if (length % dev->maxpacket == 0) {
3255 		/* send USB_ZERO_PACKET */
3256 		urb->transfer_flags |= URB_ZERO_PACKET;
3257 	}
3258 
3259 #ifdef CONFIG_PM
3260 	/* if this triggers, the device is still asleep */
3261 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3262 		/* transmission will be done in resume */
3263 		usb_anchor_urb(urb, &dev->deferred);
3264 		/* no point in processing more packets */
3265 		netif_stop_queue(dev->net);
3266 		usb_put_urb(urb);
3267 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3268 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3269 		return;
3270 	}
3271 #endif
3272 
3273 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3274 	switch (ret) {
3275 	case 0:
3276 		netif_trans_update(dev->net);
3277 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3278 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3279 			netif_stop_queue(dev->net);
3280 		break;
3281 	case -EPIPE:
3282 		netif_stop_queue(dev->net);
3283 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3284 		usb_autopm_put_interface_async(dev->intf);
3285 		break;
3286 	default:
3287 		usb_autopm_put_interface_async(dev->intf);
3288 		netif_dbg(dev, tx_err, dev->net,
3289 			  "tx: submit urb err %d\n", ret);
3290 		break;
3291 	}
3292 
3293 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3294 
3295 	if (ret) {
3296 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3297 drop:
3298 		dev->net->stats.tx_dropped++;
3299 		if (skb)
3300 			dev_kfree_skb_any(skb);
3301 		usb_free_urb(urb);
3302 	} else
3303 		netif_dbg(dev, tx_queued, dev->net,
3304 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3305 }
3306 
3307 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3308 {
3309 	struct urb *urb;
3310 	int i;
3311 
3312 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3313 		for (i = 0; i < 10; i++) {
3314 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3315 				break;
3316 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3317 			if (urb)
3318 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3319 					return;
3320 		}
3321 
3322 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3323 			tasklet_schedule(&dev->bh);
3324 	}
3325 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3326 		netif_wake_queue(dev->net);
3327 }
3328 
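/* Tasklet bottom half: drain the done queue (hand received buffers
 * to rx_process(), free completed TX entries), then refill the RX
 * queue and rearm the statistics timer.
 */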
3329 static void lan78xx_bh(unsigned long param)
3330 {
3331 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3332 	struct sk_buff *skb;
3333 	struct skb_data *entry;
3334 
3335 	while ((skb = skb_dequeue(&dev->done))) {
3336 		entry = (struct skb_data *)(skb->cb);
3337 		switch (entry->state) {
3338 		case rx_done:
3339 			entry->state = rx_cleanup;
3340 			rx_process(dev, skb);
3341 			continue;
3342 		case tx_done:
3343 			usb_free_urb(entry->urb);
3344 			dev_kfree_skb(skb);
3345 			continue;
3346 		case rx_cleanup:
3347 			usb_free_urb(entry->urb);
3348 			dev_kfree_skb(skb);
3349 			continue;
3350 		default:
3351 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3352 			return;
3353 		}
3354 	}
3355 
3356 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3357 		/* reset update timer delta */
3358 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3359 			dev->delta = 1;
3360 			mod_timer(&dev->stat_monitor,
3361 				  jiffies + STAT_UPDATE_TIMER);
3362 		}
3363 
3364 		if (!skb_queue_empty(&dev->txq_pend))
3365 			lan78xx_tx_bh(dev);
3366 
3367 		if (!timer_pending(&dev->delay) &&
3368 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3369 			lan78xx_rx_bh(dev);
3370 	}
3371 }
3372 
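/* Deferred event worker: clears halted bulk endpoints
 * (EVENT_TX_HALT/EVENT_RX_HALT), performs deferred link resets and
 * runs the periodic statistics update, backing off the refresh
 * interval (dev->delta doubles, capped at 50).
 */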
3373 static void lan78xx_delayedwork(struct work_struct *work)
3374 {
3375 	int status;
3376 	struct lan78xx_net *dev;
3377 
3378 	dev = container_of(work, struct lan78xx_net, wq.work);
3379 
3380 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3381 		unlink_urbs(dev, &dev->txq);
3382 		status = usb_autopm_get_interface(dev->intf);
3383 		if (status < 0)
3384 			goto fail_pipe;
3385 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3386 		usb_autopm_put_interface(dev->intf);
3387 		if (status < 0 &&
3388 		    status != -EPIPE &&
3389 		    status != -ESHUTDOWN) {
3390 			if (netif_msg_tx_err(dev))
3391 fail_pipe:
3392 				netdev_err(dev->net,
3393 					   "can't clear tx halt, status %d\n",
3394 					   status);
3395 		} else {
3396 			clear_bit(EVENT_TX_HALT, &dev->flags);
3397 			if (status != -ESHUTDOWN)
3398 				netif_wake_queue(dev->net);
3399 		}
3400 	}
3401 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3402 		unlink_urbs(dev, &dev->rxq);
3403 		status = usb_autopm_get_interface(dev->intf);
3404 		if (status < 0)
3405 			goto fail_halt;
3406 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3407 		usb_autopm_put_interface(dev->intf);
3408 		if (status < 0 &&
3409 		    status != -EPIPE &&
3410 		    status != -ESHUTDOWN) {
3411 			if (netif_msg_rx_err(dev))
3412 fail_halt:
3413 				netdev_err(dev->net,
3414 					   "can't clear rx halt, status %d\n",
3415 					   status);
3416 		} else {
3417 			clear_bit(EVENT_RX_HALT, &dev->flags);
3418 			tasklet_schedule(&dev->bh);
3419 		}
3420 	}
3421 
3422 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3423 		int ret = 0;
3424 
3425 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3426 		status = usb_autopm_get_interface(dev->intf);
3427 		if (status < 0)
3428 			goto skip_reset;
3429 		if (lan78xx_link_reset(dev) < 0) {
3430 			usb_autopm_put_interface(dev->intf);
3431 skip_reset:
3432 			netdev_info(dev->net, "link reset failed (%d)\n",
3433 				    ret);
3434 		} else {
3435 			usb_autopm_put_interface(dev->intf);
3436 		}
3437 	}
3438 
3439 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3440 		lan78xx_update_stats(dev);
3441 
3442 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3443 
3444 		mod_timer(&dev->stat_monitor,
3445 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3446 
3447 		dev->delta = min((dev->delta * 2), 50);
3448 	}
3449 }
3450 
3451 static void intr_complete(struct urb *urb)
3452 {
3453 	struct lan78xx_net *dev = urb->context;
3454 	int status = urb->status;
3455 
3456 	switch (status) {
3457 	/* success */
3458 	case 0:
3459 		lan78xx_status(dev, urb);
3460 		break;
3461 
3462 	/* software-driven interface shutdown */
3463 	case -ENOENT:			/* urb killed */
3464 	case -ESHUTDOWN:		/* hardware gone */
3465 		netif_dbg(dev, ifdown, dev->net,
3466 			  "intr shutdown, code %d\n", status);
3467 		return;
3468 
3469 	/* NOTE:  not throttling like RX/TX, since this endpoint
3470 	 * already polls infrequently
3471 	 */
3472 	default:
3473 		netdev_dbg(dev->net, "intr status %d\n", status);
3474 		break;
3475 	}
3476 
3477 	if (!netif_running(dev->net))
3478 		return;
3479 
3480 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3481 	status = usb_submit_urb(urb, GFP_ATOMIC);
3482 	if (status != 0)
3483 		netif_err(dev, timer, dev->net,
3484 			  "intr resubmit --> %d\n", status);
3485 }
3486 
3487 static void lan78xx_disconnect(struct usb_interface *intf)
3488 {
3489 	struct lan78xx_net		*dev;
3490 	struct usb_device		*udev;
3491 	struct net_device		*net;
3492 
3493 	dev = usb_get_intfdata(intf);
3494 	usb_set_intfdata(intf, NULL);
3495 	if (!dev)
3496 		return;
3497 
3498 	udev = interface_to_usbdev(intf);
3499 	net = dev->net;
3500 
3501 	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3502 	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3503 
3504 	phy_disconnect(net->phydev);
3505 
3506 	unregister_netdev(net);
3507 
3508 	cancel_delayed_work_sync(&dev->wq);
3509 
3510 	usb_scuttle_anchored_urbs(&dev->deferred);
3511 
3512 	lan78xx_unbind(dev, intf);
3513 
3514 	usb_kill_urb(dev->urb_intr);
3515 	usb_free_urb(dev->urb_intr);
3516 
3517 	free_netdev(net);
3518 	usb_put_dev(udev);
3519 }
3520 
3521 static void lan78xx_tx_timeout(struct net_device *net)
3522 {
3523 	struct lan78xx_net *dev = netdev_priv(net);
3524 
3525 	unlink_urbs(dev, &dev->txq);
3526 	tasklet_schedule(&dev->bh);
3527 }
3528 
3529 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3530 						struct net_device *netdev,
3531 						netdev_features_t features)
3532 {
3533 	if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3534 		features &= ~NETIF_F_GSO_MASK;
3535 
3536 	features = vlan_features_check(skb, features);
3537 	features = vxlan_features_check(skb, features);
3538 
3539 	return features;
3540 }
3541 
3542 static const struct net_device_ops lan78xx_netdev_ops = {
3543 	.ndo_open		= lan78xx_open,
3544 	.ndo_stop		= lan78xx_stop,
3545 	.ndo_start_xmit		= lan78xx_start_xmit,
3546 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3547 	.ndo_change_mtu		= lan78xx_change_mtu,
3548 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3549 	.ndo_validate_addr	= eth_validate_addr,
3550 	.ndo_do_ioctl		= lan78xx_ioctl,
3551 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3552 	.ndo_set_features	= lan78xx_set_features,
3553 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3554 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3555 	.ndo_features_check	= lan78xx_features_check,
3556 };
3557 
3558 static void lan78xx_stat_monitor(unsigned long param)
3559 {
3560 	struct lan78xx_net *dev;
3561 
3562 	dev = (struct lan78xx_net *)param;
3563 
3564 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3565 }
3566 
3567 static int lan78xx_probe(struct usb_interface *intf,
3568 			 const struct usb_device_id *id)
3569 {
3570 	struct lan78xx_net *dev;
3571 	struct net_device *netdev;
3572 	struct usb_device *udev;
3573 	int ret;
3574 	unsigned maxp;
3575 	unsigned period;
3576 	u8 *buf = NULL;
3577 
3578 	udev = interface_to_usbdev(intf);
3579 	udev = usb_get_dev(udev);
3580 
3581 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3582 	if (!netdev) {
3583 		dev_err(&intf->dev, "Error: OOM\n");
3584 		ret = -ENOMEM;
3585 		goto out1;
3586 	}
3587 
3588 	/* netdev_printk() needs this */
3589 	SET_NETDEV_DEV(netdev, &intf->dev);
3590 
3591 	dev = netdev_priv(netdev);
3592 	dev->udev = udev;
3593 	dev->intf = intf;
3594 	dev->net = netdev;
3595 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3596 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3597 
3598 	skb_queue_head_init(&dev->rxq);
3599 	skb_queue_head_init(&dev->txq);
3600 	skb_queue_head_init(&dev->done);
3601 	skb_queue_head_init(&dev->rxq_pause);
3602 	skb_queue_head_init(&dev->txq_pend);
3603 	mutex_init(&dev->phy_mutex);
3604 
3605 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3606 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3607 	init_usb_anchor(&dev->deferred);
3608 
3609 	netdev->netdev_ops = &lan78xx_netdev_ops;
3610 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3611 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3612 
3613 	dev->stat_monitor.function = lan78xx_stat_monitor;
3614 	dev->stat_monitor.data = (unsigned long)dev;
3615 	dev->delta = 1;
3616 	init_timer(&dev->stat_monitor);
3617 
3618 	mutex_init(&dev->stats.access_lock);
3619 
3620 	ret = lan78xx_bind(dev, intf);
3621 	if (ret < 0)
3622 		goto out2;
3623 	strcpy(netdev->name, "eth%d");
3624 
3625 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3626 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3627 
3628 	/* MTU range: 68 - 9000 */
3629 	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3630 	netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3631 
3632 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3633 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3634 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3635 
3636 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3637 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3638 
3639 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3640 					dev->ep_intr->desc.bEndpointAddress &
3641 					USB_ENDPOINT_NUMBER_MASK);
3642 	period = dev->ep_intr->desc.bInterval;
3643 
3644 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3645 	buf = kmalloc(maxp, GFP_KERNEL);
3646 	if (buf) {
3647 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3648 		if (!dev->urb_intr) {
3649 			ret = -ENOMEM;
3650 			kfree(buf);
3651 			goto out3;
3652 		} else {
3653 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3654 					 dev->pipe_intr, buf, maxp,
3655 					 intr_complete, dev, period);
3656 		}
3657 	}
3658 
3659 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3660 
3661 	/* driver requires remote-wakeup capability during autosuspend. */
3662 	intf->needs_remote_wakeup = 1;
3663 
3664 	ret = lan78xx_phy_init(dev);
3665 	if (ret < 0)
3666 		goto out4;
3667 
3668 	ret = register_netdev(netdev);
3669 	if (ret != 0) {
3670 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3671 		goto out5;
3672 	}
3673 
3674 	usb_set_intfdata(intf, dev);
3675 
3676 	ret = device_set_wakeup_enable(&udev->dev, true);
3677 
3678 	/* The default autosuspend delay of 2 seconds costs more in
3679 	 * overhead than it saves; use 10 seconds instead.
3680 	 */
3681 	pm_runtime_set_autosuspend_delay(&udev->dev,
3682 					 DEFAULT_AUTOSUSPEND_DELAY);
3683 
3684 	return 0;
3685 
3686 out5:
3687 	phy_disconnect(netdev->phydev);
3688 out4:
3689 	usb_free_urb(dev->urb_intr);
3690 out3:
3691 	lan78xx_unbind(dev, intf);
3692 out2:
3693 	free_netdev(netdev);
3694 out1:
3695 	usb_put_dev(udev);
3696 
3697 	return ret;
3698 }
3699 
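/* Bit-serial CRC-16 (polynomial 0x8005, seed 0xFFFF, data LSB first)
 * over the given bytes.  The result must match the CRC the hardware
 * computes over the masked bytes of a received frame, e.g.:
 *
 *	crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
 *
 * which is then stored in a WUF_CFGx wakeup-filter register.
 */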
3700 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3701 {
3702 	const u16 crc16poly = 0x8005;
3703 	int i;
3704 	u16 bit, crc, msb;
3705 	u8 data;
3706 
3707 	crc = 0xFFFF;
3708 	for (i = 0; i < len; i++) {
3709 		data = *buf++;
3710 		for (bit = 0; bit < 8; bit++) {
3711 			msb = crc >> 15;
3712 			crc <<= 1;
3713 
3714 			if (msb ^ (u16)(data & 1)) {
3715 				crc ^= crc16poly;
3716 				crc |= (u16)0x0001U;
3717 			}
3718 			data >>= 1;
3719 		}
3720 	}
3721 
3722 	return crc;
3723 }
3724 
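/* Program wake-on-LAN for system suspend.  Each WUF_CFGx/WUF_MASKx
 * pair describes one wakeup frame filter: the mask selects which
 * bytes of the frame feed the CRC-16 above, e.g. mask 7 covers the
 * three-byte 01:00:5e IPv4 multicast prefix, mask 3 the two-byte
 * 33:33 IPv6 multicast prefix, and mask 0x3000 bytes 12-13 (the
 * EtherType) for ARP.  PMT_CTL then selects a suspend state with the
 * matching wake sources enabled.
 */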
3725 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3726 {
3727 	u32 buf;
3728 	int ret;
3729 	int mask_index;
3730 	u16 crc;
3731 	u32 temp_wucsr;
3732 	u32 temp_pmt_ctl;
3733 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3734 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3735 	const u8 arp_type[2] = { 0x08, 0x06 };
3736 
3737 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3738 	buf &= ~MAC_TX_TXEN_;
3739 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3740 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3741 	buf &= ~MAC_RX_RXEN_;
3742 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3743 
3744 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3745 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3746 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3747 
3748 	temp_wucsr = 0;
3749 
3750 	temp_pmt_ctl = 0;
3751 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3752 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3753 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3754 
3755 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3756 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3757 
3758 	mask_index = 0;
3759 	if (wol & WAKE_PHY) {
3760 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3761 
3762 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3763 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3764 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3765 	}
3766 	if (wol & WAKE_MAGIC) {
3767 		temp_wucsr |= WUCSR_MPEN_;
3768 
3769 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3770 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3771 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3772 	}
3773 	if (wol & WAKE_BCAST) {
3774 		temp_wucsr |= WUCSR_BCST_EN_;
3775 
3776 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3777 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3778 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3779 	}
3780 	if (wol & WAKE_MCAST) {
3781 		temp_wucsr |= WUCSR_WAKE_EN_;
3782 
3783 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3784 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3785 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3786 					WUF_CFGX_EN_ |
3787 					WUF_CFGX_TYPE_MCAST_ |
3788 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3789 					(crc & WUF_CFGX_CRC16_MASK_));
3790 
3791 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3792 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3793 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3794 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3795 		mask_index++;
3796 
3797 		/* for IPv6 Multicast */
3798 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3799 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3800 					WUF_CFGX_EN_ |
3801 					WUF_CFGX_TYPE_MCAST_ |
3802 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3803 					(crc & WUF_CFGX_CRC16_MASK_));
3804 
3805 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3806 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3807 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3808 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3809 		mask_index++;
3810 
3811 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3812 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3813 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3814 	}
3815 	if (wol & WAKE_UCAST) {
3816 		temp_wucsr |= WUCSR_PFDA_EN_;
3817 
3818 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3819 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3820 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3821 	}
3822 	if (wol & WAKE_ARP) {
3823 		temp_wucsr |= WUCSR_WAKE_EN_;
3824 
3825 		/* set WUF_CFG & WUF_MASK
3826 		 * for packettype (offset 12,13) = ARP (0x0806)
3827 		 */
3828 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3829 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3830 					WUF_CFGX_EN_ |
3831 					WUF_CFGX_TYPE_ALL_ |
3832 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3833 					(crc & WUF_CFGX_CRC16_MASK_));
3834 
3835 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3836 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3837 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3838 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3839 		mask_index++;
3840 
3841 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3842 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3843 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3844 	}
3845 
3846 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3847 
3848 	/* when multiple WoL bits are set, fall back to SUSPEND0 */
3849 	if (hweight_long((unsigned long)wol) > 1) {
3850 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3851 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3852 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3853 	}
3854 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3855 
3856 	/* clear the wakeup status (WUPS) bits */
3857 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3858 	buf |= PMT_CTL_WUPS_MASK_;
3859 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3860 
3861 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3862 	buf |= MAC_RX_RXEN_;
3863 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3864 
3865 	return 0;
3866 }
3867 
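/* Two suspend flavours: for runtime (auto) suspend the device is put
 * in SUSPEND3 and armed to wake on any good received frame, so the
 * link stays usable; for system sleep lan78xx_set_suspend() applies
 * the user's WoL settings instead.  Pending tx traffic vetoes
 * autosuspend with -EBUSY.
 */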
3868 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3869 {
3870 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3871 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3872 	u32 buf;
3873 	int ret;
3874 	int event;
3875 
3876 	event = message.event;
3877 
3878 	if (!dev->suspend_count++) {
3879 		spin_lock_irq(&dev->txq.lock);
3880 		/* don't autosuspend while transmitting */
3881 		if ((skb_queue_len(&dev->txq) ||
3882 		     skb_queue_len(&dev->txq_pend)) &&
3883 		    PMSG_IS_AUTO(message)) {
3884 			spin_unlock_irq(&dev->txq.lock);
3885 			ret = -EBUSY;
3886 			goto out;
3887 		} else {
3888 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3889 			spin_unlock_irq(&dev->txq.lock);
3890 		}
3891 
3892 		/* stop TX & RX */
3893 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3894 		buf &= ~MAC_TX_TXEN_;
3895 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
3896 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3897 		buf &= ~MAC_RX_RXEN_;
3898 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
3899 
3900 		/* empty out the Rx and Tx queues */
3901 		netif_device_detach(dev->net);
3902 		lan78xx_terminate_urbs(dev);
3903 		usb_kill_urb(dev->urb_intr);
3904 
3905 		/* reattach */
3906 		netif_device_attach(dev->net);
3907 	}
3908 
3909 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3910 		del_timer(&dev->stat_monitor);
3911 
3912 		if (PMSG_IS_AUTO(message)) {
3913 			/* auto suspend (selective suspend) */
3914 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3915 			buf &= ~MAC_TX_TXEN_;
3916 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
3917 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3918 			buf &= ~MAC_RX_RXEN_;
3919 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3920 
3921 			ret = lan78xx_write_reg(dev, WUCSR, 0);
3922 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
3923 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3924 
3925 			/* enable good-frame (RFE) wakeup */
3926 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
3927 
3928 			buf |= WUCSR_RFE_WAKE_EN_;
3929 			buf |= WUCSR_STORE_WAKE_;
3930 
3931 			ret = lan78xx_write_reg(dev, WUCSR, buf);
3932 
3933 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3934 
3935 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3936 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
3937 
3938 			buf |= PMT_CTL_PHY_WAKE_EN_;
3939 			buf |= PMT_CTL_WOL_EN_;
3940 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
3941 			buf |= PMT_CTL_SUS_MODE_3_;
3942 
3943 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3944 
3945 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3946 
3947 			buf |= PMT_CTL_WUPS_MASK_;
3948 
3949 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3950 
3951 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3952 			buf |= MAC_RX_RXEN_;
3953 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3954 		} else {
3955 			lan78xx_set_suspend(dev, pdata->wol);
3956 		}
3957 	}
3958 
3959 	ret = 0;
3960 out:
3961 	return ret;
3962 }
3963 
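/* Counterpart to lan78xx_suspend(): restart the statistics timer,
 * resubmit the interrupt URB, requeue any tx URBs that were anchored
 * to dev->deferred while asleep, clear the wakeup status registers
 * and re-enable the transmitter.
 */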
3964 static int lan78xx_resume(struct usb_interface *intf)
3965 {
3966 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3967 	struct sk_buff *skb;
3968 	struct urb *res;
3969 	int ret;
3970 	u32 buf;
3971 
3972 	if (!timer_pending(&dev->stat_monitor)) {
3973 		dev->delta = 1;
3974 		mod_timer(&dev->stat_monitor,
3975 			  jiffies + STAT_UPDATE_TIMER);
3976 	}
3977 
3978 	if (!--dev->suspend_count) {
3979 		/* resume interrupt URBs */
3980 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3981 			usb_submit_urb(dev->urb_intr, GFP_NOIO);
3982 
3983 		spin_lock_irq(&dev->txq.lock);
3984 		while ((res = usb_get_from_anchor(&dev->deferred))) {
3985 			skb = (struct sk_buff *)res->context;
3986 			ret = usb_submit_urb(res, GFP_ATOMIC);
3987 			if (ret < 0) {
3988 				dev_kfree_skb_any(skb);
3989 				usb_free_urb(res);
3990 				usb_autopm_put_interface_async(dev->intf);
3991 			} else {
3992 				netif_trans_update(dev->net);
3993 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
3994 			}
3995 		}
3996 
3997 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3998 		spin_unlock_irq(&dev->txq.lock);
3999 
4000 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
4001 			if (skb_queue_len(&dev->txq) < dev->tx_qlen)
4002 				netif_start_queue(dev->net);
4003 			tasklet_schedule(&dev->bh);
4004 		}
4005 	}
4006 
4007 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4008 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4009 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4010 
4011 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
4012 					     WUCSR2_ARP_RCD_ |
4013 					     WUCSR2_IPV6_TCPSYN_RCD_ |
4014 					     WUCSR2_IPV4_TCPSYN_RCD_);
4015 
4016 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
4017 					    WUCSR_EEE_RX_WAKE_ |
4018 					    WUCSR_PFDA_FR_ |
4019 					    WUCSR_RFE_WAKE_FR_ |
4020 					    WUCSR_WUFR_ |
4021 					    WUCSR_MPR_ |
4022 					    WUCSR_BCST_FR_);
4023 
4024 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
4025 	buf |= MAC_TX_TXEN_;
4026 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
4027 
4028 	return 0;
4029 }
4030 
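/* After a reset the chip may have lost its register state, so re-run
 * the full hardware init and restart the PHY before taking the
 * normal resume path.
 */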
4031 static int lan78xx_reset_resume(struct usb_interface *intf)
4032 {
4033 	struct lan78xx_net *dev = usb_get_intfdata(intf);
4034 
4035 	lan78xx_reset(dev);
4036 
4037 	phy_start(dev->net->phydev);
4038 
4039 	return lan78xx_resume(intf);
4040 }
4041 
4042 static const struct usb_device_id products[] = {
4043 	{
4044 	/* LAN7800 USB Gigabit Ethernet Device */
4045 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
4046 	},
4047 	{
4048 	/* LAN7850 USB Gigabit Ethernet Device */
4049 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
4050 	},
4051 	{
4052 	/* LAN7801 USB Gigabit Ethernet Device */
4053 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
4054 	},
4055 	{},
4056 };
4057 MODULE_DEVICE_TABLE(usb, products);
4058 
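/* disable_hub_initiated_lpm keeps hubs from initiating USB3 link
 * power-management transitions on idle timeouts, while
 * supports_autosuspend opts this driver in to the runtime PM
 * callbacks above.
 */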
4059 static struct usb_driver lan78xx_driver = {
4060 	.name			= DRIVER_NAME,
4061 	.id_table		= products,
4062 	.probe			= lan78xx_probe,
4063 	.disconnect		= lan78xx_disconnect,
4064 	.suspend		= lan78xx_suspend,
4065 	.resume			= lan78xx_resume,
4066 	.reset_resume		= lan78xx_reset_resume,
4067 	.supports_autosuspend	= 1,
4068 	.disable_hub_initiated_lpm = 1,
4069 };
4070 
4071 module_usb_driver(lan78xx_driver);
4072 
4073 MODULE_AUTHOR(DRIVER_AUTHOR);
4074 MODULE_DESCRIPTION(DRIVER_DESC);
4075 MODULE_LICENSE("GPL");
4076