/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <net/ip6_checksum.h>
#include <linux/microchipphy.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.4"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

#define	MII_READ			1
#define	MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (msec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistics update interval (msec) */
#define STAT_UPDATE_TIMER		(1 * 1000)
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"RX Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};

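/* The counter structures below mirror the block of statistics returned
 * by the device. Their field order must match the string order above,
 * since lan78xx_get_stats() exports the whole block with a memcpy.
 */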
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9

struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};

struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;

	int			rx_qlen;
	int			tx_qlen;
	struct sk_buff_head	rxq;
	struct sk_buff_head	txq;
	struct sk_buff_head	done;
	struct sk_buff_head	rxq_pause;
	struct sk_buff_head	txq_pend;

	struct tasklet_struct	bh;
	struct delayed_work	wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		phy_mutex; /* for phy access */
	unsigned		pipe_in, pipe_out, pipe_intr;

	u32			hard_mtu;	/* count any extra framing */
	size_t			rx_urb_size;	/* size for rx urbs */

	unsigned long		flags;

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned		maxpacket;
	struct timer_list	delay;
	struct timer_list	stat_monitor;

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;
	u32			chiprev;
	struct mii_bus		*mdiobus;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

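/* Register access helpers. The 4-byte transfer buffer is kmalloc'd
 * rather than placed on the stack because USB transfer buffers must be
 * DMA-able. Register values travel over the wire in little-endian form,
 * hence the le32 conversions on each side.
 */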
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
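
/* A minimal sketch of the read-modify-write pattern used with the two
 * helpers above throughout this driver (see e.g. lan78xx_set_eee()):
 *
 *	u32 buf;
 *
 *	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
 *	buf |= MAC_CR_EEE_EN_;
 *	ret = lan78xx_write_reg(dev, MAC_CR, buf);
 */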

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stats, ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}

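/* The hardware counters are 32 bits wide and wrap. A fresh reading that
 * is smaller than the previously saved one indicates a wrap, which is
 * tallied per field in dev->stats.rollover_count.
 */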
#define check_counter_rollover(struct1, dev_stats, member) {	\
	if (struct1->member < dev_stats.saved.member)		\
		dev_stats.rollover_count.member++;		\
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

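/* Fold the latest 32-bit readings into the 64-bit running totals: for
 * each counter, curr_stat = reading + wraps * (rollover_max + 1). The
 * three stat structures are laid out identically, so they are walked
 * here as flat arrays of u32/u64 words.
 */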
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with a timeout.
 * Called with phy_mutex held.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

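/* Compose a MII_ACC register value: PHY address, register index and
 * transfer direction, plus the BUSY bit that triggers the transaction.
 */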
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

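/* EEPROM bytes are read one at a time: issue a READ command with the
 * target address in E2P_CMD, wait for the controller to finish, then
 * pick the byte out of E2P_DATA.
 */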
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

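/* EEPROM writes follow the same one-byte-at-a-time protocol as reads,
 * preceded by a single EWEN (erase/write enable) command.
 */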
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

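/* OTP access: the OTP block must first be powered up (OTP_PWR_DN
 * cleared), then each byte is addressed through OTP_ADDR1/OTP_ADDR2 and
 * transferred once a GO command completes (OTP_STATUS no longer busy).
 */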
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

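/* Dataport (internal RAM) access: select the target RAM via DP_SEL,
 * then for each word write DP_ADDR/DP_DATA and issue a DP_CMD write,
 * polling DP_SEL_DPRDY_ between operations.
 */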
static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32	temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds our own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set the first 32 into the Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
							(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

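/* Resolve the negotiated (or forced) pause settings and program them.
 * FLOW enables TX/RX pause in the MAC; FCT_FLOW sets FIFO flow-control
 * thresholds, with per-USB-speed constants (0x817 for SuperSpeed,
 * 0x211 for high speed) used below.
 */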
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		phy_mac_interrupt(phydev, 0);

		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		phy_mac_interrupt(phydev, 1);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}

/* Some work can't be done in tasklets, so we use keventd.
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. Hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

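/* The interrupt endpoint delivers a 4-byte little-endian status word.
 * A PHY event (INT_ENP_PHY_INT) is not handled here in interrupt
 * context; it is deferred to keventd as EVENT_LINK_RESET.
 */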
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	ee->magic = LAN78XX_EEPROM_MAGIC;

	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Allow entire EEPROM update only */
	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
	    (ee->offset == 0) &&
	    (ee->len == 512) &&
	    (data[0] == EEPROM_INDICATOR))
		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	return -EINVAL;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		pdata->wol |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
		pdata->wol |= WAKE_MCAST;
	if (wol->wolopts & WAKE_BCAST)
		pdata->wol |= WAKE_BCAST;
	if (wol->wolopts & WAKE_MAGIC)
		pdata->wol |= WAKE_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		pdata->wol |= WAKE_PHY;
	if (wol->wolopts & WAKE_ARP)
		pdata->wol |= WAKE_ARP;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same
		 * microsecond unit
		 */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	phy_read_status(net->phydev);

	return net->phydev->link;
}

static int lan78xx_nway_reset(struct net_device *net)
{
	return phy_start_aneg(net->phydev);
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_mdix_status(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int buf;

	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
	buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
	phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);

	return buf;
}

static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int buf;

	if (mdix_ctrl == ETH_TP_MDI) {
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_1);
		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
			  buf | LAN88XX_EXT_MODE_CTRL_MDI_);
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_0);
	} else if (mdix_ctrl == ETH_TP_MDI_X) {
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_1);
		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
			  buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_0);
	} else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_1);
		buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
		buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
		phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
			  buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
		phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
			  LAN88XX_EXT_PAGE_SPACE_0);
	}
	dev->mdix_ctrl = mdix_ctrl;
}

static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	int buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_gset(phydev, cmd);

	buf = lan78xx_get_mdix_status(net);

	buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
	if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
		cmd->eth_tp_mdix = ETH_TP_MDI;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
	} else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
		cmd->eth_tp_mdix = ETH_TP_MDI_X;
		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl)
		lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);

	/* change speed & duplex */
	ret = phy_ethtool_sset(phydev, cmd);

	if (!cmd->autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

	phy_ethtool_gset(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ret;

	phy_ethtool_gset(phydev, &ecmd);

	if (pause->autoneg && !ecmd.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.autoneg) {
		u32 mii_adv;

		ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
		phy_ethtool_sset(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= lan78xx_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_settings	= lan78xx_get_settings,
	.set_settings	= lan78xx_set_settings,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
};

static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}

static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[ETH_ALEN];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* try reading the MAC address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
					 addr) == 0) ||
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
				      addr) == 0)) {
			if (is_valid_ether_addr(addr)) {
				/* EEPROM values are valid, so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
			} else {
				/* generate a random MAC */
				random_ether_addr(addr);
				netif_dbg(dev, ifup, dev->net,
					  "MAC address set to random addr");
			}

			addr_lo = addr[0] | (addr[1] << 8) |
				  (addr[2] << 16) | (addr[3] << 24);
			addr_hi = addr[4] | (addr[5] << 8);

			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		} else {
			/* generate a random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}
	}

	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

lan78xx_mdio_init(struct lan78xx_net * dev)1761 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1762 {
1763 	int ret;
1764 
1765 	dev->mdiobus = mdiobus_alloc();
1766 	if (!dev->mdiobus) {
1767 		netdev_err(dev->net, "can't allocate MDIO bus\n");
1768 		return -ENOMEM;
1769 	}
1770 
1771 	dev->mdiobus->priv = (void *)dev;
1772 	dev->mdiobus->read = lan78xx_mdiobus_read;
1773 	dev->mdiobus->write = lan78xx_mdiobus_write;
1774 	dev->mdiobus->name = "lan78xx-mdiobus";
1775 
1776 	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1777 		 dev->udev->bus->busnum, dev->udev->devnum);
1778 
1779 	switch (dev->chipid) {
1780 	case ID_REV_CHIP_ID_7800_:
1781 	case ID_REV_CHIP_ID_7850_:
1782 		/* set to internal PHY id */
1783 		dev->mdiobus->phy_mask = ~(1 << 1);
1784 		break;
1785 	}
1786 
1787 	ret = mdiobus_register(dev->mdiobus);
1788 	if (ret) {
1789 		netdev_err(dev->net, "can't register MDIO bus\n");
1790 		goto exit1;
1791 	}
1792 
1793 	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1794 	return 0;
1795 exit1:
1796 	mdiobus_free(dev->mdiobus);
1797 	return ret;
1798 }
1799 
1800 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1801 {
1802 	mdiobus_unregister(dev->mdiobus);
1803 	mdiobus_free(dev->mdiobus);
1804 }
1805 
1806 static void lan78xx_link_status_change(struct net_device *net)
1807 {
1808 	struct phy_device *phydev = net->phydev;
1809 	int ret, temp;
1810 
1811 	/* In forced 100M full/half-duplex mode the chip may fail to set
1812 	 * the speed correctly when the cable is switched between a long
1813 	 * (~50 m or more) one and a short one.  As a workaround, set the
1814 	 * speed to 10 before setting it to 100 while in forced 100 F/H mode.
1815 	 */
1816 	if (!phydev->autoneg && (phydev->speed == 100)) {
1817 		/* disable phy interrupt */
1818 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1819 		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1820 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1821 
1822 		temp = phy_read(phydev, MII_BMCR);
1823 		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1824 		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1825 		temp |= BMCR_SPEED100;
1826 		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1827 
1828 		/* clear pending interrupt generated while workaround */
1829 		temp = phy_read(phydev, LAN88XX_INT_STS);
1830 
1831 		/* enable phy interrupt back */
1832 		temp = phy_read(phydev, LAN88XX_INT_MASK);
1833 		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1834 		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1835 	}
1836 }
1837 
1838 static int lan78xx_phy_init(struct lan78xx_net *dev)
1839 {
1840 	int ret;
1841 	u32 mii_adv;
1842 	struct phy_device *phydev = dev->net->phydev;
1843 
1844 	phydev = phy_find_first(dev->mdiobus);
1845 	if (!phydev) {
1846 		netdev_err(dev->net, "no PHY found\n");
1847 		return -EIO;
1848 	}
1849 
1850 	/* Enable PHY interrupts; we handle interrupt delivery
1851 	 * ourselves via the USB interrupt endpoint.
1852 	 */
1853 	ret = phy_read(phydev, LAN88XX_INT_STS);
1854 	ret = phy_write(phydev, LAN88XX_INT_MASK,
1855 			LAN88XX_INT_MASK_MDINTPIN_EN_ |
1856 			LAN88XX_INT_MASK_LINK_CHANGE_);
1857 
1858 	phydev->irq = PHY_IGNORE_INTERRUPT;
1859 
1860 	ret = phy_connect_direct(dev->net, phydev,
1861 				 lan78xx_link_status_change,
1862 				 PHY_INTERFACE_MODE_GMII);
1863 	if (ret) {
1864 		netdev_err(dev->net, "can't attach PHY to %s\n",
1865 			   dev->mdiobus->id);
1866 		return -EIO;
1867 	}
1868 
1869 	/* set to AUTOMDIX */
1870 	lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1871 
1872 	/* MAC doesn't support 1000T Half */
1873 	phydev->supported &= ~SUPPORTED_1000baseT_Half;
1874 
1875 	/* support both flow controls */
1876 	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1877 	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1878 	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1879 	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1880 
1881 	genphy_config_aneg(phydev);
1882 
1883 	dev->fc_autoneg = phydev->autoneg;
1884 
1885 	phy_start(phydev);
1886 
1887 	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1888 
1889 	return 0;
1890 }
1891 
1892 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1893 {
1894 	int ret = 0;
1895 	u32 buf;
1896 	bool rxenabled;
1897 
1898 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1899 
1900 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1901 
1902 	if (rxenabled) {
1903 		buf &= ~MAC_RX_RXEN_;
1904 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
1905 	}
1906 
1907 	/* add 4 to size for FCS */
1908 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
1909 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1910 
1911 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
1912 
1913 	if (rxenabled) {
1914 		buf |= MAC_RX_RXEN_;
1915 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
1916 	}
1917 
1918 	return 0;
1919 }
1920 
1921 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
1922 {
1923 	struct sk_buff *skb;
1924 	unsigned long flags;
1925 	int count = 0;
1926 
1927 	spin_lock_irqsave(&q->lock, flags);
1928 	while (!skb_queue_empty(q)) {
1929 		struct skb_data	*entry;
1930 		struct urb *urb;
1931 		int ret;
1932 
1933 		skb_queue_walk(q, skb) {
1934 			entry = (struct skb_data *)skb->cb;
1935 			if (entry->state != unlink_start)
1936 				goto found;
1937 		}
1938 		break;
1939 found:
1940 		entry->state = unlink_start;
1941 		urb = entry->urb;
1942 
1943 		/* Take a reference on the URB so it cannot be freed
1944 		 * during usb_unlink_urb(), which could otherwise trigger
1945 		 * a use-after-free inside usb_unlink_urb(), since
1946 		 * usb_unlink_urb() always races with the .complete
1947 		 * handler (including defer_bh).
1948 		 */
1949 		usb_get_urb(urb);
1950 		spin_unlock_irqrestore(&q->lock, flags);
1951 		/* during some PM-driven resume scenarios,
1952 		 * these (async) unlinks complete immediately
1953 		 */
1954 		ret = usb_unlink_urb(urb);
1955 		if (ret != -EINPROGRESS && ret != 0)
1956 			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1957 		else
1958 			count++;
1959 		usb_put_urb(urb);
1960 		spin_lock_irqsave(&q->lock, flags);
1961 	}
1962 	spin_unlock_irqrestore(&q->lock, flags);
1963 	return count;
1964 }
1965 
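/* A bulk-in transfer whose total length is an exact multiple of the
 * endpoint's max packet size must be terminated by a zero-length
 * packet.  To avoid depending on that extra ZLP read, MTUs that make
 * the link-layer frame length a multiple of dev->maxpacket are
 * rejected with -EDOM below.
 */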
1966 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1967 {
1968 	struct lan78xx_net *dev = netdev_priv(netdev);
1969 	int ll_mtu = new_mtu + netdev->hard_header_len;
1970 	int old_hard_mtu = dev->hard_mtu;
1971 	int old_rx_urb_size = dev->rx_urb_size;
1972 	int ret;
1973 
1974 	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1975 		return -EINVAL;
1976 
1977 	if (new_mtu <= 0)
1978 		return -EINVAL;
1979 	/* no second zero-length packet read wanted after mtu-sized packets */
1980 	if ((ll_mtu % dev->maxpacket) == 0)
1981 		return -EDOM;
1982 
1983 	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1984 
1985 	netdev->mtu = new_mtu;
1986 
1987 	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1988 	if (dev->rx_urb_size == old_hard_mtu) {
1989 		dev->rx_urb_size = dev->hard_mtu;
1990 		if (dev->rx_urb_size > old_rx_urb_size) {
1991 			if (netif_running(dev->net)) {
1992 				unlink_urbs(dev, &dev->rxq);
1993 				tasklet_schedule(&dev->bh);
1994 			}
1995 		}
1996 	}
1997 
1998 	return 0;
1999 }
2000 
2001 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2002 {
2003 	struct lan78xx_net *dev = netdev_priv(netdev);
2004 	struct sockaddr *addr = p;
2005 	u32 addr_lo, addr_hi;
2006 	int ret;
2007 
2008 	if (netif_running(netdev))
2009 		return -EBUSY;
2010 
2011 	if (!is_valid_ether_addr(addr->sa_data))
2012 		return -EADDRNOTAVAIL;
2013 
2014 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
2015 
2016 	addr_lo = netdev->dev_addr[0] |
2017 		  netdev->dev_addr[1] << 8 |
2018 		  netdev->dev_addr[2] << 16 |
2019 		  netdev->dev_addr[3] << 24;
2020 	addr_hi = netdev->dev_addr[4] |
2021 		  netdev->dev_addr[5] << 8;
2022 
2023 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2024 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2025 
2026 	return 0;
2027 }
2028 
2029 /* Enable or disable Rx checksum offload engine */
2030 static int lan78xx_set_features(struct net_device *netdev,
2031 				netdev_features_t features)
2032 {
2033 	struct lan78xx_net *dev = netdev_priv(netdev);
2034 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2035 	unsigned long flags;
2036 	int ret;
2037 
2038 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2039 
2040 	if (features & NETIF_F_RXCSUM) {
2041 		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2042 		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2043 	} else {
2044 		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2045 		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2046 	}
2047 
2048 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2049 		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2050 	else
2051 		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2052 
2053 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2054 
2055 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2056 
2057 	return 0;
2058 }
2059 
2060 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2061 {
2062 	struct lan78xx_priv *pdata =
2063 			container_of(param, struct lan78xx_priv, set_vlan);
2064 	struct lan78xx_net *dev = pdata->dev;
2065 
2066 	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2067 			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2068 }
2069 
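/* The VLAN filter is a 4096-bit table stored as DP_SEL_VHF_VLAN_LEN
 * 32-bit words: bits [11:5] of the VID select the word and bits [4:0]
 * select the bit within it.  For example, VID 100 (0x064) lands in
 * word 3, bit 4.  The table is pushed to the device from a work item
 * because the dataport register writes must sleep.
 */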
2070 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2071 				   __be16 proto, u16 vid)
2072 {
2073 	struct lan78xx_net *dev = netdev_priv(netdev);
2074 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2075 	u16 vid_bit_index;
2076 	u16 vid_dword_index;
2077 
2078 	vid_dword_index = (vid >> 5) & 0x7F;
2079 	vid_bit_index = vid & 0x1F;
2080 
2081 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2082 
2083 	/* defer register writes to a sleepable context */
2084 	schedule_work(&pdata->set_vlan);
2085 
2086 	return 0;
2087 }
2088 
2089 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2090 				    __be16 proto, u16 vid)
2091 {
2092 	struct lan78xx_net *dev = netdev_priv(netdev);
2093 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2094 	u16 vid_bit_index;
2095 	u16 vid_dword_index;
2096 
2097 	vid_dword_index = (vid >> 5) & 0x7F;
2098 	vid_bit_index = vid & 0x1F;
2099 
2100 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2101 
2102 	/* defer register writes to a sleepable context */
2103 	schedule_work(&pdata->set_vlan);
2104 
2105 	return 0;
2106 }
2107 
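/* LTM (USB3 Latency Tolerance Messaging) setup: when USB_CFG1 reports
 * LTM enabled, a 24-byte block of six register values may be supplied
 * in EEPROM or OTP behind the descriptor at offset 0x3F; otherwise
 * all six LTM idle/active/inactive registers are cleared to zero.
 */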
2108 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2109 {
2110 	int ret;
2111 	u32 buf;
2112 	u32 regs[6] = { 0 };
2113 
2114 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2115 	if (buf & USB_CFG1_LTM_ENABLE_) {
2116 		u8 temp[2];
2117 		/* Get values from EEPROM first */
2118 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2119 			if (temp[0] == 24) {
2120 				ret = lan78xx_read_raw_eeprom(dev,
2121 							      temp[1] * 2,
2122 							      24,
2123 							      (u8 *)regs);
2124 				if (ret < 0)
2125 					return;
2126 			}
2127 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2128 			if (temp[0] == 24) {
2129 				ret = lan78xx_read_raw_otp(dev,
2130 							   temp[1] * 2,
2131 							   24,
2132 							   (u8 *)regs);
2133 				if (ret < 0)
2134 					return;
2135 			}
2136 		}
2137 	}
2138 
2139 	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2140 	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2141 	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2142 	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2143 	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2144 	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2145 }
2146 
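/* Full device bring-up: lite reset, MAC address programming, bulk-in
 * tuning, FIFO and flow-control setup, PHY reset, then MAC/FCT enables.
 * BURST_CAP is expressed in units of the bulk endpoint's packet size,
 * so the same 12 KiB cap becomes 12 packets on SuperSpeed (1024 B),
 * 24 on high speed (512 B) and 192 on full speed (64 B).
 */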
2147 static int lan78xx_reset(struct lan78xx_net *dev)
2148 {
2149 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2150 	u32 buf;
2151 	int ret = 0;
2152 	unsigned long timeout;
2153 
2154 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2155 	buf |= HW_CFG_LRST_;
2156 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2157 
2158 	timeout = jiffies + HZ;
2159 	do {
2160 		mdelay(1);
2161 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2162 		if (time_after(jiffies, timeout)) {
2163 			netdev_warn(dev->net,
2164 				    "timeout on completion of LiteReset");
2165 			return -EIO;
2166 		}
2167 	} while (buf & HW_CFG_LRST_);
2168 
2169 	lan78xx_init_mac_address(dev);
2170 
2171 	/* save DEVID for later usage */
2172 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
2173 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2174 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2175 
2176 	/* Respond to the IN token with a NAK */
2177 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2178 	buf |= USB_CFG_BIR_;
2179 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2180 
2181 	/* Init LTM */
2182 	lan78xx_init_ltm(dev);
2183 
2184 	dev->net->hard_header_len += TX_OVERHEAD;
2185 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2186 
2187 	if (dev->udev->speed == USB_SPEED_SUPER) {
2188 		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2189 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2190 		dev->rx_qlen = 4;
2191 		dev->tx_qlen = 4;
2192 	} else if (dev->udev->speed == USB_SPEED_HIGH) {
2193 		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2194 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2195 		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2196 		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2197 	} else {
2198 		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2199 		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2200 		dev->rx_qlen = 4;
2201 		dev->tx_qlen = 4;
2202 	}
2203 
2204 	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2205 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2206 
2207 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2208 	buf |= HW_CFG_MEF_;
2209 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
2210 
2211 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2212 	buf |= USB_CFG_BCE_;
2213 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2214 
2215 	/* set FIFO sizes */
2216 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2217 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2218 
2219 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2220 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2221 
2222 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2223 	ret = lan78xx_write_reg(dev, FLOW, 0);
2224 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2225 
2226 	/* Don't need rfe_ctl_lock during initialisation */
2227 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2228 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2229 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2230 
2231 	/* Enable or disable checksum offload engines */
2232 	lan78xx_set_features(dev->net, dev->net->features);
2233 
2234 	lan78xx_set_multicast(dev->net);
2235 
2236 	/* reset PHY */
2237 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2238 	buf |= PMT_CTL_PHY_RST_;
2239 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2240 
2241 	timeout = jiffies + HZ;
2242 	do {
2243 		mdelay(1);
2244 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2245 		if (time_after(jiffies, timeout)) {
2246 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
2247 			return -EIO;
2248 		}
2249 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2250 
2251 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2252 	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2253 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
2254 
2255 	/* enable PHY interrupts */
2256 	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2257 	buf |= INT_ENP_PHY_INT;
2258 	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
2259 
2260 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2261 	buf |= MAC_TX_TXEN_;
2262 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
2263 
2264 	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2265 	buf |= FCT_TX_CTL_EN_;
2266 	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2267 
2268 	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2269 
2270 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2271 	buf |= MAC_RX_RXEN_;
2272 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2273 
2274 	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2275 	buf |= FCT_RX_CTL_EN_;
2276 	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2277 
2278 	return 0;
2279 }
2280 
2281 static void lan78xx_init_stats(struct lan78xx_net *dev)
2282 {
2283 	u32 *p;
2284 	int i;
2285 
2286 	/* initialize rollover limits for the stats update;
2287 	 * some counters are 20 bits wide and some are 32 bits
2288 	 */
2289 	p = (u32 *)&dev->stats.rollover_max;
2290 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2291 		p[i] = 0xFFFFF;
2292 
2293 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2294 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2295 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2296 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2297 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2298 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2299 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2300 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2301 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2302 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2303 
2304 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2305 }
2306 
2307 static int lan78xx_open(struct net_device *net)
2308 {
2309 	struct lan78xx_net *dev = netdev_priv(net);
2310 	int ret;
2311 
2312 	ret = usb_autopm_get_interface(dev->intf);
2313 	if (ret < 0)
2314 		goto out;
2315 
2316 	ret = lan78xx_reset(dev);
2317 	if (ret < 0)
2318 		goto done;
2319 
2320 	ret = lan78xx_phy_init(dev);
2321 	if (ret < 0)
2322 		goto done;
2323 
2324 	/* for Link Check */
2325 	if (dev->urb_intr) {
2326 		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2327 		if (ret < 0) {
2328 			netif_err(dev, ifup, dev->net,
2329 				  "intr submit %d\n", ret);
2330 			goto done;
2331 		}
2332 	}
2333 
2334 	lan78xx_init_stats(dev);
2335 
2336 	set_bit(EVENT_DEV_OPEN, &dev->flags);
2337 
2338 	netif_start_queue(net);
2339 
2340 	dev->link_on = false;
2341 
2342 	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2343 done:
2344 	usb_autopm_put_interface(dev->intf);
2345 
2346 out:
2347 	return ret;
2348 }
2349 
2350 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2351 {
2352 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2353 	DECLARE_WAITQUEUE(wait, current);
2354 	int temp;
2355 
2356 	/* ensure there are no more active urbs */
2357 	add_wait_queue(&unlink_wakeup, &wait);
2358 	set_current_state(TASK_UNINTERRUPTIBLE);
2359 	dev->wait = &unlink_wakeup;
2360 	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2361 
2362 	/* maybe wait for deletions to finish. */
2363 	while (!skb_queue_empty(&dev->rxq) &&
2364 	       !skb_queue_empty(&dev->txq) &&
2365 	       !skb_queue_empty(&dev->done)) {
2366 		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2367 		set_current_state(TASK_UNINTERRUPTIBLE);
2368 		netif_dbg(dev, ifdown, dev->net,
2369 			  "waited for %d urb completions\n", temp);
2370 	}
2371 	set_current_state(TASK_RUNNING);
2372 	dev->wait = NULL;
2373 	remove_wait_queue(&unlink_wakeup, &wait);
2374 }
2375 
2376 static int lan78xx_stop(struct net_device *net)
2377 {
2378 	struct lan78xx_net		*dev = netdev_priv(net);
2379 
2380 	if (timer_pending(&dev->stat_monitor))
2381 		del_timer_sync(&dev->stat_monitor);
2382 
2383 	phy_stop(net->phydev);
2384 	phy_disconnect(net->phydev);
2385 	net->phydev = NULL;
2386 
2387 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
2388 	netif_stop_queue(net);
2389 
2390 	netif_info(dev, ifdown, dev->net,
2391 		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2392 		   net->stats.rx_packets, net->stats.tx_packets,
2393 		   net->stats.rx_errors, net->stats.tx_errors);
2394 
2395 	lan78xx_terminate_urbs(dev);
2396 
2397 	usb_kill_urb(dev->urb_intr);
2398 
2399 	skb_queue_purge(&dev->rxq_pause);
2400 
2401 	/* deferred work (task, timer, softirq) must also stop.
2402 	 * can't flush_scheduled_work() until we drop rtnl (later),
2403 	 * else workers could deadlock; so make workers a NOP.
2404 	 */
2405 	dev->flags = 0;
2406 	cancel_delayed_work_sync(&dev->wq);
2407 	tasklet_kill(&dev->bh);
2408 
2409 	usb_autopm_put_interface(dev->intf);
2410 
2411 	return 0;
2412 }
2413 
2414 static int lan78xx_linearize(struct sk_buff *skb)
2415 {
2416 	return skb_linearize(skb);
2417 }
2418 
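/* Every outgoing frame is prefixed with an 8-byte command header
 * (TX_OVERHEAD): TX_CMD_A carries the frame length and the
 * FCS-insertion, checksum-offload (IPE/TPE), LSO and VLAN-insertion
 * flags, while TX_CMD_B carries the TSO MSS and the VLAN tag.  Both
 * words are written little-endian in front of skb->data, which is why
 * skb_cow_head() reserves TX_OVERHEAD bytes of headroom first.
 */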
2419 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2420 				       struct sk_buff *skb, gfp_t flags)
2421 {
2422 	u32 tx_cmd_a, tx_cmd_b;
2423 
2424 	if (skb_cow_head(skb, TX_OVERHEAD)) {
2425 		dev_kfree_skb_any(skb);
2426 		return NULL;
2427 	}
2428 
2429 	if (lan78xx_linearize(skb) < 0)
2430 		return NULL;
2431 
2432 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2433 
2434 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2435 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2436 
2437 	tx_cmd_b = 0;
2438 	if (skb_is_gso(skb)) {
2439 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2440 
2441 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2442 
2443 		tx_cmd_a |= TX_CMD_A_LSO_;
2444 	}
2445 
2446 	if (skb_vlan_tag_present(skb)) {
2447 		tx_cmd_a |= TX_CMD_A_IVTG_;
2448 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2449 	}
2450 
2451 	skb_push(skb, 4);
2452 	cpu_to_le32s(&tx_cmd_b);
2453 	memcpy(skb->data, &tx_cmd_b, 4);
2454 
2455 	skb_push(skb, 4);
2456 	cpu_to_le32s(&tx_cmd_a);
2457 	memcpy(skb->data, &tx_cmd_a, 4);
2458 
2459 	return skb;
2460 }
2461 
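/* Move a completed skb from its rx/tx queue to dev->done using
 * hand-over-hand locking: interrupts stay disabled across the switch
 * from list->lock to dev->done.lock, and the saved flags are restored
 * on the final unlock.  The tasklet is scheduled only when the done
 * queue goes from empty to one entry, so a burst of completions
 * triggers a single bottom-half run.
 */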
2462 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2463 			       struct sk_buff_head *list, enum skb_state state)
2464 {
2465 	unsigned long flags;
2466 	enum skb_state old_state;
2467 	struct skb_data *entry = (struct skb_data *)skb->cb;
2468 
2469 	spin_lock_irqsave(&list->lock, flags);
2470 	old_state = entry->state;
2471 	entry->state = state;
2472 
2473 	__skb_unlink(skb, list);
2474 	spin_unlock(&list->lock);
2475 	spin_lock(&dev->done.lock);
2476 
2477 	__skb_queue_tail(&dev->done, skb);
2478 	if (skb_queue_len(&dev->done) == 1)
2479 		tasklet_schedule(&dev->bh);
2480 	spin_unlock_irqrestore(&dev->done.lock, flags);
2481 
2482 	return old_state;
2483 }
2484 
2485 static void tx_complete(struct urb *urb)
2486 {
2487 	struct sk_buff *skb = (struct sk_buff *)urb->context;
2488 	struct skb_data *entry = (struct skb_data *)skb->cb;
2489 	struct lan78xx_net *dev = entry->dev;
2490 
2491 	if (urb->status == 0) {
2492 		dev->net->stats.tx_packets += entry->num_of_packet;
2493 		dev->net->stats.tx_bytes += entry->length;
2494 	} else {
2495 		dev->net->stats.tx_errors++;
2496 
2497 		switch (urb->status) {
2498 		case -EPIPE:
2499 			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2500 			break;
2501 
2502 		/* software-driven interface shutdown */
2503 		case -ECONNRESET:
2504 		case -ESHUTDOWN:
2505 			break;
2506 
2507 		case -EPROTO:
2508 		case -ETIME:
2509 		case -EILSEQ:
2510 			netif_stop_queue(dev->net);
2511 			break;
2512 		default:
2513 			netif_dbg(dev, tx_err, dev->net,
2514 				  "tx err %d\n", entry->urb->status);
2515 			break;
2516 		}
2517 	}
2518 
2519 	usb_autopm_put_interface_async(dev->intf);
2520 
2521 	defer_bh(dev, skb, &dev->txq, tx_done);
2522 }
2523 
2524 static void lan78xx_queue_skb(struct sk_buff_head *list,
2525 			      struct sk_buff *newsk, enum skb_state state)
2526 {
2527 	struct skb_data *entry = (struct skb_data *)newsk->cb;
2528 
2529 	__skb_queue_tail(list, newsk);
2530 	entry->state = state;
2531 }
2532 
2533 static netdev_tx_t
2534 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2535 {
2536 	struct lan78xx_net *dev = netdev_priv(net);
2537 	struct sk_buff *skb2 = NULL;
2538 
2539 	if (skb) {
2540 		skb_tx_timestamp(skb);
2541 		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2542 	}
2543 
2544 	if (skb2) {
2545 		skb_queue_tail(&dev->txq_pend, skb2);
2546 
2547 		/* throttle the TX path at less than SuperSpeed USB */
2548 		if ((dev->udev->speed < USB_SPEED_SUPER) &&
2549 		    (skb_queue_len(&dev->txq_pend) > 10))
2550 			netif_stop_queue(net);
2551 	} else {
2552 		netif_dbg(dev, tx_err, dev->net,
2553 			  "lan78xx_tx_prep return NULL\n");
2554 		dev->net->stats.tx_errors++;
2555 		dev->net->stats.tx_dropped++;
2556 	}
2557 
2558 	tasklet_schedule(&dev->bh);
2559 
2560 	return NETDEV_TX_OK;
2561 }
2562 
2563 static int
2564 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2565 {
2566 	int tmp;
2567 	struct usb_host_interface *alt = NULL;
2568 	struct usb_host_endpoint *in = NULL, *out = NULL;
2569 	struct usb_host_endpoint *status = NULL;
2570 
2571 	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2572 		unsigned ep;
2573 
2574 		in = NULL;
2575 		out = NULL;
2576 		status = NULL;
2577 		alt = intf->altsetting + tmp;
2578 
2579 		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2580 			struct usb_host_endpoint *e;
2581 			int intr = 0;
2582 
2583 			e = alt->endpoint + ep;
2584 			switch (e->desc.bmAttributes) {
2585 			case USB_ENDPOINT_XFER_INT:
2586 				if (!usb_endpoint_dir_in(&e->desc))
2587 					continue;
2588 				intr = 1;
2589 				/* FALLTHROUGH */
2590 			case USB_ENDPOINT_XFER_BULK:
2591 				break;
2592 			default:
2593 				continue;
2594 			}
2595 			if (usb_endpoint_dir_in(&e->desc)) {
2596 				if (!intr && !in)
2597 					in = e;
2598 				else if (intr && !status)
2599 					status = e;
2600 			} else {
2601 				if (!out)
2602 					out = e;
2603 			}
2604 		}
2605 		if (in && out)
2606 			break;
2607 	}
2608 	if (!alt || !in || !out)
2609 		return -EINVAL;
2610 
2611 	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2612 				       in->desc.bEndpointAddress &
2613 				       USB_ENDPOINT_NUMBER_MASK);
2614 	dev->pipe_out = usb_sndbulkpipe(dev->udev,
2615 					out->desc.bEndpointAddress &
2616 					USB_ENDPOINT_NUMBER_MASK);
2617 	dev->ep_intr = status;
2618 
2619 	return 0;
2620 }
2621 
2622 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2623 {
2624 	struct lan78xx_priv *pdata = NULL;
2625 	int ret;
2626 	int i;
2627 
2628 	ret = lan78xx_get_endpoints(dev, intf);
2629 
2630 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2631 
2632 	pdata = (struct lan78xx_priv *)(dev->data[0]);
2633 	if (!pdata) {
2634 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2635 		return -ENOMEM;
2636 	}
2637 
2638 	pdata->dev = dev;
2639 
2640 	spin_lock_init(&pdata->rfe_ctl_lock);
2641 	mutex_init(&pdata->dataport_mutex);
2642 
2643 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2644 
2645 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2646 		pdata->vlan_table[i] = 0;
2647 
2648 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2649 
2650 	dev->net->features = 0;
2651 
2652 	if (DEFAULT_TX_CSUM_ENABLE)
2653 		dev->net->features |= NETIF_F_HW_CSUM;
2654 
2655 	if (DEFAULT_RX_CSUM_ENABLE)
2656 		dev->net->features |= NETIF_F_RXCSUM;
2657 
2658 	if (DEFAULT_TSO_CSUM_ENABLE)
2659 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2660 
2661 	dev->net->hw_features = dev->net->features;
2662 
2663 	/* Init all registers */
2664 	ret = lan78xx_reset(dev);
2665 
2666 	lan78xx_mdio_init(dev);
2667 
2668 	dev->net->flags |= IFF_MULTICAST;
2669 
2670 	pdata->wol = WAKE_MAGIC;
2671 
2672 	return 0;
2673 }
2674 
2675 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2676 {
2677 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2678 
2679 	lan78xx_remove_mdio(dev);
2680 
2681 	if (pdata) {
2682 		netif_dbg(dev, ifdown, dev->net, "free pdata");
2683 		kfree(pdata);
2684 		pdata = NULL;
2685 		dev->data[0] = 0;
2686 	}
2687 }
2688 
2689 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2690 				    struct sk_buff *skb,
2691 				    u32 rx_cmd_a, u32 rx_cmd_b)
2692 {
2693 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
2694 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2695 		skb->ip_summed = CHECKSUM_NONE;
2696 	} else {
2697 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2698 		skb->ip_summed = CHECKSUM_COMPLETE;
2699 	}
2700 }
2701 
2702 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2703 {
2704 	int		status;
2705 
2706 	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2707 		skb_queue_tail(&dev->rxq_pause, skb);
2708 		return;
2709 	}
2710 
2711 	dev->net->stats.rx_packets++;
2712 	dev->net->stats.rx_bytes += skb->len;
2713 
2714 	skb->protocol = eth_type_trans(skb, dev->net);
2715 
2716 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2717 		  skb->len + sizeof(struct ethhdr), skb->protocol);
2718 	memset(skb->cb, 0, sizeof(struct skb_data));
2719 
2720 	if (skb_defer_rx_timestamp(skb))
2721 		return;
2722 
2723 	status = netif_rx(skb);
2724 	if (status != NET_RX_SUCCESS)
2725 		netif_dbg(dev, rx_err, dev->net,
2726 			  "netif_rx status %d\n", status);
2727 }
2728 
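/* A bulk-in URB may carry several frames back to back.  Each frame is
 * preceded by a little-endian header of rx_cmd_a (u32: error flags
 * and length), rx_cmd_b (u32: checksum) and rx_cmd_c (u16), and is
 * padded so that its length plus RXW_PADDING is a multiple of four
 * before the next header begins.  The last frame in a batch is
 * returned in the original skb rather than a clone.
 */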
2729 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2730 {
2731 	if (skb->len < dev->net->hard_header_len)
2732 		return 0;
2733 
2734 	while (skb->len > 0) {
2735 		u32 rx_cmd_a, rx_cmd_b, align_count, size;
2736 		u16 rx_cmd_c;
2737 		struct sk_buff *skb2;
2738 		unsigned char *packet;
2739 
2740 		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2741 		le32_to_cpus(&rx_cmd_a);
2742 		skb_pull(skb, sizeof(rx_cmd_a));
2743 
2744 		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2745 		le32_to_cpus(&rx_cmd_b);
2746 		skb_pull(skb, sizeof(rx_cmd_b));
2747 
2748 		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2749 		le16_to_cpus(&rx_cmd_c);
2750 		skb_pull(skb, sizeof(rx_cmd_c));
2751 
2752 		packet = skb->data;
2753 
2754 		/* get the packet length */
2755 		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2756 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2757 
2758 		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2759 			netif_dbg(dev, rx_err, dev->net,
2760 				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
2761 		} else {
2762 			/* last frame in this batch */
2763 			if (skb->len == size) {
2764 				lan78xx_rx_csum_offload(dev, skb,
2765 							rx_cmd_a, rx_cmd_b);
2766 
2767 				skb_trim(skb, skb->len - 4); /* remove fcs */
2768 				skb->truesize = size + sizeof(struct sk_buff);
2769 
2770 				return 1;
2771 			}
2772 
2773 			skb2 = skb_clone(skb, GFP_ATOMIC);
2774 			if (unlikely(!skb2)) {
2775 				netdev_warn(dev->net, "Error allocating skb");
2776 				return 0;
2777 			}
2778 
2779 			skb2->len = size;
2780 			skb2->data = packet;
2781 			skb_set_tail_pointer(skb2, size);
2782 
2783 			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
2784 
2785 			skb_trim(skb2, skb2->len - 4); /* remove fcs */
2786 			skb2->truesize = size + sizeof(struct sk_buff);
2787 
2788 			lan78xx_skb_return(dev, skb2);
2789 		}
2790 
2791 		skb_pull(skb, size);
2792 
2793 		/* padding bytes before the next frame starts */
2794 		if (skb->len)
2795 			skb_pull(skb, align_count);
2796 	}
2797 
2798 	return 1;
2799 }
2800 
2801 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2802 {
2803 	if (!lan78xx_rx(dev, skb)) {
2804 		dev->net->stats.rx_errors++;
2805 		goto done;
2806 	}
2807 
2808 	if (skb->len) {
2809 		lan78xx_skb_return(dev, skb);
2810 		return;
2811 	}
2812 
2813 	netif_dbg(dev, rx_err, dev->net, "drop\n");
2814 	dev->net->stats.rx_errors++;
2815 done:
2816 	skb_queue_tail(&dev->done, skb);
2817 }
2818 
2819 static void rx_complete(struct urb *urb);
2820 
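/* RX refill path: rx_submit() attaches a freshly allocated,
 * IP-aligned skb of rx_urb_size bytes to the URB and queues it on
 * dev->rxq; rx_complete() hands finished buffers to the bottom half
 * via defer_bh() and, when possible, resubmits the same URB so the
 * ring of dev->rx_qlen outstanding reads stays full.
 */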
2821 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
2822 {
2823 	struct sk_buff *skb;
2824 	struct skb_data *entry;
2825 	unsigned long lockflags;
2826 	size_t size = dev->rx_urb_size;
2827 	int ret = 0;
2828 
2829 	skb = netdev_alloc_skb_ip_align(dev->net, size);
2830 	if (!skb) {
2831 		usb_free_urb(urb);
2832 		return -ENOMEM;
2833 	}
2834 
2835 	entry = (struct skb_data *)skb->cb;
2836 	entry->urb = urb;
2837 	entry->dev = dev;
2838 	entry->length = 0;
2839 
2840 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
2841 			  skb->data, size, rx_complete, skb);
2842 
2843 	spin_lock_irqsave(&dev->rxq.lock, lockflags);
2844 
2845 	if (netif_device_present(dev->net) &&
2846 	    netif_running(dev->net) &&
2847 	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
2848 	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
2849 		ret = usb_submit_urb(urb, GFP_ATOMIC);
2850 		switch (ret) {
2851 		case 0:
2852 			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
2853 			break;
2854 		case -EPIPE:
2855 			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2856 			break;
2857 		case -ENODEV:
2858 			netif_dbg(dev, ifdown, dev->net, "device gone\n");
2859 			netif_device_detach(dev->net);
2860 			break;
2861 		case -EHOSTUNREACH:
2862 			ret = -ENOLINK;
2863 			break;
2864 		default:
2865 			netif_dbg(dev, rx_err, dev->net,
2866 				  "rx submit, %d\n", ret);
2867 			tasklet_schedule(&dev->bh);
2868 		}
2869 	} else {
2870 		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
2871 		ret = -ENOLINK;
2872 	}
2873 	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
2874 	if (ret) {
2875 		dev_kfree_skb_any(skb);
2876 		usb_free_urb(urb);
2877 	}
2878 	return ret;
2879 }
2880 
2881 static void rx_complete(struct urb *urb)
2882 {
2883 	struct sk_buff	*skb = (struct sk_buff *)urb->context;
2884 	struct skb_data	*entry = (struct skb_data *)skb->cb;
2885 	struct lan78xx_net *dev = entry->dev;
2886 	int urb_status = urb->status;
2887 	enum skb_state state;
2888 
2889 	skb_put(skb, urb->actual_length);
2890 	state = rx_done;
2891 	entry->urb = NULL;
2892 
2893 	switch (urb_status) {
2894 	case 0:
2895 		if (skb->len < dev->net->hard_header_len) {
2896 			state = rx_cleanup;
2897 			dev->net->stats.rx_errors++;
2898 			dev->net->stats.rx_length_errors++;
2899 			netif_dbg(dev, rx_err, dev->net,
2900 				  "rx length %d\n", skb->len);
2901 		}
2902 		usb_mark_last_busy(dev->udev);
2903 		break;
2904 	case -EPIPE:
2905 		dev->net->stats.rx_errors++;
2906 		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
2907 		/* FALLTHROUGH */
2908 	case -ECONNRESET:				/* async unlink */
2909 	case -ESHUTDOWN:				/* hardware gone */
2910 		netif_dbg(dev, ifdown, dev->net,
2911 			  "rx shutdown, code %d\n", urb_status);
2912 		state = rx_cleanup;
2913 		entry->urb = urb;
2914 		urb = NULL;
2915 		break;
2916 	case -EPROTO:
2917 	case -ETIME:
2918 	case -EILSEQ:
2919 		dev->net->stats.rx_errors++;
2920 		state = rx_cleanup;
2921 		entry->urb = urb;
2922 		urb = NULL;
2923 		break;
2924 
2925 	/* data overrun ... flush fifo? */
2926 	case -EOVERFLOW:
2927 		dev->net->stats.rx_over_errors++;
2928 		/* FALLTHROUGH */
2929 
2930 	default:
2931 		state = rx_cleanup;
2932 		dev->net->stats.rx_errors++;
2933 		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
2934 		break;
2935 	}
2936 
2937 	state = defer_bh(dev, skb, &dev->rxq, state);
2938 
2939 	if (urb) {
2940 		if (netif_running(dev->net) &&
2941 		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
2942 		    state != unlink_start) {
2943 			rx_submit(dev, urb, GFP_ATOMIC);
2944 			return;
2945 		}
2946 		usb_free_urb(urb);
2947 	}
2948 	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
2949 }
2950 
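/* TX aggregation: pending skbs (each already carrying its command
 * header from lan78xx_tx_prep) are memcpy-packed into one linear skb,
 * each padded to a 32-bit boundary, and sent as a single bulk-out
 * URB.  A GSO skb is never coalesced with others; it is dequeued and
 * transmitted on its own via the gso_skb path.
 */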
2951 static void lan78xx_tx_bh(struct lan78xx_net *dev)
2952 {
2953 	int length;
2954 	struct urb *urb = NULL;
2955 	struct skb_data *entry;
2956 	unsigned long flags;
2957 	struct sk_buff_head *tqp = &dev->txq_pend;
2958 	struct sk_buff *skb, *skb2;
2959 	int ret;
2960 	int count, pos;
2961 	int skb_totallen, pkt_cnt;
2962 
2963 	skb_totallen = 0;
2964 	pkt_cnt = 0;
2965 	count = 0;
2966 	length = 0;
2967 	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
2968 		if (skb_is_gso(skb)) {
2969 			if (pkt_cnt) {
2970 				/* handle previous packets first */
2971 				break;
2972 			}
2973 			count = 1;
2974 			length = skb->len - TX_OVERHEAD;
2975 			skb2 = skb_dequeue(tqp);
2976 			goto gso_skb;
2977 		}
2978 
2979 		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
2980 			break;
2981 		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
2982 		pkt_cnt++;
2983 	}
2984 
2985 	/* copy to a single skb */
2986 	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
2987 	if (!skb)
2988 		goto drop;
2989 
2990 	skb_put(skb, skb_totallen);
2991 
2992 	for (count = pos = 0; count < pkt_cnt; count++) {
2993 		skb2 = skb_dequeue(tqp);
2994 		if (skb2) {
2995 			length += (skb2->len - TX_OVERHEAD);
2996 			memcpy(skb->data + pos, skb2->data, skb2->len);
2997 			pos += roundup(skb2->len, sizeof(u32));
2998 			dev_kfree_skb(skb2);
2999 		}
3000 	}
3001 
3002 gso_skb:
3003 	urb = usb_alloc_urb(0, GFP_ATOMIC);
3004 	if (!urb)
3005 		goto drop;
3006 
3007 	entry = (struct skb_data *)skb->cb;
3008 	entry->urb = urb;
3009 	entry->dev = dev;
3010 	entry->length = length;
3011 	entry->num_of_packet = count;
3012 
3013 	spin_lock_irqsave(&dev->txq.lock, flags);
3014 	ret = usb_autopm_get_interface_async(dev->intf);
3015 	if (ret < 0) {
3016 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3017 		goto drop;
3018 	}
3019 
3020 	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3021 			  skb->data, skb->len, tx_complete, skb);
3022 
3023 	if (length % dev->maxpacket == 0) {
3024 		/* send USB_ZERO_PACKET */
3025 		urb->transfer_flags |= URB_ZERO_PACKET;
3026 	}
3027 
3028 #ifdef CONFIG_PM
3029 	/* if this triggers, the device is still asleep */
3030 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3031 		/* transmission will be done in the resume handler */
3032 		usb_anchor_urb(urb, &dev->deferred);
3033 		/* no point in processing more packets */
3034 		netif_stop_queue(dev->net);
3035 		usb_put_urb(urb);
3036 		spin_unlock_irqrestore(&dev->txq.lock, flags);
3037 		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3038 		return;
3039 	}
3040 #endif
3041 
3042 	ret = usb_submit_urb(urb, GFP_ATOMIC);
3043 	switch (ret) {
3044 	case 0:
3045 		netif_trans_update(dev->net);
3046 		lan78xx_queue_skb(&dev->txq, skb, tx_start);
3047 		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3048 			netif_stop_queue(dev->net);
3049 		break;
3050 	case -EPIPE:
3051 		netif_stop_queue(dev->net);
3052 		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3053 		usb_autopm_put_interface_async(dev->intf);
3054 		break;
3055 	default:
3056 		usb_autopm_put_interface_async(dev->intf);
3057 		netif_dbg(dev, tx_err, dev->net,
3058 			  "tx: submit urb err %d\n", ret);
3059 		break;
3060 	}
3061 
3062 	spin_unlock_irqrestore(&dev->txq.lock, flags);
3063 
3064 	if (ret) {
3065 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3066 drop:
3067 		dev->net->stats.tx_dropped++;
3068 		if (skb)
3069 			dev_kfree_skb_any(skb);
3070 		usb_free_urb(urb);
3071 	} else
3072 		netif_dbg(dev, tx_queued, dev->net,
3073 			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
3074 }
3075 
3076 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3077 {
3078 	struct urb *urb;
3079 	int i;
3080 
3081 	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3082 		for (i = 0; i < 10; i++) {
3083 			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3084 				break;
3085 			urb = usb_alloc_urb(0, GFP_ATOMIC);
3086 			if (urb)
3087 				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3088 					return;
3089 		}
3090 
3091 		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3092 			tasklet_schedule(&dev->bh);
3093 	}
3094 	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3095 		netif_wake_queue(dev->net);
3096 }
3097 
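/* Bottom half: drain dev->done, freeing tx buffers and their URBs,
 * pushing received batches through rx_process(), then kick the tx
 * and rx queues again while the interface is up.
 */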
3098 static void lan78xx_bh(unsigned long param)
3099 {
3100 	struct lan78xx_net *dev = (struct lan78xx_net *)param;
3101 	struct sk_buff *skb;
3102 	struct skb_data *entry;
3103 
3104 	while ((skb = skb_dequeue(&dev->done))) {
3105 		entry = (struct skb_data *)(skb->cb);
3106 		switch (entry->state) {
3107 		case rx_done:
3108 			entry->state = rx_cleanup;
3109 			rx_process(dev, skb);
3110 			continue;
3111 		case tx_done:
3112 			usb_free_urb(entry->urb);
3113 			dev_kfree_skb(skb);
3114 			continue;
3115 		case rx_cleanup:
3116 			usb_free_urb(entry->urb);
3117 			dev_kfree_skb(skb);
3118 			continue;
3119 		default:
3120 			netdev_dbg(dev->net, "skb state %d\n", entry->state);
3121 			return;
3122 		}
3123 	}
3124 
3125 	if (netif_device_present(dev->net) && netif_running(dev->net)) {
3126 		/* reset update timer delta */
3127 		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3128 			dev->delta = 1;
3129 			mod_timer(&dev->stat_monitor,
3130 				  jiffies + STAT_UPDATE_TIMER);
3131 		}
3132 
3133 		if (!skb_queue_empty(&dev->txq_pend))
3134 			lan78xx_tx_bh(dev);
3135 
3136 		if (!timer_pending(&dev->delay) &&
3137 		    !test_bit(EVENT_RX_HALT, &dev->flags))
3138 			lan78xx_rx_bh(dev);
3139 	}
3140 }
3141 
3142 static void lan78xx_delayedwork(struct work_struct *work)
3143 {
3144 	int status;
3145 	struct lan78xx_net *dev;
3146 
3147 	dev = container_of(work, struct lan78xx_net, wq.work);
3148 
3149 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3150 		unlink_urbs(dev, &dev->txq);
3151 		status = usb_autopm_get_interface(dev->intf);
3152 		if (status < 0)
3153 			goto fail_pipe;
3154 		status = usb_clear_halt(dev->udev, dev->pipe_out);
3155 		usb_autopm_put_interface(dev->intf);
3156 		if (status < 0 &&
3157 		    status != -EPIPE &&
3158 		    status != -ESHUTDOWN) {
3159 			if (netif_msg_tx_err(dev))
3160 fail_pipe:
3161 				netdev_err(dev->net,
3162 					   "can't clear tx halt, status %d\n",
3163 					   status);
3164 		} else {
3165 			clear_bit(EVENT_TX_HALT, &dev->flags);
3166 			if (status != -ESHUTDOWN)
3167 				netif_wake_queue(dev->net);
3168 		}
3169 	}
3170 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3171 		unlink_urbs(dev, &dev->rxq);
3172 		status = usb_autopm_get_interface(dev->intf);
3173 		if (status < 0)
3174 			goto fail_halt;
3175 		status = usb_clear_halt(dev->udev, dev->pipe_in);
3176 		usb_autopm_put_interface(dev->intf);
3177 		if (status < 0 &&
3178 		    status != -EPIPE &&
3179 		    status != -ESHUTDOWN) {
3180 			if (netif_msg_rx_err(dev))
3181 fail_halt:
3182 				netdev_err(dev->net,
3183 					   "can't clear rx halt, status %d\n",
3184 					   status);
3185 		} else {
3186 			clear_bit(EVENT_RX_HALT, &dev->flags);
3187 			tasklet_schedule(&dev->bh);
3188 		}
3189 	}
3190 
3191 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3192 		int ret = 0;
3193 
3194 		clear_bit(EVENT_LINK_RESET, &dev->flags);
3195 		status = usb_autopm_get_interface(dev->intf);
3196 		if (status < 0)
3197 			goto skip_reset;
3198 		if (lan78xx_link_reset(dev) < 0) {
3199 			usb_autopm_put_interface(dev->intf);
3200 skip_reset:
3201 			netdev_info(dev->net, "link reset failed (%d)\n",
3202 				    ret);
3203 		} else {
3204 			usb_autopm_put_interface(dev->intf);
3205 		}
3206 	}
3207 
3208 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3209 		lan78xx_update_stats(dev);
3210 
3211 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3212 
3213 		mod_timer(&dev->stat_monitor,
3214 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
3215 
3216 		dev->delta = min((dev->delta * 2), 50);
3217 	}
3218 }
3219 
3220 static void intr_complete(struct urb *urb)
3221 {
3222 	struct lan78xx_net *dev = urb->context;
3223 	int status = urb->status;
3224 
3225 	switch (status) {
3226 	/* success */
3227 	case 0:
3228 		lan78xx_status(dev, urb);
3229 		break;
3230 
3231 	/* software-driven interface shutdown */
3232 	case -ENOENT:			/* urb killed */
3233 	case -ESHUTDOWN:		/* hardware gone */
3234 		netif_dbg(dev, ifdown, dev->net,
3235 			  "intr shutdown, code %d\n", status);
3236 		return;
3237 
3238 	/* NOTE:  not throttling like RX/TX, since this endpoint
3239 	 * already polls infrequently
3240 	 */
3241 	default:
3242 		netdev_dbg(dev->net, "intr status %d\n", status);
3243 		break;
3244 	}
3245 
3246 	if (!netif_running(dev->net))
3247 		return;
3248 
3249 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3250 	status = usb_submit_urb(urb, GFP_ATOMIC);
3251 	if (status != 0)
3252 		netif_err(dev, timer, dev->net,
3253 			  "intr resubmit --> %d\n", status);
3254 }
3255 
3256 static void lan78xx_disconnect(struct usb_interface *intf)
3257 {
3258 	struct lan78xx_net		*dev;
3259 	struct usb_device		*udev;
3260 	struct net_device		*net;
3261 
3262 	dev = usb_get_intfdata(intf);
3263 	usb_set_intfdata(intf, NULL);
3264 	if (!dev)
3265 		return;
3266 
3267 	udev = interface_to_usbdev(intf);
3268 
3269 	net = dev->net;
3270 	unregister_netdev(net);
3271 
3272 	cancel_delayed_work_sync(&dev->wq);
3273 
3274 	usb_scuttle_anchored_urbs(&dev->deferred);
3275 
3276 	lan78xx_unbind(dev, intf);
3277 
3278 	usb_kill_urb(dev->urb_intr);
3279 	usb_free_urb(dev->urb_intr);
3280 
3281 	free_netdev(net);
3282 	usb_put_dev(udev);
3283 }
3284 
3285 static void lan78xx_tx_timeout(struct net_device *net)
3286 {
3287 	struct lan78xx_net *dev = netdev_priv(net);
3288 
3289 	unlink_urbs(dev, &dev->txq);
3290 	tasklet_schedule(&dev->bh);
3291 }
3292 
3293 static const struct net_device_ops lan78xx_netdev_ops = {
3294 	.ndo_open		= lan78xx_open,
3295 	.ndo_stop		= lan78xx_stop,
3296 	.ndo_start_xmit		= lan78xx_start_xmit,
3297 	.ndo_tx_timeout		= lan78xx_tx_timeout,
3298 	.ndo_change_mtu		= lan78xx_change_mtu,
3299 	.ndo_set_mac_address	= lan78xx_set_mac_addr,
3300 	.ndo_validate_addr	= eth_validate_addr,
3301 	.ndo_do_ioctl		= lan78xx_ioctl,
3302 	.ndo_set_rx_mode	= lan78xx_set_multicast,
3303 	.ndo_set_features	= lan78xx_set_features,
3304 	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
3305 	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
3306 };
3307 
3308 static void lan78xx_stat_monitor(unsigned long param)
3309 {
3310 	struct lan78xx_net *dev;
3311 
3312 	dev = (struct lan78xx_net *)param;
3313 
3314 	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3315 }
3316 
3317 static int lan78xx_probe(struct usb_interface *intf,
3318 			 const struct usb_device_id *id)
3319 {
3320 	struct lan78xx_net *dev;
3321 	struct net_device *netdev;
3322 	struct usb_device *udev;
3323 	int ret;
3324 	unsigned maxp;
3325 	unsigned period;
3326 	u8 *buf = NULL;
3327 
3328 	udev = interface_to_usbdev(intf);
3329 	udev = usb_get_dev(udev);
3330 
3331 	ret = -ENOMEM;
3332 	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3333 	if (!netdev) {
3334 		dev_err(&intf->dev, "Error: OOM\n");
3335 		goto out1;
3336 	}
3337 
3338 	/* netdev_printk() needs this */
3339 	SET_NETDEV_DEV(netdev, &intf->dev);
3340 
3341 	dev = netdev_priv(netdev);
3342 	dev->udev = udev;
3343 	dev->intf = intf;
3344 	dev->net = netdev;
3345 	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3346 					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
3347 
3348 	skb_queue_head_init(&dev->rxq);
3349 	skb_queue_head_init(&dev->txq);
3350 	skb_queue_head_init(&dev->done);
3351 	skb_queue_head_init(&dev->rxq_pause);
3352 	skb_queue_head_init(&dev->txq_pend);
3353 	mutex_init(&dev->phy_mutex);
3354 
3355 	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3356 	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3357 	init_usb_anchor(&dev->deferred);
3358 
3359 	netdev->netdev_ops = &lan78xx_netdev_ops;
3360 	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3361 	netdev->ethtool_ops = &lan78xx_ethtool_ops;
3362 
3363 	dev->stat_monitor.function = lan78xx_stat_monitor;
3364 	dev->stat_monitor.data = (unsigned long)dev;
3365 	dev->delta = 1;
3366 	init_timer(&dev->stat_monitor);
3367 
3368 	mutex_init(&dev->stats.access_lock);
3369 
3370 	ret = lan78xx_bind(dev, intf);
3371 	if (ret < 0)
3372 		goto out2;
3373 	strcpy(netdev->name, "eth%d");
3374 
3375 	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3376 		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3377 
3378 	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3379 	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3380 	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3381 
3382 	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3383 	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3384 
3385 	dev->pipe_intr = usb_rcvintpipe(dev->udev,
3386 					dev->ep_intr->desc.bEndpointAddress &
3387 					USB_ENDPOINT_NUMBER_MASK);
3388 	period = dev->ep_intr->desc.bInterval;
3389 
3390 	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3391 	buf = kmalloc(maxp, GFP_KERNEL);
3392 	if (buf) {
3393 		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3394 		if (!dev->urb_intr) {
3395 			ret = -ENOMEM;
3396 			kfree(buf);
3397 			goto out3;
3398 		} else {
3399 			usb_fill_int_urb(dev->urb_intr, dev->udev,
3400 					 dev->pipe_intr, buf, maxp,
3401 					 intr_complete, dev, period);
3402 		}
3403 	}
3404 
3405 	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3406 
3407 	/* driver requires remote-wakeup capability during autosuspend. */
3408 	intf->needs_remote_wakeup = 1;
3409 
3410 	ret = register_netdev(netdev);
3411 	if (ret != 0) {
3412 		netif_err(dev, probe, netdev, "couldn't register the device\n");
3413 		goto out2;
3414 	}
3415 
3416 	usb_set_intfdata(intf, dev);
3417 
3418 	ret = device_set_wakeup_enable(&udev->dev, true);
3419 
3420 	/* The default autosuspend delay of 2 sec has more overhead than
3421 	 * benefit.  Use 10 sec as the default instead.
3422 	 */
3423 	pm_runtime_set_autosuspend_delay(&udev->dev,
3424 					 DEFAULT_AUTOSUSPEND_DELAY);
3425 
3426 	return 0;
3427 
3428 out3:
3429 	lan78xx_unbind(dev, intf);
3430 out2:
3431 	free_netdev(netdev);
3432 out1:
3433 	usb_put_dev(udev);
3434 
3435 	return ret;
3436 }
3437 
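/* Bit-serial CRC-16 variant (polynomial 0x8005, initial value 0xFFFF,
 * data consumed LSB first) over the wake-frame filter bytes.  The
 * result is written into the WUF_CFGX CRC16 field; the hardware is
 * expected to run the same computation over incoming frames.
 */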
3438 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3439 {
3440 	const u16 crc16poly = 0x8005;
3441 	int i;
3442 	u16 bit, crc, msb;
3443 	u8 data;
3444 
3445 	crc = 0xFFFF;
3446 	for (i = 0; i < len; i++) {
3447 		data = *buf++;
3448 		for (bit = 0; bit < 8; bit++) {
3449 			msb = crc >> 15;
3450 			crc <<= 1;
3451 
3452 			if (msb ^ (u16)(data & 1)) {
3453 				crc ^= crc16poly;
3454 				crc |= (u16)0x0001U;
3455 			}
3456 			data >>= 1;
3457 		}
3458 	}
3459 
3460 	return crc;
3461 }
3462 
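/* Wake-up frame filters: each WUF_CFG(n) entry pairs a filter type
 * and a CRC-16 with WUF_MASK0..3(n), which by all appearances form a
 * byte-enable bitmap over the start of the frame.  A mask of 7 covers
 * the 3-byte IPv4 multicast prefix 01:00:5E, 3 covers the 2-byte IPv6
 * multicast prefix 33:33, and 0x3000 covers bytes 12-13 (the
 * EtherType, 0x0806 for ARP); the CRC is computed over exactly the
 * enabled bytes.
 */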
3463 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3464 {
3465 	u32 buf;
3466 	int ret;
3467 	int mask_index;
3468 	u16 crc;
3469 	u32 temp_wucsr;
3470 	u32 temp_pmt_ctl;
3471 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3472 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3473 	const u8 arp_type[2] = { 0x08, 0x06 };
3474 
3475 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3476 	buf &= ~MAC_TX_TXEN_;
3477 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3478 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3479 	buf &= ~MAC_RX_RXEN_;
3480 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3481 
3482 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3483 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3484 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3485 
3486 	temp_wucsr = 0;
3487 
3488 	temp_pmt_ctl = 0;
3489 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3490 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3491 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3492 
3493 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3494 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3495 
3496 	mask_index = 0;
3497 	if (wol & WAKE_PHY) {
3498 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
3499 
3500 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3501 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3502 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3503 	}
3504 	if (wol & WAKE_MAGIC) {
3505 		temp_wucsr |= WUCSR_MPEN_;
3506 
3507 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3508 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3509 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
3510 	}
3511 	if (wol & WAKE_BCAST) {
3512 		temp_wucsr |= WUCSR_BCST_EN_;
3513 
3514 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3515 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3516 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3517 	}
3518 	if (wol & WAKE_MCAST) {
3519 		temp_wucsr |= WUCSR_WAKE_EN_;
3520 
3521 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3522 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
3523 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3524 					WUF_CFGX_EN_ |
3525 					WUF_CFGX_TYPE_MCAST_ |
3526 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3527 					(crc & WUF_CFGX_CRC16_MASK_));
3528 
3529 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
3530 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3531 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3532 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3533 		mask_index++;
3534 
3535 		/* for IPv6 Multicast */
3536 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
3537 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3538 					WUF_CFGX_EN_ |
3539 					WUF_CFGX_TYPE_MCAST_ |
3540 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3541 					(crc & WUF_CFGX_CRC16_MASK_));
3542 
3543 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
3544 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3545 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3546 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3547 		mask_index++;
3548 
3549 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3550 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3551 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3552 	}
3553 	if (wol & WAKE_UCAST) {
3554 		temp_wucsr |= WUCSR_PFDA_EN_;
3555 
3556 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3557 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3558 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3559 	}
3560 	if (wol & WAKE_ARP) {
3561 		temp_wucsr |= WUCSR_WAKE_EN_;
3562 
3563 		/* set WUF_CFG & WUF_MASK
3564 		 * for packettype (offset 12,13) = ARP (0x0806)
3565 		 */
3566 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
3567 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
3568 					WUF_CFGX_EN_ |
3569 					WUF_CFGX_TYPE_ALL_ |
3570 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
3571 					(crc & WUF_CFGX_CRC16_MASK_));
3572 
3573 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
3574 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
3575 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
3576 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
3577 		mask_index++;
3578 
3579 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3580 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3581 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3582 	}
3583 
3584 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
3585 
3586 	/* when multiple WOL bits are set */
3587 	if (hweight_long((unsigned long)wol) > 1) {
3588 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
3589 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
3590 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
3591 	}
3592 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
3593 
3594 	/* clear WUPS */
3595 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3596 	buf |= PMT_CTL_WUPS_MASK_;
3597 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3598 
3599 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3600 	buf |= MAC_RX_RXEN_;
3601 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
3602 
3603 	return 0;
3604 }
3605 
3606 static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
3607 {
3608 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3609 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3610 	u32 buf;
3611 	int ret;
3612 	int event;
3613 
3614 	event = message.event;
3615 
3616 	if (!dev->suspend_count++) {
3617 		spin_lock_irq(&dev->txq.lock);
3618 		/* don't autosuspend while transmitting */
3619 		if ((skb_queue_len(&dev->txq) ||
3620 		     skb_queue_len(&dev->txq_pend)) &&
3621 		    PMSG_IS_AUTO(message)) {
3622 			spin_unlock_irq(&dev->txq.lock);
3623 			ret = -EBUSY;
3624 			goto out;
3625 		} else {
3626 			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
3627 			spin_unlock_irq(&dev->txq.lock);
3628 		}
3629 
3630 		/* stop TX & RX */
3631 		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3632 		buf &= ~MAC_TX_TXEN_;
3633 		ret = lan78xx_write_reg(dev, MAC_TX, buf);
3634 		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3635 		buf &= ~MAC_RX_RXEN_;
3636 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
3637 
3638 		/* empty out the RX and TX queues */
3639 		netif_device_detach(dev->net);
3640 		lan78xx_terminate_urbs(dev);
3641 		usb_kill_urb(dev->urb_intr);
3642 
3643 		/* reattach */
3644 		netif_device_attach(dev->net);
3645 	}
3646 
3647 	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3648 		del_timer(&dev->stat_monitor);
3649 
3650 		if (PMSG_IS_AUTO(message)) {
3651 			/* auto suspend (selective suspend) */
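			/* Runtime PM path: keep the link alive and arm wake on
			 * any received good frame (SUS_MODE_3), independent of
			 * the user's WoL settings.  Userspace opts in via the
			 * standard USB runtime-PM knob, e.g.
			 *   echo auto > /sys/bus/usb/devices/<dev>/power/control
			 */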
3652 			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3653 			buf &= ~MAC_TX_TXEN_;
3654 			ret = lan78xx_write_reg(dev, MAC_TX, buf);
3655 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3656 			buf &= ~MAC_RX_RXEN_;
3657 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3658 
3659 			ret = lan78xx_write_reg(dev, WUCSR, 0);
3660 			ret = lan78xx_write_reg(dev, WUCSR2, 0);
3661 			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3662 
3663 			/* set goodframe wakeup */
3664 			ret = lan78xx_read_reg(dev, WUCSR, &buf);
3665 
3666 			buf |= WUCSR_RFE_WAKE_EN_;
3667 			buf |= WUCSR_STORE_WAKE_;
3668 
3669 			ret = lan78xx_write_reg(dev, WUCSR, buf);
3670 
3671 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3672 
3673 			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
3674 			buf |= PMT_CTL_RES_CLR_WKP_STS_;
3675 
3676 			buf |= PMT_CTL_PHY_WAKE_EN_;
3677 			buf |= PMT_CTL_WOL_EN_;
3678 			buf &= ~PMT_CTL_SUS_MODE_MASK_;
3679 			buf |= PMT_CTL_SUS_MODE_3_;
3680 
3681 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3682 
3683 			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3684 
3685 			buf |= PMT_CTL_WUPS_MASK_;
3686 
3687 			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3688 
3689 			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
3690 			buf |= MAC_RX_RXEN_;
3691 			ret = lan78xx_write_reg(dev, MAC_RX, buf);
3692 		} else {
3693 			lan78xx_set_suspend(dev, pdata->wol);
3694 		}
3695 	}
3696 
3697 	ret = 0;
3698 out:
3699 	return ret;
3700 }
3701 
3702 static int lan78xx_resume(struct usb_interface *intf)
3703 {
3704 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3705 	struct sk_buff *skb;
3706 	struct urb *res;
3707 	int ret;
3708 	u32 buf;
3709 
3710 	if (!timer_pending(&dev->stat_monitor)) {
3711 		dev->delta = 1;
3712 		mod_timer(&dev->stat_monitor,
3713 			  jiffies + STAT_UPDATE_TIMER);
3714 	}
3715 
3716 	if (!--dev->suspend_count) {
3717 		/* resume interrupt URBs */
3718 		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
3719 			usb_submit_urb(dev->urb_intr, GFP_NOIO);
3720 
3721 		spin_lock_irq(&dev->txq.lock);
3722 		while ((res = usb_get_from_anchor(&dev->deferred))) {
3723 			skb = (struct sk_buff *)res->context;
3724 			ret = usb_submit_urb(res, GFP_ATOMIC);
3725 			if (ret < 0) {
3726 				dev_kfree_skb_any(skb);
3727 				usb_free_urb(res);
3728 				usb_autopm_put_interface_async(dev->intf);
3729 			} else {
3730 				netif_trans_update(dev->net);
3731 				lan78xx_queue_skb(&dev->txq, skb, tx_start);
3732 			}
3733 		}
3734 
3735 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
3736 		spin_unlock_irq(&dev->txq.lock);
3737 
3738 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
3739 			if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3740 				netif_start_queue(dev->net);
3741 			tasklet_schedule(&dev->bh);
3742 		}
3743 	}
3744 
3745 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
3746 	ret = lan78xx_write_reg(dev, WUCSR, 0);
3747 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3748 
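	/* The *_RCD_ and *_FR_ status bits written below appear to be
	 * write-one-to-clear (consistent with zeroing the registers first):
	 * setting them acknowledges any wake event latched while suspended
	 * so stale status cannot retrigger the wake logic.
	 */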
3749 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
3750 					     WUCSR2_ARP_RCD_ |
3751 					     WUCSR2_IPV6_TCPSYN_RCD_ |
3752 					     WUCSR2_IPV4_TCPSYN_RCD_);
3753 
3754 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
3755 					    WUCSR_EEE_RX_WAKE_ |
3756 					    WUCSR_PFDA_FR_ |
3757 					    WUCSR_RFE_WAKE_FR_ |
3758 					    WUCSR_WUFR_ |
3759 					    WUCSR_MPR_ |
3760 					    WUCSR_BCST_FR_);
3761 
3762 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
3763 	buf |= MAC_TX_TXEN_;
3764 	ret = lan78xx_write_reg(dev, MAC_TX, buf);
3765 
3766 	return 0;
3767 }
3768 
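/* reset_resume: the USB core could not preserve device state across the
 * suspend (e.g. the port was reset or lost power), so rerun the full chip
 * reset and PHY bring-up before taking the normal resume path.
 */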
3769 static int lan78xx_reset_resume(struct usb_interface *intf)
3770 {
3771 	struct lan78xx_net *dev = usb_get_intfdata(intf);
3772 
3773 	lan78xx_reset(dev);
3774 
3775 	lan78xx_phy_init(dev);
3776 
3777 	return lan78xx_resume(intf);
3778 }
3779 
3780 static const struct usb_device_id products[] = {
3781 	{
3782 	/* LAN7800 USB Gigabit Ethernet Device */
3783 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
3784 	},
3785 	{
3786 	/* LAN7850 USB Gigabit Ethernet Device */
3787 	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
3788 	},
3789 	{},
3790 };
3791 MODULE_DEVICE_TABLE(usb, products);
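/* MODULE_DEVICE_TABLE exports the ID list above in the module's modalias
 * data, so udev/kmod can autoload this driver when a matching VID:PID is
 * plugged in.
 */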
3792 
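/* supports_autosuspend opts the driver in to USB runtime PM (the
 * PMSG_IS_AUTO path in lan78xx_suspend() above); disable_hub_initiated_lpm
 * keeps hubs from initiating USB 3.0 link power-state transitions on idle
 * timeout, leaving link power management decisions to the host.
 */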
3793 static struct usb_driver lan78xx_driver = {
3794 	.name			= DRIVER_NAME,
3795 	.id_table		= products,
3796 	.probe			= lan78xx_probe,
3797 	.disconnect		= lan78xx_disconnect,
3798 	.suspend		= lan78xx_suspend,
3799 	.resume			= lan78xx_resume,
3800 	.reset_resume		= lan78xx_reset_resume,
3801 	.supports_autosuspend	= 1,
3802 	.disable_hub_initiated_lpm = 1,
3803 };
3804 
3805 module_usb_driver(lan78xx_driver);
3806 
3807 MODULE_AUTHOR(DRIVER_AUTHOR);
3808 MODULE_DESCRIPTION(DRIVER_DESC);
3809 MODULE_LICENSE("GPL");
3810