// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/phy_fixed.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME "lan78xx"

#define TX_TIMEOUT_JIFFIES (5 * HZ)
#define THROTTLE_JIFFIES (HZ / 8)
#define UNLINK_TIMEOUT_MS 3

#define RX_MAX_QUEUE_MEMORY (60 * 1518)

#define SS_USB_PKT_SIZE (1024)
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)

#define MAX_RX_FIFO_SIZE (12 * 1024)
#define MAX_TX_FIFO_SIZE (12 * 1024)
#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY (0x0800)
#define MAX_SINGLE_PACKET_SIZE (9000)
#define DEFAULT_TX_CSUM_ENABLE (true)
#define DEFAULT_RX_CSUM_ENABLE (true)
#define DEFAULT_TSO_CSUM_ENABLE (true)
#define DEFAULT_VLAN_FILTER_ENABLE (true)
#define DEFAULT_VLAN_RX_OFFLOAD (true)
#define TX_OVERHEAD (8)
#define RXW_PADDING 2

#define LAN78XX_USB_VENDOR_ID (0x0424)
#define LAN7800_USB_PRODUCT_ID (0x7800)
#define LAN7850_USB_PRODUCT_ID (0x7850)
#define LAN7801_USB_PRODUCT_ID (0x7801)
#define LAN78XX_EEPROM_MAGIC (0x78A5)
#define LAN78XX_OTP_MAGIC (0x78F3)
#define AT29M2AF_USB_VENDOR_ID (0x07C9)
#define AT29M2AF_USB_PRODUCT_ID (0x0012)

#define MII_READ 1
#define MII_WRITE 0

#define EEPROM_INDICATOR (0xA5)
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE 512
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)

#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
		  WAKE_MCAST | WAKE_BCAST | \
		  WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE 1
#define BULK_OUT_PIPE 2

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER (1 * 1000)

/* defines interrupts from interrupt EP */
#define MAX_INT_EP (32)
#define INT_EP_INTEP (31)
#define INT_EP_OTP_WR_DONE (28)
#define INT_EP_EEE_TX_LPI_START (26)
#define INT_EP_EEE_TX_LPI_STOP (25)
#define INT_EP_EEE_RX_LPI (24)
#define INT_EP_MAC_RESET_TIMEOUT (23)
#define INT_EP_RDFO (22)
#define INT_EP_TXE (21)
#define INT_EP_USB_STATUS (20)
#define INT_EP_TX_DIS (19)
#define INT_EP_RX_DIS (18)
#define INT_EP_PHY (17)
#define INT_EP_DP (16)
#define INT_EP_MAC_ERR (15)
#define INT_EP_TDFU (14)
#define INT_EP_TDFO (13)
#define INT_EP_UTX (12)
#define INT_EP_GPIO_11 (11)
#define INT_EP_GPIO_10 (10)
#define INT_EP_GPIO_9 (9)
#define INT_EP_GPIO_8 (8)
#define INT_EP_GPIO_7 (7)
#define INT_EP_GPIO_6 (6)
#define INT_EP_GPIO_5 (5)
#define INT_EP_GPIO_4 (4)
#define INT_EP_GPIO_3 (3)
#define INT_EP_GPIO_2 (2)
#define INT_EP_GPIO_1 (1)
#define INT_EP_GPIO_0 (0)
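
/* The INT_EP_* values above are bit positions within the 32-bit status
 * word delivered on the interrupt endpoint; the same positions serve as
 * enable bits in the INT_EP_CTL register and as hwirq numbers for the
 * IRQ domain registered later in this file.
 */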

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
128 "Rx Fragment Errors",
129 "RX Jabber Errors",
130 "RX Undersize Frame Errors",
131 "RX Oversize Frame Errors",
132 "RX Dropped Frames",
133 "RX Unicast Byte Count",
134 "RX Broadcast Byte Count",
135 "RX Multicast Byte Count",
136 "RX Unicast Frames",
137 "RX Broadcast Frames",
138 "RX Multicast Frames",
139 "RX Pause Frames",
140 "RX 64 Byte Frames",
141 "RX 65 - 127 Byte Frames",
142 "RX 128 - 255 Byte Frames",
143 "RX 256 - 511 Bytes Frames",
144 "RX 512 - 1023 Byte Frames",
145 "RX 1024 - 1518 Byte Frames",
146 "RX Greater 1518 Byte Frames",
147 "EEE RX LPI Transitions",
148 "EEE RX LPI Time",
149 "TX FCS Errors",
150 "TX Excess Deferral Errors",
151 "TX Carrier Errors",
152 "TX Bad Byte Count",
153 "TX Single Collisions",
154 "TX Multiple Collisions",
155 "TX Excessive Collision",
156 "TX Late Collisions",
157 "TX Unicast Byte Count",
158 "TX Broadcast Byte Count",
159 "TX Multicast Byte Count",
160 "TX Unicast Frames",
161 "TX Broadcast Frames",
162 "TX Multicast Frames",
163 "TX Pause Frames",
164 "TX 64 Byte Frames",
165 "TX 65 - 127 Byte Frames",
166 "TX 128 - 255 Byte Frames",
167 "TX 256 - 511 Bytes Frames",
168 "TX 512 - 1023 Byte Frames",
169 "TX 1024 - 1518 Byte Frames",
170 "TX Greater 1518 Byte Frames",
171 "EEE TX LPI Transitions",
172 "EEE TX LPI Time",
173 };

struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};

struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};

static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};

#define PHY_REG_SIZE (32 * sizeof(u32))

struct lan78xx_net;

struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};

enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};

struct skb_data { /* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};

#define EVENT_TX_HALT 0
#define EVENT_RX_HALT 1
#define EVENT_RX_MEMORY 2
#define EVENT_STS_SPLIT 3
#define EVENT_LINK_RESET 4
#define EVENT_RX_PAUSED 5
#define EVENT_DEV_WAKING 6
#define EVENT_DEV_ASLEEP 7
#define EVENT_DEV_OPEN 8
#define EVENT_STAT_UPDATE 9

struct statstage {
	struct mutex access_lock; /* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};

struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock; /* for irq bus access */
};

struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;

	int rx_qlen;
	int tx_qlen;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu; /* count any extra framing */
	size_t rx_urb_size; /* size for rx urbs */

	unsigned long flags;

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;
	struct timer_list stat_monitor;

	unsigned long data[5];

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;
	u8 fc_request_control;

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;
};

/* define external phy id */
#define PHY_LAN8835 (0x0007C130)
#define PHY_KSZ9031RNX (0x00221620)

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

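/* Register access helpers. The 4-byte transfer buffer is heap-allocated
 * with kmalloc() rather than placed on the stack because usb_control_msg()
 * data must be DMA-capable, and stack memory is not safe for USB DMA.
 */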
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

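/* The hardware statistics counters are 32 bits wide, while the driver
 * reports 64-bit totals. Each refresh compares the freshly read value
 * with the previously saved snapshot; a smaller value means the counter
 * wrapped, which is recorded in rollover_count.
 */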
#define check_counter_rollover(struct1, dev_stats, member) { \
	if (struct1->member < dev_stats.saved.member) \
		dev_stats.rollover_count.member++; \
	}

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

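/* Fold the 32-bit hardware counters and their recorded rollovers into
 * the 64-bit running totals: total = current + count * (rollover_max + 1).
 * The structures are walked as flat u32/u64 arrays, which relies on
 * lan78xx_statstage and lan78xx_statstage64 declaring their members in
 * exactly the same order.
 */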
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

/* Loop until the read is completed, with timeout. Called with phy_mutex
 * held.
 */
static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -EIO;
}

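/* Compose a MII_ACC register value: PHY address, register index, the
 * read/write direction bit, and the busy bit that starts the transaction.
 */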
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -EIO;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		return retval;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (unlikely(ret < 0)) {
			retval = -EIO;
			goto exit;
		}

		data[i] = val & 0xFF;
		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if ((ret == 0) && (sig == EEPROM_INDICATOR))
		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
	else
		ret = -EINVAL;

	return ret;
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}

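/* OTP (one-time-programmable) memory access. The OTP block must first be
 * powered up by clearing OTP_PWR_DN_PWRDN_N_; each byte is then addressed
 * through OTP_ADDR1 (high bits) and OTP_ADDR2 (low bits) and transferred
 * while polling OTP_STATUS for completion.
 */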
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));

		lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;

	lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		lan78xx_write_reg(dev, OTP_ADDR1,
				  ((offset + i) >> 8) & OTP_ADDR1_15_11);
		lan78xx_write_reg(dev, OTP_ADDR2,
				  ((offset + i) & OTP_ADDR2_10_3));
		lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return -EIO;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");

	return -EIO;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

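/* Multicast filtering uses a 512-bit hash table kept in device RAM
 * (written via the dataport helpers above); the bit index is derived
 * from the top nine bits of the Ethernet CRC-32 of the address.
 */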
/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		lan78xx_write_reg(dev, MAF_HI(i), 0);
		lan78xx_write_reg(dev, MAF_LO(i),
				  pdata->pfilter_table[i][1]);
		lan78xx_write_reg(dev, MAF_HI(i),
				  pdata->pfilter_table[i][0]);
	}

	lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}

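/* ndo_set_rx_mode callback. It runs in atomic context, so it only
 * rebuilds the filter tables in memory under the rfe_ctl spinlock and
 * defers the actual USB register writes to the set_multicast work item.
 */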
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
					(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

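/* Resolve pause settings either from the autonegotiated advertisements
 * or from the user-forced fc_request_control, then program the FCT_FLOW
 * thresholds before enabling flow control in the FLOW register.
 */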
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}

static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		del_timer(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0) {
			local_irq_disable();
			generic_handle_irq(dev->domain_data.phyirq);
			local_irq_enable();
		}
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
	else
		ret = -EINVAL;

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	/* Reject unsupported wake options before taking the autopm
	 * reference, so the error path cannot leak a PM usage count.
	 */
	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}

static u32 lan78xx_get_link(struct net_device *net)
{
	u32 link;

	mutex_lock(&net->phydev->lock);
	phy_read_status(net->phydev);
	link = net->phydev->link;
	mutex_unlock(&net->phydev->lock);

	return link;
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	if (!netdev->phydev)
		return (sizeof(lan78xx_regs));
	else
		return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link = lan78xx_get_link,
	.nway_reset = phy_ethtool_nway_reset,
	.get_drvinfo = lan78xx_get_drvinfo,
	.get_msglevel = lan78xx_get_msglevel,
	.set_msglevel = lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom = lan78xx_ethtool_get_eeprom,
	.set_eeprom = lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings = lan78xx_get_strings,
	.get_wol = lan78xx_get_wol,
	.set_wol = lan78xx_set_wol,
	.get_eee = lan78xx_get_eee,
	.set_eee = lan78xx_set_eee,
	.get_pauseparam = lan78xx_get_pause,
	.set_pauseparam = lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len = lan78xx_get_regs_len,
	.get_regs = lan78xx_get_regs,
};

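/* MAC address selection order: whatever is already programmed into
 * RX_ADDRL/RX_ADDRH, then a device-tree/platform-supplied address, then
 * EEPROM/OTP contents, and finally a random address as a last resort.
 */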
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];

	lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}

/* MDIO read and write wrappers for phylib */
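/* Each access writes MII_ACC with the busy bit set and then polls until
 * the hardware clears it; read data is returned through MII_DATA.
 */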
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}

static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}

static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}

static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;

	phy_print_status(phydev);
}

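/* The PHY interrupt arrives over the USB interrupt endpoint rather than
 * as a CPU interrupt, so the driver registers a small IRQ domain and a
 * slow-bus style irq_chip: mask/unmask only update the shadow irqenable
 * word, and irq_bus_sync_unlock writes it out to INT_EP_CTL, where the
 * register access is allowed to sleep.
 */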
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}

static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}

static const struct irq_domain_ops chip_domain_ops = {
	.map = irq_map,
	.unmap = irq_unmap,
};

static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}

static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}

static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;

	/* Do the register access here because irq_bus_lock and
	 * irq_bus_sync_unlock are the only two callbacks that are executed
	 * in a non-atomic context.
	 */
1904 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1905 if (buf != data->irqenable)
1906 lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1907
1908 mutex_unlock(&data->irq_lock);
1909 }
1910
1911 static struct irq_chip lan78xx_irqchip = {
1912 .name = "lan78xx-irqs",
1913 .irq_mask = lan78xx_irq_mask,
1914 .irq_unmask = lan78xx_irq_unmask,
1915 .irq_bus_lock = lan78xx_irq_bus_lock,
1916 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
1917 };
1918
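/* Create an IRQ domain covering the MAX_INT_EP bits reported by the
 * interrupt endpoint and pre-map the PHY interrupt (INT_EP_PHY) so
 * phylib can use it instead of polling.
 */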
1919 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1920 {
1921 struct device_node *of_node;
1922 struct irq_domain *irqdomain;
1923 unsigned int irqmap = 0;
1924 u32 buf;
1925 int ret = 0;
1926
1927 of_node = dev->udev->dev.parent->of_node;
1928
1929 mutex_init(&dev->domain_data.irq_lock);
1930
1931 lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1932 dev->domain_data.irqenable = buf;
1933
1934 dev->domain_data.irqchip = &lan78xx_irqchip;
1935 dev->domain_data.irq_handler = handle_simple_irq;
1936
1937 irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1938 &chip_domain_ops, &dev->domain_data);
1939 if (irqdomain) {
1940 /* create mapping for PHY interrupt */
1941 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1942 if (!irqmap) {
1943 irq_domain_remove(irqdomain);
1944
1945 irqdomain = NULL;
1946 ret = -EINVAL;
1947 }
1948 } else {
1949 ret = -EINVAL;
1950 }
1951
1952 dev->domain_data.irqdomain = irqdomain;
1953 dev->domain_data.phyirq = irqmap;
1954
1955 return ret;
1956 }
1957
1958 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1959 {
1960 if (dev->domain_data.phyirq > 0) {
1961 irq_dispose_mapping(dev->domain_data.phyirq);
1962
1963 if (dev->domain_data.irqdomain)
1964 irq_domain_remove(dev->domain_data.irqdomain);
1965 }
1966 dev->domain_data.phyirq = 0;
1967 dev->domain_data.irqdomain = NULL;
1968 }
1969
1970 static int lan8835_fixup(struct phy_device *phydev)
1971 {
1972 int buf;
1973 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1974
1975 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1976 buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1977 buf &= ~0x1800;
1978 buf |= 0x0800;
1979 phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1980
1981 /* RGMII MAC TXC Delay Enable */
1982 lan78xx_write_reg(dev, MAC_RGMII_ID,
1983 MAC_RGMII_ID_TXC_DELAY_EN_);
1984
1985 /* RGMII TX DLL Tune Adjust */
1986 lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1987
1988 dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1989
1990 return 1;
1991 }
1992
1993 static int ksz9031rnx_fixup(struct phy_device *phydev)
1994 {
1995 struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1996
1997 /* Micrel9301RNX PHY configuration */
1998 /* RGMII Control Signal Pad Skew */
1999 phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
2000 /* RGMII RX Data Pad Skew */
2001 phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
2002 /* RGMII RX Clock Pad Skew */
2003 phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2004
2005 dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2006
2007 return 1;
2008 }
2009
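/* LAN7801 uses an external PHY over RGMII. If no PHY is found on the
 * MDIO bus, fall back to a fixed 1 Gbps full-duplex link; otherwise
 * register the pad-skew/delay fixups for the supported external PHYs.
 */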
2010 static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
2011 {
2012 u32 buf;
2013 int ret;
2014 struct fixed_phy_status fphy_status = {
2015 .link = 1,
2016 .speed = SPEED_1000,
2017 .duplex = DUPLEX_FULL,
2018 };
2019 struct phy_device *phydev;
2020
2021 phydev = phy_find_first(dev->mdiobus);
2022 if (!phydev) {
2023 netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
2024 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
2025 if (IS_ERR(phydev)) {
2026 netdev_err(dev->net, "No PHY/fixed_PHY found\n");
2027 return NULL;
2028 }
2029 netdev_dbg(dev->net, "Registered FIXED PHY\n");
2030 dev->interface = PHY_INTERFACE_MODE_RGMII;
2031 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2032 MAC_RGMII_ID_TXC_DELAY_EN_);
2033 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2034 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2035 buf |= HW_CFG_CLK125_EN_;
2036 buf |= HW_CFG_REFCLK25_EN_;
2037 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2038 } else {
2039 if (!phydev->drv) {
2040 netdev_err(dev->net, "no PHY driver found\n");
2041 return NULL;
2042 }
2043 dev->interface = PHY_INTERFACE_MODE_RGMII;
2044 /* external PHY fixup for KSZ9031RNX */
2045 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2046 ksz9031rnx_fixup);
2047 if (ret < 0) {
2048 netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
2049 return NULL;
2050 }
2051 /* external PHY fixup for LAN8835 */
2052 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2053 lan8835_fixup);
2054 if (ret < 0) {
2055 netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
2056 return NULL;
2057 }
2058 /* add more external PHY fixup here if needed */
2059
2060 phydev->is_internal = false;
2061 }
2062 return phydev;
2063 }
2064
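/* Attach the PHY (internal for LAN7800/LAN7850, external or fixed for
 * LAN7801), wire up the link-change callback, and advertise flow
 * control plus the link modes the MAC supports.
 */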
2065 static int lan78xx_phy_init(struct lan78xx_net *dev)
2066 {
2067 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2068 int ret;
2069 u32 mii_adv;
2070 struct phy_device *phydev;
2071
2072 switch (dev->chipid) {
2073 case ID_REV_CHIP_ID_7801_:
2074 phydev = lan7801_phy_init(dev);
2075 if (!phydev) {
2076 netdev_err(dev->net, "lan7801: PHY Init Failed");
2077 return -EIO;
2078 }
2079 break;
2080
2081 case ID_REV_CHIP_ID_7800_:
2082 case ID_REV_CHIP_ID_7850_:
2083 phydev = phy_find_first(dev->mdiobus);
2084 if (!phydev) {
2085 netdev_err(dev->net, "no PHY found\n");
2086 return -EIO;
2087 }
2088 phydev->is_internal = true;
2089 dev->interface = PHY_INTERFACE_MODE_GMII;
2090 break;
2091
2092 default:
2093 netdev_err(dev->net, "Unknown CHIP ID found\n");
2094 return -EIO;
2095 }
2096
2097 /* if phyirq is not set, use polling mode in phylib */
2098 if (dev->domain_data.phyirq > 0)
2099 phydev->irq = dev->domain_data.phyirq;
2100 else
2101 phydev->irq = PHY_POLL;
2102 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2103
2104 /* set to AUTOMDIX */
2105 phydev->mdix = ETH_TP_MDI_AUTO;
2106
2107 ret = phy_connect_direct(dev->net, phydev,
2108 lan78xx_link_status_change,
2109 dev->interface);
2110 if (ret) {
2111 netdev_err(dev->net, "can't attach PHY to %s\n",
2112 dev->mdiobus->id);
2113 if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2114 if (phy_is_pseudo_fixed_link(phydev)) {
2115 fixed_phy_unregister(phydev);
2116 } else {
2117 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
2118 0xfffffff0);
2119 phy_unregister_fixup_for_uid(PHY_LAN8835,
2120 0xfffffff0);
2121 }
2122 }
2123 return -EIO;
2124 }
2125
2126 /* MAC doesn't support 1000T Half */
2127 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2128
2129 /* support both flow controls */
2130 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2131 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2132 phydev->advertising);
2133 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2134 phydev->advertising);
2135 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2136 mii_adv_to_linkmode_adv_t(fc, mii_adv);
2137 linkmode_or(phydev->advertising, fc, phydev->advertising);
2138
2139 if (phydev->mdio.dev.of_node) {
2140 u32 reg;
2141 int len;
2142
2143 len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
2144 "microchip,led-modes",
2145 sizeof(u32));
2146 if (len >= 0) {
2147 /* Ensure the appropriate LEDs are enabled */
2148 lan78xx_read_reg(dev, HW_CFG, &reg);
2149 reg &= ~(HW_CFG_LED0_EN_ |
2150 HW_CFG_LED1_EN_ |
2151 HW_CFG_LED2_EN_ |
2152 HW_CFG_LED3_EN_);
2153 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2154 (len > 1) * HW_CFG_LED1_EN_ |
2155 (len > 2) * HW_CFG_LED2_EN_ |
2156 (len > 3) * HW_CFG_LED3_EN_;
2157 lan78xx_write_reg(dev, HW_CFG, reg);
2158 }
2159 }
2160
2161 genphy_config_aneg(phydev);
2162
2163 dev->fc_autoneg = phydev->autoneg;
2164
2165 return 0;
2166 }
2167
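/* Program the maximum RX frame length (size plus 4 bytes of FCS),
 * disabling the receiver around the update if it is currently running.
 */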
2168 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2169 {
2170 u32 buf;
2171 bool rxenabled;
2172
2173 lan78xx_read_reg(dev, MAC_RX, &buf);
2174
2175 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2176
2177 if (rxenabled) {
2178 buf &= ~MAC_RX_RXEN_;
2179 lan78xx_write_reg(dev, MAC_RX, buf);
2180 }
2181
2182 /* add 4 to size for FCS */
2183 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2184 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2185
2186 lan78xx_write_reg(dev, MAC_RX, buf);
2187
2188 if (rxenabled) {
2189 buf |= MAC_RX_RXEN_;
2190 lan78xx_write_reg(dev, MAC_RX, buf);
2191 }
2192
2193 return 0;
2194 }
2195
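/* Asynchronously unlink every URB still queued on @q, taking a
 * reference on each URB so the unlink cannot race with its completion
 * handler. Returns the number of URBs unlinked.
 */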
2196 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2197 {
2198 struct sk_buff *skb;
2199 unsigned long flags;
2200 int count = 0;
2201
2202 spin_lock_irqsave(&q->lock, flags);
2203 while (!skb_queue_empty(q)) {
2204 struct skb_data *entry;
2205 struct urb *urb;
2206 int ret;
2207
2208 skb_queue_walk(q, skb) {
2209 entry = (struct skb_data *)skb->cb;
2210 if (entry->state != unlink_start)
2211 goto found;
2212 }
2213 break;
2214 found:
2215 entry->state = unlink_start;
2216 urb = entry->urb;
2217
2218 /* Take a reference on the URB so it cannot be freed
2219 * during usb_unlink_urb(), which could otherwise trigger
2220 * a use-after-free inside usb_unlink_urb(), since it is
2221 * always racing with the .complete handler (including
2222 * defer_bh).
2223 */
2224 usb_get_urb(urb);
2225 spin_unlock_irqrestore(&q->lock, flags);
2226 /* during some PM-driven resume scenarios,
2227 * these (async) unlinks complete immediately
2228 */
2229 ret = usb_unlink_urb(urb);
2230 if (ret != -EINPROGRESS && ret != 0)
2231 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2232 else
2233 count++;
2234 usb_put_urb(urb);
2235 spin_lock_irqsave(&q->lock, flags);
2236 }
2237 spin_unlock_irqrestore(&q->lock, flags);
2238 return count;
2239 }
2240
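/* Update the MTU and the RX max frame length, growing the RX URB size
 * and recycling in-flight RX URBs when the new frame size no longer
 * fits the old buffers.
 */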
2241 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2242 {
2243 struct lan78xx_net *dev = netdev_priv(netdev);
2244 int ll_mtu = new_mtu + netdev->hard_header_len;
2245 int old_hard_mtu = dev->hard_mtu;
2246 int old_rx_urb_size = dev->rx_urb_size;
2247
2248 /* no second zero-length packet read wanted after mtu-sized packets */
2249 if ((ll_mtu % dev->maxpacket) == 0)
2250 return -EDOM;
2251
2252 lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2253
2254 netdev->mtu = new_mtu;
2255
2256 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2257 if (dev->rx_urb_size == old_hard_mtu) {
2258 dev->rx_urb_size = dev->hard_mtu;
2259 if (dev->rx_urb_size > old_rx_urb_size) {
2260 if (netif_running(dev->net)) {
2261 unlink_urbs(dev, &dev->rxq);
2262 tasklet_schedule(&dev->bh);
2263 }
2264 }
2265 }
2266
2267 return 0;
2268 }
2269
2270 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2271 {
2272 struct lan78xx_net *dev = netdev_priv(netdev);
2273 struct sockaddr *addr = p;
2274 u32 addr_lo, addr_hi;
2275
2276 if (netif_running(netdev))
2277 return -EBUSY;
2278
2279 if (!is_valid_ether_addr(addr->sa_data))
2280 return -EADDRNOTAVAIL;
2281
2282 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2283
2284 addr_lo = netdev->dev_addr[0] |
2285 netdev->dev_addr[1] << 8 |
2286 netdev->dev_addr[2] << 16 |
2287 netdev->dev_addr[3] << 24;
2288 addr_hi = netdev->dev_addr[4] |
2289 netdev->dev_addr[5] << 8;
2290
2291 lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2292 lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2293
2294 /* Added to support MAC address changes */
2295 lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
2296 lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
2297
2298 return 0;
2299 }
2300
2301 /* Enable or disable Rx checksum offload engine */
2302 static int lan78xx_set_features(struct net_device *netdev,
2303 netdev_features_t features)
2304 {
2305 struct lan78xx_net *dev = netdev_priv(netdev);
2306 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2307 unsigned long flags;
2308
2309 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2310
2311 if (features & NETIF_F_RXCSUM) {
2312 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2313 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2314 } else {
2315 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2316 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2317 }
2318
2319 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2320 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2321 else
2322 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2323
2324 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2325 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2326 else
2327 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2328
2329 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2330
2331 lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2332
2333 return 0;
2334 }
2335
2336 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2337 {
2338 struct lan78xx_priv *pdata =
2339 container_of(param, struct lan78xx_priv, set_vlan);
2340 struct lan78xx_net *dev = pdata->dev;
2341
2342 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2343 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2344 }
2345
2346 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2347 __be16 proto, u16 vid)
2348 {
2349 struct lan78xx_net *dev = netdev_priv(netdev);
2350 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2351 u16 vid_bit_index;
2352 u16 vid_dword_index;
2353
2354 vid_dword_index = (vid >> 5) & 0x7F;
2355 vid_bit_index = vid & 0x1F;
2356
2357 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2358
2359 /* defer register writes to a sleepable context */
2360 schedule_work(&pdata->set_vlan);
2361
2362 return 0;
2363 }
2364
2365 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2366 __be16 proto, u16 vid)
2367 {
2368 struct lan78xx_net *dev = netdev_priv(netdev);
2369 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2370 u16 vid_bit_index;
2371 u16 vid_dword_index;
2372
2373 vid_dword_index = (vid >> 5) & 0x7F;
2374 vid_bit_index = vid & 0x1F;
2375
2376 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2377
2378 /* defer register writes to a sleepable context */
2379 schedule_work(&pdata->set_vlan);
2380
2381 return 0;
2382 }
2383
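/* Initialise the USB LTM (Latency Tolerance Messaging) registers from
 * values stored in EEPROM or OTP when LTM is enabled, defaulting to
 * zeros otherwise.
 */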
2384 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2385 {
2386 int ret;
2387 u32 buf;
2388 u32 regs[6] = { 0 };
2389
2390 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2391 if (buf & USB_CFG1_LTM_ENABLE_) {
2392 u8 temp[2];
2393 /* Get values from EEPROM first */
2394 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2395 if (temp[0] == 24) {
2396 ret = lan78xx_read_raw_eeprom(dev,
2397 temp[1] * 2,
2398 24,
2399 (u8 *)regs);
2400 if (ret < 0)
2401 return;
2402 }
2403 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2404 if (temp[0] == 24) {
2405 ret = lan78xx_read_raw_otp(dev,
2406 temp[1] * 2,
2407 24,
2408 (u8 *)regs);
2409 if (ret < 0)
2410 return;
2411 }
2412 }
2413 }
2414
2415 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2416 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2417 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2418 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2419 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2420 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2421 }
2422
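/* Perform a lite reset (HW_CFG_LRST_), then reprogram the MAC address,
 * USB burst/FIFO parameters and offload features, reset the PHY, and
 * re-enable the TX and RX datapaths.
 */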
2423 static int lan78xx_reset(struct lan78xx_net *dev)
2424 {
2425 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2426 u32 buf;
2427 int ret = 0;
2428 unsigned long timeout;
2429 u8 sig;
2430
2431 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2432 buf |= HW_CFG_LRST_;
2433 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2434
2435 timeout = jiffies + HZ;
2436 do {
2437 mdelay(1);
2438 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2439 if (time_after(jiffies, timeout)) {
2440 netdev_warn(dev->net,
2441 "timeout on completion of LiteReset");
2442 return -EIO;
2443 }
2444 } while (buf & HW_CFG_LRST_);
2445
2446 lan78xx_init_mac_address(dev);
2447
2448 /* save DEVID for later usage */
2449 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2450 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2451 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2452
2453 /* Respond to the IN token with a NAK */
2454 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2455 buf |= USB_CFG_BIR_;
2456 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2457
2458 /* Init LTM */
2459 lan78xx_init_ltm(dev);
2460
2461 if (dev->udev->speed == USB_SPEED_SUPER) {
2462 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2463 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2464 dev->rx_qlen = 4;
2465 dev->tx_qlen = 4;
2466 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2467 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2468 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2469 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2470 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2471 } else {
2472 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2473 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2474 dev->rx_qlen = 4;
2475 dev->tx_qlen = 4;
2476 }
2477
2478 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2479 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2480
2481 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2482 buf |= HW_CFG_MEF_;
2483 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2484
2485 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2486 buf |= USB_CFG_BCE_;
2487 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2488
2489 /* set FIFO sizes */
2490 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2491 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2492
2493 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2494 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2495
2496 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2497 ret = lan78xx_write_reg(dev, FLOW, 0);
2498 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2499
2500 /* Don't need rfe_ctl_lock during initialisation */
2501 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2502 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2503 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2504
2505 /* Enable or disable checksum offload engines */
2506 lan78xx_set_features(dev->net, dev->net->features);
2507
2508 lan78xx_set_multicast(dev->net);
2509
2510 /* reset PHY */
2511 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2512 buf |= PMT_CTL_PHY_RST_;
2513 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2514
2515 timeout = jiffies + HZ;
2516 do {
2517 mdelay(1);
2518 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2519 if (time_after(jiffies, timeout)) {
2520 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2521 return -EIO;
2522 }
2523 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2524
2525 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2526 /* LAN7801 only has RGMII mode */
2527 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2528 buf &= ~MAC_CR_GMII_EN_;
2529
2530 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2531 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2532 if (!ret && sig != EEPROM_INDICATOR) {
2533 /* Implies there is no external eeprom. Set mac speed */
2534 netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2535 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2536 }
2537 }
2538 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2539
2540 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2541 buf |= MAC_TX_TXEN_;
2542 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2543
2544 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2545 buf |= FCT_TX_CTL_EN_;
2546 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2547
2548 ret = lan78xx_set_rx_max_frame_length(dev,
2549 dev->net->mtu + VLAN_ETH_HLEN);
2550
2551 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2552 buf |= MAC_RX_RXEN_;
2553 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2554
2555 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2556 buf |= FCT_RX_CTL_EN_;
2557 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2558
2559 return 0;
2560 }
2561
2562 static void lan78xx_init_stats(struct lan78xx_net *dev)
2563 {
2564 u32 *p;
2565 int i;
2566
2567 /* initialize for stats update
2568 * some counters are 20bits and some are 32bits
2569 */
2570 p = (u32 *)&dev->stats.rollover_max;
2571 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2572 p[i] = 0xFFFFF;
2573
2574 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2575 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2576 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2577 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2578 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2579 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2580 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2581 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2582 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2583 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2584
2585 set_bit(EVENT_STAT_UPDATE, &dev->flags);
2586 }
2587
2588 static int lan78xx_open(struct net_device *net)
2589 {
2590 struct lan78xx_net *dev = netdev_priv(net);
2591 int ret;
2592
2593 ret = usb_autopm_get_interface(dev->intf);
2594 if (ret < 0)
2595 goto out;
2596
2597 phy_start(net->phydev);
2598
2599 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2600
2601 /* for Link Check */
2602 if (dev->urb_intr) {
2603 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2604 if (ret < 0) {
2605 netif_err(dev, ifup, dev->net,
2606 "intr submit %d\n", ret);
2607 goto done;
2608 }
2609 }
2610
2611 lan78xx_init_stats(dev);
2612
2613 set_bit(EVENT_DEV_OPEN, &dev->flags);
2614
2615 netif_start_queue(net);
2616
2617 dev->link_on = false;
2618
2619 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2620 done:
2621 usb_autopm_put_interface(dev->intf);
2622
2623 out:
2624 return ret;
2625 }
2626
2627 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2628 {
2629 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2630 DECLARE_WAITQUEUE(wait, current);
2631 int temp;
2632
2633 /* ensure there are no more active urbs */
2634 add_wait_queue(&unlink_wakeup, &wait);
2635 set_current_state(TASK_UNINTERRUPTIBLE);
2636 dev->wait = &unlink_wakeup;
2637 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2638
2639 /* maybe wait for deletions to finish. */
2640 while (!skb_queue_empty(&dev->rxq) &&
2641 !skb_queue_empty(&dev->txq) &&
2642 !skb_queue_empty(&dev->done)) {
2643 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2644 set_current_state(TASK_UNINTERRUPTIBLE);
2645 netif_dbg(dev, ifdown, dev->net,
2646 "waited for %d urb completions\n", temp);
2647 }
2648 set_current_state(TASK_RUNNING);
2649 dev->wait = NULL;
2650 remove_wait_queue(&unlink_wakeup, &wait);
2651 }
2652
2653 static int lan78xx_stop(struct net_device *net)
2654 {
2655 struct lan78xx_net *dev = netdev_priv(net);
2656
2657 if (timer_pending(&dev->stat_monitor))
2658 del_timer_sync(&dev->stat_monitor);
2659
2660 if (net->phydev)
2661 phy_stop(net->phydev);
2662
2663 clear_bit(EVENT_DEV_OPEN, &dev->flags);
2664 netif_stop_queue(net);
2665
2666 netif_info(dev, ifdown, dev->net,
2667 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2668 net->stats.rx_packets, net->stats.tx_packets,
2669 net->stats.rx_errors, net->stats.tx_errors);
2670
2671 lan78xx_terminate_urbs(dev);
2672
2673 usb_kill_urb(dev->urb_intr);
2674
2675 skb_queue_purge(&dev->rxq_pause);
2676
2677 /* deferred work (task, timer, softirq) must also stop.
2678 * can't flush_scheduled_work() until we drop rtnl (later),
2679 * else workers could deadlock; so make workers a NOP.
2680 */
2681 dev->flags = 0;
2682 cancel_delayed_work_sync(&dev->wq);
2683 tasklet_kill(&dev->bh);
2684
2685 usb_autopm_put_interface(dev->intf);
2686
2687 return 0;
2688 }
2689
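/* Prepend the two 32-bit TX command words (frame length, checksum and
 * LSO flags, VLAN tag) that the hardware expects in front of every
 * transmitted frame.
 */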
2690 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2691 struct sk_buff *skb, gfp_t flags)
2692 {
2693 u32 tx_cmd_a, tx_cmd_b;
2694 void *ptr;
2695
2696 if (skb_cow_head(skb, TX_OVERHEAD)) {
2697 dev_kfree_skb_any(skb);
2698 return NULL;
2699 }
2700
2701 if (skb_linearize(skb)) {
2702 dev_kfree_skb_any(skb);
2703 return NULL;
2704 }
2705
2706 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2707
2708 if (skb->ip_summed == CHECKSUM_PARTIAL)
2709 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2710
2711 tx_cmd_b = 0;
2712 if (skb_is_gso(skb)) {
2713 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2714
2715 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2716
2717 tx_cmd_a |= TX_CMD_A_LSO_;
2718 }
2719
2720 if (skb_vlan_tag_present(skb)) {
2721 tx_cmd_a |= TX_CMD_A_IVTG_;
2722 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2723 }
2724
2725 ptr = skb_push(skb, 8);
2726 put_unaligned_le32(tx_cmd_a, ptr);
2727 put_unaligned_le32(tx_cmd_b, ptr + 4);
2728
2729 return skb;
2730 }
2731
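/* Move a completed skb from its active queue to the done list and
 * schedule the bottom half to process it; returns the skb's previous
 * state so callers can detect an unlink in progress.
 */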
2732 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2733 struct sk_buff_head *list, enum skb_state state)
2734 {
2735 unsigned long flags;
2736 enum skb_state old_state;
2737 struct skb_data *entry = (struct skb_data *)skb->cb;
2738
2739 spin_lock_irqsave(&list->lock, flags);
2740 old_state = entry->state;
2741 entry->state = state;
2742
2743 __skb_unlink(skb, list);
2744 spin_unlock(&list->lock);
2745 spin_lock(&dev->done.lock);
2746
2747 __skb_queue_tail(&dev->done, skb);
2748 if (skb_queue_len(&dev->done) == 1)
2749 tasklet_schedule(&dev->bh);
2750 spin_unlock_irqrestore(&dev->done.lock, flags);
2751
2752 return old_state;
2753 }
2754
2755 static void tx_complete(struct urb *urb)
2756 {
2757 struct sk_buff *skb = (struct sk_buff *)urb->context;
2758 struct skb_data *entry = (struct skb_data *)skb->cb;
2759 struct lan78xx_net *dev = entry->dev;
2760
2761 if (urb->status == 0) {
2762 dev->net->stats.tx_packets += entry->num_of_packet;
2763 dev->net->stats.tx_bytes += entry->length;
2764 } else {
2765 dev->net->stats.tx_errors++;
2766
2767 switch (urb->status) {
2768 case -EPIPE:
2769 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2770 break;
2771
2772 /* software-driven interface shutdown */
2773 case -ECONNRESET:
2774 case -ESHUTDOWN:
2775 break;
2776
2777 case -EPROTO:
2778 case -ETIME:
2779 case -EILSEQ:
2780 netif_stop_queue(dev->net);
2781 break;
2782 default:
2783 netif_dbg(dev, tx_err, dev->net,
2784 "tx err %d\n", entry->urb->status);
2785 break;
2786 }
2787 }
2788
2789 usb_autopm_put_interface_async(dev->intf);
2790
2791 defer_bh(dev, skb, &dev->txq, tx_done);
2792 }
2793
2794 static void lan78xx_queue_skb(struct sk_buff_head *list,
2795 struct sk_buff *newsk, enum skb_state state)
2796 {
2797 struct skb_data *entry = (struct skb_data *)newsk->cb;
2798
2799 __skb_queue_tail(list, newsk);
2800 entry->state = state;
2801 }
2802
2803 static netdev_tx_t
2804 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2805 {
2806 struct lan78xx_net *dev = netdev_priv(net);
2807 struct sk_buff *skb2 = NULL;
2808
2809 if (skb) {
2810 skb_tx_timestamp(skb);
2811 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2812 }
2813
2814 if (skb2) {
2815 skb_queue_tail(&dev->txq_pend, skb2);
2816
2817 /* throttle the TX path when slower than SuperSpeed USB */
2818 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2819 (skb_queue_len(&dev->txq_pend) > 10))
2820 netif_stop_queue(net);
2821 } else {
2822 netif_dbg(dev, tx_err, dev->net,
2823 "lan78xx_tx_prep return NULL\n");
2824 dev->net->stats.tx_errors++;
2825 dev->net->stats.tx_dropped++;
2826 }
2827
2828 tasklet_schedule(&dev->bh);
2829
2830 return NETDEV_TX_OK;
2831 }
2832
2833 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2834 {
2835 struct lan78xx_priv *pdata = NULL;
2836 int ret;
2837 int i;
2838
2839 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2840
2841 pdata = (struct lan78xx_priv *)(dev->data[0]);
2842 if (!pdata) {
2843 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2844 return -ENOMEM;
2845 }
2846
2847 pdata->dev = dev;
2848
2849 spin_lock_init(&pdata->rfe_ctl_lock);
2850 mutex_init(&pdata->dataport_mutex);
2851
2852 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2853
2854 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2855 pdata->vlan_table[i] = 0;
2856
2857 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2858
2859 dev->net->features = 0;
2860
2861 if (DEFAULT_TX_CSUM_ENABLE)
2862 dev->net->features |= NETIF_F_HW_CSUM;
2863
2864 if (DEFAULT_RX_CSUM_ENABLE)
2865 dev->net->features |= NETIF_F_RXCSUM;
2866
2867 if (DEFAULT_TSO_CSUM_ENABLE)
2868 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2869
2870 if (DEFAULT_VLAN_RX_OFFLOAD)
2871 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2872
2873 if (DEFAULT_VLAN_FILTER_ENABLE)
2874 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2875
2876 dev->net->hw_features = dev->net->features;
2877
2878 ret = lan78xx_setup_irq_domain(dev);
2879 if (ret < 0) {
2880 netdev_warn(dev->net,
2881 "lan78xx_setup_irq_domain() failed : %d", ret);
2882 goto out1;
2883 }
2884
2885 dev->net->hard_header_len += TX_OVERHEAD;
2886 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2887
2888 /* Init all registers */
2889 ret = lan78xx_reset(dev);
2890 if (ret) {
2891 netdev_warn(dev->net, "Registers INIT FAILED....");
2892 goto out2;
2893 }
2894
2895 ret = lan78xx_mdio_init(dev);
2896 if (ret) {
2897 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2898 goto out2;
2899 }
2900
2901 dev->net->flags |= IFF_MULTICAST;
2902
2903 pdata->wol = WAKE_MAGIC;
2904
2905 return ret;
2906
2907 out2:
2908 lan78xx_remove_irq_domain(dev);
2909
2910 out1:
2911 netdev_warn(dev->net, "Bind routine FAILED");
2912 cancel_work_sync(&pdata->set_multicast);
2913 cancel_work_sync(&pdata->set_vlan);
2914 kfree(pdata);
2915 return ret;
2916 }
2917
2918 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2919 {
2920 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2921
2922 lan78xx_remove_irq_domain(dev);
2923
2924 lan78xx_remove_mdio(dev);
2925
2926 if (pdata) {
2927 cancel_work_sync(&pdata->set_multicast);
2928 cancel_work_sync(&pdata->set_vlan);
2929 netif_dbg(dev, ifdown, dev->net, "free pdata");
2930 kfree(pdata);
2931 pdata = NULL;
2932 dev->data[0] = 0;
2933 }
2934 }
2935
2936 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2937 struct sk_buff *skb,
2938 u32 rx_cmd_a, u32 rx_cmd_b)
2939 {
2940 /* HW Checksum offload appears to be flawed if used when not stripping
2941 * VLAN headers. Drop back to S/W checksums under these conditions.
2942 */
2943 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2944 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
2945 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
2946 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
2947 skb->ip_summed = CHECKSUM_NONE;
2948 } else {
2949 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2950 skb->ip_summed = CHECKSUM_COMPLETE;
2951 }
2952 }
2953
2954 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
2955 struct sk_buff *skb,
2956 u32 rx_cmd_a, u32 rx_cmd_b)
2957 {
2958 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2959 (rx_cmd_a & RX_CMD_A_FVTG_))
2960 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2961 (rx_cmd_b & 0xffff));
2962 }
2963
2964 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2965 {
2966 int status;
2967
2968 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2969 skb_queue_tail(&dev->rxq_pause, skb);
2970 return;
2971 }
2972
2973 dev->net->stats.rx_packets++;
2974 dev->net->stats.rx_bytes += skb->len;
2975
2976 skb->protocol = eth_type_trans(skb, dev->net);
2977
2978 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2979 skb->len + sizeof(struct ethhdr), skb->protocol);
2980 memset(skb->cb, 0, sizeof(struct skb_data));
2981
2982 if (skb_defer_rx_timestamp(skb))
2983 return;
2984
2985 status = netif_rx(skb);
2986 if (status != NET_RX_SUCCESS)
2987 netif_dbg(dev, rx_err, dev->net,
2988 "netif_rx status %d\n", status);
2989 }
2990
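/* Parse a bulk-in buffer that may hold several frames, each preceded
 * by three RX command words and padded to a 4-byte boundary; clone out
 * all but the last frame, which is returned in the original skb.
 */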
2991 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2992 {
2993 if (skb->len < dev->net->hard_header_len)
2994 return 0;
2995
2996 while (skb->len > 0) {
2997 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2998 u16 rx_cmd_c;
2999 struct sk_buff *skb2;
3000 unsigned char *packet;
3001
3002 rx_cmd_a = get_unaligned_le32(skb->data);
3003 skb_pull(skb, sizeof(rx_cmd_a));
3004
3005 rx_cmd_b = get_unaligned_le32(skb->data);
3006 skb_pull(skb, sizeof(rx_cmd_b));
3007
3008 rx_cmd_c = get_unaligned_le16(skb->data);
3009 skb_pull(skb, sizeof(rx_cmd_c));
3010
3011 packet = skb->data;
3012
3013 /* get the packet length */
3014 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
3015 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
3016
3017 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
3018 netif_dbg(dev, rx_err, dev->net,
3019 "Error rx_cmd_a=0x%08x", rx_cmd_a);
3020 } else {
3021 /* last frame in this batch */
3022 if (skb->len == size) {
3023 lan78xx_rx_csum_offload(dev, skb,
3024 rx_cmd_a, rx_cmd_b);
3025 lan78xx_rx_vlan_offload(dev, skb,
3026 rx_cmd_a, rx_cmd_b);
3027
3028 skb_trim(skb, skb->len - 4); /* remove fcs */
3029 skb->truesize = size + sizeof(struct sk_buff);
3030
3031 return 1;
3032 }
3033
3034 skb2 = skb_clone(skb, GFP_ATOMIC);
3035 if (unlikely(!skb2)) {
3036 netdev_warn(dev->net, "Error allocating skb");
3037 return 0;
3038 }
3039
3040 skb2->len = size;
3041 skb2->data = packet;
3042 skb_set_tail_pointer(skb2, size);
3043
3044 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3045 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3046
3047 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3048 skb2->truesize = size + sizeof(struct sk_buff);
3049
3050 lan78xx_skb_return(dev, skb2);
3051 }
3052
3053 skb_pull(skb, size);
3054
3055 /* padding bytes before the next frame starts */
3056 if (skb->len)
3057 skb_pull(skb, align_count);
3058 }
3059
3060 return 1;
3061 }
3062
3063 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3064 {
3065 if (!lan78xx_rx(dev, skb)) {
3066 dev->net->stats.rx_errors++;
3067 goto done;
3068 }
3069
3070 if (skb->len) {
3071 lan78xx_skb_return(dev, skb);
3072 return;
3073 }
3074
3075 netif_dbg(dev, rx_err, dev->net, "drop\n");
3076 dev->net->stats.rx_errors++;
3077 done:
3078 skb_queue_tail(&dev->done, skb);
3079 }
3080
3081 static void rx_complete(struct urb *urb);
3082
3083 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3084 {
3085 struct sk_buff *skb;
3086 struct skb_data *entry;
3087 unsigned long lockflags;
3088 size_t size = dev->rx_urb_size;
3089 int ret = 0;
3090
3091 skb = netdev_alloc_skb_ip_align(dev->net, size);
3092 if (!skb) {
3093 usb_free_urb(urb);
3094 return -ENOMEM;
3095 }
3096
3097 entry = (struct skb_data *)skb->cb;
3098 entry->urb = urb;
3099 entry->dev = dev;
3100 entry->length = 0;
3101
3102 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3103 skb->data, size, rx_complete, skb);
3104
3105 spin_lock_irqsave(&dev->rxq.lock, lockflags);
3106
3107 if (netif_device_present(dev->net) &&
3108 netif_running(dev->net) &&
3109 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3110 !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3111 ret = usb_submit_urb(urb, GFP_ATOMIC);
3112 switch (ret) {
3113 case 0:
3114 lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3115 break;
3116 case -EPIPE:
3117 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3118 break;
3119 case -ENODEV:
3120 netif_dbg(dev, ifdown, dev->net, "device gone\n");
3121 netif_device_detach(dev->net);
3122 break;
3123 case -EHOSTUNREACH:
3124 ret = -ENOLINK;
3125 break;
3126 default:
3127 netif_dbg(dev, rx_err, dev->net,
3128 "rx submit, %d\n", ret);
3129 tasklet_schedule(&dev->bh);
3130 }
3131 } else {
3132 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3133 ret = -ENOLINK;
3134 }
3135 spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3136 if (ret) {
3137 dev_kfree_skb_any(skb);
3138 usb_free_urb(urb);
3139 }
3140 return ret;
3141 }
3142
3143 static void rx_complete(struct urb *urb)
3144 {
3145 struct sk_buff *skb = (struct sk_buff *)urb->context;
3146 struct skb_data *entry = (struct skb_data *)skb->cb;
3147 struct lan78xx_net *dev = entry->dev;
3148 int urb_status = urb->status;
3149 enum skb_state state;
3150
3151 skb_put(skb, urb->actual_length);
3152 state = rx_done;
3153 entry->urb = NULL;
3154
3155 switch (urb_status) {
3156 case 0:
3157 if (skb->len < dev->net->hard_header_len) {
3158 state = rx_cleanup;
3159 dev->net->stats.rx_errors++;
3160 dev->net->stats.rx_length_errors++;
3161 netif_dbg(dev, rx_err, dev->net,
3162 "rx length %d\n", skb->len);
3163 }
3164 usb_mark_last_busy(dev->udev);
3165 break;
3166 case -EPIPE:
3167 dev->net->stats.rx_errors++;
3168 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3169 fallthrough;
3170 case -ECONNRESET: /* async unlink */
3171 case -ESHUTDOWN: /* hardware gone */
3172 netif_dbg(dev, ifdown, dev->net,
3173 "rx shutdown, code %d\n", urb_status);
3174 state = rx_cleanup;
3175 entry->urb = urb;
3176 urb = NULL;
3177 break;
3178 case -EPROTO:
3179 case -ETIME:
3180 case -EILSEQ:
3181 dev->net->stats.rx_errors++;
3182 state = rx_cleanup;
3183 entry->urb = urb;
3184 urb = NULL;
3185 break;
3186
3187 /* data overrun ... flush fifo? */
3188 case -EOVERFLOW:
3189 dev->net->stats.rx_over_errors++;
3190 fallthrough;
3191
3192 default:
3193 state = rx_cleanup;
3194 dev->net->stats.rx_errors++;
3195 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3196 break;
3197 }
3198
3199 state = defer_bh(dev, skb, &dev->rxq, state);
3200
3201 if (urb) {
3202 if (netif_running(dev->net) &&
3203 !test_bit(EVENT_RX_HALT, &dev->flags) &&
3204 state != unlink_start) {
3205 rx_submit(dev, urb, GFP_ATOMIC);
3206 return;
3207 }
3208 usb_free_urb(urb);
3209 }
3210 netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3211 }
3212
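/* TX bottom half: aggregate pending frames into a single bulk-out URB
 * (up to MAX_SINGLE_PACKET_SIZE; GSO frames go out on their own) and
 * submit it, deferring the transfer if the device is suspended.
 */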
3213 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3214 {
3215 int length;
3216 struct urb *urb = NULL;
3217 struct skb_data *entry;
3218 unsigned long flags;
3219 struct sk_buff_head *tqp = &dev->txq_pend;
3220 struct sk_buff *skb, *skb2;
3221 int ret;
3222 int count, pos;
3223 int skb_totallen, pkt_cnt;
3224
3225 skb_totallen = 0;
3226 pkt_cnt = 0;
3227 count = 0;
3228 length = 0;
3229 spin_lock_irqsave(&tqp->lock, flags);
3230 skb_queue_walk(tqp, skb) {
3231 if (skb_is_gso(skb)) {
3232 if (!skb_queue_is_first(tqp, skb)) {
3233 /* handle previous packets first */
3234 break;
3235 }
3236 count = 1;
3237 length = skb->len - TX_OVERHEAD;
3238 __skb_unlink(skb, tqp);
3239 spin_unlock_irqrestore(&tqp->lock, flags);
3240 goto gso_skb;
3241 }
3242
3243 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3244 break;
3245 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3246 pkt_cnt++;
3247 }
3248 spin_unlock_irqrestore(&tqp->lock, flags);
3249
3250 /* copy to a single skb */
3251 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3252 if (!skb)
3253 goto drop;
3254
3255 skb_put(skb, skb_totallen);
3256
3257 for (count = pos = 0; count < pkt_cnt; count++) {
3258 skb2 = skb_dequeue(tqp);
3259 if (skb2) {
3260 length += (skb2->len - TX_OVERHEAD);
3261 memcpy(skb->data + pos, skb2->data, skb2->len);
3262 pos += roundup(skb2->len, sizeof(u32));
3263 dev_kfree_skb(skb2);
3264 }
3265 }
3266
3267 gso_skb:
3268 urb = usb_alloc_urb(0, GFP_ATOMIC);
3269 if (!urb)
3270 goto drop;
3271
3272 entry = (struct skb_data *)skb->cb;
3273 entry->urb = urb;
3274 entry->dev = dev;
3275 entry->length = length;
3276 entry->num_of_packet = count;
3277
3278 spin_lock_irqsave(&dev->txq.lock, flags);
3279 ret = usb_autopm_get_interface_async(dev->intf);
3280 if (ret < 0) {
3281 spin_unlock_irqrestore(&dev->txq.lock, flags);
3282 goto drop;
3283 }
3284
3285 usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
3286 skb->data, skb->len, tx_complete, skb);
3287
3288 if (length % dev->maxpacket == 0) {
3289 /* send USB_ZERO_PACKET */
3290 urb->transfer_flags |= URB_ZERO_PACKET;
3291 }
3292
3293 #ifdef CONFIG_PM
3294 /* if this triggers, the device is still asleep */
3295 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3296 /* transmission will be done in resume */
3297 usb_anchor_urb(urb, &dev->deferred);
3298 /* no use to process more packets */
3299 netif_stop_queue(dev->net);
3300 usb_put_urb(urb);
3301 spin_unlock_irqrestore(&dev->txq.lock, flags);
3302 netdev_dbg(dev->net, "Delaying transmission for resumption\n");
3303 return;
3304 }
3305 #endif
3306
3307 ret = usb_submit_urb(urb, GFP_ATOMIC);
3308 switch (ret) {
3309 case 0:
3310 netif_trans_update(dev->net);
3311 lan78xx_queue_skb(&dev->txq, skb, tx_start);
3312 if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
3313 netif_stop_queue(dev->net);
3314 break;
3315 case -EPIPE:
3316 netif_stop_queue(dev->net);
3317 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3318 usb_autopm_put_interface_async(dev->intf);
3319 break;
3320 default:
3321 usb_autopm_put_interface_async(dev->intf);
3322 netif_dbg(dev, tx_err, dev->net,
3323 "tx: submit urb err %d\n", ret);
3324 break;
3325 }
3326
3327 spin_unlock_irqrestore(&dev->txq.lock, flags);
3328
3329 if (ret) {
3330 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
3331 drop:
3332 dev->net->stats.tx_dropped++;
3333 if (skb)
3334 dev_kfree_skb_any(skb);
3335 usb_free_urb(urb);
3336 } else
3337 netif_dbg(dev, tx_queued, dev->net,
3338 "> tx, len %d, type 0x%x\n", length, skb->protocol);
3339 }
3340
3341 static void lan78xx_rx_bh(struct lan78xx_net *dev)
3342 {
3343 struct urb *urb;
3344 int i;
3345
3346 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3347 for (i = 0; i < 10; i++) {
3348 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3349 break;
3350 urb = usb_alloc_urb(0, GFP_ATOMIC);
3351 if (urb)
3352 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3353 return;
3354 }
3355
3356 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3357 tasklet_schedule(&dev->bh);
3358 }
3359 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3360 netif_wake_queue(dev->net);
3361 }
3362
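/* Tasklet servicing the done list: hand received frames to the stack,
 * free completed URBs, then kick pending TX and refill the RX queue.
 */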
3363 static void lan78xx_bh(unsigned long param)
3364 {
3365 struct lan78xx_net *dev = (struct lan78xx_net *)param;
3366 struct sk_buff *skb;
3367 struct skb_data *entry;
3368
3369 while ((skb = skb_dequeue(&dev->done))) {
3370 entry = (struct skb_data *)(skb->cb);
3371 switch (entry->state) {
3372 case rx_done:
3373 entry->state = rx_cleanup;
3374 rx_process(dev, skb);
3375 continue;
3376 case tx_done:
3377 usb_free_urb(entry->urb);
3378 dev_kfree_skb(skb);
3379 continue;
3380 case rx_cleanup:
3381 usb_free_urb(entry->urb);
3382 dev_kfree_skb(skb);
3383 continue;
3384 default:
3385 netdev_dbg(dev->net, "skb state %d\n", entry->state);
3386 return;
3387 }
3388 }
3389
3390 if (netif_device_present(dev->net) && netif_running(dev->net)) {
3391 /* reset update timer delta */
3392 if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
3393 dev->delta = 1;
3394 mod_timer(&dev->stat_monitor,
3395 jiffies + STAT_UPDATE_TIMER);
3396 }
3397
3398 if (!skb_queue_empty(&dev->txq_pend))
3399 lan78xx_tx_bh(dev);
3400
3401 if (!timer_pending(&dev->delay) &&
3402 !test_bit(EVENT_RX_HALT, &dev->flags))
3403 lan78xx_rx_bh(dev);
3404 }
3405 }
3406
3407 static void lan78xx_delayedwork(struct work_struct *work)
3408 {
3409 int status;
3410 struct lan78xx_net *dev;
3411
3412 dev = container_of(work, struct lan78xx_net, wq.work);
3413
3414 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3415 unlink_urbs(dev, &dev->txq);
3416 status = usb_autopm_get_interface(dev->intf);
3417 if (status < 0)
3418 goto fail_pipe;
3419 status = usb_clear_halt(dev->udev, dev->pipe_out);
3420 usb_autopm_put_interface(dev->intf);
3421 if (status < 0 &&
3422 status != -EPIPE &&
3423 status != -ESHUTDOWN) {
3424 if (netif_msg_tx_err(dev))
3425 fail_pipe:
3426 netdev_err(dev->net,
3427 "can't clear tx halt, status %d\n",
3428 status);
3429 } else {
3430 clear_bit(EVENT_TX_HALT, &dev->flags);
3431 if (status != -ESHUTDOWN)
3432 netif_wake_queue(dev->net);
3433 }
3434 }
3435 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3436 unlink_urbs(dev, &dev->rxq);
3437 status = usb_autopm_get_interface(dev->intf);
3438 if (status < 0)
3439 goto fail_halt;
3440 status = usb_clear_halt(dev->udev, dev->pipe_in);
3441 usb_autopm_put_interface(dev->intf);
3442 if (status < 0 &&
3443 status != -EPIPE &&
3444 status != -ESHUTDOWN) {
3445 if (netif_msg_rx_err(dev))
3446 fail_halt:
3447 netdev_err(dev->net,
3448 "can't clear rx halt, status %d\n",
3449 status);
3450 } else {
3451 clear_bit(EVENT_RX_HALT, &dev->flags);
3452 tasklet_schedule(&dev->bh);
3453 }
3454 }
3455
3456 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3457 int ret = 0;
3458
3459 clear_bit(EVENT_LINK_RESET, &dev->flags);
3460 status = usb_autopm_get_interface(dev->intf);
3461 if (status < 0)
3462 goto skip_reset;
3463 if (lan78xx_link_reset(dev) < 0) {
3464 usb_autopm_put_interface(dev->intf);
3465 skip_reset:
3466 netdev_info(dev->net, "link reset failed (%d)\n",
3467 ret);
3468 } else {
3469 usb_autopm_put_interface(dev->intf);
3470 }
3471 }
3472
3473 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3474 lan78xx_update_stats(dev);
3475
3476 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3477
3478 mod_timer(&dev->stat_monitor,
3479 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3480
3481 dev->delta = min((dev->delta * 2), 50);
3482 }
3483 }
3484
3485 static void intr_complete(struct urb *urb)
3486 {
3487 struct lan78xx_net *dev = urb->context;
3488 int status = urb->status;
3489
3490 switch (status) {
3491 /* success */
3492 case 0:
3493 lan78xx_status(dev, urb);
3494 break;
3495
3496 /* software-driven interface shutdown */
3497 case -ENOENT: /* urb killed */
3498 case -ESHUTDOWN: /* hardware gone */
3499 netif_dbg(dev, ifdown, dev->net,
3500 "intr shutdown, code %d\n", status);
3501 return;
3502
3503 /* NOTE: not throttling like RX/TX, since this endpoint
3504 * already polls infrequently
3505 */
3506 default:
3507 netdev_dbg(dev->net, "intr status %d\n", status);
3508 break;
3509 }
3510
3511 if (!netif_running(dev->net))
3512 return;
3513
3514 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3515 status = usb_submit_urb(urb, GFP_ATOMIC);
3516 if (status != 0)
3517 netif_err(dev, timer, dev->net,
3518 "intr resubmit --> %d\n", status);
3519 }
3520
3521 static void lan78xx_disconnect(struct usb_interface *intf)
3522 {
3523 struct lan78xx_net *dev;
3524 struct usb_device *udev;
3525 struct net_device *net;
3526 struct phy_device *phydev;
3527
3528 dev = usb_get_intfdata(intf);
3529 usb_set_intfdata(intf, NULL);
3530 if (!dev)
3531 return;
3532
3533 udev = interface_to_usbdev(intf);
3534 net = dev->net;
3535 phydev = net->phydev;
3536
3537 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
3538 phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
3539
3540 phy_disconnect(net->phydev);
3541
3542 if (phy_is_pseudo_fixed_link(phydev))
3543 fixed_phy_unregister(phydev);
3544
3545 unregister_netdev(net);
3546
3547 cancel_delayed_work_sync(&dev->wq);
3548
3549 usb_scuttle_anchored_urbs(&dev->deferred);
3550
3551 lan78xx_unbind(dev, intf);
3552
3553 usb_kill_urb(dev->urb_intr);
3554 usb_free_urb(dev->urb_intr);
3555
3556 free_netdev(net);
3557 usb_put_dev(udev);
3558 }
3559
3560 static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
3561 {
3562 struct lan78xx_net *dev = netdev_priv(net);
3563
3564 unlink_urbs(dev, &dev->txq);
3565 tasklet_schedule(&dev->bh);
3566 }
3567
3568 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
3569 struct net_device *netdev,
3570 netdev_features_t features)
3571 {
3572 if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
3573 features &= ~NETIF_F_GSO_MASK;
3574
3575 features = vlan_features_check(skb, features);
3576 features = vxlan_features_check(skb, features);
3577
3578 return features;
3579 }
3580
3581 static const struct net_device_ops lan78xx_netdev_ops = {
3582 .ndo_open = lan78xx_open,
3583 .ndo_stop = lan78xx_stop,
3584 .ndo_start_xmit = lan78xx_start_xmit,
3585 .ndo_tx_timeout = lan78xx_tx_timeout,
3586 .ndo_change_mtu = lan78xx_change_mtu,
3587 .ndo_set_mac_address = lan78xx_set_mac_addr,
3588 .ndo_validate_addr = eth_validate_addr,
3589 .ndo_do_ioctl = phy_do_ioctl_running,
3590 .ndo_set_rx_mode = lan78xx_set_multicast,
3591 .ndo_set_features = lan78xx_set_features,
3592 .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
3593 .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
3594 .ndo_features_check = lan78xx_features_check,
3595 };
3596
3597 static void lan78xx_stat_monitor(struct timer_list *t)
3598 {
3599 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3600
3601 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3602 }
3603
3604 static int lan78xx_probe(struct usb_interface *intf,
3605 const struct usb_device_id *id)
3606 {
3607 struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
3608 struct lan78xx_net *dev;
3609 struct net_device *netdev;
3610 struct usb_device *udev;
3611 int ret;
3612 unsigned maxp;
3613 unsigned period;
3614 u8 *buf = NULL;
3615
3616 udev = interface_to_usbdev(intf);
3617 udev = usb_get_dev(udev);
3618
3619 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3620 if (!netdev) {
3621 dev_err(&intf->dev, "Error: OOM\n");
3622 ret = -ENOMEM;
3623 goto out1;
3624 }
3625
3626 /* netdev_printk() needs this */
3627 SET_NETDEV_DEV(netdev, &intf->dev);
3628
3629 dev = netdev_priv(netdev);
3630 dev->udev = udev;
3631 dev->intf = intf;
3632 dev->net = netdev;
3633 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3634 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3635
3636 skb_queue_head_init(&dev->rxq);
3637 skb_queue_head_init(&dev->txq);
3638 skb_queue_head_init(&dev->done);
3639 skb_queue_head_init(&dev->rxq_pause);
3640 skb_queue_head_init(&dev->txq_pend);
3641 mutex_init(&dev->phy_mutex);
3642
3643 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3644 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3645 init_usb_anchor(&dev->deferred);
3646
3647 netdev->netdev_ops = &lan78xx_netdev_ops;
3648 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3649 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3650
3651 dev->delta = 1;
3652 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3653
3654 mutex_init(&dev->stats.access_lock);
3655
3656 if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
3657 ret = -ENODEV;
3658 goto out2;
3659 }
3660
3661 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3662 ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
3663 if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
3664 ret = -ENODEV;
3665 goto out2;
3666 }
3667
3668 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3669 ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
3670 if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
3671 ret = -ENODEV;
3672 goto out2;
3673 }
3674
3675 ep_intr = &intf->cur_altsetting->endpoint[2];
3676 if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
3677 ret = -ENODEV;
3678 goto out2;
3679 }
3680
3681 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3682 usb_endpoint_num(&ep_intr->desc));
3683
3684 ret = lan78xx_bind(dev, intf);
3685 if (ret < 0)
3686 goto out2;
3687
3688 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3689 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3690
3691 /* MTU range: 68 - 9000 */
3692 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3693 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
3694
3695 period = ep_intr->desc.bInterval;
3696 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3697 buf = kmalloc(maxp, GFP_KERNEL);
3698 if (buf) {
3699 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3700 if (!dev->urb_intr) {
3701 ret = -ENOMEM;
3702 kfree(buf);
3703 goto out3;
3704 } else {
3705 usb_fill_int_urb(dev->urb_intr, dev->udev,
3706 dev->pipe_intr, buf, maxp,
3707 intr_complete, dev, period);
3708 dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
3709 }
3710 }
3711
3712 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3713
3714 /* Reject broken descriptors. */
3715 if (dev->maxpacket == 0) {
3716 ret = -ENODEV;
3717 goto out4;
3718 }
3719
3720 /* driver requires remote-wakeup capability during autosuspend. */
3721 intf->needs_remote_wakeup = 1;
3722
3723 ret = lan78xx_phy_init(dev);
3724 if (ret < 0)
3725 goto out4;
3726
3727 ret = register_netdev(netdev);
3728 if (ret != 0) {
3729 netif_err(dev, probe, netdev, "couldn't register the device\n");
3730 goto out5;
3731 }
3732
3733 usb_set_intfdata(intf, dev);
3734
3735 ret = device_set_wakeup_enable(&udev->dev, true);
3736
3737 /* Default delay of 2sec has more overhead than advantage.
3738 * Set to 10sec as default.
3739 */
3740 pm_runtime_set_autosuspend_delay(&udev->dev,
3741 DEFAULT_AUTOSUSPEND_DELAY);
3742
3743 return 0;
3744
3745 out5:
3746 phy_disconnect(netdev->phydev);
3747 out4:
3748 usb_free_urb(dev->urb_intr);
3749 out3:
3750 lan78xx_unbind(dev, intf);
3751 out2:
3752 free_netdev(netdev);
3753 out1:
3754 usb_put_dev(udev);
3755
3756 return ret;
3757 }
3758
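/* Compute the CRC-16 (polynomial 0x8005) over a wakeup-frame pattern;
 * the result is programmed into the WUF_CFGx registers for
 * Wake-on-LAN frame matching.
 */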
3759 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3760 {
3761 const u16 crc16poly = 0x8005;
3762 int i;
3763 u16 bit, crc, msb;
3764 u8 data;
3765
3766 crc = 0xFFFF;
3767 for (i = 0; i < len; i++) {
3768 data = *buf++;
3769 for (bit = 0; bit < 8; bit++) {
3770 msb = crc >> 15;
3771 crc <<= 1;
3772
3773 if (msb ^ (u16)(data & 1)) {
3774 crc ^= crc16poly;
3775 crc |= (u16)0x0001U;
3776 }
3777 data >>= 1;
3778 }
3779 }
3780
3781 return crc;
3782 }
3783
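/* Configure Wake-on-LAN before suspend: stop the MAC, clear the wakeup
 * status, then program the WUCSR/WUF filters and PMT_CTL suspend mode
 * for each requested wake source (PHY, magic, broadcast, multicast,
 * unicast, ARP).
 */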
3784 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
3785 {
3786 u32 buf;
3787 int mask_index;
3788 u16 crc;
3789 u32 temp_wucsr;
3790 u32 temp_pmt_ctl;
3791 const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
3792 const u8 ipv6_multicast[3] = { 0x33, 0x33 };
3793 const u8 arp_type[2] = { 0x08, 0x06 };
3794
3795 lan78xx_read_reg(dev, MAC_TX, &buf);
3796 buf &= ~MAC_TX_TXEN_;
3797 lan78xx_write_reg(dev, MAC_TX, buf);
3798 lan78xx_read_reg(dev, MAC_RX, &buf);
3799 buf &= ~MAC_RX_RXEN_;
3800 lan78xx_write_reg(dev, MAC_RX, buf);
3801
3802 lan78xx_write_reg(dev, WUCSR, 0);
3803 lan78xx_write_reg(dev, WUCSR2, 0);
3804 lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
3805
3806 temp_wucsr = 0;
3807
3808 temp_pmt_ctl = 0;
3809 lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
3810 temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
3811 temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
3812
3813 for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
3814 lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
3815
        mask_index = 0;
        if (wol & WAKE_PHY) {
                temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_MAGIC) {
                temp_wucsr |= WUCSR_MPEN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
        }
        if (wol & WAKE_BCAST) {
                temp_wucsr |= WUCSR_BCST_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_MCAST) {
                temp_wucsr |= WUCSR_WAKE_EN_;

                /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
                crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
                lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                  WUF_CFGX_EN_ |
                                  WUF_CFGX_TYPE_MCAST_ |
                                  (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                  (crc & WUF_CFGX_CRC16_MASK_));

                lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
                lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                /* for IPv6 Multicast */
                crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
                lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                  WUF_CFGX_EN_ |
                                  WUF_CFGX_TYPE_MCAST_ |
                                  (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                  (crc & WUF_CFGX_CRC16_MASK_));

                lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
                lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_UCAST) {
                temp_wucsr |= WUCSR_PFDA_EN_;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        if (wol & WAKE_ARP) {
                temp_wucsr |= WUCSR_WAKE_EN_;

                /* set WUF_CFG & WUF_MASK
                 * for packet type (offset 12,13) = ARP (0x0806)
                 */
                crc = lan78xx_wakeframe_crc16(arp_type, 2);
                lan78xx_write_reg(dev, WUF_CFG(mask_index),
                                  WUF_CFGX_EN_ |
                                  WUF_CFGX_TYPE_ALL_ |
                                  (0 << WUF_CFGX_OFFSET_SHIFT_) |
                                  (crc & WUF_CFGX_CRC16_MASK_));

                lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
                lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
                lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
                lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
                mask_index++;

                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }

        lan78xx_write_reg(dev, WUCSR, temp_wucsr);

        /* when multiple WOL bits are set */
        if (hweight_long((unsigned long)wol) > 1) {
                temp_pmt_ctl |= PMT_CTL_WOL_EN_;
                temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
                temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
        }
        lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

        /* clear WUPS */
        lan78xx_read_reg(dev, PMT_CTL, &buf);
        buf |= PMT_CTL_WUPS_MASK_;
        lan78xx_write_reg(dev, PMT_CTL, buf);

        lan78xx_read_reg(dev, MAC_RX, &buf);
        buf |= MAC_RX_RXEN_;
        lan78xx_write_reg(dev, MAC_RX, buf);

        return 0;
}

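/* USB PM suspend callback, used for both runtime suspend (PMSG_IS_AUTO)
 * and system suspend.  On first entry it stops the MAC, detaches the
 * netdev and kills all in-flight URBs; it then arms the appropriate wake
 * sources for the suspend type.  Runtime suspend is refused with -EBUSY
 * while TX work is still pending.
 */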
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
        struct lan78xx_net *dev = usb_get_intfdata(intf);
        struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
        u32 buf;
        int ret;

        if (!dev->suspend_count++) {
                spin_lock_irq(&dev->txq.lock);
                /* don't autosuspend while transmitting */
                if ((skb_queue_len(&dev->txq) ||
                     skb_queue_len(&dev->txq_pend)) &&
                    PMSG_IS_AUTO(message)) {
                        spin_unlock_irq(&dev->txq.lock);
                        ret = -EBUSY;
                        goto out;
                } else {
                        set_bit(EVENT_DEV_ASLEEP, &dev->flags);
                        spin_unlock_irq(&dev->txq.lock);
                }

                /* stop TX & RX */
                ret = lan78xx_read_reg(dev, MAC_TX, &buf);
                buf &= ~MAC_TX_TXEN_;
                ret = lan78xx_write_reg(dev, MAC_TX, buf);
                ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                buf &= ~MAC_RX_RXEN_;
                ret = lan78xx_write_reg(dev, MAC_RX, buf);

                /* empty out the Rx and Tx queues */
                netif_device_detach(dev->net);
                lan78xx_terminate_urbs(dev);
                usb_kill_urb(dev->urb_intr);

                /* reattach */
                netif_device_attach(dev->net);
        }

        if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
                del_timer(&dev->stat_monitor);

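                /* Runtime (selective) suspend must still wake on incoming
                 * traffic, so arm "good frame" and PHY-event wake in the
                 * deepest suspend state; a full system suspend instead
                 * honours the user's wake-on-LAN settings via
                 * lan78xx_set_suspend().
                 */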
                if (PMSG_IS_AUTO(message)) {
                        /* auto suspend (selective suspend) */
                        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
                        buf &= ~MAC_TX_TXEN_;
                        ret = lan78xx_write_reg(dev, MAC_TX, buf);
                        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                        buf &= ~MAC_RX_RXEN_;
                        ret = lan78xx_write_reg(dev, MAC_RX, buf);

                        ret = lan78xx_write_reg(dev, WUCSR, 0);
                        ret = lan78xx_write_reg(dev, WUCSR2, 0);
                        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

                        /* set goodframe wakeup */
                        ret = lan78xx_read_reg(dev, WUCSR, &buf);

                        buf |= WUCSR_RFE_WAKE_EN_;
                        buf |= WUCSR_STORE_WAKE_;

                        ret = lan78xx_write_reg(dev, WUCSR, buf);

                        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

                        buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
                        buf |= PMT_CTL_RES_CLR_WKP_STS_;

                        buf |= PMT_CTL_PHY_WAKE_EN_;
                        buf |= PMT_CTL_WOL_EN_;
                        buf &= ~PMT_CTL_SUS_MODE_MASK_;
                        buf |= PMT_CTL_SUS_MODE_3_;

                        ret = lan78xx_write_reg(dev, PMT_CTL, buf);

                        ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

                        buf |= PMT_CTL_WUPS_MASK_;

                        ret = lan78xx_write_reg(dev, PMT_CTL, buf);

                        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
                        buf |= MAC_RX_RXEN_;
                        ret = lan78xx_write_reg(dev, MAC_RX, buf);
                } else {
                        lan78xx_set_suspend(dev, pdata->wol);
                }
        }

        ret = 0;
out:
        return ret;
}

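/* Undo lan78xx_suspend(): restart the interrupt URB, resubmit any TX URBs
 * that were parked on the deferred anchor while asleep, clear the latched
 * wake status and re-enable the transmitter.  Called for both runtime and
 * system resume.
 */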
static int lan78xx_resume(struct usb_interface *intf)
{
        struct lan78xx_net *dev = usb_get_intfdata(intf);
        struct sk_buff *skb;
        struct urb *res;
        int ret;
        u32 buf;

        if (!timer_pending(&dev->stat_monitor)) {
                dev->delta = 1;
                mod_timer(&dev->stat_monitor,
                          jiffies + STAT_UPDATE_TIMER);
        }

        if (!--dev->suspend_count) {
                /* resume interrupt URBs */
                if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
                        usb_submit_urb(dev->urb_intr, GFP_NOIO);

                spin_lock_irq(&dev->txq.lock);
                while ((res = usb_get_from_anchor(&dev->deferred))) {
                        skb = (struct sk_buff *)res->context;
                        ret = usb_submit_urb(res, GFP_ATOMIC);
                        if (ret < 0) {
                                dev_kfree_skb_any(skb);
                                usb_free_urb(res);
                                usb_autopm_put_interface_async(dev->intf);
                        } else {
                                netif_trans_update(dev->net);
                                lan78xx_queue_skb(&dev->txq, skb, tx_start);
                        }
                }

                clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
                spin_unlock_irq(&dev->txq.lock);

                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
                        if (skb_queue_len(&dev->txq) < dev->tx_qlen)
                                netif_start_queue(dev->net);
                        tasklet_schedule(&dev->bh);
                }
        }

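        /* The WUCSR/WUCSR2 status bits written below look like
         * write-one-to-clear flags: acknowledge every wake source that may
         * have fired during suspend so a stale event cannot retrigger.
         */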
        ret = lan78xx_write_reg(dev, WUCSR2, 0);
        ret = lan78xx_write_reg(dev, WUCSR, 0);
        ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

        ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
                                             WUCSR2_ARP_RCD_ |
                                             WUCSR2_IPV6_TCPSYN_RCD_ |
                                             WUCSR2_IPV4_TCPSYN_RCD_);

        ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
                                            WUCSR_EEE_RX_WAKE_ |
                                            WUCSR_PFDA_FR_ |
                                            WUCSR_RFE_WAKE_FR_ |
                                            WUCSR_WUFR_ |
                                            WUCSR_MPR_ |
                                            WUCSR_BCST_FR_);

        ret = lan78xx_read_reg(dev, MAC_TX, &buf);
        buf |= MAC_TX_TXEN_;
        ret = lan78xx_write_reg(dev, MAC_TX, buf);

        return 0;
}

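/* reset_resume is invoked when the device may have lost its register state
 * across suspend (e.g. after a root-hub reset), so re-run the full chip
 * reset and restart the PHY before taking the normal resume path.
 */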
static int lan78xx_reset_resume(struct usb_interface *intf)
{
        struct lan78xx_net *dev = usb_get_intfdata(intf);

        lan78xx_reset(dev);

        phy_start(dev->net->phydev);

        return lan78xx_resume(intf);
}

static const struct usb_device_id products[] = {
        {
                /* LAN7800 USB Gigabit Ethernet Device */
                USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
        },
        {
                /* LAN7850 USB Gigabit Ethernet Device */
                USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
        },
        {
                /* LAN7801 USB Gigabit Ethernet Device */
                USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
        },
        {
                /* ATM2-AF USB Gigabit Ethernet Device */
                USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
        },
        {},
};
MODULE_DEVICE_TABLE(usb, products);

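/* supports_autosuspend requires the suspend callback to honour
 * PMSG_IS_AUTO, which lan78xx_suspend() does by refusing to runtime-
 * suspend while frames are still queued for transmission.
 */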
static struct usb_driver lan78xx_driver = {
        .name                   = DRIVER_NAME,
        .id_table               = products,
        .probe                  = lan78xx_probe,
        .disconnect             = lan78xx_disconnect,
        .suspend                = lan78xx_suspend,
        .resume                 = lan78xx_resume,
        .reset_resume           = lan78xx_reset_resume,
        .supports_autosuspend   = 1,
        .disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");