/*
 * Copyright (c) 2001 Vojtech Pavlik
 *
 * CATC EL1210A NetMate USB Ethernet driver
 *
 * Sponsored by SuSE
 *
 * Based on the work of
 *	Donald Becker
 *
 * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002
 * - adds support for Belkin F5U011
 */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>

#undef DEBUG

#include <linux/usb.h>

/*
 * Version information.
 */

#define DRIVER_VERSION "v2.8"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>"
#define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver"
#define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet"

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

static const char driver_name[] = "catc";

/*
 * Some defines.
 */

#define STATS_UPDATE	(HZ)		/* Time between stats updates */
#define TX_TIMEOUT	(5*HZ)		/* Max time the queue can be stopped */
#define PKT_SZ		1536		/* Max Ethernet packet size */
#define RX_MAX_BURST	15		/* Max packets per rx buffer (> 0, < 16) */
#define TX_MAX_BURST	15		/* Max full sized packets per tx buffer (> 0) */
#define CTRL_QUEUE	16		/* Max control requests in flight (power of two) */
#define RX_PKT_SZ	1600		/* Max size of receive packet for F5U011 */

/*
 * Control requests.
 */

enum control_requests {
	ReadMem = 0xf1,
	GetMac = 0xf2,
	Reset = 0xf4,
	SetMac = 0xf5,
	SetRxMode = 0xf5,	/* F5U011 only */
	WriteROM = 0xf8,
	SetReg = 0xfa,
	GetReg = 0xfb,
	WriteMem = 0xfc,
	ReadROM = 0xfd,
};

/*
 * Registers.
 */

enum register_offsets {
	TxBufCount = 0x20,
	RxBufCount = 0x21,
	OpModes = 0x22,
	TxQed = 0x23,
	RxQed = 0x24,
	MaxBurst = 0x25,
	RxUnit = 0x60,
	EthStatus = 0x61,
	StationAddr0 = 0x67,
	EthStats = 0x69,
	LEDCtrl = 0x81,
};

enum eth_stats {
	TxSingleColl = 0x00,
	TxMultiColl = 0x02,
	TxExcessColl = 0x04,
	RxFramErr = 0x06,
};

enum op_mode_bits {
	Op3MemWaits = 0x03,
	OpLenInclude = 0x08,
	OpRxMerge = 0x10,
	OpTxMerge = 0x20,
	OpWin95bugfix = 0x40,
	OpLoopback = 0x80,
};

enum rx_filter_bits {
	RxEnable = 0x01,
	RxPolarity = 0x02,
	RxForceOK = 0x04,
	RxMultiCast = 0x08,
	RxPromisc = 0x10,
	AltRxPromisc = 0x20,	/* F5U011 uses different bit */
};

enum led_values {
	LEDFast = 0x01,
	LEDSlow = 0x02,
	LEDFlash = 0x03,
	LEDPulse = 0x04,
	LEDLink = 0x08,
};

enum link_status {
	LinkNoChange = 0,
	LinkGood = 1,
	LinkBad = 2
};
151
152 /*
153 * The catc struct.
154 */
155
156 #define CTRL_RUNNING 0
157 #define RX_RUNNING 1
158 #define TX_RUNNING 2
159
160 struct catc {
161 struct net_device *netdev;
162 struct usb_device *usbdev;
163
164 unsigned long flags;
165
166 unsigned int tx_ptr, tx_idx;
167 unsigned int ctrl_head, ctrl_tail;
168 spinlock_t tx_lock, ctrl_lock;
169
170 u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)];
171 u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)];
172 u8 irq_buf[2];
173 u8 ctrl_buf[64];
174 struct usb_ctrlrequest ctrl_dr;
175
176 struct timer_list timer;
177 u8 stats_buf[8];
178 u16 stats_vals[4];
179 unsigned long last_stats;
180
181 u8 multicast[64];
182
183 struct ctrl_queue {
184 u8 dir;
185 u8 request;
186 u16 value;
187 u16 index;
188 void *buf;
189 int len;
190 void (*callback)(struct catc *catc, struct ctrl_queue *q);
191 } ctrl_queue[CTRL_QUEUE];
192
193 struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb;
194
195 u8 is_f5u011; /* Set if device is an F5U011 */
196 u8 rxmode[2]; /* Used for F5U011 */
197 atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */
198 };

/*
 * Useful macros.
 */

#define catc_get_mac(catc, mac)		catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6)
#define catc_reset(catc)		catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0)
#define catc_set_reg(catc, reg, val)	catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0)
#define catc_get_reg(catc, reg, buf)	catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1)
#define catc_write_mem(catc, addr, buf, size)	catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size)
#define catc_read_mem(catc, addr, buf, size)	catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size)

#define f5u011_rxmode(catc, rxmode)		catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2)
#define f5u011_rxmode_async(catc, rxmode)	catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL)
#define f5u011_mchash_async(catc, hash)		catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL)

#define catc_set_reg_async(catc, reg, val)	catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL)
#define catc_get_reg_async(catc, reg, cb)	catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb)
#define catc_write_mem_async(catc, addr, buf, size)	catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL)

/*
 * Receive routines.
 */
static void catc_rx_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	u8 *pkt_start = urb->transfer_buffer;
	struct sk_buff *skb;
	int pkt_len, pkt_offset = 0;
	int status = urb->status;

	if (!catc->is_f5u011) {
		clear_bit(RX_RUNNING, &catc->flags);
		pkt_offset = 2;
	}

	if (status) {
		dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n",
			status, urb->actual_length);
		return;
	}

	do {
		if (!catc->is_f5u011) {
			pkt_len = le16_to_cpup((__le16 *)pkt_start);
			if (pkt_len > urb->actual_length) {
				catc->netdev->stats.rx_length_errors++;
				catc->netdev->stats.rx_errors++;
				break;
			}
		} else {
			pkt_len = urb->actual_length;
		}

		if (!(skb = dev_alloc_skb(pkt_len)))
			return;

		skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len);
		skb_put(skb, pkt_len);

		skb->protocol = eth_type_trans(skb, catc->netdev);
		netif_rx(skb);

		catc->netdev->stats.rx_packets++;
		catc->netdev->stats.rx_bytes += pkt_len;

		/* F5U011 only does one packet per RX */
		if (catc->is_f5u011)
			break;
		pkt_start += (((pkt_len + 1) >> 6) + 1) << 6;

	} while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length);

	if (catc->is_f5u011) {
		if (atomic_read(&catc->recq_sz)) {
			int state;
			atomic_dec(&catc->recq_sz);
			netdev_dbg(catc->netdev, "getting extra packet\n");
			urb->dev = catc->usbdev;
			if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
				netdev_dbg(catc->netdev,
					   "submit(rx_urb) status %d\n", state);
			}
		} else {
			clear_bit(RX_RUNNING, &catc->flags);
		}
	}
}

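/*
 * The interrupt endpoint reports link changes and pending receive data. On
 * the EL1210A, bit 7 of the second status byte means data is queued and bits
 * 6/5 signal link up/down. The F5U011 reports the number of queued packets
 * in the low 12 bits and encodes the link state in the first byte
 * (0x90 = good, 0xA0 = bad).
 */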
static void catc_irq_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	u8 *data = urb->transfer_buffer;
	int status = urb->status;
	unsigned int hasdata = 0, linksts = LinkNoChange;
	int res;

	if (!catc->is_f5u011) {
		hasdata = data[1] & 0x80;
		if (data[1] & 0x40)
			linksts = LinkGood;
		else if (data[1] & 0x20)
			linksts = LinkBad;
	} else {
		hasdata = (unsigned int)(be16_to_cpup((__be16 *)data) & 0x0fff);
		if (data[0] == 0x90)
			linksts = LinkGood;
		else if (data[0] == 0xA0)
			linksts = LinkBad;
	}

	switch (status) {
	case 0:			/* success */
		break;
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	/* -EPIPE: should clear the halt */
	default:		/* error */
		dev_dbg(&urb->dev->dev,
			"irq_done, status %d, data %02x %02x.\n",
			status, data[0], data[1]);
		goto resubmit;
	}

	if (linksts == LinkGood) {
		netif_carrier_on(catc->netdev);
		netdev_dbg(catc->netdev, "link ok\n");
	}

	if (linksts == LinkBad) {
		netif_carrier_off(catc->netdev);
		netdev_dbg(catc->netdev, "link bad\n");
	}

	if (hasdata) {
		if (test_and_set_bit(RX_RUNNING, &catc->flags)) {
			if (catc->is_f5u011)
				atomic_inc(&catc->recq_sz);
		} else {
			catc->rx_urb->dev = catc->usbdev;
			if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) {
				dev_err(&catc->usbdev->dev,
					"submit(rx_urb) status %d\n", res);
			}
		}
	}
resubmit:
	res = usb_submit_urb(urb, GFP_ATOMIC);
	if (res)
		dev_err(&catc->usbdev->dev,
			"can't resubmit intr, %s-%s, status %d\n",
			catc->usbdev->bus->bus_name,
			catc->usbdev->devpath, res);
}

/*
 * Transmit routines.
 */

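/*
 * Transmits are double-buffered: catc_start_xmit() packs frames into
 * tx_buf[tx_idx], each preceded by a 2-byte length header and starting on a
 * 64-byte boundary, while the other buffer may still be in flight.
 * catc_tx_run() flips tx_idx and submits whatever has accumulated; the
 * F5U011 additionally needs the total transfer length rounded up to a
 * multiple of 64 bytes.
 */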
static int catc_tx_run(struct catc *catc)
{
	int status;

	if (catc->is_f5u011)
		catc->tx_ptr = (catc->tx_ptr + 63) & ~63;

	catc->tx_urb->transfer_buffer_length = catc->tx_ptr;
	catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx];
	catc->tx_urb->dev = catc->usbdev;

	if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0)
		dev_err(&catc->usbdev->dev, "submit(tx_urb), status %d\n",
			status);

	catc->tx_idx = !catc->tx_idx;
	catc->tx_ptr = 0;

	catc->netdev->trans_start = jiffies;
	return status;
}

static void catc_tx_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	unsigned long flags;
	int r, status = urb->status;

	if (status == -ECONNRESET) {
		dev_dbg(&urb->dev->dev, "Tx Reset.\n");
		urb->status = 0;
		catc->netdev->trans_start = jiffies;
		catc->netdev->stats.tx_errors++;
		clear_bit(TX_RUNNING, &catc->flags);
		netif_wake_queue(catc->netdev);
		return;
	}

	if (status) {
		dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n",
			status, urb->actual_length);
		return;
	}

	spin_lock_irqsave(&catc->tx_lock, flags);

	if (catc->tx_ptr) {
		r = catc_tx_run(catc);
		if (unlikely(r < 0))
			clear_bit(TX_RUNNING, &catc->flags);
	} else {
		clear_bit(TX_RUNNING, &catc->flags);
	}

	netif_wake_queue(catc->netdev);

	spin_unlock_irqrestore(&catc->tx_lock, flags);
}

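/*
 * The queue is stopped once the active buffer can no longer hold another
 * full-sized frame (for the F5U011, as soon as a single frame is pending,
 * since it takes one packet per transfer) and is woken again from
 * catc_tx_done(), including after a -ECONNRESET unlink.
 */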
static netdev_tx_t catc_start_xmit(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	unsigned long flags;
	int r = 0;
	char *tx_buf;

	spin_lock_irqsave(&catc->tx_lock, flags);

	catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6;
	tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr;
	if (catc->is_f5u011)
		*(__be16 *)tx_buf = cpu_to_be16(skb->len);
	else
		*(__le16 *)tx_buf = cpu_to_le16(skb->len);
	skb_copy_from_linear_data(skb, tx_buf + 2, skb->len);
	catc->tx_ptr += skb->len + 2;

	if (!test_and_set_bit(TX_RUNNING, &catc->flags)) {
		r = catc_tx_run(catc);
		if (r < 0)
			clear_bit(TX_RUNNING, &catc->flags);
	}

	if ((catc->is_f5u011 && catc->tx_ptr) ||
	    (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2))))
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&catc->tx_lock, flags);

	if (r >= 0) {
		catc->netdev->stats.tx_bytes += skb->len;
		catc->netdev->stats.tx_packets++;
	}

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void catc_tx_timeout(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);

	dev_warn(&netdev->dev, "Transmit timed out.\n");
	usb_unlink_urb(catc->tx_urb);
}

/*
 * Control messages.
 */

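/*
 * Register and memory accesses go over endpoint 0 as vendor requests
 * (bRequestType 0x40 | direction). catc_ctrl_msg() issues them
 * synchronously; catc_ctrl_async() stores them in a small ring of
 * CTRL_QUEUE entries and keeps exactly one control URB in flight, with
 * catc_ctrl_done() kicking off the next queued request.
 */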
static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len)
{
	int retval = usb_control_msg(catc->usbdev,
		dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0),
		request, 0x40 | dir, value, index, buf, len, 1000);
	return retval < 0 ? retval : 0;
}

static void catc_ctrl_run(struct catc *catc)
{
	struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
	struct usb_device *usbdev = catc->usbdev;
	struct urb *urb = catc->ctrl_urb;
	struct usb_ctrlrequest *dr = &catc->ctrl_dr;
	int status;

	dr->bRequest = q->request;
	dr->bRequestType = 0x40 | q->dir;
	dr->wValue = cpu_to_le16(q->value);
	dr->wIndex = cpu_to_le16(q->index);
	dr->wLength = cpu_to_le16(q->len);

	urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
	urb->transfer_buffer_length = q->len;
	urb->transfer_buffer = catc->ctrl_buf;
	urb->setup_packet = (void *) dr;
	urb->dev = usbdev;

	if (!q->dir && q->buf && q->len)
		memcpy(catc->ctrl_buf, q->buf, q->len);

	if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC)))
		dev_err(&catc->usbdev->dev, "submit(ctrl_urb) status %d\n",
			status);
}

static void catc_ctrl_done(struct urb *urb)
{
	struct catc *catc = urb->context;
	struct ctrl_queue *q;
	unsigned long flags;
	int status = urb->status;

	if (status)
		dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n",
			status, urb->actual_length);

	spin_lock_irqsave(&catc->ctrl_lock, flags);

	q = catc->ctrl_queue + catc->ctrl_tail;

	if (q->dir) {
		if (q->buf && q->len)
			memcpy(q->buf, catc->ctrl_buf, q->len);
		else
			q->buf = catc->ctrl_buf;
	}

	if (q->callback)
		q->callback(catc, q);

	catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);

	if (catc->ctrl_head != catc->ctrl_tail)
		catc_ctrl_run(catc);
	else
		clear_bit(CTRL_RUNNING, &catc->flags);

	spin_unlock_irqrestore(&catc->ctrl_lock, flags);
}

static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value,
	u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q))
{
	struct ctrl_queue *q;
	int retval = 0;
	unsigned long flags;

	spin_lock_irqsave(&catc->ctrl_lock, flags);

	q = catc->ctrl_queue + catc->ctrl_head;

	q->dir = dir;
	q->request = request;
	q->value = value;
	q->index = index;
	q->buf = buf;
	q->len = len;
	q->callback = callback;

	catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1);

	if (catc->ctrl_head == catc->ctrl_tail) {
		dev_err(&catc->usbdev->dev, "ctrl queue full\n");
		catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1);
		retval = -1;
	}

	if (!test_and_set_bit(CTRL_RUNNING, &catc->flags))
		catc_ctrl_run(catc);

	spin_unlock_irqrestore(&catc->ctrl_lock, flags);

	return retval;
}

/*
 * Statistics.
 */

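/*
 * Once a second the timer kicks off asynchronous reads of the eight
 * byte-wide counters at EthStats, in descending register order so the low
 * (odd) byte of each pair arrives before its high (even) byte.
 * catc_stats_done() then assembles the 16-bit value and adds the delta
 * against the previous sample to the netdev statistics.
 */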
static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
{
	int index = q->index - EthStats;
	u16 data, last;

	catc->stats_buf[index] = *((char *)q->buf);

	if (index & 1)
		return;

	data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1];
	last = catc->stats_vals[index >> 1];

	switch (index) {
	case TxSingleColl:
	case TxMultiColl:
		catc->netdev->stats.collisions += data - last;
		break;
	case TxExcessColl:
		catc->netdev->stats.tx_aborted_errors += data - last;
		catc->netdev->stats.tx_errors += data - last;
		break;
	case RxFramErr:
		catc->netdev->stats.rx_frame_errors += data - last;
		catc->netdev->stats.rx_errors += data - last;
		break;
	}

	catc->stats_vals[index >> 1] = data;
}

static void catc_stats_timer(unsigned long data)
{
	struct catc *catc = (void *) data;
	int i;

	for (i = 0; i < 8; i++)
		catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done);

	mod_timer(&catc->timer, jiffies + STATS_UPDATE);
}

/*
 * Receive modes. Broadcast, Multicast, Promisc.
 */

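/*
 * Multicast filtering hashes the Ethernet CRC-32 of the destination
 * address. The EL1210A keeps a 512-bit (64-byte) table indexed by the low
 * CRC bits (catc_multicast() below) that is written into adapter memory at
 * 0xfa80; the F5U011 uses only a 64-bit hash built from the top CRC bits
 * and uploads it with a SetRxMode request.
 */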
static void catc_multicast(unsigned char *addr, u8 *multicast)
{
	u32 crc;

	crc = ether_crc_le(6, addr);
	multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
}

static void catc_set_multicast_list(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	u8 broadcast[ETH_ALEN];
	u8 rx = RxEnable | RxPolarity | RxMultiCast;

	memset(broadcast, 0xff, ETH_ALEN);
	memset(catc->multicast, 0, 64);

	catc_multicast(broadcast, catc->multicast);
	catc_multicast(netdev->dev_addr, catc->multicast);

	if (netdev->flags & IFF_PROMISC) {
		memset(catc->multicast, 0xff, 64);
		rx |= (!catc->is_f5u011) ? RxPromisc : AltRxPromisc;
	}

	if (netdev->flags & IFF_ALLMULTI) {
		memset(catc->multicast, 0xff, 64);
	} else {
		netdev_for_each_mc_addr(ha, netdev) {
			u32 crc = ether_crc_le(6, ha->addr);
			if (!catc->is_f5u011) {
				catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
			} else {
				catc->multicast[7 - (crc >> 29)] |= 1 << ((crc >> 26) & 7);
			}
		}
	}
	if (!catc->is_f5u011) {
		catc_set_reg_async(catc, RxUnit, rx);
		catc_write_mem_async(catc, 0xfa80, catc->multicast, 64);
	} else {
		f5u011_mchash_async(catc, catc->multicast);
		if (catc->rxmode[0] != rx) {
			catc->rxmode[0] = rx;
			netdev_dbg(catc->netdev,
				   "Setting RX mode to %2.2X %2.2X\n",
				   catc->rxmode[0], catc->rxmode[1]);
			f5u011_rxmode_async(catc, catc->rxmode);
		}
	}
}

static void catc_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct catc *catc = netdev_priv(dev);
	strlcpy(info->driver, driver_name, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info));
}

static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct catc *catc = netdev_priv(dev);
	if (!catc->is_f5u011)
		return -EOPNOTSUPP;

	cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP;
	cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP;
	ethtool_cmd_speed_set(cmd, SPEED_10);
	cmd->duplex = DUPLEX_HALF;
	cmd->port = PORT_TP;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;
	return 0;
}

static const struct ethtool_ops ops = {
	.get_drvinfo = catc_get_drvinfo,
	.get_settings = catc_get_settings,
	.get_link = ethtool_op_get_link
};

/*
 * Open, close.
 */

static int catc_open(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);
	int status;

	catc->irq_urb->dev = catc->usbdev;
	if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) {
		dev_err(&catc->usbdev->dev, "submit(irq_urb) status %d\n",
			status);
		return -1;
	}

	netif_start_queue(netdev);

	if (!catc->is_f5u011)
		mod_timer(&catc->timer, jiffies + STATS_UPDATE);

	return 0;
}

static int catc_stop(struct net_device *netdev)
{
	struct catc *catc = netdev_priv(netdev);

	netif_stop_queue(netdev);

	if (!catc->is_f5u011)
		del_timer_sync(&catc->timer);

	usb_kill_urb(catc->rx_urb);
	usb_kill_urb(catc->tx_urb);
	usb_kill_urb(catc->irq_urb);
	usb_kill_urb(catc->ctrl_urb);

	return 0;
}

static const struct net_device_ops catc_netdev_ops = {
	.ndo_open		= catc_open,
	.ndo_stop		= catc_stop,
	.ndo_start_xmit		= catc_start_xmit,

	.ndo_tx_timeout		= catc_tx_timeout,
	.ndo_set_rx_mode	= catc_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * USB probe, disconnect.
 */

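/*
 * Probe selects altsetting 1, allocates the net_device and the four URBs,
 * and tells the NetMate and the Belkin F5U011 apart by bcdDevice (0x0130
 * marks the F5U011, which shares the NetMate's vendor/product IDs). The
 * EL1210A path then sizes the on-chip packet memory, programs the MAC and
 * multicast table, and enables the receiver.
 */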
static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	struct device *dev = &intf->dev;
	struct usb_device *usbdev = interface_to_usbdev(intf);
	struct net_device *netdev;
	struct catc *catc;
	u8 broadcast[ETH_ALEN];
	int pktsz, ret;

	if (usb_set_interface(usbdev,
			intf->altsetting->desc.bInterfaceNumber, 1)) {
		dev_err(dev, "Can't set altsetting 1.\n");
		return -EIO;
	}

	netdev = alloc_etherdev(sizeof(struct catc));
	if (!netdev)
		return -ENOMEM;

	catc = netdev_priv(netdev);

	netdev->netdev_ops = &catc_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	netdev->ethtool_ops = &ops;

	catc->usbdev = usbdev;
	catc->netdev = netdev;

	spin_lock_init(&catc->tx_lock);
	spin_lock_init(&catc->ctrl_lock);

	init_timer(&catc->timer);
	catc->timer.data = (long) catc;
	catc->timer.function = catc_stats_timer;

	catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
	if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
	    (!catc->rx_urb) || (!catc->irq_urb)) {
		dev_err(&intf->dev, "No free urbs available.\n");
		ret = -ENOMEM;
		goto fail_free;
	}

	/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
	if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 &&
	    le16_to_cpu(usbdev->descriptor.idProduct) == 0xa &&
	    le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) {
		dev_dbg(dev, "Testing for f5u011\n");
		catc->is_f5u011 = 1;
		atomic_set(&catc->recq_sz, 0);
		pktsz = RX_PKT_SZ;
	} else {
		pktsz = RX_MAX_BURST * (PKT_SZ + 2);
	}

	usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0),
		NULL, NULL, 0, catc_ctrl_done, catc);

	usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1),
		NULL, 0, catc_tx_done, catc);

	usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1),
		catc->rx_buf, pktsz, catc_rx_done, catc);

	usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2),
		catc->irq_buf, 2, catc_irq_done, catc, 1);

	if (!catc->is_f5u011) {
		u32 *buf;
		int i;

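		/*
		 * Probe the packet memory size: write one pattern at 0x7a80
		 * and a different one at 0xfa80. On a 32k part the two
		 * addresses alias, so reading 0x7a80 back returns the second
		 * pattern; a 64k part returns the first pattern untouched.
		 */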
		dev_dbg(dev, "Checking memory size\n");

		buf = kmalloc(4, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto fail_free;
		}

		*buf = 0x12345678;
		catc_write_mem(catc, 0x7a80, buf, 4);
		*buf = 0x87654321;
		catc_write_mem(catc, 0xfa80, buf, 4);
		catc_read_mem(catc, 0x7a80, buf, 4);

		switch (*buf) {
		case 0x12345678:
			catc_set_reg(catc, TxBufCount, 8);
			catc_set_reg(catc, RxBufCount, 32);
			dev_dbg(dev, "64k Memory\n");
			break;
		default:
			dev_warn(&intf->dev,
				 "Couldn't detect memory size, assuming 32k\n");
		case 0x87654321:
			catc_set_reg(catc, TxBufCount, 4);
			catc_set_reg(catc, RxBufCount, 16);
			dev_dbg(dev, "32k Memory\n");
			break;
		}

		kfree(buf);

		dev_dbg(dev, "Getting MAC from SEEROM.\n");

		catc_get_mac(catc, netdev->dev_addr);

		dev_dbg(dev, "Setting MAC into registers.\n");

		for (i = 0; i < 6; i++)
			catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]);

		dev_dbg(dev, "Filling the multicast list.\n");

		memset(broadcast, 0xff, ETH_ALEN);
		catc_multicast(broadcast, catc->multicast);
		catc_multicast(netdev->dev_addr, catc->multicast);
		catc_write_mem(catc, 0xfa80, catc->multicast, 64);

		dev_dbg(dev, "Clearing error counters.\n");

		for (i = 0; i < 8; i++)
			catc_set_reg(catc, EthStats + i, 0);
		catc->last_stats = jiffies;

		dev_dbg(dev, "Enabling.\n");

		catc_set_reg(catc, MaxBurst, RX_MAX_BURST);
		catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits);
		catc_set_reg(catc, LEDCtrl, LEDLink);
		catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast);
	} else {
		dev_dbg(dev, "Performing reset\n");
		catc_reset(catc);
		catc_get_mac(catc, netdev->dev_addr);

		dev_dbg(dev, "Setting RX Mode\n");
		catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
		catc->rxmode[1] = 0;
		f5u011_rxmode(catc, catc->rxmode);
	}
	dev_dbg(dev, "Init done.\n");
	printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
	       netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
	       usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
	usb_set_intfdata(intf, catc);

	SET_NETDEV_DEV(netdev, &intf->dev);
	ret = register_netdev(netdev);
	if (ret)
		goto fail_clear_intfdata;

	return 0;

fail_clear_intfdata:
	usb_set_intfdata(intf, NULL);
fail_free:
	usb_free_urb(catc->ctrl_urb);
	usb_free_urb(catc->tx_urb);
	usb_free_urb(catc->rx_urb);
	usb_free_urb(catc->irq_urb);
	free_netdev(netdev);
	return ret;
}

static void catc_disconnect(struct usb_interface *intf)
{
	struct catc *catc = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);
	if (catc) {
		unregister_netdev(catc->netdev);
		usb_free_urb(catc->ctrl_urb);
		usb_free_urb(catc->tx_urb);
		usb_free_urb(catc->rx_urb);
		usb_free_urb(catc->irq_urb);
		free_netdev(catc->netdev);
	}
}

/*
 * Module functions and tables.
 */

static struct usb_device_id catc_id_table[] = {
	{ USB_DEVICE(0x0423, 0xa) },	/* CATC Netmate, Belkin F5U011 */
	{ USB_DEVICE(0x0423, 0xc) },	/* CATC Netmate II, Belkin F5U111 */
	{ USB_DEVICE(0x08d1, 0x1) },	/* smartBridges smartNIC */
	{ }
};

MODULE_DEVICE_TABLE(usb, catc_id_table);

static struct usb_driver catc_driver = {
	.name =		driver_name,
	.probe =	catc_probe,
	.disconnect =	catc_disconnect,
	.id_table =	catc_id_table,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(catc_driver);