/*
 * 7990.c -- LANCE ethernet IC generic routines.
 * This is an attempt to separate out the bits of various ethernet
 * drivers that are common because they all use the AMD 7990 LANCE
 * (Local Area Network Controller for Ethernet) chip.
 *
 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
 *
 * Most of this stuff was obtained by looking at other LANCE drivers,
 * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
 * NB: this was made easy by the fact that Jes Sorensen had cleaned up
 * most of a2065 and sunlance with the aim of merging them, so the
 * common code was pretty obvious.
 */
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <asm/irq.h>
/* Used for the temporary inet entries and routing */
#include <linux/socket.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#ifdef CONFIG_HP300
#include <asm/blinken.h>
#endif

#include "7990.h"

#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
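
/*
 * The 7990's control/status registers are reached indirectly: the CSR
 * number is written to RAP (the Register Address Port) and the data is
 * then transferred through RDP (the Register Data Port), e.g.
 *
 *	WRITERAP(lp, LE_CSR0);
 *	csr0 = READRDP(lp);
 *
 * which is the access pattern used throughout this file.
 */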

#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
#include "hplance.h"

#undef WRITERAP
#undef WRITERDP
#undef READRDP

#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)

/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp,x) (lp->writerap(lp,x))
#define WRITERDP(lp,x) (lp->writerdp(lp,x))
#define READRDP(lp) (lp->readrdp(lp))

#else

/* These inlines can be used if only CONFIG_HPLANCE is defined */
static inline void WRITERAP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline void WRITERDP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}

static inline __u16 READRDP(struct lance_private *lp)
{
	__u16 value;
	do {
		value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
	return value;
}

#endif
#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */

/* debugging output macros, various flavours */
/* #define TEST_HITS */
#ifdef UNDEF
#define PRINT_RINGS() \
do { \
	int t; \
	for (t = 0; t < RX_RING_SIZE; t++) { \
		printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
		       ib->brx_ring[t].length, \
		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
	} \
	for (t = 0; t < TX_RING_SIZE; t++) { \
		printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
		       ib->btx_ring[t].length, \
		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
	} \
} while (0)
#else
#define PRINT_RINGS()
#endif

/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
static void load_csrs (struct lance_private *lp)
{
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR (aib);

	WRITERAP(lp, LE_CSR1); /* load address of init block */
	WRITERDP(lp, leptr & 0xFFFF);
	WRITERAP(lp, LE_CSR2);
	WRITERDP(lp, leptr >> 16);
	WRITERAP(lp, LE_CSR3);
	WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */

	/* Point back to csr0 */
	WRITERAP(lp, LE_CSR0);
}
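
/*
 * Worked example for the CSR1/CSR2 split above: the init block address is
 * a 24-bit bus address, so an init block at 0x00fedcba is loaded as
 * CSR1 = 0xdcba (low 16 bits) and CSR2 = 0x00fe (high bits).  The real
 * value of course comes from LANCE_ADDR() above.
 */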

/* #define to 0 or 1 appropriately */
#define DEBUG_IRING 0
/* Set up the Lance Rx and Tx rings and the init block */
static void lance_init_ring (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */

	/* Copy the ethernet address to the lance init block
	 * Notice that we do a byteswap if we're big endian.
	 * [I think this is the right criterion; at least, sunlance,
	 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
	 * However, the datasheet says that the BSWAP bit doesn't affect
	 * the init block, so surely it should be low byte first for
	 * everybody? Um.]
	 * We could define the ib->physaddr as three 16bit values and
	 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
	 */
#ifdef __BIG_ENDIAN
	ib->phys_addr [0] = dev->dev_addr [1];
	ib->phys_addr [1] = dev->dev_addr [0];
	ib->phys_addr [2] = dev->dev_addr [3];
	ib->phys_addr [3] = dev->dev_addr [2];
	ib->phys_addr [4] = dev->dev_addr [5];
	ib->phys_addr [5] = dev->dev_addr [4];
#else
	for (i = 0; i < 6; i++)
		ib->phys_addr[i] = dev->dev_addr[i];
#endif
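	/*
	 * Worked example of the swap above: on big-endian, a MAC address of
	 * 00:01:02:03:04:05 lands in phys_addr[] as 01 00 03 02 05 04, i.e.
	 * each 16-bit word is stored in the LANCE's little-endian order.
	 */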

	if (DEBUG_IRING)
		printk ("TX rings:\n");

	lp->tx_full = 0;
	/* Setup the Tx ring entries */
	for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring [i].tmd0 = leptr;
		ib->btx_ring [i].tmd1_hadr = leptr >> 16;
		ib->btx_ring [i].tmd1_bits = 0;
		ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring [i].misc = 0;
		if (DEBUG_IRING)
			printk ("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (DEBUG_IRING)
		printk ("RX rings:\n");
	for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring [i].rmd0 = leptr;
		ib->brx_ring [i].rmd1_hadr = leptr >> 16;
		ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
		/* 0xf000 == bits that must be one (reserved, presumably) */
		ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring [i].mblength = 0;
		if (DEBUG_IRING)
			printk ("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (DEBUG_IRING)
		printk ("RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (DEBUG_IRING)
		printk ("TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;
	PRINT_RINGS();
}
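
/*
 * Note on the rx_len/tx_len words set up above: the ring length is encoded
 * as log2(number of descriptors) in the top three bits (hence the "<< 13"),
 * with the high bits of the ring's bus address in the low byte.  E.g. with
 * lance_log_rx_bufs == 4 (16 Rx buffers), rx_len = (4 << 13) | (leptr >> 16).
 */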

/* LANCE must be STOPped before we do this, too... */
static int init_restart_lance (struct lance_private *lp)
{
	int i;

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_INIT);

	/* Need a hook here for sunlance ledma stuff */

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
		printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
		return -1;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	WRITERDP(lp, LE_C0_IDON);
	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);

	return 0;
}
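
/*
 * In short, the start-up handshake above is: set INIT in CSR0, poll until
 * the chip raises IDON (or ERR), acknowledge IDON by writing it back as a
 * one, then set INEA | STRT to enable interrupts and start Rx/Tx.
 */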

static int lance_reset (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int status;

	/* Stop the lance */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	load_csrs (lp);
	lance_init_ring (dev);
	dev->trans_start = jiffies;
	status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
	printk ("Lance restart=%d\n", status);
#endif
	return status;
}
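
/*
 * The STOP -> load_csrs() -> lance_init_ring() -> init_restart_lance()
 * sequence above is the full chip reset.  lance_tx() and
 * lance_set_multicast() below go through the same dance whenever the
 * transmitter dies or the filter changes, because the rings and the init
 * block can only be (re)loaded while the LANCE is stopped.
 */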

static int lance_rx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;
#ifdef TEST_HITS
	int i;
#endif

#ifdef TEST_HITS
	printk ("[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk ("]");
#endif
#ifdef CONFIG_HP300
	blinken_leds(0x40, 0);
#endif
	WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
	for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring [lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF)
				dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC)
				dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL)
				dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA)
				dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP)
				dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = dev_alloc_skb (len + 2);

			if (!skb) {
				printk ("%s: Memory squeeze, deferring packet.\n",
					dev->name);
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve (skb, 2); /* 16 byte align */
			skb_put (skb, len);   /* make room */
			skb_copy_to_linear_data(skb,
					(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
					len);
			skb->protocol = eth_type_trans (skb, dev);
			netif_rx (skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}
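
/*
 * Rx bookkeeping in a nutshell: a descriptor whose LE_R1_OWN bit is set
 * still belongs to the chip.  lance_rx() walks the ring from rx_new, stops
 * at the first descriptor the LANCE still owns, and hands each processed
 * buffer back by clearing mblength, setting LE_R1_OWN again and advancing
 * rx_new modulo the ring size (the "& lp->rx_ring_mod_mask").
 */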

static int lance_tx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

#ifdef CONFIG_HP300
	blinken_leds(0x80, 0);
#endif
	/* csr0 is 2f3 */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring [i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk("%s: Carrier Lost, trying %s\n",
					       dev->name, lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					WRITERAP(lp, LE_CSR0);
					WRITERDP(lp, LE_C0_STOP);
					lance_init_ring (dev);
					load_csrs (lp);
					init_restart_lance (lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
					dev->name);
				/* Stop the lance */
				WRITERAP(lp, LE_CSR0);
				WRITERDP(lp, LE_C0_STOP);
				lance_init_ring (dev);
				load_csrs (lp);
				init_restart_lance (lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	return 0;
}

static irqreturn_t
lance_interrupt (int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

	spin_lock (&lp->devlock);

	WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
	csr0 = READRDP(lp);

	PRINT_RINGS();

	if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
		spin_unlock (&lp->devlock);
		return IRQ_NONE; /* been generated by the Lance. */
	}

	/* Acknowledge all the interrupt sources ASAP */
	WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx (dev);

	if (csr0 & LE_C0_TINT)
		lance_tx (dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++; /* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++; /* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk("%s: Bus master arbitration failure, status %4.4x.\n",
		       dev->name, csr0);
		/* Restart the chip. */
		WRITERDP(lp, LE_C0_STRT);
	}

	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
		lp->tx_full = 0;
		netif_wake_queue (dev);
	}

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}
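
/*
 * A note on the CSR0 writes in the handler above: the status bits (RINT,
 * TINT, BABL, MISS, ...) are cleared by writing ones back to them, so the
 * handler echoes csr0 with the command bits (INEA, TDMD, STOP, STRT, INIT)
 * masked out; that acknowledges everything pending without issuing a new
 * command to the chip.
 */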

int lance_open (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int res;

	/* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
	if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
		return -EAGAIN;

	res = lance_reset(dev);
	spin_lock_init(&lp->devlock);
	netif_start_queue (dev);

	return res;
}
EXPORT_SYMBOL_GPL(lance_open);

int lance_close (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue (dev);

	/* Stop the LANCE */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	free_irq(lp->irq, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_close);

void lance_tx_timeout(struct net_device *dev)
{
	printk("lance_tx_timeout\n");
	lance_reset(dev);
	dev->trans_start = jiffies;
	netif_wake_queue (dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);

int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	static int outs;
	unsigned long flags;

	if (!TX_BUFFS_AVAIL)
		return -1;

	netif_stop_queue (dev);

	skblen = skb->len;

#ifdef DEBUG_DRIVER
	/* dump the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk ("\n");
			printk ("%2.2x ", skb->data [i]);
		}
	}
#endif
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring [entry].length = (-len) | 0xf000;
	ib->btx_ring [entry].misc = 0;

	if (skb->len < ETH_ZLEN)
		memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;

	outs++;
	/* Kick the lance: transmit now */
	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
	dev->trans_start = jiffies;
	dev_kfree_skb (skb);

	spin_lock_irqsave (&lp->devlock, flags);
	if (TX_BUFFS_AVAIL)
		netif_start_queue (dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore (&lp->devlock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
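
/*
 * The descriptor length written in lance_start_xmit() follows the LANCE
 * convention: the byte count is stored as a two's complement negative
 * value with the four must-be-one bits forced high, e.g. a 64-byte frame
 * gives (-64) | 0xf000 == 0xffc0.
 */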

/* taken from the depca driver via a2065.c */
static void lance_load_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter [0] = 0xffffffff;
		ib->filter [1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;

	/* Add addresses */
	for (i = 0; i < dev->mc_count; i++) {
		addrs = dmi->dmi_addr;
		dmi = dmi->next;

		/* multicast address? */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(6, addrs);
		crc = crc >> 26;
		mcast_table [crc >> 4] |= 1 << (crc & 0xf);
	}
}
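
/*
 * Filter layout used above: ether_crc_le() is taken over the six address
 * bytes, the top six bits of the CRC select one of the 64 filter bits,
 * the upper two of those six bits pick one of the four 16-bit words in
 * ib->filter, and the low four bits pick the bit within that word.
 */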

void lance_set_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int stopped;

	stopped = netif_queue_stopped(dev);
	if (!stopped)
		netif_stop_queue (dev);

	while (lp->tx_old != lp->tx_new)
		schedule();

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);
	lance_init_ring (dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast (dev);
	}
	load_csrs (lp);
	init_restart_lance (lp);

	if (!stopped)
		netif_start_queue (dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);
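
/*
 * The stop/re-init in lance_set_multicast() reflects how the chip works:
 * both the promiscuous-mode bit and the multicast filter live in the init
 * block, which the LANCE only re-reads when it is (re)initialised, so
 * changing either means stopping the chip and running the full restart
 * sequence.
 */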

#ifdef CONFIG_NET_POLL_CONTROLLER
void lance_poll(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	spin_lock (&lp->devlock);
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STRT);
	spin_unlock (&lp->devlock);
	lance_interrupt(dev->irq, dev);
}
#endif

MODULE_LICENSE("GPL");