/*
 * sonic.c
 *
 * (C) 2005 Finn Thain
 *
 * Converted to DMA API, added zero-copy buffer handling, and
 * (from the mac68k project) introduced dhd's support for 16-bit cards.
 *
 * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * This driver is based on work from Andreas Busse, but most of
 * the code is rewritten.
 *
 * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
 *
 * Core code included by system sonic drivers
 *
 * And... partially rewritten again by David Huggins-Daines in order
 * to cope with screwed up Macintosh NICs that may or may not use
 * 16-bit DMA.
 *
 * (C) 1999 David Huggins-Daines <dhd@debian.org>
 *
 */

/*
 * Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
 * National Semiconductors data sheet for the DP83932B Sonic Ethernet
 * controller, and the files "8390.c" and "skeleton.c" in this directory.
 *
 * Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
 * Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
 * the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
 */

static unsigned int version_printed;

static int sonic_debug = -1;
module_param(sonic_debug, int, 0);
MODULE_PARM_DESC(sonic_debug, "debug message level");
static void sonic_msg_init(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	lp->msg_enable = netif_msg_init(sonic_debug, 0);

	if (version_printed++ == 0)
		netif_dbg(lp, drv, dev, "%s", version);
}

/*
 * Open/initialize the SONIC controller.
 *
 * This routine should set everything up anew at each open, even
 * registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
 */
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);

	spin_lock_init(&lp->lock);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
		if (skb == NULL) {
			while (i > 0) { /* free any that were allocated successfully */
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		/* align IP header unless DMA requires otherwise */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

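	/*
	 * Map the receive buffers in a second pass so that a mapping
	 * failure can unwind cleanly: unmap whatever was mapped, then
	 * free every skb allocated above.
	 */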
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
						  SONIC_RBSIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->device, laddr)) {
			while (i > 0) { /* free any that were mapped successfully */
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}

	/*
	 * Initialize the SONIC
	 */
	sonic_init(dev);

	netif_start_queue(dev);

	netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);

	return 0;
}

/* Wait for the SONIC to become idle. */
static void sonic_quiesce(struct net_device *dev, u16 mask)
{
	struct sonic_local * __maybe_unused lp = netdev_priv(dev);
	int i;
	u16 bits;

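	/*
	 * Poll the command register until the requested command bits
	 * clear.  Busy-wait when sleeping is not allowed; otherwise
	 * sleep briefly between polls.
	 */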
	for (i = 0; i < 1000; ++i) {
		bits = SONIC_READ(SONIC_CMD) & mask;
		if (!bits)
			return;
		if (irqs_disabled() || in_interrupt())
			udelay(20);
		else
			usleep_range(100, 200);
	}
	WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
}

/*
 * Close the SONIC device
 */
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	netif_dbg(lp, ifdown, dev, "%s\n", __func__);

	netif_stop_queue(dev);

	/*
	 * stop the SONIC, disable interrupts
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL);

	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* unmap and free skbs that haven't been transmitted */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* unmap and free the receive buffers */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if (lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}

static void sonic_tx_timeout(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;
	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts before releasing DMA buffers
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
	sonic_quiesce(dev, SONIC_CR_ALL);

	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	/* We could resend the original skbs. Easier to re-initialise. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if (lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if (lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
	/* Try to restart the adaptor. */
	sonic_init(dev);
	lp->stats.tx_errors++;
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

/*
 * transmit packet
 *
 * Appends a new TD during transmission, thus avoiding any TX interrupts
 * until we run out of TDs.
 * This routine interacts closely with the ISR in that it may:
 *   set tx_skb[i]
 *   reset the status flags of the new TD
 *   set and reset EOL flags
 *   stop the tx queue
 * The ISR interacts with this routine in various ways. It may:
 *   reset tx_skb[i]
 *   test the EOL and status flags of the TDs
 *   wake the tx queue
 * Concurrently with all of this, the SONIC is potentially writing to
 * the status flags of the TDs.
 */

static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry;
	unsigned long flags;

	netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */

	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (dma_mapping_error(lp->device, laddr)) {
		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&lp->lock, flags);

	entry = lp->next_tx;

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		      sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

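	/*
	 * Make sure the descriptor writes above are visible before the
	 * bookkeeping stores below; the ISR treats a non-NULL tx_skb[]
	 * entry as a descriptor it may process.
	 */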
	wmb();
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

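	/*
	 * Order the stores above before clearing EOL on the previous
	 * descriptor, which lets the SONIC advance into the new one.
	 */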
	wmb();
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
		      sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
	lp->eol_tx = entry;

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* The ring is full, the ISR has yet to process the next TD. */
		netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	} else {
		netif_start_queue(dev);
	}

	netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}

/*
 * The typical workload of the driver:
 * Handle the network interface interrupts.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	unsigned long flags;

	/* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
	 * with sonic_send_packet() so that the two functions can share state.
	 * Secondly, it makes sonic_interrupt() re-entrant, as that is required
	 * by macsonic which must use two IRQs with different priority levels.
	 */
	spin_lock_irqsave(&lp->lock, flags);

	status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	if (!status) {
		spin_unlock_irqrestore(&lp->lock, flags);

		return IRQ_NONE;
	}

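	/* Handle and acknowledge interrupt causes until the ISR reads back clear. */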
	do {
		SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */

		if (status & SONIC_INT_PKTRX) {
			netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
			sonic_rx(dev);	/* got packet(s) */
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;

			/* The state of a Transmit Descriptor may be inferred
			 * from { tx_skb[entry], td_status } as follows.
			 * { clear, clear } => the TD has never been used
			 * { set,   clear } => the TD was handed to SONIC
			 * { set,   set   } => the TD was handed back
			 * { clear, set   } => the TD is available for re-use
			 */

			netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);

			while (lp->tx_skb[entry] != NULL) {
				if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
					break;

				if (td_status & SONIC_TCR_PTX) {
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					if (td_status & (SONIC_TCR_EXD |
					    SONIC_TCR_EXC | SONIC_TCR_BCM))
						lp->stats.tx_aborted_errors++;
					if (td_status &
					    (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
						lp->stats.tx_carrier_errors++;
					if (td_status & SONIC_TCR_OWC)
						lp->stats.tx_window_errors++;
					if (td_status & SONIC_TCR_FU)
						lp->stats.tx_fifo_errors++;
				}

				/* We must free the original skb */
				dev_kfree_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;
				/* and unmap DMA buffer */
				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);	/* The ring is no longer full */
			lp->cur_tx = entry;
		}

		/*
		 * check error conditions
		 */
		if (status & SONIC_INT_RFO) {
			netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
				  __func__);
		}
		if (status & SONIC_INT_RDE) {
			netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
				  __func__);
		}
		if (status & SONIC_INT_RBAE) {
			netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
				  __func__);
		}

		/* counter overruns; all counters are 16 bits wide */
		if (status & SONIC_INT_FAE)
			lp->stats.rx_frame_errors += 65536;
		if (status & SONIC_INT_CRC)
			lp->stats.rx_crc_errors += 65536;
		if (status & SONIC_INT_MP)
			lp->stats.rx_missed_errors += 65536;

		/* transmit error */
		if (status & SONIC_INT_TXER) {
			u16 tcr = SONIC_READ(SONIC_TCR);

			netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
				  __func__, tcr);

			if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
				   SONIC_TCR_FU | SONIC_TCR_BCM)) {
				/* Aborted transmission. Try again. */
				netif_stop_queue(dev);
				SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
			}
		}

		/* bus retry */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
			       dev->name);
			/* ... to help debug DMA problems causing endless interrupts. */
			/* Bounce the eth interface to turn on the interrupt again. */
			SONIC_WRITE(SONIC_IMR, 0);
		}

		status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
	} while (status);

	spin_unlock_irqrestore(&lp->lock, flags);

	return IRQ_HANDLED;
}

/* Return the array index corresponding to a given Receive Buffer pointer. */
static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
			   unsigned int last)
{
	unsigned int i = last;

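	/* Start just past the caller's hint and scan the whole ring, wrapping around. */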
	do {
		i = (i + 1) & SONIC_RRS_MASK;
		if (addr == lp->rx_laddr[i])
			return i;
	} while (i != last);

	return -ENOENT;
}

/* Allocate and map a new skb to be used as a receive buffer. */
static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
			   struct sk_buff **new_skb, dma_addr_t *new_addr)
{
	*new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
	if (!*new_skb)
		return false;

	if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
		skb_reserve(*new_skb, 2);

	*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
				   SONIC_RBSIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(lp->device, *new_addr)) {
		dev_kfree_skb(*new_skb);
		*new_skb = NULL;
		return false;
	}

	return true;
}

/* Place a new receive resource in the Receive Resource Area and update RWP. */
static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
			     dma_addr_t old_addr, dma_addr_t new_addr)
{
	unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
	unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
	u32 buf;

	/* The resources in the range [RRP, RWP) belong to the SONIC. This loop
	 * scans the other resources in the RRA, those in the range [RWP, RRP).
	 */
	do {
		buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
		      sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);

		if (buf == old_addr)
			break;

		entry = (entry + 1) & SONIC_RRS_MASK;
	} while (entry != end);

	WARN_ONCE(buf != old_addr, "failed to find resource!\n");

	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
	sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);

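	/* Advance RWP past the slot just written so the SONIC can use the new resource. */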
	entry = (entry + 1) & SONIC_RRS_MASK;

	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
}

/*
 * We have a good packet(s), pass it/them up the network stack.
 */
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int entry = lp->cur_rx;
	int prev_entry = lp->eol_rx;
	bool rbe = false;

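	/*
	 * Walk the descriptors that the SONIC has handed back (in_use
	 * cleared) until we reach one that it still owns.
	 */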
	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);

		/* If the RD has LPKT set, the chip has finished with the RB */
		if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
			struct sk_buff *new_skb;
			dma_addr_t new_laddr;
			u32 addr = (sonic_rda_get(dev, entry,
						  SONIC_RD_PKTPTR_H) << 16) |
				   sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
			int i = index_from_addr(lp, addr, entry);

			if (i < 0) {
				WARN_ONCE(1, "failed to find buffer!\n");
				break;
			}

			if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
				struct sk_buff *used_skb = lp->rx_skb[i];
				int pkt_len;

				/* Pass the used buffer up the stack */
				dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
						 DMA_FROM_DEVICE);

				pkt_len = sonic_rda_get(dev, entry,
							SONIC_RD_PKTLEN);
				skb_trim(used_skb, pkt_len);
				used_skb->protocol = eth_type_trans(used_skb,
								    dev);
				netif_rx(used_skb);
				lp->stats.rx_packets++;
				lp->stats.rx_bytes += pkt_len;

				lp->rx_skb[i] = new_skb;
				lp->rx_laddr[i] = new_laddr;
			} else {
				/* Failed to obtain a new buffer so re-use it */
				new_laddr = addr;
				lp->stats.rx_dropped++;
			}
			/* If RBE is already asserted when RWP advances then
			 * it's safe to clear RBE after processing this packet.
			 */
			rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
			sonic_update_rra(dev, lp, addr, new_laddr);
		}
		/*
		 * give back the descriptor
		 */
		sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);

		prev_entry = entry;
		entry = (entry + 1) & SONIC_RDS_MASK;
	}

	lp->cur_rx = entry;

	if (prev_entry != lp->eol_rx) {
		/* Advance the EOL flag to put descriptors back into service */
		sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
			      sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
			      sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
		lp->eol_rx = prev_entry;
	}

	if (rbe)
		SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
	/*
	 * If any worthwhile packets have been received, netif_rx() has
	 * queued them for the network receive softirq to process.
	 */
}


/*
 * Get the current statistics.
 * This may be called with the device open or closed.
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* read the tally counters from the SONIC and reset them */
	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return &lp->stats;
}


/*
 * Set or clear the multicast filter for this adaptor.
 */
static void sonic_multicast_list(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int rcr;
	struct netdev_hw_addr *ha;
	unsigned char *addr;
	int i;

	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		rcr |= SONIC_RCR_PRO;
	} else {
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			rcr |= SONIC_RCR_AMC;
		} else {
			unsigned long flags;

			netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
				  netdev_mc_count(dev));
			sonic_set_cam_enable(dev, 1);  /* always enable our own address */
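			/* CAM slot 0 holds the interface address; multicast entries fill slots 1..15. */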
			i = 1;
			netdev_for_each_mc_addr(ha, dev) {
				addr = ha->addr;
				sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
				sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
				sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
				sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
				i++;
			}
			SONIC_WRITE(SONIC_CDC, 16);
			SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);

			/* LCAM and TXP commands can't be used simultaneously */
			spin_lock_irqsave(&lp->lock, flags);
			sonic_quiesce(dev, SONIC_CR_TXP);
			SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
			sonic_quiesce(dev, SONIC_CR_LCAM);
			spin_unlock_irqrestore(&lp->lock, flags);
		}
	}

	netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
}


/*
 * Initialize the SONIC ethernet controller.
 */
static int sonic_init(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* While in reset mode, clear CAM Enable register */
	SONIC_WRITE(SONIC_CE, 0);

	/*
	 * clear software reset flag, disable receiver, clear and
	 * enable interrupts, then completely initialize the SONIC
	 */
	SONIC_WRITE(SONIC_CMD, 0);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
	sonic_quiesce(dev, SONIC_CR_ALL);

	/*
	 * initialize the receive resource area
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
		u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
		sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
		sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
	}

	/* initialize all RRA registers */
	SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
	SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
	SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
	SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
	SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));

	/* load the resource pointers */
	netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);

	SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
	sonic_quiesce(dev, SONIC_CR_RRRA);

	/*
	 * Initialize the receive descriptors so that they
	 * become a circular linked list, ie. let the last
	 * descriptor point to the first again.
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_RDS; i++) {
		sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
		sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
		sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
		sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, i, SONIC_RD_LINK,
			      lp->rda_laddr +
			      ((i + 1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
	}
	/* fix last descriptor */
	sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
		      (lp->rda_laddr & 0xffff) | SONIC_EOL);
	lp->eol_rx = SONIC_NUM_RDS - 1;
	lp->cur_rx = 0;
	SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
	SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);

	/*
	 * initialize transmit descriptors
	 */
	netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
		  __func__);

	for (i = 0; i < SONIC_NUM_TDS; i++) {
		sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
		sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
		sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
		sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
		sonic_tda_put(dev, i, SONIC_TD_LINK,
			      (lp->tda_laddr & 0xffff) +
			      (i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
		lp->tx_skb[i] = NULL;
	}
	/* fix last descriptor */
	sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
		      (lp->tda_laddr & 0xffff));

	SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
	SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
	lp->cur_tx = lp->next_tx = 0;
	lp->eol_tx = SONIC_NUM_TDS - 1;

	/*
	 * put our own address into CAM desc[0]
	 */
	sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
	sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
	sonic_set_cam_enable(dev, 1);

	for (i = 0; i < 16; i++)
		sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);

	/*
	 * initialize CAM registers
	 */
	SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
	SONIC_WRITE(SONIC_CDC, 16);

	/*
	 * load the CAM
	 */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
	sonic_quiesce(dev, SONIC_CR_LCAM);

	/*
	 * enable receiver, disable loopback
	 * and enable all interrupts
	 */
	SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
	SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);

	netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
		  SONIC_READ(SONIC_CMD));

	return 0;
}

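/*
 * For reference, a system driver that includes this core typically wires
 * the handlers above into its struct net_device_ops roughly as follows.
 * This is an illustrative sketch only; the real table lives in the
 * including driver (e.g. macsonic.c or jazzsonic.c) and may differ:
 *
 *	static const struct net_device_ops sonic_netdev_ops = {
 *		.ndo_open		= sonic_open,
 *		.ndo_stop		= sonic_close,
 *		.ndo_start_xmit		= sonic_send_packet,
 *		.ndo_get_stats		= sonic_get_stats,
 *		.ndo_set_rx_mode	= sonic_multicast_list,
 *		.ndo_tx_timeout		= sonic_tx_timeout,
 *		.ndo_validate_addr	= eth_validate_addr,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *	};
 */
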
MODULE_LICENSE("GPL");