1 // SPDX-License-Identifier: GPL-1.0+
2 
3 /* 8390.c: A general NS8390 ethernet driver core for linux. */
4 /*
5 	Written 1992-94 by Donald Becker.
6 
7 	Copyright 1993 United States Government as represented by the
8 	Director, National Security Agency.
9 
10 	The author may be reached as becker@scyld.com, or C/O
11 	Scyld Computing Corporation
12 	410 Severn Ave., Suite 210
13 	Annapolis MD 21403
14 
15 
16   This is the chip-specific code for many 8390-based ethernet adaptors.
17   This is not a complete driver; it must be combined with board-specific
18   code such as ne.c, wd.c, 3c503.c, etc.
19 
20   Seeing how at least eight drivers use this code (not counting the
21   PCMCIA ones), it is easy to break some card with what seems like
22   a simple, innocent change. Please contact me or Donald if you think
23   you have found something that needs changing. -- PG
24 
25 
26   Changelog:
27 
28   Paul Gortmaker	: remove set_bit lock, other cleanups.
29   Paul Gortmaker	: add ei_get_8390_hdr() so we can pass skb's to
30 			  ei_block_input() for eth_io_copy_and_sum().
31   Paul Gortmaker	: exchange static int ei_pingpong for a #define,
32 			  also add better Tx error handling.
33   Paul Gortmaker	: rewrite Rx overrun handling as per NS specs.
34   Alexey Kuznetsov	: use the 8390's six bit hash multicast filter.
35   Paul Gortmaker	: tweak ANK's above multicast changes a bit.
36   Paul Gortmaker	: update packet statistics for v2.1.x
37   Alan Cox		: support arbitrary stupid port mappings on the
38 			  68K Macintosh. Support >16bit I/O spaces
39   Paul Gortmaker	: add kmod support for auto-loading of the 8390
40 			  module by all drivers that require it.
41   Alan Cox		: Spinlocking work, added 'BUG_83C690'
42   Paul Gortmaker	: Separate out Tx timeout code from Tx path.
43   Paul Gortmaker	: Remove old unused single Tx buffer code.
44   Hayato Fujiwara	: Add m32r support.
45   Paul Gortmaker	: use skb_padto() instead of stack scratch area
46 
47   Sources:
48   The National Semiconductor LAN Databook, and the 3Com 3c503 databook.
49 
50   */
51 
52 #include <linux/build_bug.h>
53 #include <linux/module.h>
54 #include <linux/kernel.h>
55 #include <linux/jiffies.h>
56 #include <linux/fs.h>
57 #include <linux/types.h>
58 #include <linux/string.h>
59 #include <linux/bitops.h>
60 #include <linux/uaccess.h>
61 #include <linux/io.h>
62 #include <asm/irq.h>
63 #include <linux/delay.h>
64 #include <linux/errno.h>
65 #include <linux/fcntl.h>
66 #include <linux/in.h>
67 #include <linux/interrupt.h>
68 #include <linux/init.h>
69 #include <linux/crc32.h>
70 
71 #include <linux/netdevice.h>
72 #include <linux/etherdevice.h>
73 
74 #define NS8390_CORE
75 #include "8390.h"
76 
77 #define BUG_83C690
78 
79 /* These are the operational function interfaces to board-specific
80    routines.
81 	void reset_8390(struct net_device *dev)
82 		Resets the board associated with DEV, including a hardware reset of
83 		the 8390.  This is only called when there is a transmit timeout, and
84 		it is always followed by 8390_init().
85 	void block_output(struct net_device *dev, int count, const unsigned char *buf,
86 					  int start_page)
87 		Write the COUNT bytes of BUF to the packet buffer at START_PAGE.  The
88 		"page" value uses the 8390's 256-byte pages.
89 	void get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
90 		Read the 4-byte, page-aligned 8390 header. *If* there is a
91 		subsequent read, it will be of the rest of the packet.
92 	void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
93 		Read COUNT bytes from the packet buffer into the skb data area. Start
94 		reading from RING_OFFSET, the address as the 8390 sees it.  This will always
95 		follow the read of the 8390 header.
96 */
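/*
 * Editor's note: a minimal, hypothetical sketch (kept out of the build with
 * #if 0) of how a board-specific driver such as ne.c might wire up the hooks
 * described above before registering the device.  The my_* functions and the
 * MY_* page constants are placeholders, not part of any real driver.
 */
#if 0
static int my_board_setup(struct net_device *dev)
{
	struct ei_device *ei_local = netdev_priv(dev);

	ei_local->reset_8390   = my_reset_8390;		/* hard-reset the 8390 */
	ei_local->block_output = my_block_output;	/* copy a Tx frame to the card */
	ei_local->block_input  = my_block_input;	/* copy an Rx frame into an skb */
	ei_local->get_8390_hdr = my_get_8390_hdr;	/* read the 4-byte ring header */

	/* Board-specific 256-byte page layout of the on-card packet buffer. */
	ei_local->tx_start_page = MY_TX_START_PG;
	ei_local->rx_start_page = MY_RX_START_PG;
	ei_local->stop_page     = MY_STOP_PG;

	return register_netdev(dev);
}
#endif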
97 #define ei_reset_8390 (ei_local->reset_8390)
98 #define ei_block_output (ei_local->block_output)
99 #define ei_block_input (ei_local->block_input)
100 #define ei_get_8390_hdr (ei_local->get_8390_hdr)
101 
102 /* Index to functions. */
103 static void ei_tx_intr(struct net_device *dev);
104 static void ei_tx_err(struct net_device *dev);
105 static void ei_receive(struct net_device *dev);
106 static void ei_rx_overrun(struct net_device *dev);
107 
108 /* Routines generic to NS8390-based boards. */
109 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
110 								int start_page);
111 static void do_set_multicast_list(struct net_device *dev);
112 static void __NS8390_init(struct net_device *dev, int startp);
113 
114 static unsigned version_printed;
115 static int msg_enable;
116 static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_RX_ERR |
117 				     NETIF_MSG_TX_ERR);
118 module_param(msg_enable, int, 0444);
119 MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
120 
121 /*
122  *	SMP and the 8390 setup.
123  *
124  *	The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
125  *	a page register that controls bank and packet buffer access. We guard
126  *	this with ei_local->page_lock. Nobody should assume or set the page other
127  *	than zero when the lock is not held. Lock holders must restore page 0
128  *	before unlocking. Even pure readers must take the lock so they can
129  *	rely on page 0 being selected.
130  *
131  *	To make life difficult the chip can also be very slow. We therefore can't
132  *	just use spinlocks. For the longer lockups we disable the irq the device
133  *	sits on and hold the lock. We must hold the lock because there is a dual
134  *	processor case other than interrupts (get stats/set multicast list in
135  *	parallel with each other and transmit).
136  *
137  *	Note: in theory we can just disable the irq on the card _but_ there is
138  *	a latency on SMP irq delivery. So we can easily go "disable irq", "sync
139  *	irqs", enter lock, and still take a queued irq. So we waddle instead of flying.
140  *
141  *	Finally, by special arrangement for the purpose of being generally
142  *	annoying, the transmit function is called bh atomic. That places
143  *	restrictions on the user-context callers, as disable_irq won't save
144  *	them.
145  *
146  *	Additional explanation of problems with locking by Alan Cox:
147  *
148  *	"The author (me) didn't use spin_lock_irqsave because the slowness of the
149  *	card means that approach caused horrible problems like losing serial data
150  *	at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA
151  *	chips with FPGA front ends.
152  *
153  *	Ok the logic behind the 8390 is very simple:
154  *
155  *	Things to know
156  *		- IRQ delivery is asynchronous to the PCI bus
157  *		- Blocking the local CPU IRQ via spin locks was too slow
158  *		- The chip has register windows needing locking work
159  *
160  *	So the path was once (I say once as people appear to have changed it
161  *	in the meantime and it now looks rather bogus if the changes to use
162  *	disable_irq_nosync_irqsave are disabling the local IRQ)
163  *
164  *
165  *		Take the page lock
166  *		Mask the IRQ on chip
167  *		Disable the IRQ (but not mask locally- someone seems to have
168  *			broken this with the lock validator stuff)
169  *			[This must be _nosync as the page lock may otherwise
170  *				deadlock us]
171  *		Drop the page lock and turn IRQs back on
172  *
173  *		At this point an existing IRQ may still be running but we can't
174  *		get a new one
175  *
176  *		Take the lock (so we know the IRQ has terminated) but don't mask
177  *	the IRQs on the processor
178  *		Set irqlock [for debug]
179  *
180  *		Transmit (slow as ****)
181  *
182  *		re-enable the IRQ
183  *
184  *
185  *	We have to use disable_irq because otherwise you will get delayed
186  *	interrupts on the APIC bus deadlocking the transmit path.
187  *
188  *	Quite hairy but the chip simply wasn't designed for SMP and you can't
189  *	even ACK an interrupt without risking corrupting other parallel
190  *	activities on the chip." [lkml, 25 Jul 2007]
191  */
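/*
 * Editor's note: a condensed, illustrative restatement of the locking sequence
 * described above, as implemented by __ei_start_xmit() further down in this
 * file.  It is kept out of the build with #if 0 and adds no new driver code.
 */
#if 0
	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_outb_p(0x00, e8390_base + EN0_IMR);		/* mask the 8390's own IRQs */
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);	/* _nosync to avoid deadlock */
	spin_lock(&ei_local->page_lock);	/* any in-flight handler has now finished */
	ei_local->irqlock = 1;			/* debug marker: chip IRQs are off */

	/* ... slow programmed I/O to the card goes here ... */

	ei_local->irqlock = 0;
	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);	/* unmask chip IRQs */
	spin_unlock(&ei_local->page_lock);
	enable_irq_lockdep_irqrestore(dev->irq, &flags);
#endif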
192 
193 
194 
195 /**
196  * ei_open - Open/initialize the board.
197  * @dev: network device to initialize
198  *
199  * This routine goes all-out, setting everything
200  * up anew at each open, even though many of these registers should only
201  * need to be set once at boot.
202  */
203 static int __ei_open(struct net_device *dev)
204 {
205 	unsigned long flags;
206 	struct ei_device *ei_local = netdev_priv(dev);
207 
208 	if (dev->watchdog_timeo <= 0)
209 		dev->watchdog_timeo = TX_TIMEOUT;
210 
211 	/*
212 	 *	Grab the page lock so we own the register set, then call
213 	 *	the init function.
214 	 */
215 
216 	spin_lock_irqsave(&ei_local->page_lock, flags);
217 	__NS8390_init(dev, 1);
218 	/* Set the flag before we drop the lock. That way the IRQ arrives
219 	   after it's set and we get no silly warnings. */
220 	netif_start_queue(dev);
221 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
222 	ei_local->irqlock = 0;
223 	return 0;
224 }
225 
226 /**
227  * ei_close - shut down network device
228  * @dev: network device to close
229  *
230  * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done.
231  */
232 static int __ei_close(struct net_device *dev)
233 {
234 	struct ei_device *ei_local = netdev_priv(dev);
235 	unsigned long flags;
236 
237 	/*
238 	 *	Hold the page lock during close
239 	 */
240 
241 	spin_lock_irqsave(&ei_local->page_lock, flags);
242 	__NS8390_init(dev, 0);
243 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
244 	netif_stop_queue(dev);
245 	return 0;
246 }
247 
248 /**
249  * ei_tx_timeout - handle transmit time out condition
250  * @dev: network device which has apparently fallen asleep
251  *
252  * Called by kernel when device never acknowledges a transmit has
253  * completed (or failed) - i.e. never posted a Tx related interrupt.
254  */
255 
256 static void __ei_tx_timeout(struct net_device *dev, unsigned int txqueue)
257 {
258 	unsigned long e8390_base = dev->base_addr;
259 	struct ei_device *ei_local = netdev_priv(dev);
260 	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
261 	unsigned long flags;
262 
263 	dev->stats.tx_errors++;
264 
265 	spin_lock_irqsave(&ei_local->page_lock, flags);
266 	txsr = ei_inb(e8390_base+EN0_TSR);
267 	isr = ei_inb(e8390_base+EN0_ISR);
268 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
269 
270 	netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n",
271 		   (txsr & ENTSR_ABT) ? "excess collisions." :
272 		   (isr) ? "lost interrupt?" : "cable problem?",
273 		   txsr, isr, tickssofar);
274 
275 	if (!isr && !dev->stats.tx_packets) {
276 		/* The 8390 probably hasn't gotten on the cable yet. */
277 		ei_local->interface_num ^= 1;   /* Try a different xcvr.  */
278 	}
279 
280 	/* Ugly but a reset can be slow, yet must be protected */
281 
282 	disable_irq_nosync_lockdep(dev->irq);
283 	spin_lock(&ei_local->page_lock);
284 
285 	/* Try to restart the card.  Perhaps the user has fixed something. */
286 	ei_reset_8390(dev);
287 	__NS8390_init(dev, 1);
288 
289 	spin_unlock(&ei_local->page_lock);
290 	enable_irq_lockdep(dev->irq);
291 	netif_wake_queue(dev);
292 }
293 
294 /**
295  * ei_start_xmit - begin packet transmission
296  * @skb: packet to be sent
297  * @dev: network device to which packet is sent
298  *
299  * Sends a packet to an 8390 network device.
300  */
301 
302 static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
303 				   struct net_device *dev)
304 {
305 	unsigned long e8390_base = dev->base_addr;
306 	struct ei_device *ei_local = netdev_priv(dev);
307 	int send_length = skb->len, output_page;
308 	unsigned long flags;
309 	char buf[ETH_ZLEN];
310 	char *data = skb->data;
311 
312 	if (skb->len < ETH_ZLEN) {
313 		memset(buf, 0, ETH_ZLEN);	/* more efficient than doing just the needed bits */
314 		memcpy(buf, data, skb->len);
315 		send_length = ETH_ZLEN;
316 		data = buf;
317 	}
318 
319 	/* Mask interrupts from the ethercard.
320 	   SMP: We have to grab the lock here, otherwise the IRQ handler on
321 	   another CPU can flip the register window and race the IRQ mask set;
322 	   we would end up trashing the mcast filter instead of disabling irqs. */
323 
324 	spin_lock_irqsave(&ei_local->page_lock, flags);
325 	ei_outb_p(0x00, e8390_base + EN0_IMR);
326 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
327 
328 
329 	/*
330 	 *	Slow phase with lock held.
331 	 */
332 
333 	disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
334 
335 	spin_lock(&ei_local->page_lock);
336 
337 	ei_local->irqlock = 1;
338 
339 	/*
340 	 * We have two Tx slots available for use. Find the first free
341 	 * slot, and then perform some sanity checks. With two Tx bufs,
342 	 * you get very close to transmitting back-to-back packets. With
343 	 * only one Tx buf, the transmitter sits idle while you reload the
344 	 * card, leaving a substantial gap between each transmitted packet.
345 	 */
346 
347 	if (ei_local->tx1 == 0) {
348 		output_page = ei_local->tx_start_page;
349 		ei_local->tx1 = send_length;
350 		if ((netif_msg_tx_queued(ei_local)) &&
351 		    ei_local->tx2 > 0)
352 			netdev_dbg(dev,
353 				   "idle transmitter tx2=%d, lasttx=%d, txing=%d\n",
354 				   ei_local->tx2, ei_local->lasttx, ei_local->txing);
355 	} else if (ei_local->tx2 == 0) {
356 		output_page = ei_local->tx_start_page + TX_PAGES/2;
357 		ei_local->tx2 = send_length;
358 		if ((netif_msg_tx_queued(ei_local)) &&
359 		    ei_local->tx1 > 0)
360 			netdev_dbg(dev,
361 				   "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n",
362 				   ei_local->tx1, ei_local->lasttx, ei_local->txing);
363 	} else {			/* We should never get here. */
364 		netif_dbg(ei_local, tx_err, dev,
365 			  "No Tx buffers free! tx1=%d tx2=%d last=%d\n",
366 			  ei_local->tx1, ei_local->tx2, ei_local->lasttx);
367 		ei_local->irqlock = 0;
368 		netif_stop_queue(dev);
369 		ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
370 		spin_unlock(&ei_local->page_lock);
371 		enable_irq_lockdep_irqrestore(dev->irq, &flags);
372 		dev->stats.tx_errors++;
373 		return NETDEV_TX_BUSY;
374 	}
375 
376 	/*
377 	 * Okay, now upload the packet and trigger a send if the transmitter
378 	 * isn't already sending. If it is busy, the interrupt handler will
379 	 * trigger the send later, upon receiving a Tx done interrupt.
380 	 */
381 
382 	ei_block_output(dev, send_length, data, output_page);
383 
384 	if (!ei_local->txing) {
385 		ei_local->txing = 1;
386 		NS8390_trigger_send(dev, send_length, output_page);
387 		if (output_page == ei_local->tx_start_page) {
388 			ei_local->tx1 = -1;
389 			ei_local->lasttx = -1;
390 		} else {
391 			ei_local->tx2 = -1;
392 			ei_local->lasttx = -2;
393 		}
394 	} else
395 		ei_local->txqueue++;
396 
397 	if (ei_local->tx1 && ei_local->tx2)
398 		netif_stop_queue(dev);
399 	else
400 		netif_start_queue(dev);
401 
402 	/* Turn 8390 interrupts back on. */
403 	ei_local->irqlock = 0;
404 	ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR);
405 
406 	spin_unlock(&ei_local->page_lock);
407 	enable_irq_lockdep_irqrestore(dev->irq, &flags);
408 	skb_tx_timestamp(skb);
409 	dev_consume_skb_any(skb);
410 	dev->stats.tx_bytes += send_length;
411 
412 	return NETDEV_TX_OK;
413 }
414 
415 /**
416  * ei_interrupt - handle the interrupts from an 8390
417  * @irq: interrupt number
418  * @dev_id: a pointer to the net_device
419  *
420  * Handle the ether interface interrupts. We pull packets from
421  * the 8390 via the card specific functions and fire them at the networking
422  * stack. We also handle transmit completions and wake the transmit path if
423  * necessary. We also update the counters and do other housekeeping as
424  * needed.
425  */
426 
427 static irqreturn_t __ei_interrupt(int irq, void *dev_id)
428 {
429 	struct net_device *dev = dev_id;
430 	unsigned long e8390_base = dev->base_addr;
431 	int interrupts, nr_serviced = 0;
432 	struct ei_device *ei_local = netdev_priv(dev);
433 
434 	/*
435 	 *	Protect the irq test too.
436 	 */
437 
438 	spin_lock(&ei_local->page_lock);
439 
440 	if (ei_local->irqlock) {
441 		/*
442 		 * This might just be an interrupt for a PCI device sharing
443 		 * this line
444 		 */
445 		netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n",
446 			   ei_inb_p(e8390_base + EN0_ISR),
447 			   ei_inb_p(e8390_base + EN0_IMR));
448 		spin_unlock(&ei_local->page_lock);
449 		return IRQ_NONE;
450 	}
451 
452 	/* Change to page 0 and read the intr status reg. */
453 	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
454 	netif_dbg(ei_local, intr, dev, "interrupt(isr=%#2.2x)\n",
455 		  ei_inb_p(e8390_base + EN0_ISR));
456 
457 	/* !!Assumption!! -- we stay in page 0.	 Don't break this. */
458 	while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 &&
459 	       ++nr_serviced < MAX_SERVICE) {
460 		if (!netif_running(dev)) {
461 			netdev_warn(dev, "interrupt from stopped card\n");
462 			/* rmk - acknowledge the interrupts */
463 			ei_outb_p(interrupts, e8390_base + EN0_ISR);
464 			interrupts = 0;
465 			break;
466 		}
467 		if (interrupts & ENISR_OVER)
468 			ei_rx_overrun(dev);
469 		else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) {
470 			/* Got a good (?) packet. */
471 			ei_receive(dev);
472 		}
473 		/* Push the next to-transmit packet through. */
474 		if (interrupts & ENISR_TX)
475 			ei_tx_intr(dev);
476 		else if (interrupts & ENISR_TX_ERR)
477 			ei_tx_err(dev);
478 
479 		if (interrupts & ENISR_COUNTERS) {
480 			dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0);
481 			dev->stats.rx_crc_errors   += ei_inb_p(e8390_base + EN0_COUNTER1);
482 			dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2);
483 			ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */
484 		}
485 
486 		/* Ignore any RDC interrupts that make it back to here. */
487 		if (interrupts & ENISR_RDC)
488 			ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR);
489 
490 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
491 	}
492 
493 	if (interrupts && (netif_msg_intr(ei_local))) {
494 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD);
495 		if (nr_serviced >= MAX_SERVICE) {
496 			/* 0xFF is valid for a card removal */
497 			if (interrupts != 0xFF)
498 				netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n",
499 					    interrupts);
500 			ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */
501 		} else {
502 			netdev_warn(dev, "unknown interrupt %#2x\n", interrupts);
503 			ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */
504 		}
505 	}
506 	spin_unlock(&ei_local->page_lock);
507 	return IRQ_RETVAL(nr_serviced > 0);
508 }
509 
510 #ifdef CONFIG_NET_POLL_CONTROLLER
511 static void __ei_poll(struct net_device *dev)
512 {
513 	disable_irq(dev->irq);
514 	__ei_interrupt(dev->irq, dev);
515 	enable_irq(dev->irq);
516 }
517 #endif
518 
519 /**
520  * ei_tx_err - handle transmitter error
521  * @dev: network device which threw the exception
522  *
523  * A transmitter error has happened. Most likely excess collisions (which
524  * is a fairly normal condition). If the error is one where the Tx will
525  * have been aborted, we try and send another one right away, instead of
526  * letting the failed packet sit and collect dust in the Tx buffer. This
527  * is a much better solution as it avoids kernel based Tx timeouts, and
528  * an unnecessary card reset.
529  *
530  * Called with lock held.
531  */
532 
533 static void ei_tx_err(struct net_device *dev)
534 {
535 	unsigned long e8390_base = dev->base_addr;
536 	/* ei_local is used on some platforms via the EI_SHIFT macro */
537 	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
538 	unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR);
539 	unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU);
540 
541 #ifdef VERBOSE_ERROR_DUMP
542 	netdev_dbg(dev, "transmitter error (%#2x):", txsr);
543 	if (txsr & ENTSR_ABT)
544 		pr_cont(" excess-collisions ");
545 	if (txsr & ENTSR_ND)
546 		pr_cont(" non-deferral ");
547 	if (txsr & ENTSR_CRS)
548 		pr_cont(" lost-carrier ");
549 	if (txsr & ENTSR_FU)
550 		pr_cont(" FIFO-underrun ");
551 	if (txsr & ENTSR_CDH)
552 		pr_cont(" lost-heartbeat ");
553 	pr_cont("\n");
554 #endif
555 
556 	ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. */
557 
558 	if (tx_was_aborted)
559 		ei_tx_intr(dev);
560 	else {
561 		dev->stats.tx_errors++;
562 		if (txsr & ENTSR_CRS)
563 			dev->stats.tx_carrier_errors++;
564 		if (txsr & ENTSR_CDH)
565 			dev->stats.tx_heartbeat_errors++;
566 		if (txsr & ENTSR_OWC)
567 			dev->stats.tx_window_errors++;
568 	}
569 }
570 
571 /**
572  * ei_tx_intr - transmit interrupt handler
573  * @dev: network device for which tx intr is handled
574  *
575  * We have finished a transmit: check for errors and then trigger the next
576  * packet to be sent. Called with lock held.
577  */
578 
579 static void ei_tx_intr(struct net_device *dev)
580 {
581 	unsigned long e8390_base = dev->base_addr;
582 	struct ei_device *ei_local = netdev_priv(dev);
583 	int status = ei_inb(e8390_base + EN0_TSR);
584 
585 	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
586 
587 	/*
588 	 * There are two Tx buffers, see which one finished, and trigger
589 	 * the send of another one if it exists.
590 	 */
591 	ei_local->txqueue--;
592 
593 	if (ei_local->tx1 < 0) {
594 		if (ei_local->lasttx != 1 && ei_local->lasttx != -1)
595 			pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n",
596 			       ei_local->name, ei_local->lasttx, ei_local->tx1);
597 		ei_local->tx1 = 0;
598 		if (ei_local->tx2 > 0) {
599 			ei_local->txing = 1;
600 			NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
601 			netif_trans_update(dev);
602 			ei_local->tx2 = -1;
603 			ei_local->lasttx = 2;
604 		} else {
605 			ei_local->lasttx = 20;
606 			ei_local->txing = 0;
607 		}
608 	} else if (ei_local->tx2 < 0) {
609 		if (ei_local->lasttx != 2  &&  ei_local->lasttx != -2)
610 			pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
611 			       ei_local->name, ei_local->lasttx, ei_local->tx2);
612 		ei_local->tx2 = 0;
613 		if (ei_local->tx1 > 0) {
614 			ei_local->txing = 1;
615 			NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
616 			netif_trans_update(dev);
617 			ei_local->tx1 = -1;
618 			ei_local->lasttx = 1;
619 		} else {
620 			ei_local->lasttx = 10;
621 			ei_local->txing = 0;
622 		}
623 	} /* else
624 		netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
625 			    ei_local->lasttx);
626 */
627 
628 	/* Minimize Tx latency: update the statistics after we restart TXing. */
629 	if (status & ENTSR_COL)
630 		dev->stats.collisions++;
631 	if (status & ENTSR_PTX)
632 		dev->stats.tx_packets++;
633 	else {
634 		dev->stats.tx_errors++;
635 		if (status & ENTSR_ABT) {
636 			dev->stats.tx_aborted_errors++;
637 			dev->stats.collisions += 16;
638 		}
639 		if (status & ENTSR_CRS)
640 			dev->stats.tx_carrier_errors++;
641 		if (status & ENTSR_FU)
642 			dev->stats.tx_fifo_errors++;
643 		if (status & ENTSR_CDH)
644 			dev->stats.tx_heartbeat_errors++;
645 		if (status & ENTSR_OWC)
646 			dev->stats.tx_window_errors++;
647 	}
648 	netif_wake_queue(dev);
649 }
650 
651 /**
652  * ei_receive - receive some packets
653  * @dev: network device with which receive will be run
654  *
655  * We have a good packet(s), get it/them out of the buffers.
656  * Called with lock held.
657  */
658 
659 static void ei_receive(struct net_device *dev)
660 {
661 	unsigned long e8390_base = dev->base_addr;
662 	struct ei_device *ei_local = netdev_priv(dev);
663 	unsigned char rxing_page, this_frame, next_frame;
664 	unsigned short current_offset;
665 	int rx_pkt_count = 0;
666 	struct e8390_pkt_hdr rx_frame;
667 	int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page;
668 
669 	while (++rx_pkt_count < 10) {
670 		int pkt_len, pkt_stat;
671 
672 		/* Get the rx page (incoming packet pointer). */
673 		ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD);
674 		rxing_page = ei_inb_p(e8390_base + EN1_CURPAG);
675 		ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD);
676 
677 		/* Remove one frame from the ring.  Boundary is always a page behind. */
678 		this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1;
679 		if (this_frame >= ei_local->stop_page)
680 			this_frame = ei_local->rx_start_page;
681 
682 		/* Someday we'll omit the previous, iff we never get this message.
683 		   (There is at least one clone that is claimed to have a problem.)
684 
685 		   Keep quiet if it looks like a card removal. One problem here
686 		   is that some clones crash in roughly the same way.
687 		 */
688 		if ((netif_msg_rx_status(ei_local)) &&
689 		    this_frame != ei_local->current_page &&
690 		    (this_frame != 0x0 || rxing_page != 0xFF))
691 			netdev_err(dev,
692 				   "mismatched read page pointers %2x vs %2x\n",
693 				   this_frame, ei_local->current_page);
694 
695 		if (this_frame == rxing_page)	/* Read all the frames? */
696 			break;				/* Done for now */
697 
698 		current_offset = this_frame << 8;
699 		ei_get_8390_hdr(dev, &rx_frame, this_frame);
700 
701 		pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr);
702 		pkt_stat = rx_frame.status;
703 
704 		next_frame = this_frame + 1 + ((pkt_len+4)>>8);
705 
706 		/* Check for the bogosity warned about in the 3c503 book: the status
707 		   byte is never written.  This happened a lot during testing! This
708 		   code should be cleaned up someday. */
709 		if (rx_frame.next != next_frame &&
710 		    rx_frame.next != next_frame + 1 &&
711 		    rx_frame.next != next_frame - num_rx_pages &&
712 		    rx_frame.next != next_frame + 1 - num_rx_pages) {
713 			ei_local->current_page = rxing_page;
714 			ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY);
715 			dev->stats.rx_errors++;
716 			continue;
717 		}
718 
719 		if (pkt_len < 60  ||  pkt_len > 1518) {
720 			netif_dbg(ei_local, rx_status, dev,
721 				  "bogus packet size: %d, status=%#2x nxpg=%#2x\n",
722 				  rx_frame.count, rx_frame.status,
723 				  rx_frame.next);
724 			dev->stats.rx_errors++;
725 			dev->stats.rx_length_errors++;
726 		} else if ((pkt_stat & 0x0F) == ENRSR_RXOK) {
727 			struct sk_buff *skb;
728 
729 			skb = netdev_alloc_skb(dev, pkt_len + 2);
730 			if (skb == NULL) {
731 				netif_err(ei_local, rx_err, dev,
732 					  "Couldn't allocate a sk_buff of size %d\n",
733 					  pkt_len);
734 				dev->stats.rx_dropped++;
735 				break;
736 			} else {
737 				skb_reserve(skb, 2);	/* IP headers on 16 byte boundaries */
738 				skb_put(skb, pkt_len);	/* Make room */
739 				ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
740 				skb->protocol = eth_type_trans(skb, dev);
741 				if (!skb_defer_rx_timestamp(skb))
742 					netif_rx(skb);
743 				dev->stats.rx_packets++;
744 				dev->stats.rx_bytes += pkt_len;
745 				if (pkt_stat & ENRSR_PHY)
746 					dev->stats.multicast++;
747 			}
748 		} else {
749 			netif_err(ei_local, rx_err, dev,
750 				  "bogus packet: status=%#2x nxpg=%#2x size=%d\n",
751 				  rx_frame.status, rx_frame.next,
752 				  rx_frame.count);
753 			dev->stats.rx_errors++;
754 			/* NB: The NIC counts CRC, frame and missed errors. */
755 			if (pkt_stat & ENRSR_FO)
756 				dev->stats.rx_fifo_errors++;
757 		}
758 		next_frame = rx_frame.next;
759 
760 		/* This _should_ never happen: it's here for avoiding bad clones. */
761 		if (next_frame >= ei_local->stop_page) {
762 			netdev_notice(dev, "next frame inconsistency, %#2x\n",
763 				      next_frame);
764 			next_frame = ei_local->rx_start_page;
765 		}
766 		ei_local->current_page = next_frame;
767 		ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
768 	}
769 
770 	/* We used to also ack ENISR_OVER here, but that would sometimes mask
771 	   a real overrun, leaving the 8390 in a stopped state with the receiver off. */
772 	ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
773 }
774 
775 /**
776  * ei_rx_overrun - handle receiver overrun
777  * @dev: network device which threw exception
778  *
779  * We have a receiver overrun: we have to kick the 8390 to get it started
780  * again. Problem is that you have to kick it exactly as NS prescribes in
781  * the updated datasheets, or "the NIC may act in an unpredictable manner."
782  * This includes causing "the NIC to defer indefinitely when it is stopped
783  * on a busy network."  Ugh.
784  * Called with lock held. Don't call this with the interrupts off or your
785  * computer will hate you - it takes 10ms or so.
786  */
787 
788 static void ei_rx_overrun(struct net_device *dev)
789 {
790 	unsigned long e8390_base = dev->base_addr;
791 	unsigned char was_txing, must_resend = 0;
792 	/* ei_local is used on some platforms via the EI_SHIFT macro */
793 	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
794 
795 	/*
796 	 * Record whether a Tx was in progress and then issue the
797 	 * stop command.
798 	 */
799 	was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS;
800 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
801 
802 	netif_dbg(ei_local, rx_err, dev, "Receiver overrun\n");
803 	dev->stats.rx_over_errors++;
804 
805 	/*
806 	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
807 	 * Early datasheets said to poll the reset bit, but now they say that
808 	 * it "is not a reliable indicator and subsequently should be ignored."
809 	 * We wait at least 10ms.
810 	 */
811 
812 	mdelay(10);
813 
814 	/*
815 	 * Reset RBCR[01] back to zero as per magic incantation.
816 	 */
817 	ei_outb_p(0x00, e8390_base+EN0_RCNTLO);
818 	ei_outb_p(0x00, e8390_base+EN0_RCNTHI);
819 
820 	/*
821 	 * See if any Tx was interrupted or not. According to NS, this
822 	 * step is vital, and skipping it will cause no end of havoc.
823 	 */
824 
825 	if (was_txing) {
826 		unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR);
827 		if (!tx_completed)
828 			must_resend = 1;
829 	}
830 
831 	/*
832 	 * Have to enter loopback mode and then restart the NIC before
833 	 * you are allowed to slurp packets up off the ring.
834 	 */
835 	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR);
836 	ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);
837 
838 	/*
839 	 * Clear the Rx ring of all the debris, and ack the interrupt.
840 	 */
841 	ei_receive(dev);
842 	ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR);
843 
844 	/*
845 	 * Leave loopback mode, and resend any packet that got stopped.
846 	 */
847 	ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR);
848 	if (must_resend)
849 		ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD);
850 }
851 
852 /*
853  *	Collect the stats. This is called unlocked and from several contexts.
854  */
855 
856 static struct net_device_stats *__ei_get_stats(struct net_device *dev)
857 {
858 	unsigned long ioaddr = dev->base_addr;
859 	struct ei_device *ei_local = netdev_priv(dev);
860 	unsigned long flags;
861 
862 	/* If the card is stopped, just return the present stats. */
863 	if (!netif_running(dev))
864 		return &dev->stats;
865 
866 	spin_lock_irqsave(&ei_local->page_lock, flags);
867 	/* Read the counter registers, assuming we are in page 0. */
868 	dev->stats.rx_frame_errors  += ei_inb_p(ioaddr + EN0_COUNTER0);
869 	dev->stats.rx_crc_errors    += ei_inb_p(ioaddr + EN0_COUNTER1);
870 	dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2);
871 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
872 
873 	return &dev->stats;
874 }
875 
876 /*
877  * Form the 64 bit 8390 multicast table from the linked list of addresses
878  * associated with this dev structure.
879  */
880 
881 static inline void make_mc_bits(u8 *bits, struct net_device *dev)
882 {
883 	struct netdev_hw_addr *ha;
884 
885 	netdev_for_each_mc_addr(ha, dev) {
886 		u32 crc = ether_crc(ETH_ALEN, ha->addr);
887 		/*
888 		 * The 8390 uses the 6 most significant bits of the
889 		 * CRC to index the multicast table.
890 		 */
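		/* Worked example: crc = 0xB4000000 has top bits 101 101..., so
		   crc >> 29 == 5 and (crc >> 26) & 7 == 5, i.e. bits[5] |= 0x20. */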
891 		bits[crc>>29] |= (1<<((crc>>26)&7));
892 	}
893 }
894 
895 /**
896  * do_set_multicast_list - set/clear multicast filter
897  * @dev: net device for which multicast filter is adjusted
898  *
899  *	Set or clear the multicast filter for this adaptor. May be called
900  *	from a BH in 2.1.x. Must be called with lock held.
901  */
902 
903 static void do_set_multicast_list(struct net_device *dev)
904 {
905 	unsigned long e8390_base = dev->base_addr;
906 	int i;
907 	struct ei_device *ei_local = netdev_priv(dev);
908 
909 	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
910 		memset(ei_local->mcfilter, 0, 8);
911 		if (!netdev_mc_empty(dev))
912 			make_mc_bits(ei_local->mcfilter, dev);
913 	} else
914 		memset(ei_local->mcfilter, 0xFF, 8);	/* mcast set to accept-all */
915 
916 	/*
917 	 * DP8390 manuals don't specify any magic sequence for altering
918 	 * the multicast regs on an already running card. To be safe, we
919 	 * ensure multicast mode is off prior to loading up the new hash
920 	 * table. If this proves not to be enough, we can always resort
921 	 * to stopping the NIC, loading the table and then restarting.
922 	 *
923 	 * Bug Alert!  The MC regs on the SMC 83C690 (SMC Elite and SMC
924 	 * Elite16) appear to be write-only. The NS 8390 data sheet lists
925 	 * them as r/w so this is a bug.  The SMC 83C790 (SMC Ultra and
926 	 * Ultra32 EISA) appears to have this bug fixed.
927 	 */
928 
929 	if (netif_running(dev))
930 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
931 	ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
932 	for (i = 0; i < 8; i++) {
933 		ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));
934 #ifndef BUG_83C690
935 		if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i])
936 			netdev_err(dev, "Multicast filter read/write mismatch %d\n",
937 				   i);
938 #endif
939 	}
940 	ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD);
941 
942 	if (dev->flags&IFF_PROMISC)
943 		ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
944 	else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
945 		ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
946 	else
947 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
948 }
949 
950 /*
951  *	Called without lock held. This is invoked from user context and may
952  *	be parallel to just about everything else. It's also fairly quick and
953  *	not called too often. We must protect against both bh and irq users.
954  */
955 
956 static void __ei_set_multicast_list(struct net_device *dev)
957 {
958 	unsigned long flags;
959 	struct ei_device *ei_local = netdev_priv(dev);
960 
961 	spin_lock_irqsave(&ei_local->page_lock, flags);
962 	do_set_multicast_list(dev);
963 	spin_unlock_irqrestore(&ei_local->page_lock, flags);
964 }
965 
966 /**
967  * ethdev_setup - init rest of 8390 device struct
968  * @dev: network device structure to init
969  *
970  * Initialize the rest of the 8390 device structure.  Do NOT __init
971  * this, as it is used by 8390 based modular drivers too.
972  */
973 
974 static void ethdev_setup(struct net_device *dev)
975 {
976 	struct ei_device *ei_local = netdev_priv(dev);
977 
978 	ether_setup(dev);
979 
980 	spin_lock_init(&ei_local->page_lock);
981 
982 	ei_local->msg_enable = netif_msg_init(msg_enable, default_msg_level);
983 
984 	if (netif_msg_drv(ei_local) && (version_printed++ == 0))
985 		pr_info("%s", version);
986 }
987 
988 /**
989  * alloc_ei_netdev - alloc_etherdev counterpart for 8390
990  * @size: extra bytes to allocate
991  *
992  * Allocate 8390-specific net_device.
993  */
994 static struct net_device *____alloc_ei_netdev(int size)
995 {
996 	return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
997 			    NET_NAME_UNKNOWN, ethdev_setup);
998 }
999 
1000 
1001 
1002 
1003 /* This page of functions should be 8390 generic */
1004 /* Follow National Semi's recommendations for initializing the "NIC". */
1005 
1006 /**
1007  * NS8390_init - initialize 8390 hardware
1008  * @dev: network device to initialize
1009  * @startp: boolean; non-zero value to initiate chip processing
1010  *
1011  *	Must be called with lock held.
1012  */
1013 
1014 static void __NS8390_init(struct net_device *dev, int startp)
1015 {
1016 	unsigned long e8390_base = dev->base_addr;
1017 	struct ei_device *ei_local = netdev_priv(dev);
1018 	int i;
1019 	int endcfg = ei_local->word16
1020 	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
1021 	    : 0x48;
1022 
1023 	BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
1024 	/* Follow National Semi's recommendations for initing the DP83902. */
1025 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
1026 	ei_outb_p(endcfg, e8390_base + EN0_DCFG);	/* 0x48 or 0x49 */
1027 	/* Clear the remote byte count registers. */
1028 	ei_outb_p(0x00,  e8390_base + EN0_RCNTLO);
1029 	ei_outb_p(0x00,  e8390_base + EN0_RCNTHI);
1030 	/* Set to monitor and loopback mode -- this is vital! */
1031 	ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */
1032 	ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */
1033 	/* Set the transmit page and receive ring. */
1034 	ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR);
1035 	ei_local->tx1 = ei_local->tx2 = 0;
1036 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG);
1037 	ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY);	/* 3c503 says 0x3f, NS 0x26 */
1038 	ei_local->current_page = ei_local->rx_start_page;		/* assert boundary+1 */
1039 	ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG);
1040 	/* Clear the pending interrupts and mask. */
1041 	ei_outb_p(0xFF, e8390_base + EN0_ISR);
1042 	ei_outb_p(0x00,  e8390_base + EN0_IMR);
1043 
1044 	/* Copy the station address into the DS8390 registers. */
1045 
1046 	ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */
1047 	for (i = 0; i < 6; i++) {
1048 		ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i));
1049 		if ((netif_msg_probe(ei_local)) &&
1050 		    ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i])
1051 			netdev_err(dev,
1052 				   "Hw. address read/write mismatch %d\n", i);
1053 	}
1054 
1055 	ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
1056 	ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
1057 
1058 	ei_local->tx1 = ei_local->tx2 = 0;
1059 	ei_local->txing = 0;
1060 
1061 	if (startp) {
1062 		ei_outb_p(0xff,  e8390_base + EN0_ISR);
1063 		ei_outb_p(ENISR_ALL,  e8390_base + EN0_IMR);
1064 		ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
1065 		ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */
1066 		/* 3c503 TechMan says rxconfig only after the NIC is started. */
1067 		ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on,  */
1068 		do_set_multicast_list(dev);	/* (re)load the mcast table */
1069 	}
1070 }
1071 
1072 /* Trigger a transmit start, assuming the length is valid.
1073    Always called with the page lock held */
1074 
1075 static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1076 								int start_page)
1077 {
1078 	unsigned long e8390_base = dev->base_addr;
1079 	struct ei_device *ei_local __maybe_unused = netdev_priv(dev);
1080 
1081 	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1082 
1083 	if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) {
1084 		netdev_warn(dev, "trigger_send() called with the transmitter busy\n");
1085 		return;
1086 	}
1087 	ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO);
1088 	ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI);
1089 	ei_outb_p(start_page, e8390_base + EN0_TPSR);
1090 	ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD);
1091 }
1092