/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
/*
	Written/copyright 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the SMC83c170/175 "EPIC" series, as used on the
	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Information and updates available at
	http://www.scyld.com/network/epic100.html
	[this link no longer provides anything useful -jgarzik]

	---------------------------------------------------------------------

*/

#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
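/* For example, loading the module with rx_copybreak=200 (a hypothetical
   value) makes epic_rx() copy any received frame shorter than 200 bytes
   into a freshly allocated, minimally-sized skbuff and leave the original
   ring buffer in place for reuse. */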

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	256
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct epic_rx_desc))
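
/* With TX_RING_SIZE = 256, for instance, "ep->cur_tx % TX_RING_SIZE" in
   epic_start_xmit() can be compiled down to "ep->cur_tx & 255", i.e. a
   single AND instead of a division. */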

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer. */

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1		/* 0-3: 0==32, 1==64, 2==96, 3==128 bytes */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
static char version2[] __devinitdata =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
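
/* Example invocation (hypothetical values): "modprobe epic100 debug=3
   full_duplex=1,1" raises the message level and forces full duplex on the
   first two cards; see the option/duplex handling in epic_init_one(). */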

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the SMC "EPIC/100", the SMC
single-chip Ethernet controllers for PCI.  This chip is used on
the SMC EtherPower II boards.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

IVb. References

http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html

IVc. Errata

*/


enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1
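
/* When USE_IO_OPS is not defined, the port-I/O accessors (inb/outb and
   friends) are remapped below to their MMIO counterparts (readb/writeb etc.)
   and epic_init_one() ioremaps BAR 1 instead of using the BAR 0 I/O ports. */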

typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int drv_flags;				/* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};


static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);


#ifndef USE_IO_OPS
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
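
/* media2miictl is indexed by the low four bits of dev->if_port and yields a
   MII BMCR value to force (see epic_open()).  Read against the standard BMCR
   bit layout (an interpretation, not taken from the SMSC datasheet): 0x2000
   is BMCR_SPEED100, 0x0100 is BMCR_FULLDPLX, 0x2100 is both, and 0x0C00 is
   BMCR_PDOWN | BMCR_ISOLATE for the non-MII 10base2/AUI ports. */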

/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,
};
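
/* Ownership protocol, in brief: the driver fills in bufaddr and buflength
   and sets DescOwn as the *last* write (see the write-order caution in
   epic_start_xmit()); the chip clears DescOwn in txstatus/rxstatus when it
   is finished, handing the descriptor back to the driver. */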

#define PRIV_ALIGN	15 	/* Required alignment mask */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int reschedule_in_poll;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;				/* NWay media advertisement */
	int mii_phy_cnt;
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;			/* Last dev->if_port value. */
};

static int epic_open(struct net_device *dev);
static int read_eeprom(long ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
				   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout		= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit epic_init_one (struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int card_idx = -1;
	long ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(KERN_INFO "%s%s", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef USE_IO_OPS
	ioaddr = pci_resource_start (pdev, 0);
#else
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) pci_ioremap_bar(pdev, 1);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}
#endif

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	dev->base_addr = ioaddr;
	dev->irq = irq;

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	outl(0x4200, ioaddr + GENCTL);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Turn on the MII transceiver. */
	outl(0x12, ioaddr + MIICfg);
	if (chip_idx == 1)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	outl(0x0200, ioaddr + GENCTL);

	/* Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));

	if (debug > 2) {
		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ioaddr, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
	outl(0x0008, ioaddr + GENCTL);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
	       dev->dev_addr);

out:
	return ret;

err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
#ifndef USE_IO_OPS
	iounmap((void *)ioaddr);
err_out_free_netdev:
#endif
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	inl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
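
/* A note on framing (inferred from read_eeprom() below): each command is
   shifted out as 13 bits, MSB first -- the always-set start bit, a 2-bit
   opcode and the word address -- after which 16 data bits are clocked in. */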

static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(0x00000000, ioaddr + INTMASK);
}

static inline void __epic_pci_commit(long ioaddr)
{
#ifndef USE_IO_OPS
	inl(ioaddr + INTMASK);
#endif
}

static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
	__epic_pci_commit(ioaddr);
}

static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	/* No need to commit possible posted write */
	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
}

static int __devinit read_eeprom(long ioaddr, int location)
{
	int i;
	int retval = 0;
	long ee_addr = ioaddr + EECTL;
	int read_cmd = location |
		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	outl(EE_ENB & ~EE_CS, ee_addr);
	outl(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		outl(EE_ENB | dataval, ee_addr);
		eeprom_delay();
		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
	}
	outl(EE_ENB, ee_addr);

	for (i = 16; i > 0; i--) {
		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		outl(EE_ENB, ee_addr);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}

#define MII_READOP		1
#define MII_WRITEOP		2
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	outl(read_cmd, ioaddr + MIICtrl);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6 &&
			    inw(ioaddr + MIIData) == 0xffff) {
				outl(read_cmd, ioaddr + MIICtrl);
				continue;
			}
			return inw(ioaddr + MIIData);
		}
	}
	return 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	long ioaddr = dev->base_addr;
	int i;

	outw(value, ioaddr + MIIData);
	outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
}


static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;
	int retval;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	napi_enable(&ep->napi);
	if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
		napi_disable(&ep->napi);
		return retval;
	}

	epic_init_ring(dev);

	outl(0x4000, ioaddr + GENCTL);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	for (i = 0; i < 3; i++)
		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);

	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
					   "status %4.4x.\n",
					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
					   " register read of %4.4x.\n", dev->name,
					   ep->mii.full_duplex ? "full" : "half",
					   ep->phys[0], mii_lpa);
		}
	}

	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	if (debug > 1)
		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
			   "%s-duplex.\n",
			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
			   ep->mii.full_duplex ? "full" : "half");

	/* Set the timer to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return 0;
}

/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0x00000000, ioaddr + INTMASK);
	/* Stop the chip's Tx and Rx DMA processes. */
	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);

	/* Update the error counts. */
	if (inw(ioaddr + COMMAND) != 0xffff) {
		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}

static void epic_restart(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	int i;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

#ifdef __BIG_ENDIAN
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	for (i = 0; i < 3; i++)
		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);
	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
		   " interrupt %4.4x.\n",
		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
		   (int)inl(ioaddr + INTSTAT));
}

static void check_media(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
			   " partner capability of %4.4x.\n", dev->name,
			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	}
}

static void epic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
			   dev->name, (int)inl(ioaddr + TxSTAT));
		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
			   "IntStatus %4.4x RxStatus %4.4x.\n",
			   dev->name, (int)inl(ioaddr + INTMASK),
			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
	}

	check_media(dev);

	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}

static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
			   "Tx status %4.4x.\n",
			   dev->name, (int)inw(ioaddr + TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
				   dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		outl(RestartTx, ioaddr + COMMAND);
	} else {
		epic_restart(dev);
		outl(TxQueued, dev->base_addr + COMMAND);
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}

static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
						    skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return NETDEV_TX_OK;
}

static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &dev->stats;

#ifndef final_version
	/* There was a major error, log it. */
	if (debug > 1)
		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
		       dev->name, status);
#endif
	stats->tx_errors++;
	if (status & 0x1050)
		stats->tx_aborted_errors++;
	if (status & 0x0008)
		stats->tx_carrier_errors++;
	if (status & 0x0040)
		stats->tx_window_errors++;
	if (status & 0x0010)
		stats->tx_fifo_errors++;
}

static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	unsigned int handled = 0;
	int status;

	status = inl(ioaddr + INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	outl(status & EpicNormalEvent, ioaddr + INTSTAT);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				   "intstat=%#8.8x.\n", dev->name, status,
				   (int)inl(ioaddr + INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			dev->stats.tx_fifo_errors++;
			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
			/* Restart the transmit process. */
			outl(RestartTx, ioaddr + COMMAND);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
					 dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		outl(status & 0x7f18, ioaddr + INTSTAT);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
				   dev->name, status);
	}

	return IRQ_RETVAL(handled);
}

static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}

static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;
	int status;

	status = inl(ioaddr + INTSTAT);

	if (status == EpicRemoved)
		return;
	if (status & RxOverflow) 	/* Missed a Rx frame. */
		dev->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		outw(RxQueued, ioaddr + COMMAND);
}

static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	int work_done = 0;
	long ioaddr = dev->base_addr;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			__napi_complete(napi);
			outl(EpicNapiEvent, ioaddr + INTSTAT);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return work_done;
}

static int epic_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, (int)inl(ioaddr + INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);

	return 0;
}

static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	if (netif_running(dev)) {
		/* Update the error counts. */
		dev->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		dev->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	return &dev->stats;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling ep->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

static void set_rx_mode(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	unsigned char mc_filter[8];		 /* Multicast hash filter */
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		outl(0x002C, ioaddr + RxCtrl);
		/* Unconditionally log net taps. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
		/* There is apparently a chip bug, so the multicast filter
		   is never enabled. */
		/* Too many to filter perfectly -- accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		outl(0x000C, ioaddr + RxCtrl);
	} else if (netdev_mc_empty(dev)) {
		outl(0x0004, ioaddr + RxCtrl);
		return;
	} else {					/* Never executed, for now. */
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			unsigned int bit_nr =
				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit_nr >> 3] |= (1 << (bit_nr & 7));
		}
	}
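
	/* Example (hypothetical hash value): a 6-bit CRC of 0x2a selects
	   byte 0x2a >> 3 == 5 and bit 0x2a & 7 == 2 of mc_filter. */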
	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
		for (i = 0; i < 4; i++)
			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
	}
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct epic_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct epic_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct epic_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static int ethtool_begin(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}
	return 0;
}

static void ethtool_complete(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
	return rc;
}


static void __devexit epic_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);

	unregister_netdev(dev);		/* must precede freeing the rings */
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
#ifndef USE_IO_OPS
	iounmap((void*) dev->base_addr);
#endif
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	/* pci_power_off(pdev, -1); */
}


#ifdef CONFIG_PM

static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
	/* pci_power_off(pdev, -1); */
	return 0;
}


static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}

#endif /* CONFIG_PM */


static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};


static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk (KERN_INFO "%s%s",
		version, version2);
#endif

	return pci_register_driver(&epic_driver);
}


static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}


module_init(epic_init);
module_exit(epic_cleanup);