/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
/*
	Written/copyright 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the SMC83c170/175 "EPIC" series, as used on the
	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Information and updates available at
	http://www.scyld.com/network/epic100.html
	[this link no longer provides anything useful -jgarzik]

	---------------------------------------------------------------------

*/

#define DRV_NAME        "epic100"
#define DRV_VERSION     "2.1"
#define DRV_RELDATE     "Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
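
/* A hypothetical usage example (parameter values are illustrative only):
 * load the module so that received frames shorter than 200 bytes are
 * copied into a freshly allocated skbuff, with verbose logging enabled:
 *
 *	modprobe epic100 rx_copybreak=200 debug=3
 */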

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240		/* Limit ring entries actually used.  */
#define RX_RING_SIZE	256
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct epic_rx_desc))
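
/* Because the ring sizes above are powers of two, the modulo arithmetic
 * used throughout this driver reduces to a simple mask; e.g. in
 * epic_start_xmit() the expression
 *
 *	entry = ep->cur_tx % TX_RING_SIZE;
 *
 * compiles down to ep->cur_tx & (TX_RING_SIZE - 1).
 */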

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1		/* 0-3 for an Rx FIFO threshold of 32, 64, 96, or 128 bytes. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
static char version2[] __devinitdata =
"  (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the SMC "EPIC/100", the SMC
single-chip Ethernet controllers for PCI.  This chip is used on
the SMC EtherPower II boards.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

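Both the Rx and Tx rings are fixed-size arrays of chained descriptors
(struct epic_rx_desc / struct epic_tx_desc) allocated once with
pci_alloc_consistent().  Each descriptor's 'next' field holds the bus
address of its successor, and the last entry points back at the ring
base so the chip walks the ring endlessly.  Buffers are handed to the
chip by setting DescOwn in the status word and reclaimed once the chip
clears it; see epic_init_ring(), epic_rx() and epic_tx() below.
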
IIIb. References

http://www.smsc.com/main/tools/discontinued/83c171.pdf
http://www.smsc.com/main/tools/discontinued/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html

IIIc. Errata

*/


enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int drv_flags;				/* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170",	TYPE2_INTR },
	{ "SMSC EPIC/C 83c175",		TYPE2_INTR | MII_PWRDWN },
};


static struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE (pci, epic_pci_tbl);


#ifndef USE_IO_OPS
#undef inb
#undef inw
#undef inl
#undef outb
#undef outw
#undef outl
#define inb readb
#define inw readw
#define inl readl
#define outb writeb
#define outw writew
#define outl writel
#endif

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
  COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
  PCIBurstCnt=0x18,
  TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
  MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
  LAN0=64,						/* MAC address. */
  MC0=80,						/* Multicast filter table. */
  RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
  PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)

static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000,  0x0100, 0x2100, 0, 0,
	0, 0, 0, 0,  0, 0, 0, 0 };
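
/* These values are written to the PHY's BMCR when a media type is forced
 * via dev->if_port (index = if_port & 15; see epic_open()).  Decoded
 * against the standard MII bit definitions they appear to be:
 * 0x2000 = BMCR_SPEED100, 0x0100 = BMCR_FULLDPLX, 0x2100 = 100/full, and
 * 0x0C00 = BMCR_PDOWN | BMCR_ISOLATE for the non-MII (10base2) ports. */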

/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef CONFIG_BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,
};

#define PRIV_ALIGN	15 	/* Required alignment mask */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;				/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int reschedule_in_poll;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;
	unsigned int rx_buf_sz;				/* Based on MTU+slack. */

	struct pci_dev *pci_dev;			/* PCI bus location. */
	int chip_id, chip_flags;

	struct net_device_stats stats;
	struct timer_list timer;			/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];				/* MII device addresses. */
	u16 advertising;					/* NWay media advertisement */
	int mii_phy_cnt;
	struct mii_if_info mii;
	unsigned int tx_full:1;				/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};

static int epic_open(struct net_device *dev);
static int read_eeprom(long ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(unsigned long data);
static void epic_tx_timeout(struct net_device *dev);
static void epic_init_ring(struct net_device *dev);
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout 	= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_do_ioctl 		= netdev_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit epic_init_one (struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int card_idx = -1;
	long ioaddr;
	int chip_idx = (int) ent->driver_data;
	int irq;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk (KERN_INFO "%s" KERN_INFO "%s",
			version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;
	irq = pdev->irq;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof (*ep));
	if (!dev) {
		dev_err(&pdev->dev, "no memory for eth device\n");
		goto err_out_free_res;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

#ifdef USE_IO_OPS
	ioaddr = pci_resource_start (pdev, 0);
#else
	ioaddr = pci_resource_start (pdev, 1);
	ioaddr = (long) pci_ioremap_bar(pdev, 1);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}
#endif

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = (struct epic_tx_desc *)ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = (struct epic_rx_desc *)ring_space;
	ep->rx_ring_dma = ring_dma;

	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0  &&  card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	dev->base_addr = ioaddr;
	dev->irq = irq;

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);
	ep->reschedule_in_poll = 0;

	/* Bring the chip out of low-power mode. */
	outl(0x4200, ioaddr + GENCTL);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Turn on the MII transceiver. */
	outl(0x12, ioaddr + MIICfg);
	if (chip_idx == 1)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	outl(0x0200, ioaddr + GENCTL);

	/* Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4));

	if (debug > 2) {
		dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			printk(" %4.4x%s", read_eeprom(ioaddr, i),
				   i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				   "partner %4.4x.\n",
				   ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if ( ! (ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
	outl(0x0008, ioaddr + GENCTL);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
	       dev->dev_addr);

out:
	return ret;

err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
err_out_iounmap:
#ifndef USE_IO_OPS
	iounmap(ioaddr);
err_out_free_netdev:
#endif
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS			0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0		0x01
#define EE_WRITE_1		0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB			(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	inl(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
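
/* For example, reading location 5 of a 64 x 16 part clocks out
 * EE_READ64_CMD | 5 == 0x185 MSB-first: the start bit, the "read"
 * opcode, then the six address bits (see read_eeprom() below). */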

static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(0x00000000, ioaddr + INTMASK);
}

static inline void __epic_pci_commit(long ioaddr)
{
#ifndef USE_IO_OPS
	inl(ioaddr + INTMASK);
#endif
}

static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK);
	__epic_pci_commit(ioaddr);
}

static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	long ioaddr = dev->base_addr;

	/* No need to commit possible posted write */
	outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK);
}

static int __devinit read_eeprom(long ioaddr, int location)
{
	int i;
	int retval = 0;
	long ee_addr = ioaddr + EECTL;
	int read_cmd = location |
		(inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	outl(EE_ENB & ~EE_CS, ee_addr);
	outl(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		outl(EE_ENB | dataval, ee_addr);
		eeprom_delay();
		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
	}
	outl(EE_ENB, ee_addr);

	for (i = 16; i > 0; i--) {
		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
		eeprom_delay();
		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		outl(EE_ENB, ee_addr);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	outl(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}

#define MII_READOP		1
#define MII_WRITEOP		2
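
/* A sketch of the MIICtrl command word, as inferred from mdio_read() and
 * mdio_write() below: bits 9-13 carry the PHY address, bits 4-8 the
 * register number, and bits 0-1 the opcode, which the chip clears once
 * the MII cycle completes, e.g.:
 *
 *	outl((phy_id << 9) | (location << 4) | MII_READOP, ioaddr + MIICtrl);
 */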
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	long ioaddr = dev->base_addr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	outl(read_cmd, ioaddr + MIICtrl);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6
				&& inw(ioaddr + MIIData) == 0xffff) {
				outl(read_cmd, ioaddr + MIICtrl);
				continue;
			}
			return inw(ioaddr + MIIData);
		}
	}
	return 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	long ioaddr = dev->base_addr;
	int i;

	outw(value, ioaddr + MIIData);
	outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
	return;
}


static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;
	int retval;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	napi_enable(&ep->napi);
	if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) {
		napi_disable(&ep->napi);
		return retval;
	}

	epic_init_ring(dev);

	outl(0x4000, ioaddr + GENCTL);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef CONFIG_BIG_ENDIAN
	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	for (i = 0; i < 3; i++)
		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);

	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
					   "status %4.4x.\n",
					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
					   " register read of %4.4x.\n", dev->name,
					   ep->mii.full_duplex ? "full" : "half",
					   ep->phys[0], mii_lpa);
		}
	}

	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	if (debug > 1)
		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
			   "%s-duplex.\n",
			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
			   ep->mii.full_duplex ? "full" : "half");

	/* Set the timer to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = &epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return 0;
}

/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);

	netif_stop_queue (dev);

	/* Disable interrupts by clearing the interrupt mask. */
	outl(0x00000000, ioaddr + INTMASK);
	/* Stop the chip's Tx and Rx DMA processes. */
	outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);

	/* Update the error counts. */
	if (inw(ioaddr + COMMAND) != 0xffff) {
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}

static void epic_restart(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	int i;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

#ifdef CONFIG_BIG_ENDIAN
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

	for (i = 0; i < 3; i++)
		outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);
	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
		sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
		 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
		   " interrupt %4.4x.\n",
		   dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
		   (int)inl(ioaddr + INTSTAT));
	return;
}

static void check_media(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
			   " partner capability of %4.4x.\n", dev->name,
			   ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
		outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	}
}

static void epic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
			   dev->name, (int)inl(ioaddr + TxSTAT));
		printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
			   "IntStatus %4.4x RxStatus %4.4x.\n",
			   dev->name, (int)inl(ioaddr + INTMASK),
			   (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
	}

	check_media(dev);

	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}

static void epic_tx_timeout(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	if (debug > 0) {
		printk(KERN_WARNING "%s: Transmit timeout using MII device, "
			   "Tx status %4.4x.\n",
			   dev->name, (int)inw(ioaddr + TxSTAT));
		if (debug > 1) {
			printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
				   dev->name, ep->dirty_tx, ep->cur_tx);
		}
	}
	if (inw(ioaddr + TxSTAT) & 0x10) {		/* Tx FIFO underflow. */
		ep->stats.tx_fifo_errors++;
		outl(RestartTx, ioaddr + COMMAND);
	} else {
		epic_restart(dev);
		outl(TxQueued, dev->base_addr + COMMAND);
	}

	dev->trans_start = jiffies;
	ep->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
			(i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
	return;
}

static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
						    skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = 0x100000; /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000; /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000; /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000; /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	dev->trans_start = jiffies;
	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return 0;
}

static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &ep->stats;

#ifndef final_version
	/* There was a major error, log it. */
	if (debug > 1)
		printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
		       dev->name, status);
#endif
	stats->tx_errors++;
	if (status & 0x1050)
		stats->tx_aborted_errors++;
	if (status & 0x0008)
		stats->tx_carrier_errors++;
	if (status & 0x0040)
		stats->tx_window_errors++;
	if (status & 0x0010)
		stats->tx_fifo_errors++;
}

static void epic_tx(struct net_device *dev, struct epic_private *ep)
{
	unsigned int dirty_tx, cur_tx;

	/*
	 * Note: if this lock becomes a problem we can narrow the locked
	 * region at the cost of occasionally grabbing the lock more times.
	 */
	cur_tx = ep->cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		struct sk_buff *skb;
		int entry = dirty_tx % TX_RING_SIZE;
		int txstatus = ep->tx_ring[entry].txstatus;

		if (txstatus & DescOwn)
			break;	/* It still hasn't been Txed */

		if (likely(txstatus & 0x0001)) {
			ep->stats.collisions += (txstatus >> 8) & 15;
			ep->stats.tx_packets++;
			ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
		} else
			epic_tx_error(dev, ep, txstatus);

		/* Free the original skb. */
		skb = ep->tx_skbuff[entry];
		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);
		ep->tx_skbuff[entry] = NULL;
	}

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		printk(KERN_WARNING
		       "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
		       dev->name, dirty_tx, cur_tx, ep->tx_full);
		dirty_tx += TX_RING_SIZE;
	}
#endif
	ep->dirty_tx = dirty_tx;
	if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, allow new TX entries. */
		ep->tx_full = 0;
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	unsigned int handled = 0;
	int status;

	status = inl(ioaddr + INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	outl(status & EpicNormalEvent, ioaddr + INTSTAT);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
				   "intstat=%#8.8x.\n", dev->name, status,
				   (int)inl(ioaddr + INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
		spin_lock(&ep->napi_lock);
		if (netif_rx_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__netif_rx_schedule(&ep->napi);
		} else
			ep->reschedule_in_poll++;
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			ep->stats.tx_fifo_errors++;
			outl(ep->tx_threshold += 128, ioaddr + TxThresh);
			/* Restart the transmit process. */
			outl(RestartTx, ioaddr + COMMAND);
		}
		if (status & PCIBusErr170) {
			printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
					 dev->name, status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		outl(status & 0x7f18, ioaddr + INTSTAT);
	}

out:
	if (debug > 3) {
		printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
				   dev->name, status);
	}

	return IRQ_RETVAL(handled);
}

static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			printk(KERN_DEBUG "  epic_rx() status was %8.8x.\n", status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
					   dev->name, status);
			if (status & 0x2000) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, status %4.4x!\n", dev->name, status);
				ep->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				ep->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
					   "%d bytes.\n",
					   dev->name, status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(ep->pci_dev,
							    ep->rx_ring[entry].bufaddr,
							    ep->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
							       ep->rx_ring[entry].bufaddr,
							       ep->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(ep->pci_dev,
					ep->rx_ring[entry].bufaddr,
					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			ep->stats.rx_packets++;
			ep->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}

static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	long ioaddr = dev->base_addr;
	int status;

	status = inl(ioaddr + INTSTAT);

	if (status == EpicRemoved)
		return;
	if (status & RxOverflow) 	/* Missed a Rx frame. */
		ep->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		outw(RxQueued, ioaddr + COMMAND);
}

static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	int work_done = 0;
	long ioaddr = dev->base_addr;

rx_action:

	epic_tx(dev, ep);

	work_done += epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget) {
		unsigned long flags;
		int more;

		/* A bit baroque but it avoids a (space hungry) spin_unlock */

		spin_lock_irqsave(&ep->napi_lock, flags);

		more = ep->reschedule_in_poll;
		if (!more) {
			__netif_rx_complete(napi);
			outl(EpicNapiEvent, ioaddr + INTSTAT);
			epic_napi_irq_on(dev, ep);
		} else
			ep->reschedule_in_poll--;

		spin_unlock_irqrestore(&ep->napi_lock, flags);

		if (more)
			goto rx_action;
	}

	return work_done;
}

static int epic_close(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, (int)inl(ioaddr + INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(dev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
				 skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	outl(0x0008, ioaddr + GENCTL);

	return 0;
}

static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	if (netif_running(dev)) {
		/* Update the error counts. */
		ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
		ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
		ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
	}

	return &ep->stats;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling the local mc_filter array.  This is
   non-deterministic when re-entered but still correct. */

static void set_rx_mode(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct epic_private *ep = netdev_priv(dev);
	unsigned char mc_filter[8];		 /* Multicast hash filter */
	int i;

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		outl(0x002C, ioaddr + RxCtrl);
		/* Unconditionally log net taps. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if ((dev->mc_count > 0)  ||  (dev->flags & IFF_ALLMULTI)) {
		/* There is apparently a chip bug, so the multicast filter
		   is never enabled. */
		/* Too many to filter perfectly -- accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		outl(0x000C, ioaddr + RxCtrl);
	} else if (dev->mc_count == 0) {
		outl(0x0004, ioaddr + RxCtrl);
		return;
	} else {					/* Never executed, for now. */
		struct dev_mc_list *mclist;

		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			unsigned int bit_nr =
				ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 3] |= (1 << (bit_nr & 7));
		}
	}
	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
		for (i = 0; i < 4; i++)
			outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
	}
	return;
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct epic_private *np = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct epic_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct epic_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static int ethtool_begin(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}
	return 0;
}

static void ethtool_complete(struct net_device *dev)
{
	unsigned long ioaddr = dev->base_addr;
	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0200, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (! netif_running(dev)) {
		outl(0x0008, ioaddr + GENCTL);
		outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
	}
	return rc;
}


static void __devexit epic_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);

	unregister_netdev(dev);
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
#ifndef USE_IO_OPS
	iounmap((void*) dev->base_addr);
#endif
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	/* pci_power_off(pdev, -1); */
}


#ifdef CONFIG_PM

static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	long ioaddr = dev->base_addr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	outl(0x0008, ioaddr + GENCTL);
	/* pci_power_off(pdev, -1); */
	return 0;
}


static int epic_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}

#endif /* CONFIG_PM */


static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= __devexit_p(epic_remove_one),
#ifdef CONFIG_PM
	.suspend	= epic_suspend,
	.resume		= epic_resume,
#endif /* CONFIG_PM */
};


static int __init epic_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk (KERN_INFO "%s" KERN_INFO "%s",
		version, version2);
#endif

	return pci_register_driver(&epic_driver);
}


static void __exit epic_cleanup (void)
{
	pci_unregister_driver (&epic_driver);
}


module_init(epic_init);
module_exit(epic_cleanup);