1 /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
2 /*
3 	Written 1997-2001 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
13 	It also supports the Symbios Logic version of the same chip core.
14 
15 	The author may be reached as becker@scyld.com, or C/O
16 	Scyld Computing Corporation
17 	410 Severn Ave., Suite 210
18 	Annapolis MD 21403
19 
20 	Support and updates available at
21 	http://www.scyld.com/network/yellowfin.html
22 	[link no longer provides useful info -jgarzik]
23 
24 */
25 
26 #define DRV_NAME	"yellowfin"
27 #define DRV_VERSION	"2.1"
28 #define DRV_RELDATE	"Sep 11, 2006"
29 
30 #define PFX DRV_NAME ": "
31 
32 /* The user-configurable values.
33    These may be modified when a driver module is loaded.*/
34 
35 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
36 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
37 static int max_interrupt_work = 20;
38 static int mtu;
39 #ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
40 /* System-wide count of bogus-rx frames. */
41 static int bogus_rx;
42 static int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
43 static int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
44 #elif defined(YF_NEW)					/* A future perfect board :->.  */
45 static int dma_ctrl = 0x00CAC277;			/* Override when loading module! */
46 static int fifo_cfg = 0x0028;
47 #else
48 static const int dma_ctrl = 0x004A0263; 			/* Constrained by errata */
49 static const int fifo_cfg = 0x0020;				/* Bypass external Tx FIFO. */
50 #endif
51 
52 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
53    Setting to > 1514 effectively disables this feature. */
54 static int rx_copybreak;
55 
56 /* Used to pass the media type, etc.
57    No media types are currently defined.  These exist for driver
58    interoperability.
59 */
60 #define MAX_UNITS 8				/* More are supported, limit only on options */
61 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
62 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
63 
64 /* Do ugly workaround for GX server chipset errata. */
65 static int gx_fix;
66 
67 /* Operational parameters that are set at compile time. */
68 
69 /* Keep the ring sizes a power of two for efficiency.
70    Making the Tx ring too long decreases the effectiveness of channel
71    bonding and packet priority.
72    There are no ill effects from too-large receive rings. */
73 #define TX_RING_SIZE	16
74 #define TX_QUEUE_SIZE	12		/* Must be > 4 && <= TX_RING_SIZE */
75 #define RX_RING_SIZE	64
76 #define STATUS_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct tx_status_words)
77 #define TX_TOTAL_SIZE		2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
78 #define RX_TOTAL_SIZE		RX_RING_SIZE*sizeof(struct yellowfin_desc)
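/* Note: TX_TOTAL_SIZE covers two descriptors per Tx slot, since each queued
   frame may use a data descriptor plus a status-write descriptor (see
   yellowfin_init_ring() below). */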
79 
80 /* Operational parameters that usually are not changed. */
81 /* Time in jiffies before concluding the transmitter is hung. */
82 #define TX_TIMEOUT  (2*HZ)
83 #define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
84 
85 #define yellowfin_debug debug
86 
87 #include <linux/module.h>
88 #include <linux/kernel.h>
89 #include <linux/string.h>
90 #include <linux/timer.h>
91 #include <linux/errno.h>
92 #include <linux/ioport.h>
93 #include <linux/slab.h>
94 #include <linux/interrupt.h>
95 #include <linux/pci.h>
96 #include <linux/init.h>
97 #include <linux/mii.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/ethtool.h>
102 #include <linux/crc32.h>
103 #include <linux/bitops.h>
104 #include <asm/uaccess.h>
105 #include <asm/processor.h>		/* Processor type for cache alignment. */
106 #include <asm/unaligned.h>
107 #include <asm/io.h>
108 
109 /* These identify the driver base version and may not be removed. */
110 static char version[] __devinitdata =
111 KERN_INFO DRV_NAME ".c:v1.05  1/09/2001  Written by Donald Becker <becker@scyld.com>\n"
112 KERN_INFO "  (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
113 
114 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
115 MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
116 MODULE_LICENSE("GPL");
117 
118 module_param(max_interrupt_work, int, 0);
119 module_param(mtu, int, 0);
120 module_param(debug, int, 0);
121 module_param(rx_copybreak, int, 0);
122 module_param_array(options, int, NULL, 0);
123 module_param_array(full_duplex, int, NULL, 0);
124 module_param(gx_fix, int, 0);
125 MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
126 MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
127 MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
128 MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
129 MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
130 MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
131 MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
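/* Illustrative only: these parameters are normally given at module load time,
   e.g. "modprobe yellowfin debug=3 rx_copybreak=200 gx_fix=1".  The values
   shown here are examples, not recommendations. */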
132 
133 /*
134 				Theory of Operation
135 
136 I. Board Compatibility
137 
138 This device driver is designed for the Packet Engines "Yellowfin" Gigabit
139 Ethernet adapter.  The G-NIC 64-bit PCI card is supported, as well as the
140 Symbios 53C885E dual function chip.
141 
142 II. Board-specific settings
143 
144 PCI bus devices are configured by the system at boot time, so no jumpers
145 need to be set on the board.  The system BIOS preferably should assign the
146 PCI INTA signal to an otherwise unused system IRQ line.
147 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
148 interrupt lines.
149 
150 III. Driver operation
151 
152 IIIa. Ring buffers
153 
154 The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
155 This is a descriptor list scheme similar to that used by the EEPro100 and
156 Tulip.  This driver uses two statically allocated fixed-size descriptor lists
157 formed into rings by a branch from the final descriptor to the beginning of
158 the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
159 
160 The driver allocates full frame size skbuffs for the Rx ring buffers at
161 open() time and passes the skb->data field to the Yellowfin as receive data
162 buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
163 a fresh skbuff is allocated and the frame is copied to the new skbuff.
164 When the incoming frame is larger, the skbuff is passed directly up the
165 protocol stack and replaced by a newly allocated skbuff.
166 
167 The RX_COPYBREAK value is chosen to trade off the memory wasted by
168 using a full-sized skbuff for small frames vs. the copying costs of larger
169 frames.  For small frames the copying cost is negligible (esp. considering
170 that we are pre-loading the cache with immediately useful header
171 information).  For large frames the copying cost is non-trivial, and the
172 larger copy might flush the cache of useful data.
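As a concrete example: with rx_copybreak left at its default of 0, every frame
is handed up in the original full-sized skbuff, while a setting of, say, 256
would copy frames of 256 bytes or less into freshly allocated, tightly sized
skbuffs and leave the original receive buffer in the ring for reuse.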
173 
174 IIIC. Synchronization
175 
176 The driver runs as two independent, single-threaded flows of control.  One
177 is the send-packet routine, which enforces single-threaded use by the
178 dev->tbusy flag.  The other thread is the interrupt handler, which is single
179 threaded by the hardware and other software.
180 
181 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
182 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
183 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
184 the 'yp->tx_full' flag.
185 
186 The interrupt handler has exclusive control over the Rx ring and records stats
187 from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
188 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
189 clears both the tx_full and tbusy flags.
190 
191 IV. Notes
192 
193 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
194 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
195 and an AlphaStation to verify the Alpha port!
196 
197 IVb. References
198 
199 Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
200 Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
201    Data Manual v3.0
202 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
203 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
204 
205 IVc. Errata
206 
207 See Packet Engines confidential appendix (prototype chips only).
208 */
209 
210 
211 
212 enum capability_flags {
213 	HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
214 	HasMACAddrBug=32, /* Only on early revs.  */
215 	DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
216 };
217 
218 /* The PCI I/O space extent. */
219 enum {
220 	YELLOWFIN_SIZE	= 0x100,
221 };
222 
223 struct pci_id_info {
224         const char *name;
225         struct match_info {
226                 int     pci, pci_mask, subsystem, subsystem_mask;
227                 int revision, revision_mask;                            /* Only 8 bits. */
228         } id;
229         int drv_flags;                          /* Driver use, intended as capability flags. */
230 };
231 
232 static const struct pci_id_info pci_id_tbl[] = {
233 	{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
234 	 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
235 	{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
236 	  HasMII | DontUseEeprom },
237 	{ }
238 };
239 
240 static const struct pci_device_id yellowfin_pci_tbl[] = {
241 	{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
242 	{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
243 	{ }
244 };
245 MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
246 
247 
248 /* Offsets to the Yellowfin registers.  Various sizes and alignments. */
249 enum yellowfin_offsets {
250 	TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
251 	TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
252 	RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
253 	RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
254 	EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
255 	ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
256 	Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
257 	MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
258 	MII_Status=0xAE,
259 	RxDepth=0xB8, FlowCtrl=0xBC,
260 	AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
261 	EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
262 	EEFeature=0xF5,
263 };
264 
265 /* The Yellowfin Rx and Tx buffer descriptors.
266    Elements are written as 32 bit for endian portability. */
267 struct yellowfin_desc {
268 	__le32 dbdma_cmd;
269 	__le32 addr;
270 	__le32 branch_addr;
271 	__le32 result_status;
272 };
273 
274 struct tx_status_words {
275 #ifdef __BIG_ENDIAN
276 	u16 tx_errs;
277 	u16 tx_cnt;
278 	u16 paused;
279 	u16 total_tx_cnt;
280 #else  /* Little endian chips. */
281 	u16 tx_cnt;
282 	u16 tx_errs;
283 	u16 total_tx_cnt;
284 	u16 paused;
285 #endif /* __BIG_ENDIAN */
286 };
287 
288 /* Bits in yellowfin_desc.cmd */
289 enum desc_cmd_bits {
290 	CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
291 	CMD_NOP=0x60000000, CMD_STOP=0x70000000,
292 	BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
293 	BRANCH_IFTRUE=0x040000,
294 };
295 
296 /* Bits in yellowfin_desc.status */
297 enum desc_status_bits { RX_EOP=0x0040, };
298 
299 /* Bits in the interrupt status/mask registers. */
300 enum intr_status_bits {
301 	IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
302 	IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
303 	IntrEarlyRx=0x100, IntrWakeup=0x200, };
304 
305 #define PRIV_ALIGN	31 	/* Required alignment mask */
306 #define MII_CNT		4
307 struct yellowfin_private {
308 	/* Descriptor rings first for alignment.
309 	   Tx requires a second descriptor for status. */
310 	struct yellowfin_desc *rx_ring;
311 	struct yellowfin_desc *tx_ring;
312 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
313 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
314 	dma_addr_t rx_ring_dma;
315 	dma_addr_t tx_ring_dma;
316 
317 	struct tx_status_words *tx_status;
318 	dma_addr_t tx_status_dma;
319 
320 	struct timer_list timer;	/* Media selection timer. */
321 	/* Frequently used and paired value: keep adjacent for cache effect. */
322 	int chip_id, drv_flags;
323 	struct pci_dev *pci_dev;
324 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
325 	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
326 	struct tx_status_words *tx_tail_desc;
327 	unsigned int cur_tx, dirty_tx;
328 	int tx_threshold;
329 	unsigned int tx_full:1;				/* The Tx queue is full. */
330 	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
331 	unsigned int duplex_lock:1;
332 	unsigned int medialock:1;			/* Do not sense media. */
333 	unsigned int default_port:4;		/* Last dev->if_port value. */
334 	/* MII transceiver section. */
335 	int mii_cnt;						/* MII device addresses. */
336 	u16 advertising;					/* NWay media advertisement */
337 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used */
338 	spinlock_t lock;
339 	void __iomem *base;
340 };
341 
342 static int read_eeprom(void __iomem *ioaddr, int location);
343 static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
344 static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
345 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
346 static int yellowfin_open(struct net_device *dev);
347 static void yellowfin_timer(unsigned long data);
348 static void yellowfin_tx_timeout(struct net_device *dev);
349 static void yellowfin_init_ring(struct net_device *dev);
350 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
351 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
352 static int yellowfin_rx(struct net_device *dev);
353 static void yellowfin_error(struct net_device *dev, int intr_status);
354 static int yellowfin_close(struct net_device *dev);
355 static void set_rx_mode(struct net_device *dev);
356 static const struct ethtool_ops ethtool_ops;
357 
358 static const struct net_device_ops netdev_ops = {
359 	.ndo_open 		= yellowfin_open,
360 	.ndo_stop 		= yellowfin_close,
361 	.ndo_start_xmit 	= yellowfin_start_xmit,
362 	.ndo_set_multicast_list = set_rx_mode,
363 	.ndo_change_mtu		= eth_change_mtu,
364 	.ndo_validate_addr	= eth_validate_addr,
365 	.ndo_set_mac_address 	= eth_mac_addr,
366 	.ndo_do_ioctl 		= netdev_ioctl,
367 	.ndo_tx_timeout 	= yellowfin_tx_timeout,
368 };
369 
370 static int __devinit yellowfin_init_one(struct pci_dev *pdev,
371 					const struct pci_device_id *ent)
372 {
373 	struct net_device *dev;
374 	struct yellowfin_private *np;
375 	int irq;
376 	int chip_idx = ent->driver_data;
377 	static int find_cnt;
378 	void __iomem *ioaddr;
379 	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
380 	int drv_flags = pci_id_tbl[chip_idx].drv_flags;
381         void *ring_space;
382         dma_addr_t ring_dma;
383 #ifdef USE_IO_OPS
384 	int bar = 0;
385 #else
386 	int bar = 1;
387 #endif
388 
389 /* when built into the kernel, we only print version if device is found */
390 #ifndef MODULE
391 	static int printed_version;
392 	if (!printed_version++)
393 		printk(version);
394 #endif
395 
396 	i = pci_enable_device(pdev);
397 	if (i) return i;
398 
399 	dev = alloc_etherdev(sizeof(*np));
400 	if (!dev) {
401 		printk (KERN_ERR PFX "cannot allocate ethernet device\n");
402 		return -ENOMEM;
403 	}
404 	SET_NETDEV_DEV(dev, &pdev->dev);
405 
406 	np = netdev_priv(dev);
407 
408 	if (pci_request_regions(pdev, DRV_NAME))
409 		goto err_out_free_netdev;
410 
411 	pci_set_master (pdev);
412 
413 	ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
414 	if (!ioaddr)
415 		goto err_out_free_res;
416 
417 	irq = pdev->irq;
418 
419 	if (drv_flags & DontUseEeprom)
420 		for (i = 0; i < 6; i++)
421 			dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
422 	else {
423 		int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
424 		for (i = 0; i < 6; i++)
425 			dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
426 	}
427 
428 	/* Reset the chip. */
429 	iowrite32(0x80000000, ioaddr + DMACtrl);
430 
431 	dev->base_addr = (unsigned long)ioaddr;
432 	dev->irq = irq;
433 
434 	pci_set_drvdata(pdev, dev);
435 	spin_lock_init(&np->lock);
436 
437 	np->pci_dev = pdev;
438 	np->chip_id = chip_idx;
439 	np->drv_flags = drv_flags;
440 	np->base = ioaddr;
441 
442 	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
443 	if (!ring_space)
444 		goto err_out_cleardev;
445 	np->tx_ring = (struct yellowfin_desc *)ring_space;
446 	np->tx_ring_dma = ring_dma;
447 
448 	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
449 	if (!ring_space)
450 		goto err_out_unmap_tx;
451 	np->rx_ring = (struct yellowfin_desc *)ring_space;
452 	np->rx_ring_dma = ring_dma;
453 
454 	ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
455 	if (!ring_space)
456 		goto err_out_unmap_rx;
457 	np->tx_status = (struct tx_status_words *)ring_space;
458 	np->tx_status_dma = ring_dma;
459 
460 	if (dev->mem_start)
461 		option = dev->mem_start;
462 
463 	/* The lower four bits are the media type. */
464 	if (option > 0) {
465 		if (option & 0x200)
466 			np->full_duplex = 1;
467 		np->default_port = option & 15;
468 		if (np->default_port)
469 			np->medialock = 1;
470 	}
471 	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
472 		np->full_duplex = 1;
473 
474 	if (np->full_duplex)
475 		np->duplex_lock = 1;
476 
477 	/* The Yellowfin-specific entries in the device structure. */
478 	dev->netdev_ops = &netdev_ops;
479 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
480 	dev->watchdog_timeo = TX_TIMEOUT;
481 
482 	if (mtu)
483 		dev->mtu = mtu;
484 
485 	i = register_netdev(dev);
486 	if (i)
487 		goto err_out_unmap_status;
488 
489 	printk(KERN_INFO "%s: %s type %8x at %p, %pM, IRQ %d.\n",
490 		   dev->name, pci_id_tbl[chip_idx].name,
491 		   ioread32(ioaddr + ChipRev), ioaddr,
492 		   dev->dev_addr, irq);
493 
494 	if (np->drv_flags & HasMII) {
495 		int phy, phy_idx = 0;
496 		for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
497 			int mii_status = mdio_read(ioaddr, phy, 1);
498 			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
499 				np->phys[phy_idx++] = phy;
500 				np->advertising = mdio_read(ioaddr, phy, 4);
501 				printk(KERN_INFO "%s: MII PHY found at address %d, status "
502 					   "0x%4.4x advertising %4.4x.\n",
503 					   dev->name, phy, mii_status, np->advertising);
504 			}
505 		}
506 		np->mii_cnt = phy_idx;
507 	}
508 
509 	find_cnt++;
510 
511 	return 0;
512 
513 err_out_unmap_status:
514         pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
515 		np->tx_status_dma);
516 err_out_unmap_rx:
517         pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
518 err_out_unmap_tx:
519         pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
520 err_out_cleardev:
521 	pci_set_drvdata(pdev, NULL);
522 	pci_iounmap(pdev, ioaddr);
523 err_out_free_res:
524 	pci_release_regions(pdev);
525 err_out_free_netdev:
526 	free_netdev (dev);
527 	return -ENODEV;
528 }
529 
530 static int __devinit read_eeprom(void __iomem *ioaddr, int location)
531 {
532 	int bogus_cnt = 10000;		/* Typical 33 MHz: 1050 ticks */
533 
534 	iowrite8(location, ioaddr + EEAddr);
535 	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
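	/* Spin until the EEPROM controller drops its busy bit, bounded by the
	   bogus_cnt counter so a dead controller cannot hang the probe. */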
536 	while ((ioread8(ioaddr + EEStatus) & 0x80)  &&  --bogus_cnt > 0)
537 		;
538 	return ioread8(ioaddr + EERead);
539 }
540 
541 /* MII Management Data I/O accesses.
542    These routines assume the MDIO controller is idle, and do not exit until
543    the command is finished. */
544 
545 static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
546 {
547 	int i;
548 
549 	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
550 	iowrite16(1, ioaddr + MII_Cmd);
551 	for (i = 10000; i >= 0; i--)
552 		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
553 			break;
554 	return ioread16(ioaddr + MII_Rd_Data);
555 }
556 
557 static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
558 {
559 	int i;
560 
561 	iowrite16((phy_id<<8) + location, ioaddr + MII_Addr);
562 	iowrite16(value, ioaddr + MII_Wr_Data);
563 
564 	/* Wait for the command to finish. */
565 	for (i = 10000; i >= 0; i--)
566 		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
567 			break;
568 	return;
569 }
570 
571 
572 static int yellowfin_open(struct net_device *dev)
573 {
574 	struct yellowfin_private *yp = netdev_priv(dev);
575 	void __iomem *ioaddr = yp->base;
576 	int i;
577 
578 	/* Reset the chip. */
579 	iowrite32(0x80000000, ioaddr + DMACtrl);
580 
581 	i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
582 	if (i) return i;
583 
584 	if (yellowfin_debug > 1)
585 		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
586 			   dev->name, dev->irq);
587 
588 	yellowfin_init_ring(dev);
589 
590 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
591 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
592 
593 	for (i = 0; i < 6; i++)
594 		iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
595 
596 	/* Set up various condition 'select' registers.
597 	   There are no options here. */
598 	iowrite32(0x00800080, ioaddr + TxIntrSel); 	/* Interrupt on Tx abort */
599 	iowrite32(0x00800080, ioaddr + TxBranchSel);	/* Branch on Tx abort */
600 	iowrite32(0x00400040, ioaddr + TxWaitSel); 	/* Wait on Tx status */
601 	iowrite32(0x00400040, ioaddr + RxIntrSel);	/* Interrupt on Rx done */
602 	iowrite32(0x00400040, ioaddr + RxBranchSel);	/* Branch on Rx error */
603 	iowrite32(0x00400040, ioaddr + RxWaitSel);	/* Wait on Rx done */
604 
605 	/* Initialize other registers: with so many, this will eventually be
606 	   converted to an offset/value list. */
607 	iowrite32(dma_ctrl, ioaddr + DMACtrl);
608 	iowrite16(fifo_cfg, ioaddr + FIFOcfg);
609 	/* Enable automatic generation of flow control frames, period 0xffff. */
610 	iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
611 
612 	yp->tx_threshold = 32;
613 	iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
614 
615 	if (dev->if_port == 0)
616 		dev->if_port = yp->default_port;
617 
618 	netif_start_queue(dev);
619 
620 	/* Setting the Rx mode will start the Rx process. */
621 	if (yp->drv_flags & IsGigabit) {
622 		/* We are always in full-duplex mode with gigabit! */
623 		yp->full_duplex = 1;
624 		iowrite16(0x01CF, ioaddr + Cnfg);
625 	} else {
626 		iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
627 		iowrite16(0x1018, ioaddr + FrameGap1);
628 		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
629 	}
630 	set_rx_mode(dev);
631 
632 	/* Enable interrupts by setting the interrupt mask. */
633 	iowrite16(0x81ff, ioaddr + IntrEnb);			/* See enum intr_status_bits */
634 	iowrite16(0x0000, ioaddr + EventStatus);		/* Clear non-interrupting events */
635 	iowrite32(0x80008000, ioaddr + RxCtrl);		/* Start Rx and Tx channels. */
636 	iowrite32(0x80008000, ioaddr + TxCtrl);
637 
638 	if (yellowfin_debug > 2) {
639 		printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
640 			   dev->name);
641 	}
642 
643 	/* Set the timer to check for link beat. */
644 	init_timer(&yp->timer);
645 	yp->timer.expires = jiffies + 3*HZ;
646 	yp->timer.data = (unsigned long)dev;
647 	yp->timer.function = &yellowfin_timer;				/* timer handler */
648 	add_timer(&yp->timer);
649 
650 	return 0;
651 }
652 
653 static void yellowfin_timer(unsigned long data)
654 {
655 	struct net_device *dev = (struct net_device *)data;
656 	struct yellowfin_private *yp = netdev_priv(dev);
657 	void __iomem *ioaddr = yp->base;
658 	int next_tick = 60*HZ;
659 
660 	if (yellowfin_debug > 3) {
661 		printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
662 			   dev->name, ioread16(ioaddr + IntrStatus));
663 	}
664 
665 	if (yp->mii_cnt) {
666 		int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
667 		int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
668 		int negotiated = lpa & yp->advertising;
669 		if (yellowfin_debug > 1)
670 			printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
671 				   "link partner capability %4.4x.\n",
672 				   dev->name, yp->phys[0], bmsr, lpa);
673 
674 		yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
675 
676 		iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
677 
678 		if (bmsr & BMSR_LSTATUS)
679 			next_tick = 60*HZ;
680 		else
681 			next_tick = 3*HZ;
682 	}
683 
684 	yp->timer.expires = jiffies + next_tick;
685 	add_timer(&yp->timer);
686 }
687 
688 static void yellowfin_tx_timeout(struct net_device *dev)
689 {
690 	struct yellowfin_private *yp = netdev_priv(dev);
691 	void __iomem *ioaddr = yp->base;
692 
693 	printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
694 		   "status %4.4x, Rx status %4.4x, resetting...\n",
695 		   dev->name, yp->cur_tx, yp->dirty_tx,
696 		   ioread32(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
697 
698 	/* Note: these should be KERN_DEBUG. */
699 	if (yellowfin_debug) {
700 		int i;
701 		printk(KERN_WARNING "  Rx ring %p: ", yp->rx_ring);
702 		for (i = 0; i < RX_RING_SIZE; i++)
703 			printk(" %8.8x", yp->rx_ring[i].result_status);
704 		printk("\n"KERN_WARNING"  Tx ring %p: ", yp->tx_ring);
705 		for (i = 0; i < TX_RING_SIZE; i++)
706 			printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
707 				   yp->tx_ring[i].result_status);
708 		printk("\n");
709 	}
710 
711 	/* If the hardware is found to hang regularly, we will update the code
712 	   to reinitialize the chip here. */
713 	dev->if_port = 0;
714 
715 	/* Wake the potentially-idle transmit channel. */
716 	iowrite32(0x10001000, yp->base + TxCtrl);
717 	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
718 		netif_wake_queue (dev);		/* Typical path */
719 
720 	dev->trans_start = jiffies;
721 	dev->stats.tx_errors++;
722 }
723 
724 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
725 static void yellowfin_init_ring(struct net_device *dev)
726 {
727 	struct yellowfin_private *yp = netdev_priv(dev);
728 	int i;
729 
730 	yp->tx_full = 0;
731 	yp->cur_rx = yp->cur_tx = 0;
732 	yp->dirty_tx = 0;
733 
734 	yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
735 
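	/* Chain each Rx descriptor's branch address to the following descriptor;
	   the final entry points back to the first, forming the ring. */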
736 	for (i = 0; i < RX_RING_SIZE; i++) {
737 		yp->rx_ring[i].dbdma_cmd =
738 			cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
739 		yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
740 			((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
741 	}
742 
743 	for (i = 0; i < RX_RING_SIZE; i++) {
744 		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
745 		yp->rx_skbuff[i] = skb;
746 		if (skb == NULL)
747 			break;
748 		skb->dev = dev;		/* Mark as being used by this device. */
749 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
750 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
751 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
752 	}
753 	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
754 	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
755 
756 #define NO_TXSTATS
757 #ifdef NO_TXSTATS
758 	/* In this mode the Tx ring needs only a single descriptor. */
759 	for (i = 0; i < TX_RING_SIZE; i++) {
760 		yp->tx_skbuff[i] = NULL;
761 		yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
762 		yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
763 			((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
764 	}
765 	/* Wrap ring */
766 	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
767 #else
768 {
769 	int j;
770 
771 	/* Tx ring needs a pair of descriptors, the second for the status. */
772 	for (i = 0; i < TX_RING_SIZE; i++) {
773 		j = 2*i;
774 		yp->tx_skbuff[i] = 0;
775 		/* Branch on Tx error. */
776 		yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
777 		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
778 			(j+1)*sizeof(struct yellowfin_desc));
779 		j++;
780 		if (yp->flags & FullTxStatus) {
781 			yp->tx_ring[j].dbdma_cmd =
782 				cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
783 			yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
784 			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
785 				i*sizeof(struct tx_status_words));
786 		} else {
787 			/* Symbios chips write only tx_errs word. */
788 			yp->tx_ring[j].dbdma_cmd =
789 				cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
790 			yp->tx_ring[j].request_cnt = 2;
791 			/* Om pade ummmmm... */
792 			yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
793 				i*sizeof(struct tx_status_words) +
794 				&(yp->tx_status[0].tx_errs) -
795 				&(yp->tx_status[0]));
796 		}
797 		yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
798 			((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
799 	}
800 	/* Wrap ring */
801 	yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
802 }
803 #endif
804 	yp->tx_tail_desc = &yp->tx_status[0];
805 	return;
806 }
807 
808 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
809 {
810 	struct yellowfin_private *yp = netdev_priv(dev);
811 	unsigned entry;
812 	int len = skb->len;
813 
814 	netif_stop_queue (dev);
815 
816 	/* Note: Ordering is important here, set the field with the
817 	   "ownership" bit last, and only then increment cur_tx. */
818 
819 	/* Calculate the next Tx descriptor entry. */
820 	entry = yp->cur_tx % TX_RING_SIZE;
821 
822 	if (gx_fix) {	/* Note: only works for paddable protocols e.g.  IP. */
823 		int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
824 		/* Fix GX chipset errata. */
825 		if (cacheline_end > 24  || cacheline_end == 0) {
826 			len = skb->len + 32 - cacheline_end + 1;
827 			if (skb_padto(skb, len)) {
828 				yp->tx_skbuff[entry] = NULL;
829 				netif_wake_queue(dev);
830 				return 0;
831 			}
832 		}
833 	}
834 	yp->tx_skbuff[entry] = skb;
835 
836 #ifdef NO_TXSTATS
837 	yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
838 		skb->data, len, PCI_DMA_TODEVICE));
839 	yp->tx_ring[entry].result_status = 0;
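	/* Write the new 'stop' fence into the slot that follows before arming this
	   descriptor, so the DMA engine can never run past the packet just queued. */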
840 	if (entry >= TX_RING_SIZE-1) {
841 		/* New stop command. */
842 		yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
843 		yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
844 			cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
845 	} else {
846 		yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
847 		yp->tx_ring[entry].dbdma_cmd =
848 			cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
849 	}
850 	yp->cur_tx++;
851 #else
852 	yp->tx_ring[entry<<1].request_cnt = len;
853 	yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
854 		skb->data, len, PCI_DMA_TODEVICE));
855 	/* The input_last (status-write) command is constant, but we must
856 	   rewrite the subsequent 'stop' command. */
857 
858 	yp->cur_tx++;
859 	{
860 		unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
861 		yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
862 	}
863 	/* Final step -- overwrite the old 'stop' command. */
864 
865 	yp->tx_ring[entry<<1].dbdma_cmd =
866 		cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
867 					  CMD_TX_PKT | BRANCH_IFTRUE) | len);
868 #endif
869 
870 	/* Non-x86 Todo: explicitly flush cache lines here. */
871 
872 	/* Wake the potentially-idle transmit channel. */
873 	iowrite32(0x10001000, yp->base + TxCtrl);
874 
875 	if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
876 		netif_start_queue (dev);		/* Typical path */
877 	else
878 		yp->tx_full = 1;
879 	dev->trans_start = jiffies;
880 
881 	if (yellowfin_debug > 4) {
882 		printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
883 			   dev->name, yp->cur_tx, entry);
884 	}
885 	return 0;
886 }
887 
888 /* The interrupt handler does all of the Rx thread work and cleans up
889    after the Tx thread. */
890 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
891 {
892 	struct net_device *dev = dev_instance;
893 	struct yellowfin_private *yp;
894 	void __iomem *ioaddr;
895 	int boguscnt = max_interrupt_work;
896 	unsigned int handled = 0;
897 
898 	yp = netdev_priv(dev);
899 	ioaddr = yp->base;
900 
901 	spin_lock (&yp->lock);
902 
903 	do {
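		/* The status is read from IntrClear rather than IntrStatus: reading it
		   both reports the pending sources and acknowledges them, so each pass
		   of the loop works on a fresh snapshot. */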
904 		u16 intr_status = ioread16(ioaddr + IntrClear);
905 
906 		if (yellowfin_debug > 4)
907 			printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
908 				   dev->name, intr_status);
909 
910 		if (intr_status == 0)
911 			break;
912 		handled = 1;
913 
914 		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
915 			yellowfin_rx(dev);
916 			iowrite32(0x10001000, ioaddr + RxCtrl);		/* Wake Rx engine. */
917 		}
918 
919 #ifdef NO_TXSTATS
920 		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
921 			int entry = yp->dirty_tx % TX_RING_SIZE;
922 			struct sk_buff *skb;
923 
924 			if (yp->tx_ring[entry].result_status == 0)
925 				break;
926 			skb = yp->tx_skbuff[entry];
927 			dev->stats.tx_packets++;
928 			dev->stats.tx_bytes += skb->len;
929 			/* Free the original skb. */
930 			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
931 				skb->len, PCI_DMA_TODEVICE);
932 			dev_kfree_skb_irq(skb);
933 			yp->tx_skbuff[entry] = NULL;
934 		}
935 		if (yp->tx_full
936 			&& yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
937 			/* The ring is no longer full, clear tbusy. */
938 			yp->tx_full = 0;
939 			netif_wake_queue(dev);
940 		}
941 #else
942 		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
943 			unsigned dirty_tx = yp->dirty_tx;
944 
945 			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
946 				 dirty_tx++) {
947 				/* Todo: optimize this. */
948 				int entry = dirty_tx % TX_RING_SIZE;
949 				u16 tx_errs = yp->tx_status[entry].tx_errs;
950 				struct sk_buff *skb;
951 
952 #ifndef final_version
953 				if (yellowfin_debug > 5)
954 					printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
955 						   "%4.4x %4.4x %4.4x %4.4x.\n",
956 						   dev->name, entry,
957 						   yp->tx_status[entry].tx_cnt,
958 						   yp->tx_status[entry].tx_errs,
959 						   yp->tx_status[entry].total_tx_cnt,
960 						   yp->tx_status[entry].paused);
961 #endif
962 				if (tx_errs == 0)
963 					break;	/* It still hasn't been Txed */
964 				skb = yp->tx_skbuff[entry];
965 				if (tx_errs & 0xF810) {
966 					/* There was a major error, log it. */
967 #ifndef final_version
968 					if (yellowfin_debug > 1)
969 						printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
970 							   dev->name, tx_errs);
971 #endif
972 					dev->stats.tx_errors++;
973 					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
974 					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
975 					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
976 					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
977 				} else {
978 #ifndef final_version
979 					if (yellowfin_debug > 4)
980 						printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
981 							   dev->name, tx_errs);
982 #endif
983 					dev->stats.tx_bytes += skb->len;
984 					dev->stats.collisions += tx_errs & 15;
985 					dev->stats.tx_packets++;
986 				}
987 				/* Free the original skb. */
988 				pci_unmap_single(yp->pci_dev,
989 					yp->tx_ring[entry<<1].addr, skb->len,
990 					PCI_DMA_TODEVICE);
991 				dev_kfree_skb_irq(skb);
992 				yp->tx_skbuff[entry] = 0;
993 				/* Mark status as empty. */
994 				yp->tx_status[entry].tx_errs = 0;
995 			}
996 
997 #ifndef final_version
998 			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
999 				printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1000 					   dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
1001 				dirty_tx += TX_RING_SIZE;
1002 			}
1003 #endif
1004 
1005 			if (yp->tx_full
1006 				&& yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1007 				/* The ring is no longer full, clear tbusy. */
1008 				yp->tx_full = 0;
1009 				netif_wake_queue(dev);
1010 			}
1011 
1012 			yp->dirty_tx = dirty_tx;
1013 			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1014 		}
1015 #endif
1016 
1017 		/* Log errors and other uncommon events. */
1018 		if (intr_status & 0x2ee)	/* Abnormal error summary. */
1019 			yellowfin_error(dev, intr_status);
1020 
1021 		if (--boguscnt < 0) {
1022 			printk(KERN_WARNING "%s: Too much work at interrupt, "
1023 				   "status=0x%4.4x.\n",
1024 				   dev->name, intr_status);
1025 			break;
1026 		}
1027 	} while (1);
1028 
1029 	if (yellowfin_debug > 3)
1030 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1031 			   dev->name, ioread16(ioaddr + IntrStatus));
1032 
1033 	spin_unlock (&yp->lock);
1034 	return IRQ_RETVAL(handled);
1035 }
1036 
1037 /* This routine is logically part of the interrupt handler, but separated
1038    for clarity and better register allocation. */
1039 static int yellowfin_rx(struct net_device *dev)
1040 {
1041 	struct yellowfin_private *yp = netdev_priv(dev);
1042 	int entry = yp->cur_rx % RX_RING_SIZE;
1043 	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1044 
1045 	if (yellowfin_debug > 4) {
1046 		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
1047 			   entry, yp->rx_ring[entry].result_status);
1048 		printk(KERN_DEBUG "   #%d desc. %8.8x %8.8x %8.8x.\n",
1049 			   entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1050 			   yp->rx_ring[entry].result_status);
1051 	}
1052 
1053 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1054 	while (1) {
1055 		struct yellowfin_desc *desc = &yp->rx_ring[entry];
1056 		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
1057 		s16 frame_status;
1058 		u16 desc_status;
1059 		int data_size;
1060 		u8 *buf_addr;
1061 
1062 		if(!desc->result_status)
1063 			break;
1064 		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
1065 			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1066 		desc_status = le32_to_cpu(desc->result_status) >> 16;
1067 		buf_addr = rx_skb->data;
1068 		data_size = (le32_to_cpu(desc->dbdma_cmd) -
1069 			le32_to_cpu(desc->result_status)) & 0xffff;
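		/* The chip stores a status word in the last two bytes of the received
		   data; fetch it with an unaligned-safe little-endian read. */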
1070 		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
1071 		if (yellowfin_debug > 4)
1072 			printk(KERN_DEBUG "  yellowfin_rx() status was %4.4x.\n",
1073 				   frame_status);
1074 		if (--boguscnt < 0)
1075 			break;
1076 		if ( ! (desc_status & RX_EOP)) {
1077 			if (data_size != 0)
1078 				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
1079 					   " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
1080 			dev->stats.rx_length_errors++;
1081 		} else if ((yp->drv_flags & IsGigabit)  &&  (frame_status & 0x0038)) {
1082 			/* There was an error. */
1083 			if (yellowfin_debug > 3)
1084 				printk(KERN_DEBUG "  yellowfin_rx() Rx error was %4.4x.\n",
1085 					   frame_status);
1086 			dev->stats.rx_errors++;
1087 			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
1088 			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
1089 			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
1090 			if (frame_status < 0) dev->stats.rx_dropped++;
1091 		} else if ( !(yp->drv_flags & IsGigabit)  &&
1092 				   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1093 			u8 status1 = buf_addr[data_size-2];
1094 			u8 status2 = buf_addr[data_size-1];
1095 			dev->stats.rx_errors++;
1096 			if (status1 & 0xC0) dev->stats.rx_length_errors++;
1097 			if (status2 & 0x03) dev->stats.rx_frame_errors++;
1098 			if (status2 & 0x04) dev->stats.rx_crc_errors++;
1099 			if (status2 & 0x80) dev->stats.rx_dropped++;
1100 #ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
1101 		} else if ((yp->flags & HasMACAddrBug)  &&
1102 			memcmp(le32_to_cpu(yp->rx_ring_dma +
1103 				entry*sizeof(struct yellowfin_desc)),
1104 				dev->dev_addr, 6) != 0 &&
1105 			memcmp(le32_to_cpu(yp->rx_ring_dma +
1106 				entry*sizeof(struct yellowfin_desc)),
1107 				"\377\377\377\377\377\377", 6) != 0) {
1108 			if (bogus_rx++ == 0)
1109 				printk(KERN_WARNING "%s: Bad frame to %pM\n",
1110 					   dev->name, buf_addr);
1111 #endif
1112 		} else {
1113 			struct sk_buff *skb;
1114 			int pkt_len = data_size -
1115 				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1116 			/* To verify: Yellowfin Length should omit the CRC! */
1117 
1118 #ifndef final_version
1119 			if (yellowfin_debug > 4)
1120 				printk(KERN_DEBUG "  yellowfin_rx() normal Rx pkt length %d"
1121 					   " of %d, bogus_cnt %d.\n",
1122 					   pkt_len, data_size, boguscnt);
1123 #endif
1124 			/* Check if the packet is long enough to just pass up the skbuff
1125 			   without copying to a properly sized skbuff. */
1126 			if (pkt_len > rx_copybreak) {
1127 				skb_put(skb = rx_skb, pkt_len);
1128 				pci_unmap_single(yp->pci_dev,
1129 					le32_to_cpu(yp->rx_ring[entry].addr),
1130 					yp->rx_buf_sz,
1131 					PCI_DMA_FROMDEVICE);
1132 				yp->rx_skbuff[entry] = NULL;
1133 			} else {
1134 				skb = dev_alloc_skb(pkt_len + 2);
1135 				if (skb == NULL)
1136 					break;
1137 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1138 				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
1139 				skb_put(skb, pkt_len);
1140 				pci_dma_sync_single_for_device(yp->pci_dev,
1141 								le32_to_cpu(desc->addr),
1142 								yp->rx_buf_sz,
1143 								PCI_DMA_FROMDEVICE);
1144 			}
1145 			skb->protocol = eth_type_trans(skb, dev);
1146 			netif_rx(skb);
1147 			dev->stats.rx_packets++;
1148 			dev->stats.rx_bytes += pkt_len;
1149 		}
1150 		entry = (++yp->cur_rx) % RX_RING_SIZE;
1151 	}
1152 
1153 	/* Refill the Rx ring buffers. */
1154 	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1155 		entry = yp->dirty_rx % RX_RING_SIZE;
1156 		if (yp->rx_skbuff[entry] == NULL) {
1157 			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
1158 			if (skb == NULL)
1159 				break;				/* Better luck next round. */
1160 			yp->rx_skbuff[entry] = skb;
1161 			skb->dev = dev;	/* Mark as being used by this device. */
1162 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1163 			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1164 				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1165 		}
1166 		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1167 		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
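		/* The freshly refilled slot becomes the new stop fence; re-arm the
		   previous descriptor so the Rx engine may advance into it. */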
1168 		if (entry != 0)
1169 			yp->rx_ring[entry - 1].dbdma_cmd =
1170 				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
1171 		else
1172 			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1173 				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1174 							| yp->rx_buf_sz);
1175 	}
1176 
1177 	return 0;
1178 }
1179 
1180 static void yellowfin_error(struct net_device *dev, int intr_status)
1181 {
1182 	printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1183 		   dev->name, intr_status);
1184 	/* Hmmmmm, it's not clear what to do here. */
1185 	if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1186 		dev->stats.tx_errors++;
1187 	if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1188 		dev->stats.rx_errors++;
1189 }
1190 
1191 static int yellowfin_close(struct net_device *dev)
1192 {
1193 	struct yellowfin_private *yp = netdev_priv(dev);
1194 	void __iomem *ioaddr = yp->base;
1195 	int i;
1196 
1197 	netif_stop_queue (dev);
1198 
1199 	if (yellowfin_debug > 1) {
1200 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
1201 			   "Rx %4.4x Int %2.2x.\n",
1202 			   dev->name, ioread16(ioaddr + TxStatus),
1203 			   ioread16(ioaddr + RxStatus),
1204 			   ioread16(ioaddr + IntrStatus));
1205 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1206 			   dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1207 	}
1208 
1209 	/* Disable interrupts by clearing the interrupt mask. */
1210 	iowrite16(0x0000, ioaddr + IntrEnb);
1211 
1212 	/* Stop the chip's Tx and Rx processes. */
1213 	iowrite32(0x80000000, ioaddr + RxCtrl);
1214 	iowrite32(0x80000000, ioaddr + TxCtrl);
1215 
1216 	del_timer(&yp->timer);
1217 
1218 #if defined(__i386__)
1219 	if (yellowfin_debug > 2) {
1220 		printk("\n"KERN_DEBUG"  Tx ring at %8.8llx:\n",
1221 				(unsigned long long)yp->tx_ring_dma);
1222 		for (i = 0; i < TX_RING_SIZE*2; i++)
1223 			printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
1224 				   ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1225 				   i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1226 				   yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1227 		printk(KERN_DEBUG "  Tx status %p:\n", yp->tx_status);
1228 		for (i = 0; i < TX_RING_SIZE; i++)
1229 			printk("   #%d status %4.4x %4.4x %4.4x %4.4x.\n",
1230 				   i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1231 				   yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1232 
1233 		printk("\n"KERN_DEBUG "  Rx ring %8.8llx:\n",
1234 				(unsigned long long)yp->rx_ring_dma);
1235 		for (i = 0; i < RX_RING_SIZE; i++) {
1236 			printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
1237 				   ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1238 				   i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1239 				   yp->rx_ring[i].result_status);
1240 			if (yellowfin_debug > 6) {
1241 				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1242 					int j;
1243 					for (j = 0; j < 0x50; j++)
1244 						printk(" %4.4x",
1245 							   get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1246 					printk("\n");
1247 				}
1248 			}
1249 		}
1250 	}
1251 #endif /* __i386__ debugging only */
1252 
1253 	free_irq(dev->irq, dev);
1254 
1255 	/* Free all the skbuffs in the Rx queue. */
1256 	for (i = 0; i < RX_RING_SIZE; i++) {
1257 		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1258 		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1259 		if (yp->rx_skbuff[i]) {
1260 			dev_kfree_skb(yp->rx_skbuff[i]);
1261 		}
1262 		yp->rx_skbuff[i] = NULL;
1263 	}
1264 	for (i = 0; i < TX_RING_SIZE; i++) {
1265 		if (yp->tx_skbuff[i])
1266 			dev_kfree_skb(yp->tx_skbuff[i]);
1267 		yp->tx_skbuff[i] = NULL;
1268 	}
1269 
1270 #ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
1271 	if (yellowfin_debug > 0) {
1272 		printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
1273 			   dev->name, bogus_rx);
1274 	}
1275 #endif
1276 
1277 	return 0;
1278 }
1279 
1280 /* Set or clear the multicast filter for this adaptor. */
1281 
1282 static void set_rx_mode(struct net_device *dev)
1283 {
1284 	struct yellowfin_private *yp = netdev_priv(dev);
1285 	void __iomem *ioaddr = yp->base;
1286 	u16 cfg_value = ioread16(ioaddr + Cnfg);
1287 
1288 	/* Stop the Rx process to change any value. */
1289 	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1290 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1291 		iowrite16(0x000F, ioaddr + AddrMode);
1292 	} else if ((dev->mc_count > 64)  ||  (dev->flags & IFF_ALLMULTI)) {
1293 		/* Too many to filter well, or accept all multicasts. */
1294 		iowrite16(0x000B, ioaddr + AddrMode);
1295 	} else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
1296 		struct dev_mc_list *mclist;
1297 		u16 hash_table[4];
1298 		int i;
1299 		memset(hash_table, 0, sizeof(hash_table));
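		/* Build the 64-entry multicast hash filter: the low six bits of each
		   address CRC select one bit spread across the four 16-bit HashTbl
		   registers. */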
1300 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1301 			 i++, mclist = mclist->next) {
1302 			unsigned int bit;
1303 
1304 			/* Due to a bug in the early chip versions, multiple filter
1305 			   slots must be set for each address. */
1306 			if (yp->drv_flags & HasMulticastBug) {
1307 				bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f;
1308 				hash_table[bit >> 4] |= (1 << bit);
1309 				bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f;
1310 				hash_table[bit >> 4] |= (1 << bit);
1311 				bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f;
1312 				hash_table[bit >> 4] |= (1 << bit);
1313 			}
1314 			bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f;
1315 			hash_table[bit >> 4] |= (1 << bit);
1316 		}
1317 		/* Copy the hash table to the chip. */
1318 		for (i = 0; i < 4; i++)
1319 			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
1320 		iowrite16(0x0003, ioaddr + AddrMode);
1321 	} else {					/* Normal, unicast/broadcast-only mode. */
1322 		iowrite16(0x0001, ioaddr + AddrMode);
1323 	}
1324 	/* Restart the Rx process. */
1325 	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
1326 }
1327 
1328 static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1329 {
1330 	struct yellowfin_private *np = netdev_priv(dev);
1331 	strcpy(info->driver, DRV_NAME);
1332 	strcpy(info->version, DRV_VERSION);
1333 	strcpy(info->bus_info, pci_name(np->pci_dev));
1334 }
1335 
1336 static const struct ethtool_ops ethtool_ops = {
1337 	.get_drvinfo = yellowfin_get_drvinfo
1338 };
1339 
1340 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1341 {
1342 	struct yellowfin_private *np = netdev_priv(dev);
1343 	void __iomem *ioaddr = np->base;
1344 	struct mii_ioctl_data *data = if_mii(rq);
1345 
1346 	switch(cmd) {
1347 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1348 		data->phy_id = np->phys[0] & 0x1f;
1349 		/* Fall Through */
1350 
1351 	case SIOCGMIIREG:		/* Read MII PHY register. */
1352 		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1353 		return 0;
1354 
1355 	case SIOCSMIIREG:		/* Write MII PHY register. */
1356 		if (!capable(CAP_NET_ADMIN))
1357 			return -EPERM;
1358 		if (data->phy_id == np->phys[0]) {
1359 			u16 value = data->val_in;
1360 			switch (data->reg_num) {
1361 			case 0:
1362 				/* Check for autonegotiation on or reset. */
1363 				np->medialock = (value & 0x9000) ? 0 : 1;
1364 				if (np->medialock)
1365 					np->full_duplex = (value & 0x0100) ? 1 : 0;
1366 				break;
1367 			case 4: np->advertising = value; break;
1368 			}
1369 			/* Perhaps check_duplex(dev), depending on chip semantics. */
1370 		}
1371 		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1372 		return 0;
1373 	default:
1374 		return -EOPNOTSUPP;
1375 	}
1376 }
1377 
1378 
1379 static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
1380 {
1381 	struct net_device *dev = pci_get_drvdata(pdev);
1382 	struct yellowfin_private *np;
1383 
1384 	BUG_ON(!dev);
1385 	np = netdev_priv(dev);
1386 
1387         pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1388 		np->tx_status_dma);
1389 	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1390 	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1391 	unregister_netdev (dev);
1392 
1393 	pci_iounmap(pdev, np->base);
1394 
1395 	pci_release_regions (pdev);
1396 
1397 	free_netdev (dev);
1398 	pci_set_drvdata(pdev, NULL);
1399 }
1400 
1401 
1402 static struct pci_driver yellowfin_driver = {
1403 	.name		= DRV_NAME,
1404 	.id_table	= yellowfin_pci_tbl,
1405 	.probe		= yellowfin_init_one,
1406 	.remove		= __devexit_p(yellowfin_remove_one),
1407 };
1408 
1409 
1410 static int __init yellowfin_init (void)
1411 {
1412 /* when a module, this is printed whether or not devices are found in probe */
1413 #ifdef MODULE
1414 	printk(version);
1415 #endif
1416 	return pci_register_driver(&yellowfin_driver);
1417 }
1418 
1419 
1420 static void __exit yellowfin_cleanup (void)
1421 {
1422 	pci_unregister_driver (&yellowfin_driver);
1423 }
1424 
1425 
1426 module_init(yellowfin_init);
1427 module_exit(yellowfin_cleanup);
1428