1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 	Written 1999-2000 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	The author may be reached as becker@scyld.com, or C/O
13 	Scyld Computing Corporation
14 	410 Severn Ave., Suite 210
15 	Annapolis MD 21403
16 
17 	Support and updates available at
18 	http://www.scyld.com/network/sundance.html
19 	[link no longer provides useful info -jgarzik]
20 	Archives of the mailing list are still available at
21 	http://www.beowulf.org/pipermail/netdrivers/
22 
23 */
24 
25 #define DRV_NAME	"sundance"
26 #define DRV_VERSION	"1.2"
27 #define DRV_RELDATE	"11-Sep-2006"
28 
29 
30 /* The user-configurable values.
31    These may be modified when a driver module is loaded.*/
32 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34    Typical is a 64 element hash table based on the Ethernet CRC.  */
35 static const int multicast_filter_limit = 32;
36 
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38    Setting to > 1518 effectively disables this feature.
39    This chip can receive into offset buffers, so the Alpha does not
40    need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
43 
44 /* media[] specifies the media type the NIC operates at.
45 		 autosense	Autosensing active media.
46 		 10mbps_hd 	10Mbps half duplex.
47 		 10mbps_fd 	10Mbps full duplex.
48 		 100mbps_hd 	100Mbps half duplex.
49 		 100mbps_fd 	100Mbps full duplex.
50 		 0		Autosensing active media.
51 		 1	 	10Mbps half duplex.
52 		 2	 	10Mbps full duplex.
53 		 3	 	100Mbps half duplex.
54 		 4	 	100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
58 
59 
60 /* Operational parameters that are set at compile time. */
61 
62 /* Keep the ring sizes a power of two for compile efficiency.
63    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64    Making the Tx ring too large decreases the effectiveness of channel
65    bonding and packet priority, and more than 128 requires modifying the
66    Tx error recovery.
67    Large receive rings merely waste memory. */
68 #define TX_RING_SIZE	32
69 #define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
70 #define RX_RING_SIZE	64
71 #define RX_BUDGET	32
72 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
73 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
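/* Illustrative note (not in the original sources): because the ring indices
   used below are unsigned and the sizes above are powers of two, an index
   expression such as
	entry = np->cur_tx % TX_RING_SIZE;
   can be emitted by the compiler as (np->cur_tx & (TX_RING_SIZE - 1)),
   which is the masking the comment above refers to. */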
74 
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT  (4*HZ)
78 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
79 
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/slab.h>
88 #include <linux/interrupt.h>
89 #include <linux/pci.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/skbuff.h>
93 #include <linux/init.h>
94 #include <linux/bitops.h>
95 #include <asm/uaccess.h>
96 #include <asm/processor.h>		/* Processor type for cache alignment. */
97 #include <asm/io.h>
98 #include <linux/delay.h>
99 #include <linux/spinlock.h>
100 #ifndef _COMPAT_WITH_OLD_KERNEL
101 #include <linux/crc32.h>
102 #include <linux/ethtool.h>
103 #include <linux/mii.h>
104 #else
105 #include "crc32.h"
106 #include "ethtool.h"
107 #include "mii.h"
108 #include "compat.h"
109 #endif
110 
111 /* These identify the driver base version and may not be removed. */
112 static char version[] =
113 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "  Written by Donald Becker\n";
114 
115 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
116 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
117 MODULE_LICENSE("GPL");
118 
119 module_param(debug, int, 0);
120 module_param(rx_copybreak, int, 0);
121 module_param_array(media, charp, NULL, 0);
122 module_param(flowctrl, int, 0);
123 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
124 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
125 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
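
/* Illustrative usage only (hypothetical invocation, not from the original
   sources): with the parameters registered above, a machine with two of
   these NICs could force 100 Mbps full duplex on the first and leave the
   second autosensing with

	modprobe sundance media=100mbps_fd,autosense flowctrl=1 debug=1

   The media strings follow the table in the media[] comment above. */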
126 
127 /*
128 				Theory of Operation
129 
130 I. Board Compatibility
131 
132 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
133 
134 II. Board-specific settings
135 
136 III. Driver operation
137 
138 IIIa. Ring buffers
139 
140 This driver uses two statically allocated fixed-size descriptor lists
141 formed into rings by a branch from the final descriptor to the beginning of
142 the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
143 Some chips explicitly use only 2^N sized rings, while others use a
144 'next descriptor' pointer that the driver forms into rings.
145 
146 IIIb/c. Transmit/Receive Structure
147 
148 This driver uses a zero-copy receive and transmit scheme.
149 The driver allocates full frame size skbuffs for the Rx ring buffers at
150 open() time and passes the skb->data field to the chip as receive data
151 buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
152 a fresh skbuff is allocated and the frame is copied to the new skbuff.
153 When the incoming frame is larger, the skbuff is passed directly up the
154 protocol stack.  Buffers consumed this way are replaced by newly allocated
155 skbuffs in a later phase of receives.
156 
157 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
158 using a full-sized skbuff for small frames vs. the copying costs of larger
159 frames.  New boards are typically used in generously configured machines
160 and the underfilled buffers have negligible impact compared to the benefit of
161 a single allocation size, so the default value of zero results in never
162 copying packets.  When copying is done, the cost is usually mitigated by using
163 a combined copy/checksum routine.  Copying also preloads the cache, which is
164 most useful with small frames.
165 
166 A subtle aspect of the operation is that the IP header at offset 14 in an
167 ethernet frame isn't longword aligned for further processing.
168 Unaligned buffers are permitted by the Sundance hardware, so
169 frames are received into the skbuff at an offset of "+2", 16-byte aligning
170 the IP header.
171 
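A condensed sketch of the copy-break receive decision described above, as it
appears in rx_poll() later in this file (arguments elided; illustrative only):

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);
		pci_dma_sync_single_for_cpu(...);
		skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
		pci_dma_sync_single_for_device(...);
		skb_put(skb, pkt_len);
	} else {
		pci_unmap_single(...);
		skb_put(skb = np->rx_skbuff[entry], pkt_len);
		np->rx_skbuff[entry] = NULL;
	}
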
172 IIId. Synchronization
173 
174 The driver runs as two independent, single-threaded flows of control.  One
175 is the send-packet routine, which enforces single-threaded use by the
176 dev->tbusy flag.  The other thread is the interrupt handler, which is single
177 threaded by the hardware and interrupt handling software.
178 
179 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
180 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
181 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
182 the 'lp->tx_full' flag.
183 
184 The interrupt handler has exclusive control over the Rx ring and records stats
185 from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
186 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
187 clears both the tx_full and tbusy flags.
188 
189 IV. Notes
190 
191 IVb. References
192 
193 The Sundance ST201 datasheet, preliminary version.
194 The Kendin KS8723 datasheet, preliminary version.
195 The ICplus IP100 datasheet, preliminary version.
196 http://www.scyld.com/expert/100mbps.html
197 http://www.scyld.com/expert/NWay.html
198 
199 IVc. Errata
200 
201 */
202 
203 /* Work-around for Kendin chip bugs. */
204 #ifndef CONFIG_SUNDANCE_MMIO
205 #define USE_IO_OPS 1
206 #endif
207 
208 static const struct pci_device_id sundance_pci_tbl[] = {
209 	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
210 	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
211 	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
212 	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
213 	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
214 	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
215 	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
216 	{ }
217 };
218 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
219 
220 enum {
221 	netdev_io_size = 128
222 };
223 
224 struct pci_id_info {
225         const char *name;
226 };
227 static const struct pci_id_info pci_id_tbl[] __devinitdata = {
228 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
229 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
230 	{"D-Link DFE-580TX 4 port Server Adapter"},
231 	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
232 	{"D-Link DL10050-based FAST Ethernet Adapter"},
233 	{"Sundance Technology Alta"},
234 	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
235 	{ }	/* terminate list. */
236 };
237 
238 /* This driver was written to use PCI memory space; however, x86-oriented
239    hardware often uses I/O space accesses. */
240 
241 /* Offsets to the device registers.
242    Unlike software-only systems, device drivers interact with complex hardware.
243    It's not useful to define symbolic names for every register bit in the
244    device.  Such names can only partially document the semantics, and they make
245    the driver longer and more difficult to read.
246    In general, only the important configuration values or bits changed
247    multiple times should be defined symbolically.
248 */
249 enum alta_offsets {
250 	DMACtrl = 0x00,
251 	TxListPtr = 0x04,
252 	TxDMABurstThresh = 0x08,
253 	TxDMAUrgentThresh = 0x09,
254 	TxDMAPollPeriod = 0x0a,
255 	RxDMAStatus = 0x0c,
256 	RxListPtr = 0x10,
257 	DebugCtrl0 = 0x1a,
258 	DebugCtrl1 = 0x1c,
259 	RxDMABurstThresh = 0x14,
260 	RxDMAUrgentThresh = 0x15,
261 	RxDMAPollPeriod = 0x16,
262 	LEDCtrl = 0x1a,
263 	ASICCtrl = 0x30,
264 	EEData = 0x34,
265 	EECtrl = 0x36,
266 	FlashAddr = 0x40,
267 	FlashData = 0x44,
268 	TxStatus = 0x46,
269 	TxFrameId = 0x47,
270 	DownCounter = 0x18,
271 	IntrClear = 0x4a,
272 	IntrEnable = 0x4c,
273 	IntrStatus = 0x4e,
274 	MACCtrl0 = 0x50,
275 	MACCtrl1 = 0x52,
276 	StationAddr = 0x54,
277 	MaxFrameSize = 0x5A,
278 	RxMode = 0x5c,
279 	MIICtrl = 0x5e,
280 	MulticastFilter0 = 0x60,
281 	MulticastFilter1 = 0x64,
282 	RxOctetsLow = 0x68,
283 	RxOctetsHigh = 0x6a,
284 	TxOctetsLow = 0x6c,
285 	TxOctetsHigh = 0x6e,
286 	TxFramesOK = 0x70,
287 	RxFramesOK = 0x72,
288 	StatsCarrierError = 0x74,
289 	StatsLateColl = 0x75,
290 	StatsMultiColl = 0x76,
291 	StatsOneColl = 0x77,
292 	StatsTxDefer = 0x78,
293 	RxMissed = 0x79,
294 	StatsTxXSDefer = 0x7a,
295 	StatsTxAbort = 0x7b,
296 	StatsBcastTx = 0x7c,
297 	StatsBcastRx = 0x7d,
298 	StatsMcastTx = 0x7e,
299 	StatsMcastRx = 0x7f,
300 	/* Aliased and bogus values! */
301 	RxStatus = 0x0c,
302 };
303 enum ASICCtrl_HiWord_bit {
304 	GlobalReset = 0x0001,
305 	RxReset = 0x0002,
306 	TxReset = 0x0004,
307 	DMAReset = 0x0008,
308 	FIFOReset = 0x0010,
309 	NetworkReset = 0x0020,
310 	HostReset = 0x0040,
311 	ResetBusy = 0x0400,
312 };
313 
314 /* Bits in the interrupt status/mask registers. */
315 enum intr_status_bits {
316 	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
317 	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
318 	IntrDrvRqst=0x0040,
319 	StatsMax=0x0080, LinkChange=0x0100,
320 	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
321 };
322 
323 /* Bits in the RxMode register. */
324 enum rx_mode_bits {
325 	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
326 	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
327 };
328 /* Bits in MACCtrl. */
329 enum mac_ctrl0_bits {
330 	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
331 	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
332 };
333 enum mac_ctrl1_bits {
334 	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
335 	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
336 	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
337 };
338 
339 /* The Rx and Tx buffer descriptors. */
340 /* Note that using only 32 bit fields simplifies conversion to big-endian
341    architectures. */
342 struct netdev_desc {
343 	__le32 next_desc;
344 	__le32 status;
345 	struct desc_frag { __le32 addr, length; } frag[1];
346 };
347 
348 /* Bits in netdev_desc.status */
349 enum desc_status_bits {
350 	DescOwn=0x8000,
351 	DescEndPacket=0x4000,
352 	DescEndRing=0x2000,
353 	LastFrag=0x80000000,
354 	DescIntrOnTx=0x8000,
355 	DescIntrOnDMADone=0x80000000,
356 	DisableAlign = 0x00000001,
357 };
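
/* Illustrative sketch only (condensed from start_tx() below): a single-
   fragment Tx descriptor is filled in roughly as

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32(pci_map_single(np->pci_dev,
				skb->data, skb->len, PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);

   tx_poll() later chains the next_desc pointers and sets DescIntrOnTx on
   the last descriptor queued. */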
358 
359 #define PRIV_ALIGN	15 	/* Required alignment mask */
360 /* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
361    within the structure. */
362 #define MII_CNT		4
363 struct netdev_private {
364 	/* Descriptor rings first for alignment. */
365 	struct netdev_desc *rx_ring;
366 	struct netdev_desc *tx_ring;
367 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
368 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
369         dma_addr_t tx_ring_dma;
370         dma_addr_t rx_ring_dma;
371 	struct net_device_stats stats;
372 	struct timer_list timer;		/* Media monitoring timer. */
373 	/* Frequently used values: keep some adjacent for cache effect. */
374 	spinlock_t lock;
375 	spinlock_t rx_lock;			/* Group with Tx control cache line. */
376 	int msg_enable;
377 	int chip_id;
378 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
379 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
380 	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
381 	unsigned int cur_tx, dirty_tx;
382 	/* These values keep track of the transceiver/media in use. */
383 	unsigned int flowctrl:1;
384 	unsigned int default_port:4;		/* Last dev->if_port value. */
385 	unsigned int an_enable:1;
386 	unsigned int speed;
387 	struct tasklet_struct rx_tasklet;
388 	struct tasklet_struct tx_tasklet;
389 	int budget;
390 	int cur_task;
391 	/* Multicast and receive mode. */
392 	spinlock_t mcastlock;			/* SMP lock multicast updates. */
393 	u16 mcast_filter[4];
394 	/* MII transceiver section. */
395 	struct mii_if_info mii_if;
396 	int mii_preamble_required;
397 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
398 	struct pci_dev *pci_dev;
399 	void __iomem *base;
400 };
401 
402 /* The station address location in the EEPROM. */
403 #define EEPROM_SA_OFFSET	0x10
404 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
405 			IntrDrvRqst | IntrTxDone | StatsMax | \
406 			LinkChange)
407 
408 static int  change_mtu(struct net_device *dev, int new_mtu);
409 static int  eeprom_read(void __iomem *ioaddr, int location);
410 static int  mdio_read(struct net_device *dev, int phy_id, int location);
411 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
412 static int  mdio_wait_link(struct net_device *dev, int wait);
413 static int  netdev_open(struct net_device *dev);
414 static void check_duplex(struct net_device *dev);
415 static void netdev_timer(unsigned long data);
416 static void tx_timeout(struct net_device *dev);
417 static void init_ring(struct net_device *dev);
418 static int  start_tx(struct sk_buff *skb, struct net_device *dev);
419 static int reset_tx (struct net_device *dev);
420 static irqreturn_t intr_handler(int irq, void *dev_instance);
421 static void rx_poll(unsigned long data);
422 static void tx_poll(unsigned long data);
423 static void refill_rx (struct net_device *dev);
424 static void netdev_error(struct net_device *dev, int intr_status);
425 static void netdev_error(struct net_device *dev, int intr_status);
426 static void set_rx_mode(struct net_device *dev);
427 static int __set_mac_addr(struct net_device *dev);
428 static struct net_device_stats *get_stats(struct net_device *dev);
429 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
430 static int  netdev_close(struct net_device *dev);
431 static const struct ethtool_ops ethtool_ops;
432 
433 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
434 {
435 	struct netdev_private *np = netdev_priv(dev);
436 	void __iomem *ioaddr = np->base + ASICCtrl;
437 	int countdown;
438 
439 	/* ST201 documentation states ASICCtrl is a 32bit register */
440 	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
441 	/* ST201 documentation states reset can take up to 1 ms */
442 	countdown = 10 + 1;
443 	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
444 		if (--countdown == 0) {
445 			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
446 			break;
447 		}
448 		udelay(100);
449 	}
450 }
451 
452 static const struct net_device_ops netdev_ops = {
453 	.ndo_open		= netdev_open,
454 	.ndo_stop		= netdev_close,
455 	.ndo_start_xmit		= start_tx,
456 	.ndo_get_stats 		= get_stats,
457 	.ndo_set_multicast_list = set_rx_mode,
458 	.ndo_do_ioctl 		= netdev_ioctl,
459 	.ndo_tx_timeout		= tx_timeout,
460 	.ndo_change_mtu		= change_mtu,
461 	.ndo_set_mac_address 	= eth_mac_addr,
462 	.ndo_validate_addr	= eth_validate_addr,
463 };
464 
465 static int __devinit sundance_probe1 (struct pci_dev *pdev,
466 				      const struct pci_device_id *ent)
467 {
468 	struct net_device *dev;
469 	struct netdev_private *np;
470 	static int card_idx;
471 	int chip_idx = ent->driver_data;
472 	int irq;
473 	int i;
474 	void __iomem *ioaddr;
475 	u16 mii_ctl;
476 	void *ring_space;
477 	dma_addr_t ring_dma;
478 #ifdef USE_IO_OPS
479 	int bar = 0;
480 #else
481 	int bar = 1;
482 #endif
483 	int phy, phy_end, phy_idx = 0;
484 
485 /* when built into the kernel, we only print version if device is found */
486 #ifndef MODULE
487 	static int printed_version;
488 	if (!printed_version++)
489 		printk(version);
490 #endif
491 
492 	if (pci_enable_device(pdev))
493 		return -EIO;
494 	pci_set_master(pdev);
495 
496 	irq = pdev->irq;
497 
498 	dev = alloc_etherdev(sizeof(*np));
499 	if (!dev)
500 		return -ENOMEM;
501 	SET_NETDEV_DEV(dev, &pdev->dev);
502 
503 	if (pci_request_regions(pdev, DRV_NAME))
504 		goto err_out_netdev;
505 
506 	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
507 	if (!ioaddr)
508 		goto err_out_res;
509 
510 	for (i = 0; i < 3; i++)
511 		((__le16 *)dev->dev_addr)[i] =
512 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
513 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
514 
515 	dev->base_addr = (unsigned long)ioaddr;
516 	dev->irq = irq;
517 
518 	np = netdev_priv(dev);
519 	np->base = ioaddr;
520 	np->pci_dev = pdev;
521 	np->chip_id = chip_idx;
522 	np->msg_enable = (1 << debug) - 1;
523 	spin_lock_init(&np->lock);
524 	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
525 	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
526 
527 	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
528 	if (!ring_space)
529 		goto err_out_cleardev;
530 	np->tx_ring = (struct netdev_desc *)ring_space;
531 	np->tx_ring_dma = ring_dma;
532 
533 	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
534 	if (!ring_space)
535 		goto err_out_unmap_tx;
536 	np->rx_ring = (struct netdev_desc *)ring_space;
537 	np->rx_ring_dma = ring_dma;
538 
539 	np->mii_if.dev = dev;
540 	np->mii_if.mdio_read = mdio_read;
541 	np->mii_if.mdio_write = mdio_write;
542 	np->mii_if.phy_id_mask = 0x1f;
543 	np->mii_if.reg_num_mask = 0x1f;
544 
545 	/* The chip-specific entries in the device structure. */
546 	dev->netdev_ops = &netdev_ops;
547 	SET_ETHTOOL_OPS(dev, &ethtool_ops);
548 	dev->watchdog_timeo = TX_TIMEOUT;
549 
550 	pci_set_drvdata(pdev, dev);
551 
552 	i = register_netdev(dev);
553 	if (i)
554 		goto err_out_unmap_rx;
555 
556 	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
557 	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
558 	       dev->dev_addr, irq);
559 
560 	np->phys[0] = 1;		/* Default setting */
561 	np->mii_preamble_required++;
562 
563 	/*
564 	 * It seems some PHYs don't deal well with address 0 being accessed
565 	 * first
566 	 */
567 	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
568 		phy = 0;
569 		phy_end = 31;
570 	} else {
571 		phy = 1;
572 		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
573 	}
574 	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
575 		int phyx = phy & 0x1f;
576 		int mii_status = mdio_read(dev, phyx, MII_BMSR);
577 		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
578 			np->phys[phy_idx++] = phyx;
579 			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
580 			if ((mii_status & 0x0040) == 0)
581 				np->mii_preamble_required++;
582 			printk(KERN_INFO "%s: MII PHY found at address %d, status "
583 				   "0x%4.4x advertising %4.4x.\n",
584 				   dev->name, phyx, mii_status, np->mii_if.advertising);
585 		}
586 	}
587 	np->mii_preamble_required--;
588 
589 	if (phy_idx == 0) {
590 		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
591 			   dev->name, ioread32(ioaddr + ASICCtrl));
592 		goto err_out_unregister;
593 	}
594 
595 	np->mii_if.phy_id = np->phys[0];
596 
597 	/* Parse override configuration */
598 	np->an_enable = 1;
599 	if (card_idx < MAX_UNITS) {
600 		if (media[card_idx] != NULL) {
601 			np->an_enable = 0;
602 			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
603 			    strcmp (media[card_idx], "4") == 0) {
604 				np->speed = 100;
605 				np->mii_if.full_duplex = 1;
606 			} else if (strcmp (media[card_idx], "100mbps_hd") == 0
607 				   || strcmp (media[card_idx], "3") == 0) {
608 				np->speed = 100;
609 				np->mii_if.full_duplex = 0;
610 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
611 				   strcmp (media[card_idx], "2") == 0) {
612 				np->speed = 10;
613 				np->mii_if.full_duplex = 1;
614 			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
615 				   strcmp (media[card_idx], "1") == 0) {
616 				np->speed = 10;
617 				np->mii_if.full_duplex = 0;
618 			} else {
619 				np->an_enable = 1;
620 			}
621 		}
622 		if (flowctrl == 1)
623 			np->flowctrl = 1;
624 	}
625 
626 	/* Fibre PHY? */
627 	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
628 		/* Default 100Mbps Full */
629 		if (np->an_enable) {
630 			np->speed = 100;
631 			np->mii_if.full_duplex = 1;
632 			np->an_enable = 0;
633 		}
634 	}
635 	/* Reset PHY */
636 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
637 	mdelay (300);
638 	/* If flow control is enabled, we need to advertise it. */
639 	if (np->flowctrl)
640 		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
641 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
642 	/* Force media type */
643 	if (!np->an_enable) {
644 		mii_ctl = 0;
645 		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
646 		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
647 		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
648 		printk (KERN_INFO "Override speed=%d, %s duplex\n",
649 			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
650 
651 	}
652 
653 	/* Perhaps move the reset here? */
654 	/* Reset the chip to erase previous misconfiguration. */
655 	if (netif_msg_hw(np))
656 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
657 	sundance_reset(dev, 0x00ff << 16);
658 	if (netif_msg_hw(np))
659 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
660 
661 	card_idx++;
662 	return 0;
663 
664 err_out_unregister:
665 	unregister_netdev(dev);
666 err_out_unmap_rx:
667         pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
668 err_out_unmap_tx:
669         pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
670 err_out_cleardev:
671 	pci_set_drvdata(pdev, NULL);
672 	pci_iounmap(pdev, ioaddr);
673 err_out_res:
674 	pci_release_regions(pdev);
675 err_out_netdev:
676 	free_netdev (dev);
677 	return -ENODEV;
678 }
679 
680 static int change_mtu(struct net_device *dev, int new_mtu)
681 {
682 	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
683 		return -EINVAL;
684 	if (netif_running(dev))
685 		return -EBUSY;
686 	dev->mtu = new_mtu;
687 	return 0;
688 }
689 
690 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
691 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
692 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
693 {
694 	int boguscnt = 10000;		/* Typical 1900 ticks. */
695 	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
696 	do {
697 		eeprom_delay(ioaddr + EECtrl);
698 		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
699 			return ioread16(ioaddr + EEData);
700 		}
701 	} while (--boguscnt > 0);
702 	return 0;
703 }
704 
705 /*  MII transceiver control section.
706 	Read and write the MII registers using software-generated serial
707 	MDIO protocol.  See the MII specifications or DP83840A data sheet
708 	for details.
709 
710 	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
711 	met by back-to-back 33 MHz PCI cycles. */
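
/* As implemented below (a sketch of the framing, not quoted from a datasheet):
   mdio_sync() clocks out 32 preamble ones (when a preamble is required);
   mdio_read() then shifts out a 16-bit command of the form

	11 01 10 PPPPP RRRRR   (fill bits, start, read opcode, PHY addr, register)

   built as (0xf6 << 10) | (phy_id << 5) | location, and finally clocks in
   the two turnaround bits, 16 data bits, and a trailing idle bit. */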
712 #define mdio_delay() ioread8(mdio_addr)
713 
714 enum mii_reg_bits {
715 	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
716 };
717 #define MDIO_EnbIn  (0)
718 #define MDIO_WRITE0 (MDIO_EnbOutput)
719 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
720 
721 /* Generate the preamble required for initial synchronization and
722    a few older transceivers. */
723 static void mdio_sync(void __iomem *mdio_addr)
724 {
725 	int bits = 32;
726 
727 	/* Establish sync by sending at least 32 logic ones. */
728 	while (--bits >= 0) {
729 		iowrite8(MDIO_WRITE1, mdio_addr);
730 		mdio_delay();
731 		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
732 		mdio_delay();
733 	}
734 }
735 
736 static int mdio_read(struct net_device *dev, int phy_id, int location)
737 {
738 	struct netdev_private *np = netdev_priv(dev);
739 	void __iomem *mdio_addr = np->base + MIICtrl;
740 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
741 	int i, retval = 0;
742 
743 	if (np->mii_preamble_required)
744 		mdio_sync(mdio_addr);
745 
746 	/* Shift the read command bits out. */
747 	for (i = 15; i >= 0; i--) {
748 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
749 
750 		iowrite8(dataval, mdio_addr);
751 		mdio_delay();
752 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
753 		mdio_delay();
754 	}
755 	/* Read the two transition bits, the 16 data bits, and the wire-idle bit. */
756 	for (i = 19; i > 0; i--) {
757 		iowrite8(MDIO_EnbIn, mdio_addr);
758 		mdio_delay();
759 		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
760 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
761 		mdio_delay();
762 	}
763 	return (retval>>1) & 0xffff;
764 }
765 
766 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
767 {
768 	struct netdev_private *np = netdev_priv(dev);
769 	void __iomem *mdio_addr = np->base + MIICtrl;
770 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
771 	int i;
772 
773 	if (np->mii_preamble_required)
774 		mdio_sync(mdio_addr);
775 
776 	/* Shift the command bits out. */
777 	for (i = 31; i >= 0; i--) {
778 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
779 
780 		iowrite8(dataval, mdio_addr);
781 		mdio_delay();
782 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
783 		mdio_delay();
784 	}
785 	/* Clear out extra bits. */
786 	for (i = 2; i > 0; i--) {
787 		iowrite8(MDIO_EnbIn, mdio_addr);
788 		mdio_delay();
789 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
790 		mdio_delay();
791 	}
792 	return;
793 }
794 
795 static int mdio_wait_link(struct net_device *dev, int wait)
796 {
797 	int bmsr;
798 	int phy_id;
799 	struct netdev_private *np;
800 
801 	np = netdev_priv(dev);
802 	phy_id = np->phys[0];
803 
804 	do {
805 		bmsr = mdio_read(dev, phy_id, MII_BMSR);
806 		if (bmsr & 0x0004)
807 			return 0;
808 		mdelay(1);
809 	} while (--wait > 0);
810 	return -1;
811 }
812 
813 static int netdev_open(struct net_device *dev)
814 {
815 	struct netdev_private *np = netdev_priv(dev);
816 	void __iomem *ioaddr = np->base;
817 	unsigned long flags;
818 	int i;
819 
820 	/* Do we need to reset the chip??? */
821 
822 	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
823 	if (i)
824 		return i;
825 
826 	if (netif_msg_ifup(np))
827 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
828 			   dev->name, dev->irq);
829 	init_ring(dev);
830 
831 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
832 	/* The Tx list pointer is written as packets are queued. */
833 
834 	/* Initialize other registers. */
835 	__set_mac_addr(dev);
836 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
837 	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
838 #else
839 	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
840 #endif
841 	if (dev->mtu > 2047)
842 		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
843 
844 	/* Configure the PCI bus bursts and FIFO thresholds. */
845 
846 	if (dev->if_port == 0)
847 		dev->if_port = np->default_port;
848 
849 	spin_lock_init(&np->mcastlock);
850 
851 	set_rx_mode(dev);
852 	iowrite16(0, ioaddr + IntrEnable);
853 	iowrite16(0, ioaddr + DownCounter);
854 	/* Set the chip to poll every N*320nsec. */
855 	iowrite8(100, ioaddr + RxDMAPollPeriod);
856 	iowrite8(127, ioaddr + TxDMAPollPeriod);
857 	/* Fix DFE-580TX packet drop issue */
858 	if (np->pci_dev->revision >= 0x14)
859 		iowrite8(0x01, ioaddr + DebugCtrl1);
860 	netif_start_queue(dev);
861 
862 	spin_lock_irqsave(&np->lock, flags);
863 	reset_tx(dev);
864 	spin_unlock_irqrestore(&np->lock, flags);
865 
866 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
867 
868 	if (netif_msg_ifup(np))
869 		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
870 			   "MAC Control %x, %4.4x %4.4x.\n",
871 			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
872 			   ioread32(ioaddr + MACCtrl0),
873 			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
874 
875 	/* Set the timer to check for link beat. */
876 	init_timer(&np->timer);
877 	np->timer.expires = jiffies + 3*HZ;
878 	np->timer.data = (unsigned long)dev;
879 	np->timer.function = &netdev_timer;				/* timer handler */
880 	add_timer(&np->timer);
881 
882 	/* Enable interrupts by setting the interrupt mask. */
883 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
884 
885 	return 0;
886 }
887 
888 static void check_duplex(struct net_device *dev)
889 {
890 	struct netdev_private *np = netdev_priv(dev);
891 	void __iomem *ioaddr = np->base;
892 	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
893 	int negotiated = mii_lpa & np->mii_if.advertising;
894 	int duplex;
895 
896 	/* Force media */
897 	if (!np->an_enable || mii_lpa == 0xffff) {
898 		if (np->mii_if.full_duplex)
899 			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
900 				ioaddr + MACCtrl0);
901 		return;
902 	}
903 
904 	/* Autonegotiation */
905 	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
906 	if (np->mii_if.full_duplex != duplex) {
907 		np->mii_if.full_duplex = duplex;
908 		if (netif_msg_link(np))
909 			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
910 				   "negotiated capability %4.4x.\n", dev->name,
911 				   duplex ? "full" : "half", np->phys[0], negotiated);
912 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
913 	}
914 }
915 
916 static void netdev_timer(unsigned long data)
917 {
918 	struct net_device *dev = (struct net_device *)data;
919 	struct netdev_private *np = netdev_priv(dev);
920 	void __iomem *ioaddr = np->base;
921 	int next_tick = 10*HZ;
922 
923 	if (netif_msg_timer(np)) {
924 		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
925 			   "Tx %x Rx %x.\n",
926 			   dev->name, ioread16(ioaddr + IntrEnable),
927 			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
928 	}
929 	check_duplex(dev);
930 	np->timer.expires = jiffies + next_tick;
931 	add_timer(&np->timer);
932 }
933 
934 static void tx_timeout(struct net_device *dev)
935 {
936 	struct netdev_private *np = netdev_priv(dev);
937 	void __iomem *ioaddr = np->base;
938 	unsigned long flag;
939 
940 	netif_stop_queue(dev);
941 	tasklet_disable(&np->tx_tasklet);
942 	iowrite16(0, ioaddr + IntrEnable);
943 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
944 		   "TxFrameId %2.2x,"
945 		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
946 		   ioread8(ioaddr + TxFrameId));
947 
948 	{
949 		int i;
950 		for (i=0; i<TX_RING_SIZE; i++) {
951 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
952 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
953 				le32_to_cpu(np->tx_ring[i].next_desc),
954 				le32_to_cpu(np->tx_ring[i].status),
955 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
956 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
957 				le32_to_cpu(np->tx_ring[i].frag[0].length));
958 		}
959 		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
960 			ioread32(np->base + TxListPtr),
961 			netif_queue_stopped(dev));
962 		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
963 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
964 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
965 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
966 		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
967 	}
968 	spin_lock_irqsave(&np->lock, flag);
969 
970 	/* Stop and restart the chip's Tx processes. */
971 	reset_tx(dev);
972 	spin_unlock_irqrestore(&np->lock, flag);
973 
974 	dev->if_port = 0;
975 
976 	dev->trans_start = jiffies;
977 	np->stats.tx_errors++;
978 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
979 		netif_wake_queue(dev);
980 	}
981 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
982 	tasklet_enable(&np->tx_tasklet);
983 }
984 
985 
986 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
987 static void init_ring(struct net_device *dev)
988 {
989 	struct netdev_private *np = netdev_priv(dev);
990 	int i;
991 
992 	np->cur_rx = np->cur_tx = 0;
993 	np->dirty_rx = np->dirty_tx = 0;
994 	np->cur_task = 0;
995 
996 	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
997 
998 	/* Initialize all Rx descriptors. */
999 	for (i = 0; i < RX_RING_SIZE; i++) {
1000 		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1001 			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1002 		np->rx_ring[i].status = 0;
1003 		np->rx_ring[i].frag[0].length = 0;
1004 		np->rx_skbuff[i] = NULL;
1005 	}
1006 
1007 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1008 	for (i = 0; i < RX_RING_SIZE; i++) {
1009 		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1010 		np->rx_skbuff[i] = skb;
1011 		if (skb == NULL)
1012 			break;
1013 		skb->dev = dev;		/* Mark as being used by this device. */
1014 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1015 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1016 			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1017 				PCI_DMA_FROMDEVICE));
1018 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1019 	}
1020 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1021 
1022 	for (i = 0; i < TX_RING_SIZE; i++) {
1023 		np->tx_skbuff[i] = NULL;
1024 		np->tx_ring[i].status = 0;
1025 	}
1026 	return;
1027 }
1028 
1029 static void tx_poll (unsigned long data)
1030 {
1031 	struct net_device *dev = (struct net_device *)data;
1032 	struct netdev_private *np = netdev_priv(dev);
1033 	unsigned head = np->cur_task % TX_RING_SIZE;
1034 	struct netdev_desc *txdesc =
1035 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1036 
1037 	/* Chain the next pointer */
1038 	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1039 		int entry = np->cur_task % TX_RING_SIZE;
1040 		txdesc = &np->tx_ring[entry];
1041 		if (np->last_tx) {
1042 			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1043 				entry*sizeof(struct netdev_desc));
1044 		}
1045 		np->last_tx = txdesc;
1046 	}
1047 	/* Indicate the latest descriptor of tx ring */
1048 	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1049 
1050 	if (ioread32 (np->base + TxListPtr) == 0)
1051 		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1052 			np->base + TxListPtr);
1053 	return;
1054 }
1055 
1056 static int
1057 start_tx (struct sk_buff *skb, struct net_device *dev)
1058 {
1059 	struct netdev_private *np = netdev_priv(dev);
1060 	struct netdev_desc *txdesc;
1061 	unsigned entry;
1062 
1063 	/* Calculate the next Tx descriptor entry. */
1064 	entry = np->cur_tx % TX_RING_SIZE;
1065 	np->tx_skbuff[entry] = skb;
1066 	txdesc = &np->tx_ring[entry];
1067 
1068 	txdesc->next_desc = 0;
1069 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1070 	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1071 							skb->len,
1072 							PCI_DMA_TODEVICE));
1073 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1074 
1075 	/* Increment cur_tx before tasklet_schedule() */
1076 	np->cur_tx++;
1077 	mb();
1078 	/* Schedule a tx_poll() task */
1079 	tasklet_schedule(&np->tx_tasklet);
1080 
1081 	/* On some architectures: explicitly flush cache lines here. */
1082 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1083 			&& !netif_queue_stopped(dev)) {
1084 		/* do nothing */
1085 	} else {
1086 		netif_stop_queue (dev);
1087 	}
1088 	dev->trans_start = jiffies;
1089 	if (netif_msg_tx_queued(np)) {
1090 		printk (KERN_DEBUG
1091 			"%s: Transmit frame #%d queued in slot %d.\n",
1092 			dev->name, np->cur_tx, entry);
1093 	}
1094 	return 0;
1095 }
1096 
1097 /* Reset the hardware Tx path and free all Tx buffers */
1098 static int
1099 reset_tx (struct net_device *dev)
1100 {
1101 	struct netdev_private *np = netdev_priv(dev);
1102 	void __iomem *ioaddr = np->base;
1103 	struct sk_buff *skb;
1104 	int i;
1105 	int irq = in_interrupt();
1106 
1107 	/* Reset the Tx logic; TxListPtr will be cleared */
1108 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1109 	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1110 
1111 	/* free all tx skbuff */
1112 	for (i = 0; i < TX_RING_SIZE; i++) {
1113 		np->tx_ring[i].next_desc = 0;
1114 
1115 		skb = np->tx_skbuff[i];
1116 		if (skb) {
1117 			pci_unmap_single(np->pci_dev,
1118 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1119 				skb->len, PCI_DMA_TODEVICE);
1120 			if (irq)
1121 				dev_kfree_skb_irq (skb);
1122 			else
1123 				dev_kfree_skb (skb);
1124 			np->tx_skbuff[i] = NULL;
1125 			np->stats.tx_dropped++;
1126 		}
1127 	}
1128 	np->cur_tx = np->dirty_tx = 0;
1129 	np->cur_task = 0;
1130 
1131 	np->last_tx = NULL;
1132 	iowrite8(127, ioaddr + TxDMAPollPeriod);
1133 
1134 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1135 	return 0;
1136 }
1137 
1138 /* The interrupt handler cleans up after the Tx thread,
1139    and schedules the Rx work as a tasklet */
1140 static irqreturn_t intr_handler(int irq, void *dev_instance)
1141 {
1142 	struct net_device *dev = (struct net_device *)dev_instance;
1143 	struct netdev_private *np = netdev_priv(dev);
1144 	void __iomem *ioaddr = np->base;
1145 	int hw_frame_id;
1146 	int tx_cnt;
1147 	int tx_status;
1148 	int handled = 0;
1149 	int i;
1150 
1151 
1152 	do {
1153 		int intr_status = ioread16(ioaddr + IntrStatus);
1154 		iowrite16(intr_status, ioaddr + IntrStatus);
1155 
1156 		if (netif_msg_intr(np))
1157 			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1158 				   dev->name, intr_status);
1159 
1160 		if (!(intr_status & DEFAULT_INTR))
1161 			break;
1162 
1163 		handled = 1;
1164 
1165 		if (intr_status & (IntrRxDMADone)) {
1166 			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1167 					ioaddr + IntrEnable);
1168 			if (np->budget < 0)
1169 				np->budget = RX_BUDGET;
1170 			tasklet_schedule(&np->rx_tasklet);
1171 		}
1172 		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1173 			tx_status = ioread16 (ioaddr + TxStatus);
1174 			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1175 				if (netif_msg_tx_done(np))
1176 					printk
1177 					    ("%s: Transmit status is %2.2x.\n",
1178 				     	dev->name, tx_status);
1179 				if (tx_status & 0x1e) {
1180 					if (netif_msg_tx_err(np))
1181 						printk("%s: Transmit error status %4.4x.\n",
1182 							   dev->name, tx_status);
1183 					np->stats.tx_errors++;
1184 					if (tx_status & 0x10)
1185 						np->stats.tx_fifo_errors++;
1186 					if (tx_status & 0x08)
1187 						np->stats.collisions++;
1188 					if (tx_status & 0x04)
1189 						np->stats.tx_fifo_errors++;
1190 					if (tx_status & 0x02)
1191 						np->stats.tx_window_errors++;
1192 
1193 					/*
1194 					** This reset has been verified on
1195 					** DFE-580TX boards ! phdm@macqel.be.
1196 					*/
1197 					if (tx_status & 0x10) {	/* TxUnderrun */
1198 						/* Restart Tx FIFO and transmitter */
1199 						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1200 						/* No need to reset the Tx pointer here */
1201 					}
1202 					/* Restart the Tx. Need to make sure tx enabled */
1203 					i = 10;
1204 					do {
1205 						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1206 						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1207 							break;
1208 						mdelay(1);
1209 					} while (--i);
1210 				}
1211 				/* Yup, this is a documentation bug.  It cost me *hours*. */
1212 				iowrite16 (0, ioaddr + TxStatus);
1213 				if (tx_cnt < 0) {
1214 					iowrite32(5000, ioaddr + DownCounter);
1215 					break;
1216 				}
1217 				tx_status = ioread16 (ioaddr + TxStatus);
1218 			}
1219 			hw_frame_id = (tx_status >> 8) & 0xff;
1220 		} else 	{
1221 			hw_frame_id = ioread8(ioaddr + TxFrameId);
1222 		}
1223 
1224 		if (np->pci_dev->revision >= 0x14) {
1225 			spin_lock(&np->lock);
1226 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1227 				int entry = np->dirty_tx % TX_RING_SIZE;
1228 				struct sk_buff *skb;
1229 				int sw_frame_id;
1230 				sw_frame_id = (le32_to_cpu(
1231 					np->tx_ring[entry].status) >> 2) & 0xff;
1232 				if (sw_frame_id == hw_frame_id &&
1233 					!(le32_to_cpu(np->tx_ring[entry].status)
1234 					& 0x00010000))
1235 						break;
1236 				if (sw_frame_id == (hw_frame_id + 1) %
1237 					TX_RING_SIZE)
1238 						break;
1239 				skb = np->tx_skbuff[entry];
1240 				/* Free the original skb. */
1241 				pci_unmap_single(np->pci_dev,
1242 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1243 					skb->len, PCI_DMA_TODEVICE);
1244 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1245 				np->tx_skbuff[entry] = NULL;
1246 				np->tx_ring[entry].frag[0].addr = 0;
1247 				np->tx_ring[entry].frag[0].length = 0;
1248 			}
1249 			spin_unlock(&np->lock);
1250 		} else {
1251 			spin_lock(&np->lock);
1252 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1253 				int entry = np->dirty_tx % TX_RING_SIZE;
1254 				struct sk_buff *skb;
1255 				if (!(le32_to_cpu(np->tx_ring[entry].status)
1256 							& 0x00010000))
1257 					break;
1258 				skb = np->tx_skbuff[entry];
1259 				/* Free the original skb. */
1260 				pci_unmap_single(np->pci_dev,
1261 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1262 					skb->len, PCI_DMA_TODEVICE);
1263 				dev_kfree_skb_irq (np->tx_skbuff[entry]);
1264 				np->tx_skbuff[entry] = NULL;
1265 				np->tx_ring[entry].frag[0].addr = 0;
1266 				np->tx_ring[entry].frag[0].length = 0;
1267 			}
1268 			spin_unlock(&np->lock);
1269 		}
1270 
1271 		if (netif_queue_stopped(dev) &&
1272 			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1273 			/* The ring is no longer full, clear busy flag. */
1274 			netif_wake_queue (dev);
1275 		}
1276 		/* Abnormal error summary/uncommon events handlers. */
1277 		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1278 			netdev_error(dev, intr_status);
1279 	} while (0);
1280 	if (netif_msg_intr(np))
1281 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1282 			   dev->name, ioread16(ioaddr + IntrStatus));
1283 	return IRQ_RETVAL(handled);
1284 }
1285 
1286 static void rx_poll(unsigned long data)
1287 {
1288 	struct net_device *dev = (struct net_device *)data;
1289 	struct netdev_private *np = netdev_priv(dev);
1290 	int entry = np->cur_rx % RX_RING_SIZE;
1291 	int boguscnt = np->budget;
1292 	void __iomem *ioaddr = np->base;
1293 	int received = 0;
1294 
1295 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1296 	while (1) {
1297 		struct netdev_desc *desc = &(np->rx_ring[entry]);
1298 		u32 frame_status = le32_to_cpu(desc->status);
1299 		int pkt_len;
1300 
1301 		if (--boguscnt < 0) {
1302 			goto not_done;
1303 		}
1304 		if (!(frame_status & DescOwn))
1305 			break;
1306 		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1307 		if (netif_msg_rx_status(np))
1308 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1309 				   frame_status);
1310 		if (frame_status & 0x001f4000) {
1311 			/* There was an error. */
1312 			if (netif_msg_rx_err(np))
1313 				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1314 					   frame_status);
1315 			np->stats.rx_errors++;
1316 			if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1317 			if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1318 			if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1319 			if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1320 			if (frame_status & 0x00100000) {
1321 				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1322 					   " status %8.8x.\n",
1323 					   dev->name, frame_status);
1324 			}
1325 		} else {
1326 			struct sk_buff *skb;
1327 #ifndef final_version
1328 			if (netif_msg_rx_status(np))
1329 				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1330 					   ", bogus_cnt %d.\n",
1331 					   pkt_len, boguscnt);
1332 #endif
1333 			/* Check if the packet is long enough to accept without copying
1334 			   to a minimally-sized skbuff. */
1335 			if (pkt_len < rx_copybreak
1336 				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1337 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1338 				pci_dma_sync_single_for_cpu(np->pci_dev,
1339 							    le32_to_cpu(desc->frag[0].addr),
1340 							    np->rx_buf_sz,
1341 							    PCI_DMA_FROMDEVICE);
1342 
1343 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1344 				pci_dma_sync_single_for_device(np->pci_dev,
1345 							       le32_to_cpu(desc->frag[0].addr),
1346 							       np->rx_buf_sz,
1347 							       PCI_DMA_FROMDEVICE);
1348 				skb_put(skb, pkt_len);
1349 			} else {
1350 				pci_unmap_single(np->pci_dev,
1351 					le32_to_cpu(desc->frag[0].addr),
1352 					np->rx_buf_sz,
1353 					PCI_DMA_FROMDEVICE);
1354 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1355 				np->rx_skbuff[entry] = NULL;
1356 			}
1357 			skb->protocol = eth_type_trans(skb, dev);
1358 			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1359 			netif_rx(skb);
1360 		}
1361 		entry = (entry + 1) % RX_RING_SIZE;
1362 		received++;
1363 	}
1364 	np->cur_rx = entry;
1365 	refill_rx (dev);
1366 	np->budget -= received;
1367 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1368 	return;
1369 
1370 not_done:
1371 	np->cur_rx = entry;
1372 	refill_rx (dev);
1373 	if (!received)
1374 		received = 1;
1375 	np->budget -= received;
1376 	if (np->budget <= 0)
1377 		np->budget = RX_BUDGET;
1378 	tasklet_schedule(&np->rx_tasklet);
1379 	return;
1380 }
1381 
1382 static void refill_rx (struct net_device *dev)
1383 {
1384 	struct netdev_private *np = netdev_priv(dev);
1385 	int entry;
1386 	int cnt = 0;
1387 
1388 	/* Refill the Rx ring buffers. */
1389 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1390 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1391 		struct sk_buff *skb;
1392 		entry = np->dirty_rx % RX_RING_SIZE;
1393 		if (np->rx_skbuff[entry] == NULL) {
1394 			skb = dev_alloc_skb(np->rx_buf_sz);
1395 			np->rx_skbuff[entry] = skb;
1396 			if (skb == NULL)
1397 				break;		/* Better luck next round. */
1398 			skb->dev = dev;		/* Mark as being used by this device. */
1399 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1400 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1401 				pci_map_single(np->pci_dev, skb->data,
1402 					np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1403 		}
1404 		/* Perhaps we need not reset this field. */
1405 		np->rx_ring[entry].frag[0].length =
1406 			cpu_to_le32(np->rx_buf_sz | LastFrag);
1407 		np->rx_ring[entry].status = 0;
1408 		cnt++;
1409 	}
1410 	return;
1411 }
1412 static void netdev_error(struct net_device *dev, int intr_status)
1413 {
1414 	struct netdev_private *np = netdev_priv(dev);
1415 	void __iomem *ioaddr = np->base;
1416 	u16 mii_ctl, mii_advertise, mii_lpa;
1417 	int speed;
1418 
1419 	if (intr_status & LinkChange) {
1420 		if (mdio_wait_link(dev, 10) == 0) {
1421 			printk(KERN_INFO "%s: Link up\n", dev->name);
1422 			if (np->an_enable) {
1423 				mii_advertise = mdio_read(dev, np->phys[0],
1424 							   MII_ADVERTISE);
1425 				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1426 				mii_advertise &= mii_lpa;
1427 				printk(KERN_INFO "%s: Link changed: ",
1428 					dev->name);
1429 				if (mii_advertise & ADVERTISE_100FULL) {
1430 					np->speed = 100;
1431 					printk("100Mbps, full duplex\n");
1432 				} else if (mii_advertise & ADVERTISE_100HALF) {
1433 					np->speed = 100;
1434 					printk("100Mbps, half duplex\n");
1435 				} else if (mii_advertise & ADVERTISE_10FULL) {
1436 					np->speed = 10;
1437 					printk("10Mbps, full duplex\n");
1438 				} else if (mii_advertise & ADVERTISE_10HALF) {
1439 					np->speed = 10;
1440 					printk("10Mbps, half duplex\n");
1441 				} else
1442 					printk("\n");
1443 
1444 			} else {
1445 				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1446 				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1447 				np->speed = speed;
1448 				printk(KERN_INFO "%s: Link changed: %dMbps ,",
1449 					dev->name, speed);
1450 				printk("%s duplex.\n",
1451 					(mii_ctl & BMCR_FULLDPLX) ?
1452 						"full" : "half");
1453 			}
1454 			check_duplex(dev);
1455 			if (np->flowctrl && np->mii_if.full_duplex) {
1456 				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1457 					ioaddr + MulticastFilter1+2);
1458 				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1459 					ioaddr + MACCtrl0);
1460 			}
1461 			netif_carrier_on(dev);
1462 		} else {
1463 			printk(KERN_INFO "%s: Link down\n", dev->name);
1464 			netif_carrier_off(dev);
1465 		}
1466 	}
1467 	if (intr_status & StatsMax) {
1468 		get_stats(dev);
1469 	}
1470 	if (intr_status & IntrPCIErr) {
1471 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1472 			   dev->name, intr_status);
1473 		/* We must do a global reset of DMA to continue. */
1474 	}
1475 }
1476 
1477 static struct net_device_stats *get_stats(struct net_device *dev)
1478 {
1479 	struct netdev_private *np = netdev_priv(dev);
1480 	void __iomem *ioaddr = np->base;
1481 	int i;
1482 
1483 	/* We should lock this segment of code for SMP eventually, although
1484 	   the vulnerability window is very small and statistics are
1485 	   non-critical. */
1486 	/* The chip only needs to report frames that were silently dropped. */
1487 	np->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1488 	np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1489 	np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1490 	np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1491 	np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1492 	np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1493 	np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1494 	ioread8(ioaddr + StatsTxDefer);
1495 	for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1496 		ioread8(ioaddr + i);
1497 	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1498 	np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1499 	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1500 	np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1501 
1502 	return &np->stats;
1503 }
1504 
1505 static void set_rx_mode(struct net_device *dev)
1506 {
1507 	struct netdev_private *np = netdev_priv(dev);
1508 	void __iomem *ioaddr = np->base;
1509 	u16 mc_filter[4];			/* Multicast hash filter */
1510 	u32 rx_mode;
1511 	int i;
1512 
1513 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1514 		memset(mc_filter, 0xff, sizeof(mc_filter));
1515 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1516 	} else if ((dev->mc_count > multicast_filter_limit)
1517 			   ||  (dev->flags & IFF_ALLMULTI)) {
1518 		/* Too many to match, or accept all multicasts. */
1519 		memset(mc_filter, 0xff, sizeof(mc_filter));
1520 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1521 	} else if (dev->mc_count) {
1522 		struct dev_mc_list *mclist;
1523 		int bit;
1524 		int index;
1525 		int crc;
1526 		memset (mc_filter, 0, sizeof (mc_filter));
1527 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1528 		     i++, mclist = mclist->next) {
1529 			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
1530 			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1531 				if (crc & 0x80000000) index |= 1 << bit;
1532 			mc_filter[index/16] |= (1 << (index % 16));
1533 		}
1534 		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1535 	} else {
1536 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1537 		return;
1538 	}
1539 	if (np->mii_if.full_duplex && np->flowctrl)
1540 		mc_filter[3] |= 0x0200;
1541 
1542 	for (i = 0; i < 4; i++)
1543 		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1544 	iowrite8(rx_mode, ioaddr + RxMode);
1545 }
1546 
1547 static int __set_mac_addr(struct net_device *dev)
1548 {
1549 	struct netdev_private *np = netdev_priv(dev);
1550 	u16 addr16;
1551 
1552 	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1553 	iowrite16(addr16, np->base + StationAddr);
1554 	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1555 	iowrite16(addr16, np->base + StationAddr+2);
1556 	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1557 	iowrite16(addr16, np->base + StationAddr+4);
1558 	return 0;
1559 }
1560 
1561 static int check_if_running(struct net_device *dev)
1562 {
1563 	if (!netif_running(dev))
1564 		return -EINVAL;
1565 	return 0;
1566 }
1567 
1568 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1569 {
1570 	struct netdev_private *np = netdev_priv(dev);
1571 	strcpy(info->driver, DRV_NAME);
1572 	strcpy(info->version, DRV_VERSION);
1573 	strcpy(info->bus_info, pci_name(np->pci_dev));
1574 }
1575 
1576 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1577 {
1578 	struct netdev_private *np = netdev_priv(dev);
1579 	spin_lock_irq(&np->lock);
1580 	mii_ethtool_gset(&np->mii_if, ecmd);
1581 	spin_unlock_irq(&np->lock);
1582 	return 0;
1583 }
1584 
1585 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1586 {
1587 	struct netdev_private *np = netdev_priv(dev);
1588 	int res;
1589 	spin_lock_irq(&np->lock);
1590 	res = mii_ethtool_sset(&np->mii_if, ecmd);
1591 	spin_unlock_irq(&np->lock);
1592 	return res;
1593 }
1594 
1595 static int nway_reset(struct net_device *dev)
1596 {
1597 	struct netdev_private *np = netdev_priv(dev);
1598 	return mii_nway_restart(&np->mii_if);
1599 }
1600 
1601 static u32 get_link(struct net_device *dev)
1602 {
1603 	struct netdev_private *np = netdev_priv(dev);
1604 	return mii_link_ok(&np->mii_if);
1605 }
1606 
1607 static u32 get_msglevel(struct net_device *dev)
1608 {
1609 	struct netdev_private *np = netdev_priv(dev);
1610 	return np->msg_enable;
1611 }
1612 
1613 static void set_msglevel(struct net_device *dev, u32 val)
1614 {
1615 	struct netdev_private *np = netdev_priv(dev);
1616 	np->msg_enable = val;
1617 }
1618 
1619 static const struct ethtool_ops ethtool_ops = {
1620 	.begin = check_if_running,
1621 	.get_drvinfo = get_drvinfo,
1622 	.get_settings = get_settings,
1623 	.set_settings = set_settings,
1624 	.nway_reset = nway_reset,
1625 	.get_link = get_link,
1626 	.get_msglevel = get_msglevel,
1627 	.set_msglevel = set_msglevel,
1628 };
1629 
1630 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1631 {
1632 	struct netdev_private *np = netdev_priv(dev);
1633 	int rc;
1634 
1635 	if (!netif_running(dev))
1636 		return -EINVAL;
1637 
1638 	spin_lock_irq(&np->lock);
1639 	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1640 	spin_unlock_irq(&np->lock);
1641 
1642 	return rc;
1643 }
1644 
1645 static int netdev_close(struct net_device *dev)
1646 {
1647 	struct netdev_private *np = netdev_priv(dev);
1648 	void __iomem *ioaddr = np->base;
1649 	struct sk_buff *skb;
1650 	int i;
1651 
1652 	/* Wait and kill tasklet */
1653 	tasklet_kill(&np->rx_tasklet);
1654 	tasklet_kill(&np->tx_tasklet);
1655 	np->cur_tx = 0;
1656 	np->dirty_tx = 0;
1657 	np->cur_task = 0;
1658 	np->last_tx = NULL;
1659 
1660 	netif_stop_queue(dev);
1661 
1662 	if (netif_msg_ifdown(np)) {
1663 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1664 			   "Rx %4.4x Int %2.2x.\n",
1665 			   dev->name, ioread8(ioaddr + TxStatus),
1666 			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1667 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1668 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1669 	}
1670 
1671 	/* Disable interrupts by clearing the interrupt mask. */
1672 	iowrite16(0x0000, ioaddr + IntrEnable);
1673 
1674 	/* Disable Rx and Tx DMA so resources can be released safely */
1675 	iowrite32(0x500, ioaddr + DMACtrl);
1676 
1677 	/* Stop the chip's Tx and Rx processes. */
1678 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1679 
1680     	for (i = 2000; i > 0; i--) {
1681  		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1682 			break;
1683 		mdelay(1);
1684     	}
1685 
1686     	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1687 			ioaddr +ASICCtrl + 2);
1688 
1689     	for (i = 2000; i > 0; i--) {
1690  		if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0)
1691 			break;
1692 		mdelay(1);
1693     	}
1694 
1695 #ifdef __i386__
1696 	if (netif_msg_hw(np)) {
1697 		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
1698 			   (int)(np->tx_ring_dma));
1699 		for (i = 0; i < TX_RING_SIZE; i++)
1700 			printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1701 				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1702 				   np->tx_ring[i].frag[0].length);
1703 		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
1704 			   (int)(np->rx_ring_dma));
1705 		for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1706 			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1707 				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1708 				   np->rx_ring[i].frag[0].length);
1709 		}
1710 	}
1711 #endif /* __i386__ debugging only */
1712 
1713 	free_irq(dev->irq, dev);
1714 
1715 	del_timer_sync(&np->timer);
1716 
1717 	/* Free all the skbuffs in the Rx queue. */
1718 	for (i = 0; i < RX_RING_SIZE; i++) {
1719 		np->rx_ring[i].status = 0;
1720 		skb = np->rx_skbuff[i];
1721 		if (skb) {
1722 			pci_unmap_single(np->pci_dev,
1723 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1724 				np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1725 			dev_kfree_skb(skb);
1726 			np->rx_skbuff[i] = NULL;
1727 		}
1728 		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1729 	}
1730 	for (i = 0; i < TX_RING_SIZE; i++) {
1731 		np->tx_ring[i].next_desc = 0;
1732 		skb = np->tx_skbuff[i];
1733 		if (skb) {
1734 			pci_unmap_single(np->pci_dev,
1735 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1736 				skb->len, PCI_DMA_TODEVICE);
1737 			dev_kfree_skb(skb);
1738 			np->tx_skbuff[i] = NULL;
1739 		}
1740 	}
1741 
1742 	return 0;
1743 }
1744 
1745 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1746 {
1747 	struct net_device *dev = pci_get_drvdata(pdev);
1748 
1749 	if (dev) {
1750 		struct netdev_private *np = netdev_priv(dev);
1751 
1752 		unregister_netdev(dev);
1753         	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1754 			np->rx_ring_dma);
1755 	        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1756 			np->tx_ring_dma);
1757 		pci_iounmap(pdev, np->base);
1758 		pci_release_regions(pdev);
1759 		free_netdev(dev);
1760 		pci_set_drvdata(pdev, NULL);
1761 	}
1762 }
1763 
1764 static struct pci_driver sundance_driver = {
1765 	.name		= DRV_NAME,
1766 	.id_table	= sundance_pci_tbl,
1767 	.probe		= sundance_probe1,
1768 	.remove		= __devexit_p(sundance_remove1),
1769 };
1770 
1771 static int __init sundance_init(void)
1772 {
1773 /* when built as a module, this is printed whether or not devices are found in probe */
1774 #ifdef MODULE
1775 	printk(version);
1776 #endif
1777 	return pci_register_driver(&sundance_driver);
1778 }
1779 
1780 static void __exit sundance_exit(void)
1781 {
1782 	pci_unregister_driver(&sundance_driver);
1783 }
1784 
1785 module_init(sundance_init);
1786 module_exit(sundance_exit);
1787 
1788 
1789