1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3 	Written 1999-2000 by Donald Becker.
4 
5 	This software may be used and distributed according to the terms of
6 	the GNU General Public License (GPL), incorporated herein by reference.
7 	Drivers based on or derived from this code fall under the GPL and must
8 	retain the authorship, copyright and license notice.  This file is not
9 	a complete program and may only be used when the entire operating
10 	system is licensed under the GPL.
11 
12 	The author may be reached as becker@scyld.com, or C/O
13 	Scyld Computing Corporation
14 	410 Severn Ave., Suite 210
15 	Annapolis MD 21403
16 
17 	Support and updates available at
18 	http://www.scyld.com/network/sundance.html
19 	[link no longer provides useful info -jgarzik]
20 	Archives of the mailing list are still available at
21 	https://www.beowulf.org/pipermail/netdrivers/
22 
23 */
24 
25 #define DRV_NAME	"sundance"
26 
27 /* The user-configurable values.
28    These may be modified when a driver module is loaded.*/
29 static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
30 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
31    Typical is a 64 element hash table based on the Ethernet CRC.  */
32 static const int multicast_filter_limit = 32;
33 
34 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
35    Setting to > 1518 effectively disables this feature.
36    This chip can receive into offset buffers, so the Alpha does not
37    need a copy-align. */
38 static int rx_copybreak;
39 static int flowctrl=1;
40 
41 /* media[] specifies the media type the NIC operates at.
42 		 autosense	Autosensing active media.
43 		 10mbps_hd 	10Mbps half duplex.
44 		 10mbps_fd 	10Mbps full duplex.
45 		 100mbps_hd 	100Mbps half duplex.
46 		 100mbps_fd 	100Mbps full duplex.
47 		 0		Autosensing active media.
48 		 1	 	10Mbps half duplex.
49 		 2	 	10Mbps full duplex.
50 		 3	 	100Mbps half duplex.
51 		 4	 	100Mbps full duplex.
52 */
53 #define MAX_UNITS 8
54 static char *media[MAX_UNITS];
55 
56 
57 /* Operational parameters that are set at compile time. */
58 
59 /* Keep the ring sizes a power of two for compile efficiency.
60    The compiler will convert <unsigned>'%'<2^N> into a bit mask.
61    Making the Tx ring too large decreases the effectiveness of channel
62    bonding and packet priority, and more than 128 requires modifying the
63    Tx error recovery.
64    Large receive rings merely waste memory. */
65 #define TX_RING_SIZE	32
66 #define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
67 #define RX_RING_SIZE	64
68 #define RX_BUDGET	32
69 #define TX_TOTAL_SIZE	TX_RING_SIZE*sizeof(struct netdev_desc)
70 #define RX_TOTAL_SIZE	RX_RING_SIZE*sizeof(struct netdev_desc)
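/* TX_TOTAL_SIZE and RX_TOTAL_SIZE are the byte counts handed to
   dma_alloc_coherent() in sundance_probe1() for the two descriptor rings. */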
71 
72 /* Operational parameters that usually are not changed. */
73 /* Time in jiffies before concluding the transmitter is hung. */
74 #define TX_TIMEOUT  (4*HZ)
75 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
76 
77 /* Include files, designed to support most kernel versions 2.0.0 and later. */
78 #include <linux/module.h>
79 #include <linux/kernel.h>
80 #include <linux/string.h>
81 #include <linux/timer.h>
82 #include <linux/errno.h>
83 #include <linux/ioport.h>
84 #include <linux/interrupt.h>
85 #include <linux/pci.h>
86 #include <linux/netdevice.h>
87 #include <linux/etherdevice.h>
88 #include <linux/skbuff.h>
89 #include <linux/init.h>
90 #include <linux/bitops.h>
91 #include <linux/uaccess.h>
92 #include <asm/processor.h>		/* Processor type for cache alignment. */
93 #include <asm/io.h>
94 #include <linux/delay.h>
95 #include <linux/spinlock.h>
96 #include <linux/dma-mapping.h>
97 #include <linux/crc32.h>
98 #include <linux/ethtool.h>
99 #include <linux/mii.h>
100 
101 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
102 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
103 MODULE_LICENSE("GPL");
104 
105 module_param(debug, int, 0);
106 module_param(rx_copybreak, int, 0);
107 module_param_array(media, charp, NULL, 0);
108 module_param(flowctrl, int, 0);
109 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
110 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
111 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
112 
113 /*
114 				Theory of Operation
115 
116 I. Board Compatibility
117 
118 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
119 
120 II. Board-specific settings
121 
122 III. Driver operation
123 
124 IIIa. Ring buffers
125 
126 This driver uses two statically allocated fixed-size descriptor lists
127 formed into rings by a branch from the final descriptor to the beginning of
128 the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
129 Some chips explicitly use only 2^N sized rings, while others use a
130 'next descriptor' pointer that the driver forms into rings.
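
As an illustration (mirroring init_ring() below), each Rx descriptor's
next_desc holds the bus address of its successor, with the last entry
wrapping back to the start of the ring:

	np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
		((i + 1) % RX_RING_SIZE) * sizeof(*np->rx_ring));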
131 
132 IIIb/c. Transmit/Receive Structure
133 
134 This driver uses a zero-copy receive and transmit scheme.
135 The driver allocates full frame size skbuffs for the Rx ring buffers at
136 open() time and passes the skb->data field to the chip as receive data
137 buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
138 a fresh skbuff is allocated and the frame is copied to the new skbuff.
139 When the incoming frame is larger, the skbuff is passed directly up the
140 protocol stack.  Buffers consumed this way are replaced by newly allocated
141 skbuffs in a later phase of receives.
142 
143 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
144 using a full-sized skbuff for small frames vs. the copying costs of larger
145 frames.  New boards are typically used in generously configured machines
146 and the underfilled buffers have negligible impact compared to the benefit of
147 a single allocation size, so the default value of zero results in never
148 copying packets.  When copying is done, the cost is usually mitigated by using
149 a combined copy/checksum routine.  Copying also preloads the cache, which is
150 most useful with small frames.
151 
152 A subtle aspect of the operation is that the IP header at offset 14 in an
153 ethernet frame isn't longword aligned for further processing.
154 Unaligned buffers are permitted by the Sundance hardware, so
155 frames are received into the skbuff at an offset of "+2", 16-byte aligning
156 the IP header.
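
Concretely (see init_ring() and refill_rx() below), each receive buffer is
set up so that the data handed to the chip starts two bytes into the skbuff:

	skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
	skb_reserve(skb, 2);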
157 
158 IIId. Synchronization
159 
160 The driver runs as two independent, single-threaded flows of control.  One
161 is the send-packet routine, which enforces single-threaded use by the
162 dev->tbusy flag.  The other thread is the interrupt handler, which is single
163 threaded by the hardware and interrupt handling software.
164 
165 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
166 flag.  It sets the tbusy flag whenever it's queuing a Tx packet. If the next
167 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
168 the 'lp->tx_full' flag.
169 
170 The interrupt handler has exclusive control over the Rx ring and records stats
171 from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
172 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
173 clears both the tx_full and tbusy flags.
174 
175 IV. Notes
176 
177 IVb. References
178 
179 The Sundance ST201 datasheet, preliminary version.
180 The Kendin KS8723 datasheet, preliminary version.
181 The ICplus IP100 datasheet, preliminary version.
182 http://www.scyld.com/expert/100mbps.html
183 http://www.scyld.com/expert/NWay.html
184 
185 IVc. Errata
186 
187 */
188 
189 /* Work-around for Kendin chip bugs. */
190 #ifndef CONFIG_SUNDANCE_MMIO
191 #define USE_IO_OPS 1
192 #endif
193 
194 static const struct pci_device_id sundance_pci_tbl[] = {
195 	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
196 	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
197 	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
198 	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
199 	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
200 	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
201 	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
202 	{ }
203 };
204 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
205 
206 enum {
207 	netdev_io_size = 128
208 };
209 
210 struct pci_id_info {
211         const char *name;
212 };
213 static const struct pci_id_info pci_id_tbl[] = {
214 	{"D-Link DFE-550TX FAST Ethernet Adapter"},
215 	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
216 	{"D-Link DFE-580TX 4 port Server Adapter"},
217 	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
218 	{"D-Link DL10050-based FAST Ethernet Adapter"},
219 	{"Sundance Technology Alta"},
220 	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
221 	{ }	/* terminate list. */
222 };
223 
224 /* This driver was written to use PCI memory space, however x86-oriented
225    hardware often uses I/O space accesses. */
226 
227 /* Offsets to the device registers.
228    Unlike software-only systems, device drivers interact with complex hardware.
229    It's not useful to define symbolic names for every register bit in the
230    device.  The name can only partially document the semantics and make
231    the driver longer and more difficult to read.
232    In general, only the important configuration values or bits changed
233    multiple times should be defined symbolically.
234 */
235 enum alta_offsets {
236 	DMACtrl = 0x00,
237 	TxListPtr = 0x04,
238 	TxDMABurstThresh = 0x08,
239 	TxDMAUrgentThresh = 0x09,
240 	TxDMAPollPeriod = 0x0a,
241 	RxDMAStatus = 0x0c,
242 	RxListPtr = 0x10,
243 	DebugCtrl0 = 0x1a,
244 	DebugCtrl1 = 0x1c,
245 	RxDMABurstThresh = 0x14,
246 	RxDMAUrgentThresh = 0x15,
247 	RxDMAPollPeriod = 0x16,
248 	LEDCtrl = 0x1a,
249 	ASICCtrl = 0x30,
250 	EEData = 0x34,
251 	EECtrl = 0x36,
252 	FlashAddr = 0x40,
253 	FlashData = 0x44,
254 	WakeEvent = 0x45,
255 	TxStatus = 0x46,
256 	TxFrameId = 0x47,
257 	DownCounter = 0x18,
258 	IntrClear = 0x4a,
259 	IntrEnable = 0x4c,
260 	IntrStatus = 0x4e,
261 	MACCtrl0 = 0x50,
262 	MACCtrl1 = 0x52,
263 	StationAddr = 0x54,
264 	MaxFrameSize = 0x5A,
265 	RxMode = 0x5c,
266 	MIICtrl = 0x5e,
267 	MulticastFilter0 = 0x60,
268 	MulticastFilter1 = 0x64,
269 	RxOctetsLow = 0x68,
270 	RxOctetsHigh = 0x6a,
271 	TxOctetsLow = 0x6c,
272 	TxOctetsHigh = 0x6e,
273 	TxFramesOK = 0x70,
274 	RxFramesOK = 0x72,
275 	StatsCarrierError = 0x74,
276 	StatsLateColl = 0x75,
277 	StatsMultiColl = 0x76,
278 	StatsOneColl = 0x77,
279 	StatsTxDefer = 0x78,
280 	RxMissed = 0x79,
281 	StatsTxXSDefer = 0x7a,
282 	StatsTxAbort = 0x7b,
283 	StatsBcastTx = 0x7c,
284 	StatsBcastRx = 0x7d,
285 	StatsMcastTx = 0x7e,
286 	StatsMcastRx = 0x7f,
287 	/* Aliased and bogus values! */
288 	RxStatus = 0x0c,
289 };
290 
291 #define ASIC_HI_WORD(x)	((x) + 2)
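/* ASICCtrl is a 32-bit register (see sundance_reset()); ASIC_HI_WORD()
   addresses its upper halfword, so the reset/status bits below can also be
   reached with 16-bit accesses, e.g. ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)).
   The ioread16() form here is illustrative only; this file mostly touches
   ASICCtrl with 32-bit reads and writes. */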
292 
293 enum ASICCtrl_HiWord_bit {
294 	GlobalReset = 0x0001,
295 	RxReset = 0x0002,
296 	TxReset = 0x0004,
297 	DMAReset = 0x0008,
298 	FIFOReset = 0x0010,
299 	NetworkReset = 0x0020,
300 	HostReset = 0x0040,
301 	ResetBusy = 0x0400,
302 };
303 
304 /* Bits in the interrupt status/mask registers. */
305 enum intr_status_bits {
306 	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
307 	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
308 	IntrDrvRqst=0x0040,
309 	StatsMax=0x0080, LinkChange=0x0100,
310 	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
311 };
312 
313 /* Bits in the RxMode register. */
314 enum rx_mode_bits {
315 	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
316 	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
317 };
318 /* Bits in MACCtrl. */
319 enum mac_ctrl0_bits {
320 	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
321 	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
322 };
323 enum mac_ctrl1_bits {
324 	StatsEnable=0x0020,	StatsDisable=0x0040, StatsEnabled=0x0080,
325 	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
326 	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
327 };
328 
329 /* Bits in WakeEvent register. */
330 enum wake_event_bits {
331 	WakePktEnable = 0x01,
332 	MagicPktEnable = 0x02,
333 	LinkEventEnable = 0x04,
334 	WolEnable = 0x80,
335 };
336 
337 /* The Rx and Tx buffer descriptors. */
338 /* Note that using only 32 bit fields simplifies conversion to big-endian
339    architectures. */
340 struct netdev_desc {
341 	__le32 next_desc;
342 	__le32 status;
343 	struct desc_frag { __le32 addr, length; } frag[1];
344 };
345 
346 /* Bits in netdev_desc.status */
347 enum desc_status_bits {
348 	DescOwn=0x8000,
349 	DescEndPacket=0x4000,
350 	DescEndRing=0x2000,
351 	LastFrag=0x80000000,
352 	DescIntrOnTx=0x8000,
353 	DescIntrOnDMADone=0x80000000,
354 	DisableAlign = 0x00000001,
355 };
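/* Note: the low bits of a Tx descriptor's status double as a software frame
   id: start_tx() stores (entry << 2) | DisableAlign, and intr_handler()
   compares (status >> 2) & 0xff with the chip's TxFrameId when reaping
   completed descriptors on revision >= 0x14 (DFE-580TX) boards. */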
356 
357 #define PRIV_ALIGN	15 	/* Required alignment mask */
358 /* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
359    within the structure. */
360 #define MII_CNT		4
361 struct netdev_private {
362 	/* Descriptor rings first for alignment. */
363 	struct netdev_desc *rx_ring;
364 	struct netdev_desc *tx_ring;
365 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
366 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
367         dma_addr_t tx_ring_dma;
368         dma_addr_t rx_ring_dma;
369 	struct timer_list timer;		/* Media monitoring timer. */
370 	struct net_device *ndev;		/* backpointer */
371 	/* ethtool extra stats */
372 	struct {
373 		u64 tx_multiple_collisions;
374 		u64 tx_single_collisions;
375 		u64 tx_late_collisions;
376 		u64 tx_deferred;
377 		u64 tx_deferred_excessive;
378 		u64 tx_aborted;
379 		u64 tx_bcasts;
380 		u64 rx_bcasts;
381 		u64 tx_mcasts;
382 		u64 rx_mcasts;
383 	} xstats;
384 	/* Frequently used values: keep some adjacent for cache effect. */
385 	spinlock_t lock;
386 	int msg_enable;
387 	int chip_id;
388 	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
389 	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
390 	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
391 	unsigned int cur_tx, dirty_tx;
392 	/* These values keep track of the transceiver/media in use. */
393 	unsigned int flowctrl:1;
394 	unsigned int default_port:4;		/* Last dev->if_port value. */
395 	unsigned int an_enable:1;
396 	unsigned int speed;
397 	unsigned int wol_enabled:1;			/* Wake on LAN enabled */
398 	struct tasklet_struct rx_tasklet;
399 	struct tasklet_struct tx_tasklet;
400 	int budget;
401 	int cur_task;
402 	/* Multicast and receive mode. */
403 	spinlock_t mcastlock;			/* SMP lock multicast updates. */
404 	u16 mcast_filter[4];
405 	/* MII transceiver section. */
406 	struct mii_if_info mii_if;
407 	int mii_preamble_required;
408 	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
409 	struct pci_dev *pci_dev;
410 	void __iomem *base;
411 	spinlock_t statlock;
412 };
413 
414 /* The station address location in the EEPROM. */
415 #define EEPROM_SA_OFFSET	0x10
416 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
417 			IntrDrvRqst | IntrTxDone | StatsMax | \
418 			LinkChange)
419 
420 static int  change_mtu(struct net_device *dev, int new_mtu);
421 static int  eeprom_read(void __iomem *ioaddr, int location);
422 static int  mdio_read(struct net_device *dev, int phy_id, int location);
423 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
424 static int  mdio_wait_link(struct net_device *dev, int wait);
425 static int  netdev_open(struct net_device *dev);
426 static void check_duplex(struct net_device *dev);
427 static void netdev_timer(struct timer_list *t);
428 static void tx_timeout(struct net_device *dev, unsigned int txqueue);
429 static void init_ring(struct net_device *dev);
430 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
431 static int reset_tx (struct net_device *dev);
432 static irqreturn_t intr_handler(int irq, void *dev_instance);
433 static void rx_poll(struct tasklet_struct *t);
434 static void tx_poll(struct tasklet_struct *t);
435 static void refill_rx (struct net_device *dev);
436 static void netdev_error(struct net_device *dev, int intr_status);
438 static void set_rx_mode(struct net_device *dev);
439 static int __set_mac_addr(struct net_device *dev);
440 static int sundance_set_mac_addr(struct net_device *dev, void *data);
441 static struct net_device_stats *get_stats(struct net_device *dev);
442 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
443 static int  netdev_close(struct net_device *dev);
444 static const struct ethtool_ops ethtool_ops;
445 
446 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
447 {
448 	struct netdev_private *np = netdev_priv(dev);
449 	void __iomem *ioaddr = np->base + ASICCtrl;
450 	int countdown;
451 
452 	/* ST201 documentation states ASICCtrl is a 32bit register */
453 	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
454 	/* ST201 documentation states reset can take up to 1 ms */
455 	countdown = 10 + 1;
456 	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
457 		if (--countdown == 0) {
458 			printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
459 			break;
460 		}
461 		udelay(100);
462 	}
463 }
464 
465 #ifdef CONFIG_NET_POLL_CONTROLLER
466 static void sundance_poll_controller(struct net_device *dev)
467 {
468 	struct netdev_private *np = netdev_priv(dev);
469 
470 	disable_irq(np->pci_dev->irq);
471 	intr_handler(np->pci_dev->irq, dev);
472 	enable_irq(np->pci_dev->irq);
473 }
474 #endif
475 
476 static const struct net_device_ops netdev_ops = {
477 	.ndo_open		= netdev_open,
478 	.ndo_stop		= netdev_close,
479 	.ndo_start_xmit		= start_tx,
480 	.ndo_get_stats 		= get_stats,
481 	.ndo_set_rx_mode	= set_rx_mode,
482 	.ndo_eth_ioctl		= netdev_ioctl,
483 	.ndo_tx_timeout		= tx_timeout,
484 	.ndo_change_mtu		= change_mtu,
485 	.ndo_set_mac_address 	= sundance_set_mac_addr,
486 	.ndo_validate_addr	= eth_validate_addr,
487 #ifdef CONFIG_NET_POLL_CONTROLLER
488 	.ndo_poll_controller 	= sundance_poll_controller,
489 #endif
490 };
491 
492 static int sundance_probe1(struct pci_dev *pdev,
493 			   const struct pci_device_id *ent)
494 {
495 	struct net_device *dev;
496 	struct netdev_private *np;
497 	static int card_idx;
498 	int chip_idx = ent->driver_data;
499 	int irq;
500 	int i;
501 	void __iomem *ioaddr;
502 	u16 mii_ctl;
503 	void *ring_space;
504 	dma_addr_t ring_dma;
505 #ifdef USE_IO_OPS
506 	int bar = 0;
507 #else
508 	int bar = 1;
509 #endif
510 	int phy, phy_end, phy_idx = 0;
511 
512 	if (pci_enable_device(pdev))
513 		return -EIO;
514 	pci_set_master(pdev);
515 
516 	irq = pdev->irq;
517 
518 	dev = alloc_etherdev(sizeof(*np));
519 	if (!dev)
520 		return -ENOMEM;
521 	SET_NETDEV_DEV(dev, &pdev->dev);
522 
523 	if (pci_request_regions(pdev, DRV_NAME))
524 		goto err_out_netdev;
525 
526 	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
527 	if (!ioaddr)
528 		goto err_out_res;
529 
530 	for (i = 0; i < 3; i++)
531 		((__le16 *)dev->dev_addr)[i] =
532 			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
533 
534 	np = netdev_priv(dev);
535 	np->ndev = dev;
536 	np->base = ioaddr;
537 	np->pci_dev = pdev;
538 	np->chip_id = chip_idx;
539 	np->msg_enable = (1 << debug) - 1;
540 	spin_lock_init(&np->lock);
541 	spin_lock_init(&np->statlock);
542 	tasklet_setup(&np->rx_tasklet, rx_poll);
543 	tasklet_setup(&np->tx_tasklet, tx_poll);
544 
545 	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
546 			&ring_dma, GFP_KERNEL);
547 	if (!ring_space)
548 		goto err_out_cleardev;
549 	np->tx_ring = (struct netdev_desc *)ring_space;
550 	np->tx_ring_dma = ring_dma;
551 
552 	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
553 			&ring_dma, GFP_KERNEL);
554 	if (!ring_space)
555 		goto err_out_unmap_tx;
556 	np->rx_ring = (struct netdev_desc *)ring_space;
557 	np->rx_ring_dma = ring_dma;
558 
559 	np->mii_if.dev = dev;
560 	np->mii_if.mdio_read = mdio_read;
561 	np->mii_if.mdio_write = mdio_write;
562 	np->mii_if.phy_id_mask = 0x1f;
563 	np->mii_if.reg_num_mask = 0x1f;
564 
565 	/* The chip-specific entries in the device structure. */
566 	dev->netdev_ops = &netdev_ops;
567 	dev->ethtool_ops = &ethtool_ops;
568 	dev->watchdog_timeo = TX_TIMEOUT;
569 
570 	/* MTU range: 68 - 8191 */
571 	dev->min_mtu = ETH_MIN_MTU;
572 	dev->max_mtu = 8191;
573 
574 	pci_set_drvdata(pdev, dev);
575 
576 	i = register_netdev(dev);
577 	if (i)
578 		goto err_out_unmap_rx;
579 
580 	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
581 	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
582 	       dev->dev_addr, irq);
583 
584 	np->phys[0] = 1;		/* Default setting */
585 	np->mii_preamble_required++;
586 
587 	/*
588 	 * It seems some PHYs don't deal well with address 0 being accessed
589 	 * first
590 	 */
591 	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
592 		phy = 0;
593 		phy_end = 31;
594 	} else {
595 		phy = 1;
596 		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
597 	}
598 	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
599 		int phyx = phy & 0x1f;
600 		int mii_status = mdio_read(dev, phyx, MII_BMSR);
601 		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
602 			np->phys[phy_idx++] = phyx;
603 			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
604 			if ((mii_status & 0x0040) == 0)
605 				np->mii_preamble_required++;
606 			printk(KERN_INFO "%s: MII PHY found at address %d, status "
607 				   "0x%4.4x advertising %4.4x.\n",
608 				   dev->name, phyx, mii_status, np->mii_if.advertising);
609 		}
610 	}
611 	np->mii_preamble_required--;
612 
613 	if (phy_idx == 0) {
614 		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
615 			   dev->name, ioread32(ioaddr + ASICCtrl));
616 		goto err_out_unregister;
617 	}
618 
619 	np->mii_if.phy_id = np->phys[0];
620 
621 	/* Parse override configuration */
622 	np->an_enable = 1;
623 	if (card_idx < MAX_UNITS) {
624 		if (media[card_idx] != NULL) {
625 			np->an_enable = 0;
626 			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
627 			    strcmp (media[card_idx], "4") == 0) {
628 				np->speed = 100;
629 				np->mii_if.full_duplex = 1;
630 			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
631 				   strcmp (media[card_idx], "3") == 0) {
632 				np->speed = 100;
633 				np->mii_if.full_duplex = 0;
634 			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
635 				   strcmp (media[card_idx], "2") == 0) {
636 				np->speed = 10;
637 				np->mii_if.full_duplex = 1;
638 			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
639 				   strcmp (media[card_idx], "1") == 0) {
640 				np->speed = 10;
641 				np->mii_if.full_duplex = 0;
642 			} else {
643 				np->an_enable = 1;
644 			}
645 		}
646 		if (flowctrl == 1)
647 			np->flowctrl = 1;
648 	}
649 
650 	/* Fibre PHY? */
651 	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
652 		/* Default 100Mbps Full */
653 		if (np->an_enable) {
654 			np->speed = 100;
655 			np->mii_if.full_duplex = 1;
656 			np->an_enable = 0;
657 		}
658 	}
659 	/* Reset PHY */
660 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
661 	mdelay (300);
662 	/* If flow control enabled, we need to advertise it.*/
663 	if (np->flowctrl)
664 		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
665 	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
666 	/* Force media type */
667 	if (!np->an_enable) {
668 		mii_ctl = 0;
669 		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
670 		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
671 		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
672 		printk (KERN_INFO "Override speed=%d, %s duplex\n",
673 			np->speed, np->mii_if.full_duplex ? "Full" : "Half");
674 
675 	}
676 
677 	/* Perhaps move the reset here? */
678 	/* Reset the chip to erase previous misconfiguration. */
679 	if (netif_msg_hw(np))
680 		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
681 	sundance_reset(dev, 0x00ff << 16);
682 	if (netif_msg_hw(np))
683 		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
684 
685 	card_idx++;
686 	return 0;
687 
688 err_out_unregister:
689 	unregister_netdev(dev);
690 err_out_unmap_rx:
691 	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
692 		np->rx_ring, np->rx_ring_dma);
693 err_out_unmap_tx:
694 	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
695 		np->tx_ring, np->tx_ring_dma);
696 err_out_cleardev:
697 	pci_iounmap(pdev, ioaddr);
698 err_out_res:
699 	pci_release_regions(pdev);
700 err_out_netdev:
701 	free_netdev (dev);
702 	return -ENODEV;
703 }
704 
705 static int change_mtu(struct net_device *dev, int new_mtu)
706 {
707 	if (netif_running(dev))
708 		return -EBUSY;
709 	dev->mtu = new_mtu;
710 	return 0;
711 }
712 
713 #define eeprom_delay(ee_addr)	ioread32(ee_addr)
714 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
715 static int eeprom_read(void __iomem *ioaddr, int location)
716 {
717 	int boguscnt = 10000;		/* Typical 1900 ticks. */
718 	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
719 	do {
720 		eeprom_delay(ioaddr + EECtrl);
721 		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
722 			return ioread16(ioaddr + EEData);
723 		}
724 	} while (--boguscnt > 0);
725 	return 0;
726 }
727 
728 /*  MII transceiver control section.
729 	Read and write the MII registers using software-generated serial
730 	MDIO protocol.  See the MII specifications or DP83840A data sheet
731 	for details.
732 
733 	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
734 	met by back-to-back 33 MHz PCI cycles. */
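/* mdio_read()/mdio_write() below bit-bang a standard IEEE 802.3 clause 22
   management frame: a preamble of 32 ones (mdio_sync()), start and opcode
   bits, the 5-bit PHY address and 5-bit register number, a turnaround, and
   16 data bits, all clocked MSB first well under the 2.5 MHz limit. */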
735 #define mdio_delay() ioread8(mdio_addr)
736 
737 enum mii_reg_bits {
738 	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
739 };
740 #define MDIO_EnbIn  (0)
741 #define MDIO_WRITE0 (MDIO_EnbOutput)
742 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
743 
744 /* Generate the preamble required for initial synchronization and
745    a few older transceivers. */
746 static void mdio_sync(void __iomem *mdio_addr)
747 {
748 	int bits = 32;
749 
750 	/* Establish sync by sending at least 32 logic ones. */
751 	while (--bits >= 0) {
752 		iowrite8(MDIO_WRITE1, mdio_addr);
753 		mdio_delay();
754 		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
755 		mdio_delay();
756 	}
757 }
758 
759 static int mdio_read(struct net_device *dev, int phy_id, int location)
760 {
761 	struct netdev_private *np = netdev_priv(dev);
762 	void __iomem *mdio_addr = np->base + MIICtrl;
763 	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
764 	int i, retval = 0;
765 
766 	if (np->mii_preamble_required)
767 		mdio_sync(mdio_addr);
768 
769 	/* Shift the read command bits out. */
770 	for (i = 15; i >= 0; i--) {
771 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
772 
773 		iowrite8(dataval, mdio_addr);
774 		mdio_delay();
775 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
776 		mdio_delay();
777 	}
778 	/* Read the two transition, 16 data, and wire-idle bits. */
779 	for (i = 19; i > 0; i--) {
780 		iowrite8(MDIO_EnbIn, mdio_addr);
781 		mdio_delay();
782 		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
783 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
784 		mdio_delay();
785 	}
786 	return (retval>>1) & 0xffff;
787 }
788 
789 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
790 {
791 	struct netdev_private *np = netdev_priv(dev);
792 	void __iomem *mdio_addr = np->base + MIICtrl;
793 	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
794 	int i;
795 
796 	if (np->mii_preamble_required)
797 		mdio_sync(mdio_addr);
798 
799 	/* Shift the command bits out. */
800 	for (i = 31; i >= 0; i--) {
801 		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
802 
803 		iowrite8(dataval, mdio_addr);
804 		mdio_delay();
805 		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
806 		mdio_delay();
807 	}
808 	/* Clear out extra bits. */
809 	for (i = 2; i > 0; i--) {
810 		iowrite8(MDIO_EnbIn, mdio_addr);
811 		mdio_delay();
812 		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
813 		mdio_delay();
814 	}
815 }
816 
817 static int mdio_wait_link(struct net_device *dev, int wait)
818 {
819 	int bmsr;
820 	int phy_id;
821 	struct netdev_private *np;
822 
823 	np = netdev_priv(dev);
824 	phy_id = np->phys[0];
825 
826 	do {
827 		bmsr = mdio_read(dev, phy_id, MII_BMSR);
828 		if (bmsr & 0x0004)
829 			return 0;
830 		mdelay(1);
831 	} while (--wait > 0);
832 	return -1;
833 }
834 
835 static int netdev_open(struct net_device *dev)
836 {
837 	struct netdev_private *np = netdev_priv(dev);
838 	void __iomem *ioaddr = np->base;
839 	const int irq = np->pci_dev->irq;
840 	unsigned long flags;
841 	int i;
842 
843 	sundance_reset(dev, 0x00ff << 16);
844 
845 	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
846 	if (i)
847 		return i;
848 
849 	if (netif_msg_ifup(np))
850 		printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
851 
852 	init_ring(dev);
853 
854 	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
855 	/* The Tx list pointer is written as packets are queued. */
856 
857 	/* Initialize other registers. */
858 	__set_mac_addr(dev);
859 #if IS_ENABLED(CONFIG_VLAN_8021Q)
860 	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
861 #else
862 	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
863 #endif
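	/* The +14/+18 above account for the Ethernet header, plus a VLAN tag
	   when 802.1Q support is enabled. */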
864 	if (dev->mtu > 2047)
865 		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
866 
867 	/* Configure the PCI bus bursts and FIFO thresholds. */
868 
869 	if (dev->if_port == 0)
870 		dev->if_port = np->default_port;
871 
872 	spin_lock_init(&np->mcastlock);
873 
874 	set_rx_mode(dev);
875 	iowrite16(0, ioaddr + IntrEnable);
876 	iowrite16(0, ioaddr + DownCounter);
877 	/* Set the chip to poll every N*320nsec. */
878 	iowrite8(100, ioaddr + RxDMAPollPeriod);
879 	iowrite8(127, ioaddr + TxDMAPollPeriod);
880 	/* Fix DFE-580TX packet drop issue */
881 	if (np->pci_dev->revision >= 0x14)
882 		iowrite8(0x01, ioaddr + DebugCtrl1);
883 	netif_start_queue(dev);
884 
885 	spin_lock_irqsave(&np->lock, flags);
886 	reset_tx(dev);
887 	spin_unlock_irqrestore(&np->lock, flags);
888 
889 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
890 
891 	/* Disable Wol */
892 	iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
893 	np->wol_enabled = 0;
894 
895 	if (netif_msg_ifup(np))
896 		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
897 			   "MAC Control %x, %4.4x %4.4x.\n",
898 			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
899 			   ioread32(ioaddr + MACCtrl0),
900 			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
901 
902 	/* Set the timer to check for link beat. */
903 	timer_setup(&np->timer, netdev_timer, 0);
904 	np->timer.expires = jiffies + 3*HZ;
905 	add_timer(&np->timer);
906 
907 	/* Enable interrupts by setting the interrupt mask. */
908 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
909 
910 	return 0;
911 }
912 
913 static void check_duplex(struct net_device *dev)
914 {
915 	struct netdev_private *np = netdev_priv(dev);
916 	void __iomem *ioaddr = np->base;
917 	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
918 	int negotiated = mii_lpa & np->mii_if.advertising;
919 	int duplex;
920 
921 	/* Force media */
922 	if (!np->an_enable || mii_lpa == 0xffff) {
923 		if (np->mii_if.full_duplex)
924 			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
925 				ioaddr + MACCtrl0);
926 		return;
927 	}
928 
929 	/* Autonegotiation */
930 	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
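	/* 0x0100 is LPA_100FULL.  (negotiated & 0x01C0) == 0x0040 means
	   10BASE-T full duplex is the best commonly advertised mode (no
	   100 Mbps ability matched), so full duplex is used either way. */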
931 	if (np->mii_if.full_duplex != duplex) {
932 		np->mii_if.full_duplex = duplex;
933 		if (netif_msg_link(np))
934 			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
935 				   "negotiated capability %4.4x.\n", dev->name,
936 				   duplex ? "full" : "half", np->phys[0], negotiated);
937 		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
938 	}
939 }
940 
941 static void netdev_timer(struct timer_list *t)
942 {
943 	struct netdev_private *np = from_timer(np, t, timer);
944 	struct net_device *dev = np->mii_if.dev;
945 	void __iomem *ioaddr = np->base;
946 	int next_tick = 10*HZ;
947 
948 	if (netif_msg_timer(np)) {
949 		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
950 			   "Tx %x Rx %x.\n",
951 			   dev->name, ioread16(ioaddr + IntrEnable),
952 			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
953 	}
954 	check_duplex(dev);
955 	np->timer.expires = jiffies + next_tick;
956 	add_timer(&np->timer);
957 }
958 
959 static void tx_timeout(struct net_device *dev, unsigned int txqueue)
960 {
961 	struct netdev_private *np = netdev_priv(dev);
962 	void __iomem *ioaddr = np->base;
963 	unsigned long flag;
964 
965 	netif_stop_queue(dev);
966 	tasklet_disable_in_atomic(&np->tx_tasklet);
967 	iowrite16(0, ioaddr + IntrEnable);
968 	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
969 		   "TxFrameId %2.2x,"
970 		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
971 		   ioread8(ioaddr + TxFrameId));
972 
973 	{
974 		int i;
975 		for (i=0; i<TX_RING_SIZE; i++) {
976 			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
977 				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
978 				le32_to_cpu(np->tx_ring[i].next_desc),
979 				le32_to_cpu(np->tx_ring[i].status),
980 				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
981 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
982 				le32_to_cpu(np->tx_ring[i].frag[0].length));
983 		}
984 		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
985 			ioread32(np->base + TxListPtr),
986 			netif_queue_stopped(dev));
987 		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
988 			np->cur_tx, np->cur_tx % TX_RING_SIZE,
989 			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
990 		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
991 		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
992 	}
993 	spin_lock_irqsave(&np->lock, flag);
994 
995 	/* Stop and restart the chip's Tx processes. */
996 	reset_tx(dev);
997 	spin_unlock_irqrestore(&np->lock, flag);
998 
999 	dev->if_port = 0;
1000 
1001 	netif_trans_update(dev); /* prevent tx timeout */
1002 	dev->stats.tx_errors++;
1003 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1004 		netif_wake_queue(dev);
1005 	}
1006 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1007 	tasklet_enable(&np->tx_tasklet);
1008 }
1009 
1010 
1011 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1012 static void init_ring(struct net_device *dev)
1013 {
1014 	struct netdev_private *np = netdev_priv(dev);
1015 	int i;
1016 
1017 	np->cur_rx = np->cur_tx = 0;
1018 	np->dirty_rx = np->dirty_tx = 0;
1019 	np->cur_task = 0;
1020 
1021 	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1022 
1023 	/* Initialize all Rx descriptors. */
1024 	for (i = 0; i < RX_RING_SIZE; i++) {
1025 		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1026 			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1027 		np->rx_ring[i].status = 0;
1028 		np->rx_ring[i].frag[0].length = 0;
1029 		np->rx_skbuff[i] = NULL;
1030 	}
1031 
1032 	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1033 	for (i = 0; i < RX_RING_SIZE; i++) {
1034 		struct sk_buff *skb =
1035 			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1036 		np->rx_skbuff[i] = skb;
1037 		if (skb == NULL)
1038 			break;
1039 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1040 		np->rx_ring[i].frag[0].addr = cpu_to_le32(
1041 			dma_map_single(&np->pci_dev->dev, skb->data,
1042 				np->rx_buf_sz, DMA_FROM_DEVICE));
1043 		if (dma_mapping_error(&np->pci_dev->dev,
1044 					np->rx_ring[i].frag[0].addr)) {
1045 			dev_kfree_skb(skb);
1046 			np->rx_skbuff[i] = NULL;
1047 			break;
1048 		}
1049 		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1050 	}
1051 	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1052 
1053 	for (i = 0; i < TX_RING_SIZE; i++) {
1054 		np->tx_skbuff[i] = NULL;
1055 		np->tx_ring[i].status = 0;
1056 	}
1057 }
1058 
1059 static void tx_poll(struct tasklet_struct *t)
1060 {
1061 	struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
1062 	unsigned head = np->cur_task % TX_RING_SIZE;
1063 	struct netdev_desc *txdesc =
1064 		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1065 
1066 	/* Chain the next pointer */
1067 	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1068 		int entry = np->cur_task % TX_RING_SIZE;
1069 		txdesc = &np->tx_ring[entry];
1070 		if (np->last_tx) {
1071 			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1072 				entry*sizeof(struct netdev_desc));
1073 		}
1074 		np->last_tx = txdesc;
1075 	}
1076 	/* Indicate the latest descriptor of tx ring */
1077 	txdesc->status |= cpu_to_le32(DescIntrOnTx);
1078 
1079 	if (ioread32 (np->base + TxListPtr) == 0)
1080 		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1081 			np->base + TxListPtr);
1082 }
1083 
1084 static netdev_tx_t
1085 start_tx (struct sk_buff *skb, struct net_device *dev)
1086 {
1087 	struct netdev_private *np = netdev_priv(dev);
1088 	struct netdev_desc *txdesc;
1089 	unsigned entry;
1090 
1091 	/* Calculate the next Tx descriptor entry. */
1092 	entry = np->cur_tx % TX_RING_SIZE;
1093 	np->tx_skbuff[entry] = skb;
1094 	txdesc = &np->tx_ring[entry];
1095 
1096 	txdesc->next_desc = 0;
1097 	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1098 	txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1099 				skb->data, skb->len, DMA_TO_DEVICE));
1100 	if (dma_mapping_error(&np->pci_dev->dev,
1101 				txdesc->frag[0].addr))
1102 			goto drop_frame;
1103 	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1104 
1105 	/* Increment cur_tx before tasklet_schedule() */
1106 	np->cur_tx++;
1107 	mb();
1108 	/* Schedule a tx_poll() task */
1109 	tasklet_schedule(&np->tx_tasklet);
1110 
1111 	/* On some architectures: explicitly flush cache lines here. */
1112 	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1113 	    !netif_queue_stopped(dev)) {
1114 		/* do nothing */
1115 	} else {
1116 		netif_stop_queue (dev);
1117 	}
1118 	if (netif_msg_tx_queued(np)) {
1119 		printk (KERN_DEBUG
1120 			"%s: Transmit frame #%d queued in slot %d.\n",
1121 			dev->name, np->cur_tx, entry);
1122 	}
1123 	return NETDEV_TX_OK;
1124 
1125 drop_frame:
1126 	dev_kfree_skb_any(skb);
1127 	np->tx_skbuff[entry] = NULL;
1128 	dev->stats.tx_dropped++;
1129 	return NETDEV_TX_OK;
1130 }
1131 
1132 /* Reset hardware tx and free all of tx buffers */
1133 static int
1134 reset_tx (struct net_device *dev)
1135 {
1136 	struct netdev_private *np = netdev_priv(dev);
1137 	void __iomem *ioaddr = np->base;
1138 	struct sk_buff *skb;
1139 	int i;
1140 
1141 	/* Reset tx logic; TxListPtr will be cleared */
1142 	iowrite16 (TxDisable, ioaddr + MACCtrl1);
1143 	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1144 
1145 	/* free all tx skbuff */
1146 	for (i = 0; i < TX_RING_SIZE; i++) {
1147 		np->tx_ring[i].next_desc = 0;
1148 
1149 		skb = np->tx_skbuff[i];
1150 		if (skb) {
1151 			dma_unmap_single(&np->pci_dev->dev,
1152 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1153 				skb->len, DMA_TO_DEVICE);
1154 			dev_kfree_skb_any(skb);
1155 			np->tx_skbuff[i] = NULL;
1156 			dev->stats.tx_dropped++;
1157 		}
1158 	}
1159 	np->cur_tx = np->dirty_tx = 0;
1160 	np->cur_task = 0;
1161 
1162 	np->last_tx = NULL;
1163 	iowrite8(127, ioaddr + TxDMAPollPeriod);
1164 
1165 	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1166 	return 0;
1167 }
1168 
1169 /* The interrupt handler cleans up after the Tx thread,
1170    and schedules Rx work via the rx_tasklet. */
1171 static irqreturn_t intr_handler(int irq, void *dev_instance)
1172 {
1173 	struct net_device *dev = (struct net_device *)dev_instance;
1174 	struct netdev_private *np = netdev_priv(dev);
1175 	void __iomem *ioaddr = np->base;
1176 	int hw_frame_id;
1177 	int tx_cnt;
1178 	int tx_status;
1179 	int handled = 0;
1180 	int i;
1181 
1182 	do {
1183 		int intr_status = ioread16(ioaddr + IntrStatus);
1184 		iowrite16(intr_status, ioaddr + IntrStatus);
1185 
1186 		if (netif_msg_intr(np))
1187 			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1188 				   dev->name, intr_status);
1189 
1190 		if (!(intr_status & DEFAULT_INTR))
1191 			break;
1192 
1193 		handled = 1;
1194 
1195 		if (intr_status & (IntrRxDMADone)) {
1196 			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1197 					ioaddr + IntrEnable);
1198 			if (np->budget < 0)
1199 				np->budget = RX_BUDGET;
1200 			tasklet_schedule(&np->rx_tasklet);
1201 		}
1202 		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1203 			tx_status = ioread16 (ioaddr + TxStatus);
1204 			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1205 				if (netif_msg_tx_done(np))
1206 					printk
1207 					    ("%s: Transmit status is %2.2x.\n",
1208 				     	dev->name, tx_status);
1209 				if (tx_status & 0x1e) {
1210 					if (netif_msg_tx_err(np))
1211 						printk("%s: Transmit error status %4.4x.\n",
1212 							   dev->name, tx_status);
1213 					dev->stats.tx_errors++;
1214 					if (tx_status & 0x10)
1215 						dev->stats.tx_fifo_errors++;
1216 					if (tx_status & 0x08)
1217 						dev->stats.collisions++;
1218 					if (tx_status & 0x04)
1219 						dev->stats.tx_fifo_errors++;
1220 					if (tx_status & 0x02)
1221 						dev->stats.tx_window_errors++;
1222 
1223 					/*
1224 					** This reset has been verified on
1225 					** DFE-580TX boards ! phdm@macqel.be.
1226 					*/
1227 					if (tx_status & 0x10) {	/* TxUnderrun */
1228 						/* Restart Tx FIFO and transmitter */
1229 						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1230 						/* No need to reset the Tx pointer here */
1231 					}
1232 					/* Restart the Tx. Need to make sure tx enabled */
1233 					i = 10;
1234 					do {
1235 						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1236 						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1237 							break;
1238 						mdelay(1);
1239 					} while (--i);
1240 				}
1241 				/* Yup, this is a documentation bug.  It cost me *hours*. */
1242 				iowrite16 (0, ioaddr + TxStatus);
1243 				if (tx_cnt < 0) {
1244 					iowrite32(5000, ioaddr + DownCounter);
1245 					break;
1246 				}
1247 				tx_status = ioread16 (ioaddr + TxStatus);
1248 			}
1249 			hw_frame_id = (tx_status >> 8) & 0xff;
1250 		} else 	{
1251 			hw_frame_id = ioread8(ioaddr + TxFrameId);
1252 		}
1253 
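		/* Reap completed Tx descriptors.  Revision >= 0x14 parts
		   (the DFE-580TX case) are matched against the hardware
		   frame id obtained above; older parts rely solely on the
		   descriptor's DMA-complete bit (0x00010000). */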
1254 		if (np->pci_dev->revision >= 0x14) {
1255 			spin_lock(&np->lock);
1256 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1257 				int entry = np->dirty_tx % TX_RING_SIZE;
1258 				struct sk_buff *skb;
1259 				int sw_frame_id;
1260 				sw_frame_id = (le32_to_cpu(
1261 					np->tx_ring[entry].status) >> 2) & 0xff;
1262 				if (sw_frame_id == hw_frame_id &&
1263 					!(le32_to_cpu(np->tx_ring[entry].status)
1264 					& 0x00010000))
1265 						break;
1266 				if (sw_frame_id == (hw_frame_id + 1) %
1267 					TX_RING_SIZE)
1268 						break;
1269 				skb = np->tx_skbuff[entry];
1270 				/* Free the original skb. */
1271 				dma_unmap_single(&np->pci_dev->dev,
1272 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1273 					skb->len, DMA_TO_DEVICE);
1274 				dev_consume_skb_irq(np->tx_skbuff[entry]);
1275 				np->tx_skbuff[entry] = NULL;
1276 				np->tx_ring[entry].frag[0].addr = 0;
1277 				np->tx_ring[entry].frag[0].length = 0;
1278 			}
1279 			spin_unlock(&np->lock);
1280 		} else {
1281 			spin_lock(&np->lock);
1282 			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1283 				int entry = np->dirty_tx % TX_RING_SIZE;
1284 				struct sk_buff *skb;
1285 				if (!(le32_to_cpu(np->tx_ring[entry].status)
1286 							& 0x00010000))
1287 					break;
1288 				skb = np->tx_skbuff[entry];
1289 				/* Free the original skb. */
1290 				dma_unmap_single(&np->pci_dev->dev,
1291 					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1292 					skb->len, DMA_TO_DEVICE);
1293 				dev_consume_skb_irq(np->tx_skbuff[entry]);
1294 				np->tx_skbuff[entry] = NULL;
1295 				np->tx_ring[entry].frag[0].addr = 0;
1296 				np->tx_ring[entry].frag[0].length = 0;
1297 			}
1298 			spin_unlock(&np->lock);
1299 		}
1300 
1301 		if (netif_queue_stopped(dev) &&
1302 			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1303 			/* The ring is no longer full, clear busy flag. */
1304 			netif_wake_queue (dev);
1305 		}
1306 		/* Abnormal error summary/uncommon events handlers. */
1307 		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1308 			netdev_error(dev, intr_status);
1309 	} while (0);
1310 	if (netif_msg_intr(np))
1311 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1312 			   dev->name, ioread16(ioaddr + IntrStatus));
1313 	return IRQ_RETVAL(handled);
1314 }
1315 
1316 static void rx_poll(struct tasklet_struct *t)
1317 {
1318 	struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
1319 	struct net_device *dev = np->ndev;
1320 	int entry = np->cur_rx % RX_RING_SIZE;
1321 	int boguscnt = np->budget;
1322 	void __iomem *ioaddr = np->base;
1323 	int received = 0;
1324 
1325 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1326 	while (1) {
1327 		struct netdev_desc *desc = &(np->rx_ring[entry]);
1328 		u32 frame_status = le32_to_cpu(desc->status);
1329 		int pkt_len;
1330 
1331 		if (--boguscnt < 0) {
1332 			goto not_done;
1333 		}
1334 		if (!(frame_status & DescOwn))
1335 			break;
1336 		pkt_len = frame_status & 0x1fff;	/* Chip omits the CRC. */
1337 		if (netif_msg_rx_status(np))
1338 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1339 				   frame_status);
1340 		if (frame_status & 0x001f4000) {
1341 			/* There was an error. */
1342 			if (netif_msg_rx_err(np))
1343 				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1344 					   frame_status);
1345 			dev->stats.rx_errors++;
1346 			if (frame_status & 0x00100000)
1347 				dev->stats.rx_length_errors++;
1348 			if (frame_status & 0x00010000)
1349 				dev->stats.rx_fifo_errors++;
1350 			if (frame_status & 0x00060000)
1351 				dev->stats.rx_frame_errors++;
1352 			if (frame_status & 0x00080000)
1353 				dev->stats.rx_crc_errors++;
1354 			if (frame_status & 0x00100000) {
1355 				printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1356 					   " status %8.8x.\n",
1357 					   dev->name, frame_status);
1358 			}
1359 		} else {
1360 			struct sk_buff *skb;
1361 #ifndef final_version
1362 			if (netif_msg_rx_status(np))
1363 				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1364 					   ", bogus_cnt %d.\n",
1365 					   pkt_len, boguscnt);
1366 #endif
1367 			/* Check if the packet is long enough to accept without copying
1368 			   to a minimally-sized skbuff. */
1369 			if (pkt_len < rx_copybreak &&
1370 			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1371 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
1372 				dma_sync_single_for_cpu(&np->pci_dev->dev,
1373 						le32_to_cpu(desc->frag[0].addr),
1374 						np->rx_buf_sz, DMA_FROM_DEVICE);
1375 				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1376 				dma_sync_single_for_device(&np->pci_dev->dev,
1377 						le32_to_cpu(desc->frag[0].addr),
1378 						np->rx_buf_sz, DMA_FROM_DEVICE);
1379 				skb_put(skb, pkt_len);
1380 			} else {
1381 				dma_unmap_single(&np->pci_dev->dev,
1382 					le32_to_cpu(desc->frag[0].addr),
1383 					np->rx_buf_sz, DMA_FROM_DEVICE);
1384 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1385 				np->rx_skbuff[entry] = NULL;
1386 			}
1387 			skb->protocol = eth_type_trans(skb, dev);
1388 			/* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1389 			netif_rx(skb);
1390 		}
1391 		entry = (entry + 1) % RX_RING_SIZE;
1392 		received++;
1393 	}
1394 	np->cur_rx = entry;
1395 	refill_rx (dev);
1396 	np->budget -= received;
1397 	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1398 	return;
1399 
1400 not_done:
1401 	np->cur_rx = entry;
1402 	refill_rx (dev);
1403 	if (!received)
1404 		received = 1;
1405 	np->budget -= received;
1406 	if (np->budget <= 0)
1407 		np->budget = RX_BUDGET;
1408 	tasklet_schedule(&np->rx_tasklet);
1409 }
1410 
1411 static void refill_rx (struct net_device *dev)
1412 {
1413 	struct netdev_private *np = netdev_priv(dev);
1414 	int entry;
1415 	int cnt = 0;
1416 
1417 	/* Refill the Rx ring buffers. */
1418 	for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1419 		np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1420 		struct sk_buff *skb;
1421 		entry = np->dirty_rx % RX_RING_SIZE;
1422 		if (np->rx_skbuff[entry] == NULL) {
1423 			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1424 			np->rx_skbuff[entry] = skb;
1425 			if (skb == NULL)
1426 				break;		/* Better luck next round. */
1427 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1428 			np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1429 				dma_map_single(&np->pci_dev->dev, skb->data,
1430 					np->rx_buf_sz, DMA_FROM_DEVICE));
1431 			if (dma_mapping_error(&np->pci_dev->dev,
1432 				    np->rx_ring[entry].frag[0].addr)) {
1433 			    dev_kfree_skb_irq(skb);
1434 			    np->rx_skbuff[entry] = NULL;
1435 			    break;
1436 			}
1437 		}
1438 		/* Perhaps we need not reset this field. */
1439 		np->rx_ring[entry].frag[0].length =
1440 			cpu_to_le32(np->rx_buf_sz | LastFrag);
1441 		np->rx_ring[entry].status = 0;
1442 		cnt++;
1443 	}
1444 }
1445 static void netdev_error(struct net_device *dev, int intr_status)
1446 {
1447 	struct netdev_private *np = netdev_priv(dev);
1448 	void __iomem *ioaddr = np->base;
1449 	u16 mii_ctl, mii_advertise, mii_lpa;
1450 	int speed;
1451 
1452 	if (intr_status & LinkChange) {
1453 		if (mdio_wait_link(dev, 10) == 0) {
1454 			printk(KERN_INFO "%s: Link up\n", dev->name);
1455 			if (np->an_enable) {
1456 				mii_advertise = mdio_read(dev, np->phys[0],
1457 							   MII_ADVERTISE);
1458 				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1459 				mii_advertise &= mii_lpa;
1460 				printk(KERN_INFO "%s: Link changed: ",
1461 					dev->name);
1462 				if (mii_advertise & ADVERTISE_100FULL) {
1463 					np->speed = 100;
1464 					printk("100Mbps, full duplex\n");
1465 				} else if (mii_advertise & ADVERTISE_100HALF) {
1466 					np->speed = 100;
1467 					printk("100Mbps, half duplex\n");
1468 				} else if (mii_advertise & ADVERTISE_10FULL) {
1469 					np->speed = 10;
1470 					printk("10Mbps, full duplex\n");
1471 				} else if (mii_advertise & ADVERTISE_10HALF) {
1472 					np->speed = 10;
1473 					printk("10Mbps, half duplex\n");
1474 				} else
1475 					printk("\n");
1476 
1477 			} else {
1478 				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1479 				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1480 				np->speed = speed;
1481 				printk(KERN_INFO "%s: Link changed: %dMbps ,",
1482 					dev->name, speed);
1483 				printk("%s duplex.\n",
1484 					(mii_ctl & BMCR_FULLDPLX) ?
1485 						"full" : "half");
1486 			}
1487 			check_duplex(dev);
1488 			if (np->flowctrl && np->mii_if.full_duplex) {
1489 				iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1490 					ioaddr + MulticastFilter1+2);
1491 				iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1492 					ioaddr + MACCtrl0);
1493 			}
1494 			netif_carrier_on(dev);
1495 		} else {
1496 			printk(KERN_INFO "%s: Link down\n", dev->name);
1497 			netif_carrier_off(dev);
1498 		}
1499 	}
1500 	if (intr_status & StatsMax) {
1501 		get_stats(dev);
1502 	}
1503 	if (intr_status & IntrPCIErr) {
1504 		printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1505 			   dev->name, intr_status);
1506 		/* We must do a global reset of DMA to continue. */
1507 	}
1508 }
1509 
1510 static struct net_device_stats *get_stats(struct net_device *dev)
1511 {
1512 	struct netdev_private *np = netdev_priv(dev);
1513 	void __iomem *ioaddr = np->base;
1514 	unsigned long flags;
1515 	u8 late_coll, single_coll, mult_coll;
1516 
1517 	spin_lock_irqsave(&np->statlock, flags);
1518 	/* The chip only needs to report frames it silently dropped. */
1519 	dev->stats.rx_missed_errors	+= ioread8(ioaddr + RxMissed);
1520 	dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1521 	dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1522 	dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1523 
1524 	mult_coll = ioread8(ioaddr + StatsMultiColl);
1525 	np->xstats.tx_multiple_collisions += mult_coll;
1526 	single_coll = ioread8(ioaddr + StatsOneColl);
1527 	np->xstats.tx_single_collisions += single_coll;
1528 	late_coll = ioread8(ioaddr + StatsLateColl);
1529 	np->xstats.tx_late_collisions += late_coll;
1530 	dev->stats.collisions += mult_coll
1531 		+ single_coll
1532 		+ late_coll;
1533 
1534 	np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1535 	np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1536 	np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1537 	np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1538 	np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1539 	np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1540 	np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1541 
1542 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1543 	dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1544 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1545 	dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1546 
1547 	spin_unlock_irqrestore(&np->statlock, flags);
1548 
1549 	return &dev->stats;
1550 }
1551 
1552 static void set_rx_mode(struct net_device *dev)
1553 {
1554 	struct netdev_private *np = netdev_priv(dev);
1555 	void __iomem *ioaddr = np->base;
1556 	u16 mc_filter[4];			/* Multicast hash filter */
1557 	u32 rx_mode;
1558 	int i;
1559 
1560 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1561 		memset(mc_filter, 0xff, sizeof(mc_filter));
1562 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1563 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1564 		   (dev->flags & IFF_ALLMULTI)) {
1565 		/* Too many to match, or accept all multicasts. */
1566 		memset(mc_filter, 0xff, sizeof(mc_filter));
1567 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1568 	} else if (!netdev_mc_empty(dev)) {
1569 		struct netdev_hw_addr *ha;
1570 		int bit;
1571 		int index;
1572 		int crc;
1573 		memset (mc_filter, 0, sizeof (mc_filter));
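		/* Hash each address: the top 6 bits of the little-endian CRC,
		   taken MSB first below, index one bit in the 64-bit filter
		   spread across four 16-bit registers. */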
1574 		netdev_for_each_mc_addr(ha, dev) {
1575 			crc = ether_crc_le(ETH_ALEN, ha->addr);
1576 			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1577 				if (crc & 0x80000000) index |= 1 << bit;
1578 			mc_filter[index/16] |= (1 << (index % 16));
1579 		}
1580 		rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1581 	} else {
1582 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1583 		return;
1584 	}
1585 	if (np->mii_if.full_duplex && np->flowctrl)
1586 		mc_filter[3] |= 0x0200;
1587 
1588 	for (i = 0; i < 4; i++)
1589 		iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1590 	iowrite8(rx_mode, ioaddr + RxMode);
1591 }
1592 
1593 static int __set_mac_addr(struct net_device *dev)
1594 {
1595 	struct netdev_private *np = netdev_priv(dev);
1596 	u16 addr16;
1597 
1598 	addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1599 	iowrite16(addr16, np->base + StationAddr);
1600 	addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1601 	iowrite16(addr16, np->base + StationAddr+2);
1602 	addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1603 	iowrite16(addr16, np->base + StationAddr+4);
1604 	return 0;
1605 }
1606 
1607 /* Invoked with rtnl_lock held */
1608 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1609 {
1610 	const struct sockaddr *addr = data;
1611 
1612 	if (!is_valid_ether_addr(addr->sa_data))
1613 		return -EADDRNOTAVAIL;
1614 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1615 	__set_mac_addr(dev);
1616 
1617 	return 0;
1618 }
1619 
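/* Names for the extended ethtool statistics; kept in the same order as
   get_ethtool_stats() reports them. */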
1620 static const struct {
1621 	const char name[ETH_GSTRING_LEN];
1622 } sundance_stats[] = {
1623 	{ "tx_multiple_collisions" },
1624 	{ "tx_single_collisions" },
1625 	{ "tx_late_collisions" },
1626 	{ "tx_deferred" },
1627 	{ "tx_deferred_excessive" },
1628 	{ "tx_aborted" },
1629 	{ "tx_bcasts" },
1630 	{ "rx_bcasts" },
1631 	{ "tx_mcasts" },
1632 	{ "rx_mcasts" },
1633 };
1634 
1635 static int check_if_running(struct net_device *dev)
1636 {
1637 	if (!netif_running(dev))
1638 		return -EINVAL;
1639 	return 0;
1640 }
1641 
1642 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1643 {
1644 	struct netdev_private *np = netdev_priv(dev);
1645 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1646 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1647 }
1648 
1649 static int get_link_ksettings(struct net_device *dev,
1650 			      struct ethtool_link_ksettings *cmd)
1651 {
1652 	struct netdev_private *np = netdev_priv(dev);
1653 	spin_lock_irq(&np->lock);
1654 	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
1655 	spin_unlock_irq(&np->lock);
1656 	return 0;
1657 }
1658 
1659 static int set_link_ksettings(struct net_device *dev,
1660 			      const struct ethtool_link_ksettings *cmd)
1661 {
1662 	struct netdev_private *np = netdev_priv(dev);
1663 	int res;
1664 	spin_lock_irq(&np->lock);
1665 	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
1666 	spin_unlock_irq(&np->lock);
1667 	return res;
1668 }
1669 
1670 static int nway_reset(struct net_device *dev)
1671 {
1672 	struct netdev_private *np = netdev_priv(dev);
1673 	return mii_nway_restart(&np->mii_if);
1674 }
1675 
1676 static u32 get_link(struct net_device *dev)
1677 {
1678 	struct netdev_private *np = netdev_priv(dev);
1679 	return mii_link_ok(&np->mii_if);
1680 }
1681 
1682 static u32 get_msglevel(struct net_device *dev)
1683 {
1684 	struct netdev_private *np = netdev_priv(dev);
1685 	return np->msg_enable;
1686 }
1687 
1688 static void set_msglevel(struct net_device *dev, u32 val)
1689 {
1690 	struct netdev_private *np = netdev_priv(dev);
1691 	np->msg_enable = val;
1692 }
1693 
1694 static void get_strings(struct net_device *dev, u32 stringset,
1695 		u8 *data)
1696 {
1697 	if (stringset == ETH_SS_STATS)
1698 		memcpy(data, sundance_stats, sizeof(sundance_stats));
1699 }
1700 
1701 static int get_sset_count(struct net_device *dev, int sset)
1702 {
1703 	switch (sset) {
1704 	case ETH_SS_STATS:
1705 		return ARRAY_SIZE(sundance_stats);
1706 	default:
1707 		return -EOPNOTSUPP;
1708 	}
1709 }
1710 
1711 static void get_ethtool_stats(struct net_device *dev,
1712 		struct ethtool_stats *stats, u64 *data)
1713 {
1714 	struct netdev_private *np = netdev_priv(dev);
1715 	int i = 0;
1716 
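	/* Refresh the counters from hardware, then report the extended
	   statistics in the same order as the sundance_stats table. */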
1717 	get_stats(dev);
1718 	data[i++] = np->xstats.tx_multiple_collisions;
1719 	data[i++] = np->xstats.tx_single_collisions;
1720 	data[i++] = np->xstats.tx_late_collisions;
1721 	data[i++] = np->xstats.tx_deferred;
1722 	data[i++] = np->xstats.tx_deferred_excessive;
1723 	data[i++] = np->xstats.tx_aborted;
1724 	data[i++] = np->xstats.tx_bcasts;
1725 	data[i++] = np->xstats.rx_bcasts;
1726 	data[i++] = np->xstats.tx_mcasts;
1727 	data[i++] = np->xstats.rx_mcasts;
1728 }
1729 
1730 #ifdef CONFIG_PM
1731 
1732 static void sundance_get_wol(struct net_device *dev,
1733 		struct ethtool_wolinfo *wol)
1734 {
1735 	struct netdev_private *np = netdev_priv(dev);
1736 	void __iomem *ioaddr = np->base;
1737 	u8 wol_bits;
1738 
1739 	wol->wolopts = 0;
1740 
1741 	wol->supported = (WAKE_PHY | WAKE_MAGIC);
1742 	if (!np->wol_enabled)
1743 		return;
1744 
1745 	wol_bits = ioread8(ioaddr + WakeEvent);
1746 	if (wol_bits & MagicPktEnable)
1747 		wol->wolopts |= WAKE_MAGIC;
1748 	if (wol_bits & LinkEventEnable)
1749 		wol->wolopts |= WAKE_PHY;
1750 }
1751 
1752 static int sundance_set_wol(struct net_device *dev,
1753 	struct ethtool_wolinfo *wol)
1754 {
1755 	struct netdev_private *np = netdev_priv(dev);
1756 	void __iomem *ioaddr = np->base;
1757 	u8 wol_bits;
1758 
1759 	if (!device_can_wakeup(&np->pci_dev->dev))
1760 		return -EOPNOTSUPP;
1761 
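	/* Clear all wake-event enables, then re-arm only the requested
	   sources (magic packet and/or link change) together with the
	   global WoL enable. */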
1762 	np->wol_enabled = !!(wol->wolopts);
1763 	wol_bits = ioread8(ioaddr + WakeEvent);
1764 	wol_bits &= ~(WakePktEnable | MagicPktEnable |
1765 			LinkEventEnable | WolEnable);
1766 
1767 	if (np->wol_enabled) {
1768 		if (wol->wolopts & WAKE_MAGIC)
1769 			wol_bits |= (MagicPktEnable | WolEnable);
1770 		if (wol->wolopts & WAKE_PHY)
1771 			wol_bits |= (LinkEventEnable | WolEnable);
1772 	}
1773 	iowrite8(wol_bits, ioaddr + WakeEvent);
1774 
1775 	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1776 
1777 	return 0;
1778 }
1779 #else
1780 #define sundance_get_wol NULL
1781 #define sundance_set_wol NULL
1782 #endif /* CONFIG_PM */
1783 
1784 static const struct ethtool_ops ethtool_ops = {
1785 	.begin = check_if_running,
1786 	.get_drvinfo = get_drvinfo,
1787 	.nway_reset = nway_reset,
1788 	.get_link = get_link,
1789 	.get_wol = sundance_get_wol,
1790 	.set_wol = sundance_set_wol,
1791 	.get_msglevel = get_msglevel,
1792 	.set_msglevel = set_msglevel,
1793 	.get_strings = get_strings,
1794 	.get_sset_count = get_sset_count,
1795 	.get_ethtool_stats = get_ethtool_stats,
1796 	.get_link_ksettings = get_link_ksettings,
1797 	.set_link_ksettings = set_link_ksettings,
1798 };
1799 
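/* Only the generic MII ioctls are handled; they are passed through to
   the MII layer under the driver lock. */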
1800 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1801 {
1802 	struct netdev_private *np = netdev_priv(dev);
1803 	int rc;
1804 
1805 	if (!netif_running(dev))
1806 		return -EINVAL;
1807 
1808 	spin_lock_irq(&np->lock);
1809 	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1810 	spin_unlock_irq(&np->lock);
1811 
1812 	return rc;
1813 }
1814 
1815 static int netdev_close(struct net_device *dev)
1816 {
1817 	struct netdev_private *np = netdev_priv(dev);
1818 	void __iomem *ioaddr = np->base;
1819 	struct sk_buff *skb;
1820 	int i;
1821 
1822 	/* Wait for any running Rx/Tx tasklets to finish and kill them. */
1823 	tasklet_kill(&np->rx_tasklet);
1824 	tasklet_kill(&np->tx_tasklet);
1825 	np->cur_tx = 0;
1826 	np->dirty_tx = 0;
1827 	np->cur_task = 0;
1828 	np->last_tx = NULL;
1829 
1830 	netif_stop_queue(dev);
1831 
1832 	if (netif_msg_ifdown(np)) {
1833 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1834 			   "Rx %4.4x Int %2.2x.\n",
1835 			   dev->name, ioread8(ioaddr + TxStatus),
1836 			   ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1837 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1838 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1839 	}
1840 
1841 	/* Disable interrupts by clearing the interrupt mask. */
1842 	iowrite16(0x0000, ioaddr + IntrEnable);
1843 
1844 	/* Disable Rx and Tx DMA so resources can be released safely. */
1845 	iowrite32(0x500, ioaddr + DMACtrl);
1846 
1847 	/* Stop the chip's Tx and Rx processes. */
1848 	iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1849 
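	/* Poll until the Tx/Rx DMA engines report idle (the 0xc000 bits
	   of DMACtrl clear), waiting up to ~2 seconds. */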
1850 	for (i = 2000; i > 0; i--) {
1851 		if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1852 			break;
1853 		mdelay(1);
1854 	}
1855 
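	/* Issue a full reset of the DMA engines, FIFOs and network logic,
	   then wait for ResetBusy to clear. */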
1856 	iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1857 			ioaddr + ASIC_HI_WORD(ASICCtrl));
1858 
1859 	for (i = 2000; i > 0; i--) {
1860 		if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1861 			break;
1862 		mdelay(1);
1863 	}
1864 
1865 #ifdef __i386__
1866 	if (netif_msg_hw(np)) {
1867 		printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1868 			   (int)(np->tx_ring_dma));
1869 		for (i = 0; i < TX_RING_SIZE; i++)
1870 			printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1871 				   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1872 				   np->tx_ring[i].frag[0].length);
1873 		printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1874 			   (int)(np->rx_ring_dma));
1875 		for (i = 0; i < /*RX_RING_SIZE*/ 4; i++) {
1876 			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1877 				   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1878 				   np->rx_ring[i].frag[0].length);
1879 		}
1880 	}
1881 #endif /* __i386__ debugging only */
1882 
1883 	free_irq(np->pci_dev->irq, dev);
1884 
1885 	del_timer_sync(&np->timer);
1886 
1887 	/* Free all the skbuffs in the Rx queue. */
1888 	for (i = 0; i < RX_RING_SIZE; i++) {
1889 		np->rx_ring[i].status = 0;
1890 		skb = np->rx_skbuff[i];
1891 		if (skb) {
1892 			dma_unmap_single(&np->pci_dev->dev,
1893 				le32_to_cpu(np->rx_ring[i].frag[0].addr),
1894 				np->rx_buf_sz, DMA_FROM_DEVICE);
1895 			dev_kfree_skb(skb);
1896 			np->rx_skbuff[i] = NULL;
1897 		}
1898 		np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1899 	}
1900 	for (i = 0; i < TX_RING_SIZE; i++) {
1901 		np->tx_ring[i].next_desc = 0;
1902 		skb = np->tx_skbuff[i];
1903 		if (skb) {
1904 			dma_unmap_single(&np->pci_dev->dev,
1905 				le32_to_cpu(np->tx_ring[i].frag[0].addr),
1906 				skb->len, DMA_TO_DEVICE);
1907 			dev_kfree_skb(skb);
1908 			np->tx_skbuff[i] = NULL;
1909 		}
1910 	}
1911 
1912 	return 0;
1913 }
1914 
1915 static void sundance_remove1(struct pci_dev *pdev)
1916 {
1917 	struct net_device *dev = pci_get_drvdata(pdev);
1918 
1919 	if (dev) {
1920 	    struct netdev_private *np = netdev_priv(dev);
1921 	    unregister_netdev(dev);
1922 	    dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1923 		    np->rx_ring, np->rx_ring_dma);
1924 	    dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1925 		    np->tx_ring, np->tx_ring_dma);
1926 	    pci_iounmap(pdev, np->base);
1927 	    pci_release_regions(pdev);
1928 	    free_netdev(dev);
1929 	}
1930 }
1931 
1932 static int __maybe_unused sundance_suspend(struct device *dev_d)
1933 {
1934 	struct net_device *dev = dev_get_drvdata(dev_d);
1935 	struct netdev_private *np = netdev_priv(dev);
1936 	void __iomem *ioaddr = np->base;
1937 
1938 	if (!netif_running(dev))
1939 		return 0;
1940 
1941 	netdev_close(dev);
1942 	netif_device_detach(dev);
1943 
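	/* If Wake-on-LAN is armed, leave the receiver enabled with only
	   broadcast and station-address matching so wake events can still
	   be seen while suspended. */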
1944 	if (np->wol_enabled) {
1945 		iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1946 		iowrite16(RxEnable, ioaddr + MACCtrl1);
1947 	}
1948 
1949 	device_set_wakeup_enable(dev_d, np->wol_enabled);
1950 
1951 	return 0;
1952 }
1953 
1954 static int __maybe_unused sundance_resume(struct device *dev_d)
1955 {
1956 	struct net_device *dev = dev_get_drvdata(dev_d);
1957 	int err = 0;
1958 
1959 	if (!netif_running(dev))
1960 		return 0;
1961 
1962 	err = netdev_open(dev);
1963 	if (err) {
1964 		printk(KERN_ERR "%s: Can't resume interface!\n",
1965 				dev->name);
1966 		goto out;
1967 	}
1968 
1969 	netif_device_attach(dev);
1970 
1971 out:
1972 	return err;
1973 }
1974 
1975 static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
1976 
1977 static struct pci_driver sundance_driver = {
1978 	.name		= DRV_NAME,
1979 	.id_table	= sundance_pci_tbl,
1980 	.probe		= sundance_probe1,
1981 	.remove		= sundance_remove1,
1982 	.driver.pm	= &sundance_pm_ops,
1983 };
1984 
1985 module_pci_driver(sundance_driver);
1986