1 /*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2 
3 	Copyright 2000,2001  The Linux Kernel Team
4 	Written/copyright 1994-2001 by Donald Becker.
5 
6 	This software may be used and distributed according to the terms
7 	of the GNU General Public License, incorporated herein by reference.
8 
9 	Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11 
12 #define pr_fmt(fmt) "tulip: " fmt
13 
14 #define DRV_NAME	"tulip"
15 #ifdef CONFIG_TULIP_NAPI
16 #define DRV_VERSION    "1.1.15-NAPI" /* Keep at least for test */
17 #else
18 #define DRV_VERSION	"1.1.15"
19 #endif
20 #define DRV_RELDATE	"Feb 27, 2007"
21 
22 
23 #include <linux/module.h>
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 #include "tulip.h"
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/etherdevice.h>
30 #include <linux/delay.h>
31 #include <linux/mii.h>
32 #include <linux/crc32.h>
33 #include <asm/unaligned.h>
34 #include <asm/uaccess.h>
35 
36 #ifdef CONFIG_SPARC
37 #include <asm/prom.h>
38 #endif
39 
40 static char version[] =
41 	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42 
43 /* A few user-configurable values. */
44 
45 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
46 static unsigned int max_interrupt_work = 25;
47 
48 #define MAX_UNITS 8
49 /* Used to pass the full-duplex flag, etc. */
50 static int full_duplex[MAX_UNITS];
51 static int options[MAX_UNITS];
52 static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
53 
54 /*  The possible media types that can be set in options[] are: */
55 const char * const medianame[32] = {
56 	"10baseT", "10base2", "AUI", "100baseTx",
57 	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 	"","","","", "","","","",  "","","","Transceiver reset",
62 };
63 
64 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 	defined(CONFIG_SPARC) || defined(__ia64__) || \
67 	defined(__sh__) || defined(__mips__)
68 static int rx_copybreak = 1518;
69 #else
70 static int rx_copybreak = 100;
71 #endif
72 
73 /*
74   Set the bus performance register.
75 	Typical: Set 16 longword cache alignment, no burst limit.
76 	Cache alignment bits 15:14	     Burst length 13:8
77 		0000	No alignment  0x00000000 unlimited		0800 8 longwords
78 		4000	8  longwords		0100 1 longword		1000 16 longwords
79 		8000	16 longwords		0200 2 longwords	2000 32 longwords
80 		C000	32  longwords		0400 4 longwords
81 	Warning: many older 486 systems are broken and require setting 0x00A04800
82 	   8 longword cache alignment, 8 longword burst.
83 	ToDo: Non-Intel setting could be better.
84 */
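/*
 * Worked example (editorial note, not from the original source): with the
 * layout above, 0x01A00000 | 0x8000 sets cache-alignment bits 15:14 = 10b
 * (16 longwords) and leaves burst-length bits 13:8 = 0 (unlimited burst),
 * i.e. the "typical" setting quoted above, while 0x4800 selects 8-longword
 * alignment with an 8-longword burst, matching the 486 workaround value
 * 0x00A04800.
 */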
85 
86 #if defined(__alpha__) || defined(__ia64__)
87 static int csr0 = 0x01A00000 | 0xE000;
88 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89 static int csr0 = 0x01A00000 | 0x8000;
90 #elif defined(CONFIG_SPARC) || defined(__hppa__)
91 /* The UltraSparc PCI controllers will disconnect at every 64-byte
92  * crossing anyway, so it makes no sense to tell Tulip to burst
93  * any more than that.
94  */
95 static int csr0 = 0x01A00000 | 0x9000;
96 #elif defined(__arm__) || defined(__sh__)
97 static int csr0 = 0x01A00000 | 0x4800;
98 #elif defined(__mips__)
99 static int csr0 = 0x00200000 | 0x4000;
100 #else
101 static int csr0;
102 #endif
103 
104 /* Operational parameters that usually are not changed. */
105 /* Time in jiffies before concluding the transmitter is hung. */
106 #define TX_TIMEOUT  (4*HZ)
107 
108 
109 MODULE_AUTHOR("The Linux Kernel Team");
110 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
111 MODULE_LICENSE("GPL");
112 MODULE_VERSION(DRV_VERSION);
113 module_param(tulip_debug, int, 0);
114 module_param(max_interrupt_work, int, 0);
115 module_param(rx_copybreak, int, 0);
116 module_param(csr0, int, 0);
117 module_param_array(options, int, NULL, 0);
118 module_param_array(full_duplex, int, NULL, 0);
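/*
 * Usage sketch (editorial note, illustrative only): the per-unit arrays are
 * indexed by board, so something like "modprobe tulip options=11,11
 * full_duplex=1,1" would request MII mode (medianame index 11) and full
 * duplex for the first two boards, per the medianame[] table above.
 */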
119 
120 #ifdef TULIP_DEBUG
121 int tulip_debug = TULIP_DEBUG;
122 #else
123 int tulip_debug = 1;
124 #endif
125 
126 static void tulip_timer(unsigned long data)
127 {
128 	struct net_device *dev = (struct net_device *)data;
129 	struct tulip_private *tp = netdev_priv(dev);
130 
131 	if (netif_running(dev))
132 		schedule_work(&tp->media_work);
133 }
134 
135 /*
136  * This table is used during operation for capabilities and the media timer.
137  *
138  * It is indexed via the values in 'enum chips'
139  */
140 
141 struct tulip_chip_table tulip_tbl[] = {
142   { }, /* placeholder for array, slot unused currently */
143   { }, /* placeholder for array, slot unused currently */
144 
145   /* DC21140 */
146   { "Digital DS21140 Tulip", 128, 0x0001ebef,
147 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
148 	tulip_media_task },
149 
150   /* DC21142, DC21143 */
151   { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
152 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
153 	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
154 
155   /* LC82C168 */
156   { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
157 	HAS_MII | HAS_PNICNWAY, pnic_timer, },
158 
159   /* MX98713 */
160   { "Macronix 98713 PMAC", 128, 0x0001ebef,
161 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
162 
163   /* MX98715 */
164   { "Macronix 98715 PMAC", 256, 0x0001ebef,
165 	HAS_MEDIA_TABLE, mxic_timer, },
166 
167   /* MX98725 */
168   { "Macronix 98725 PMAC", 256, 0x0001ebef,
169 	HAS_MEDIA_TABLE, mxic_timer, },
170 
171   /* AX88140 */
172   { "ASIX AX88140", 128, 0x0001fbff,
173 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
174 	| IS_ASIX, tulip_timer, tulip_media_task },
175 
176   /* PNIC2 */
177   { "Lite-On PNIC-II", 256, 0x0801fbff,
178 	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
179 
180   /* COMET */
181   { "ADMtek Comet", 256, 0x0001abef,
182 	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
183 
184   /* COMPEX9881 */
185   { "Compex 9881 PMAC", 128, 0x0001ebef,
186 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
187 
188   /* I21145 */
189   { "Intel DS21145 Tulip", 128, 0x0801fbff,
190 	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
191 	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
192 
193   /* DM910X */
194 #ifdef CONFIG_TULIP_DM910X
195   { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
196 	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
197 	tulip_timer, tulip_media_task },
198 #else
199   { NULL },
200 #endif
201 
202   /* RS7112 */
203   { "Conexant LANfinity", 256, 0x0001ebef,
204 	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
205 
206 };
207 
208 
209 static const struct pci_device_id tulip_pci_tbl[] = {
210 	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
211 	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
212 	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
213 	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
214 	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
215 /*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
216 	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
217 	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
218 	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
219 	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
229 	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
230 #ifdef CONFIG_TULIP_DM910X
231 	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
232 	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233 #endif
234 	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
235 	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
236 	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
237 	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
241 	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
246 	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
247 	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
248 	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
249 	{ } /* terminate list */
250 };
251 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
252 
253 
254 /* A full-duplex map for media types. */
255 const char tulip_media_cap[32] =
256 {0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
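/*
 * Editorial note: the values above are the MediaIs* capability flags from
 * tulip.h (MediaIsFD = 1, MediaAlwaysFD = 2, MediaIsMII = 4, MediaIsFx = 8,
 * MediaIs100 = 16), indexed by media type; e.g. entry 5 ("100baseTx-FDX")
 * is 19 = MediaIs100 | MediaAlwaysFD | MediaIsFD.
 */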
257 
258 static void tulip_tx_timeout(struct net_device *dev);
259 static void tulip_init_ring(struct net_device *dev);
260 static void tulip_free_ring(struct net_device *dev);
261 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
262 					  struct net_device *dev);
263 static int tulip_open(struct net_device *dev);
264 static int tulip_close(struct net_device *dev);
265 static void tulip_up(struct net_device *dev);
266 static void tulip_down(struct net_device *dev);
267 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
268 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
269 static void set_rx_mode(struct net_device *dev);
270 static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
271 #ifdef CONFIG_NET_POLL_CONTROLLER
272 static void poll_tulip(struct net_device *dev);
273 #endif
274 
275 static void tulip_set_power_state (struct tulip_private *tp,
276 				   int sleep, int snooze)
277 {
278 	if (tp->flags & HAS_ACPI) {
279 		u32 tmp, newtmp;
280 		pci_read_config_dword (tp->pdev, CFDD, &tmp);
281 		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
282 		if (sleep)
283 			newtmp |= CFDD_Sleep;
284 		else if (snooze)
285 			newtmp |= CFDD_Snooze;
286 		if (tmp != newtmp)
287 			pci_write_config_dword (tp->pdev, CFDD, newtmp);
288 	}
289 
290 }
291 
292 
293 static void tulip_up(struct net_device *dev)
294 {
295 	struct tulip_private *tp = netdev_priv(dev);
296 	void __iomem *ioaddr = tp->base_addr;
297 	int next_tick = 3*HZ;
298 	u32 reg;
299 	int i;
300 
301 #ifdef CONFIG_TULIP_NAPI
302 	napi_enable(&tp->napi);
303 #endif
304 
305 	/* Wake the chip from sleep/snooze mode. */
306 	tulip_set_power_state (tp, 0, 0);
307 
308 	/* Disable all WOL events */
309 	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
310 	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
311 	tulip_set_wolopts(tp->pdev, 0);
312 
313 	/* On some chip revs we must set the MII/SYM port before the reset!? */
314 	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
315 		iowrite32(0x00040000, ioaddr + CSR6);
316 
317 	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
318 	iowrite32(0x00000001, ioaddr + CSR0);
319 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
320 	udelay(100);
321 
322 	/* Deassert reset.
323 	   Wait the specified 50 PCI cycles after a reset by initializing
324 	   Tx and Rx queues and the address filter list. */
325 	iowrite32(tp->csr0, ioaddr + CSR0);
326 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
327 	udelay(100);
328 
329 	if (tulip_debug > 1)
330 		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
331 
332 	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
333 	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
334 	tp->cur_rx = tp->cur_tx = 0;
335 	tp->dirty_rx = tp->dirty_tx = 0;
336 
337 	if (tp->flags & MC_HASH_ONLY) {
338 		u32 addr_low = get_unaligned_le32(dev->dev_addr);
339 		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
340 		if (tp->chip_id == AX88140) {
341 			iowrite32(0, ioaddr + CSR13);
342 			iowrite32(addr_low,  ioaddr + CSR14);
343 			iowrite32(1, ioaddr + CSR13);
344 			iowrite32(addr_high, ioaddr + CSR14);
345 		} else if (tp->flags & COMET_MAC_ADDR) {
346 			iowrite32(addr_low,  ioaddr + 0xA4);
347 			iowrite32(addr_high, ioaddr + 0xA8);
348 			iowrite32(0, ioaddr + CSR27);
349 			iowrite32(0, ioaddr + CSR28);
350 		}
351 	} else {
352 		/* This is set_rx_mode(), but without starting the transmitter. */
353 		u16 *eaddrs = (u16 *)dev->dev_addr;
354 		u16 *setup_frm = &tp->setup_frame[15*6];
355 		dma_addr_t mapping;
356 
357 		/* 21140 bug: you must add the broadcast address. */
358 		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
359 		/* Fill the final entry of the table with our physical address. */
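		/* Editorial note: the 192-byte setup frame holds 16 perfect-filter
		   entries of 12 bytes each.  Each 6-byte address is written as three
		   16-bit words, and each word is stored twice because only the low
		   16 bits of every 32-bit longword in the frame are significant;
		   &tp->setup_frame[15*6] is therefore the 16th (last) entry. */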
360 		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
361 		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
362 		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
363 
364 		mapping = pci_map_single(tp->pdev, tp->setup_frame,
365 					 sizeof(tp->setup_frame),
366 					 PCI_DMA_TODEVICE);
367 		tp->tx_buffers[tp->cur_tx].skb = NULL;
368 		tp->tx_buffers[tp->cur_tx].mapping = mapping;
369 
370 		/* Put the setup frame on the Tx list. */
371 		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
372 		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
373 		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
374 
375 		tp->cur_tx++;
376 	}
377 
378 	tp->saved_if_port = dev->if_port;
379 	if (dev->if_port == 0)
380 		dev->if_port = tp->default_port;
381 
382 	/* Allow selecting a default media. */
383 	i = 0;
384 	if (tp->mtable == NULL)
385 		goto media_picked;
386 	if (dev->if_port) {
387 		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
388 			(dev->if_port == 12 ? 0 : dev->if_port);
389 		for (i = 0; i < tp->mtable->leafcount; i++)
390 			if (tp->mtable->mleaf[i].media == looking_for) {
391 				dev_info(&dev->dev,
392 					 "Using user-specified media %s\n",
393 					 medianame[dev->if_port]);
394 				goto media_picked;
395 			}
396 	}
397 	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
398 		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
399 		for (i = 0; i < tp->mtable->leafcount; i++)
400 			if (tp->mtable->mleaf[i].media == looking_for) {
401 				dev_info(&dev->dev,
402 					 "Using EEPROM-set media %s\n",
403 					 medianame[looking_for]);
404 				goto media_picked;
405 			}
406 	}
407 	/* Start sensing first non-full-duplex media. */
408 	for (i = tp->mtable->leafcount - 1;
409 		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
410 		;
411 media_picked:
412 
413 	tp->csr6 = 0;
414 	tp->cur_index = i;
415 	tp->nwayset = 0;
416 
417 	if (dev->if_port) {
418 		if (tp->chip_id == DC21143  &&
419 		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
420 			/* We must reset the media CSRs when we force-select MII mode. */
421 			iowrite32(0x0000, ioaddr + CSR13);
422 			iowrite32(0x0000, ioaddr + CSR14);
423 			iowrite32(0x0008, ioaddr + CSR15);
424 		}
425 		tulip_select_media(dev, 1);
426 	} else if (tp->chip_id == DC21142) {
427 		if (tp->mii_cnt) {
428 			tulip_select_media(dev, 1);
429 			if (tulip_debug > 1)
430 				dev_info(&dev->dev,
431 					 "Using MII transceiver %d, status %04x\n",
432 					 tp->phys[0],
433 					 tulip_mdio_read(dev, tp->phys[0], 1));
434 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
435 			tp->csr6 = csr6_mask_hdcap;
436 			dev->if_port = 11;
437 			iowrite32(0x0000, ioaddr + CSR13);
438 			iowrite32(0x0000, ioaddr + CSR14);
439 		} else
440 			t21142_start_nway(dev);
441 	} else if (tp->chip_id == PNIC2) {
442 	        /* for initial startup advertise 10/100 Full and Half */
443 	        tp->sym_advertise = 0x01E0;
444                 /* enable autonegotiate end interrupt */
445 	        iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
446 	        iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
447 		pnic2_start_nway(dev);
448 	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
449 		if (tp->mii_cnt) {
450 			dev->if_port = 11;
451 			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
452 			iowrite32(0x0001, ioaddr + CSR15);
453 		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
454 			pnic_do_nway(dev);
455 		else {
456 			/* Start with 10mbps to do autonegotiation. */
457 			iowrite32(0x32, ioaddr + CSR12);
458 			tp->csr6 = 0x00420000;
459 			iowrite32(0x0001B078, ioaddr + 0xB8);
460 			iowrite32(0x0201B078, ioaddr + 0xB8);
461 			next_tick = 1*HZ;
462 		}
463 	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
464 		   ! tp->medialock) {
465 		dev->if_port = 0;
466 		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
467 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
468 	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
469 		/* Provided by BOLO, Macronix - 12/10/1998. */
470 		dev->if_port = 0;
471 		tp->csr6 = 0x01a80200;
472 		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
473 		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
474 	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
475 		/* Enable automatic Tx underrun recovery. */
476 		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
477 		dev->if_port = tp->mii_cnt ? 11 : 0;
478 		tp->csr6 = 0x00040000;
479 	} else if (tp->chip_id == AX88140) {
480 		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
481 	} else
482 		tulip_select_media(dev, 1);
483 
484 	/* Start the chip's Tx to process setup frame. */
485 	tulip_stop_rxtx(tp);
486 	barrier();
487 	udelay(5);
488 	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
489 
490 	/* Enable interrupts by setting the interrupt mask. */
491 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
492 	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
493 	tulip_start_rxtx(tp);
494 	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */
495 
496 	if (tulip_debug > 2) {
497 		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
498 			   ioread32(ioaddr + CSR0),
499 			   ioread32(ioaddr + CSR5),
500 			   ioread32(ioaddr + CSR6));
501 	}
502 
503 	/* Set the timer to check for link beat and perhaps switch
504 	   to an alternate media type. */
505 	tp->timer.expires = RUN_AT(next_tick);
506 	add_timer(&tp->timer);
507 #ifdef CONFIG_TULIP_NAPI
508 	init_timer(&tp->oom_timer);
509         tp->oom_timer.data = (unsigned long)dev;
510         tp->oom_timer.function = oom_timer;
511 #endif
512 }
513 
514 static int
515 tulip_open(struct net_device *dev)
516 {
517 	struct tulip_private *tp = netdev_priv(dev);
518 	int retval;
519 
520 	tulip_init_ring (dev);
521 
522 	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
523 			     dev->name, dev);
524 	if (retval)
525 		goto free_ring;
526 
527 	tulip_up (dev);
528 
529 	netif_start_queue (dev);
530 
531 	return 0;
532 
533 free_ring:
534 	tulip_free_ring (dev);
535 	return retval;
536 }
537 
538 
539 static void tulip_tx_timeout(struct net_device *dev)
540 {
541 	struct tulip_private *tp = netdev_priv(dev);
542 	void __iomem *ioaddr = tp->base_addr;
543 	unsigned long flags;
544 
545 	spin_lock_irqsave (&tp->lock, flags);
546 
547 	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
548 		/* Do nothing -- the media monitor should handle this. */
549 		if (tulip_debug > 1)
550 			dev_warn(&dev->dev,
551 				 "Transmit timeout using MII device\n");
552 	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
553 		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
554 		   tp->chip_id == DM910X) {
555 		dev_warn(&dev->dev,
556 			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
557 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
558 			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
559 			 ioread32(ioaddr + CSR15));
560 		tp->timeout_recovery = 1;
561 		schedule_work(&tp->media_work);
562 		goto out_unlock;
563 	} else if (tp->chip_id == PNIC2) {
564 		dev_warn(&dev->dev,
565 			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
566 			 (int)ioread32(ioaddr + CSR5),
567 			 (int)ioread32(ioaddr + CSR6),
568 			 (int)ioread32(ioaddr + CSR7),
569 			 (int)ioread32(ioaddr + CSR12));
570 	} else {
571 		dev_warn(&dev->dev,
572 			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
573 			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
574 		dev->if_port = 0;
575 	}
576 
577 #if defined(way_too_many_messages)
578 	if (tulip_debug > 3) {
579 		int i;
580 		for (i = 0; i < RX_RING_SIZE; i++) {
581 			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
582 			int j;
583 			printk(KERN_DEBUG
584 			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
585 			       i,
586 			       (unsigned int)tp->rx_ring[i].status,
587 			       (unsigned int)tp->rx_ring[i].length,
588 			       (unsigned int)tp->rx_ring[i].buffer1,
589 			       (unsigned int)tp->rx_ring[i].buffer2,
590 			       buf[0], buf[1], buf[2]);
591 			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
592 				if (j < 100)
593 					pr_cont(" %02x", buf[j]);
594 			pr_cont(" j=%d\n", j);
595 		}
596 		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
597 		for (i = 0; i < RX_RING_SIZE; i++)
598 			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
599 		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
600 		for (i = 0; i < TX_RING_SIZE; i++)
601 			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
602 		pr_cont("\n");
603 	}
604 #endif
605 
606 	tulip_tx_timeout_complete(tp, ioaddr);
607 
608 out_unlock:
609 	spin_unlock_irqrestore (&tp->lock, flags);
610 	dev->trans_start = jiffies; /* prevent tx timeout */
611 	netif_wake_queue (dev);
612 }
613 
614 
615 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
616 static void tulip_init_ring(struct net_device *dev)
617 {
618 	struct tulip_private *tp = netdev_priv(dev);
619 	int i;
620 
621 	tp->susp_rx = 0;
622 	tp->ttimer = 0;
623 	tp->nir = 0;
624 
625 	for (i = 0; i < RX_RING_SIZE; i++) {
626 		tp->rx_ring[i].status = 0x00000000;
627 		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
628 		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
629 		tp->rx_buffers[i].skb = NULL;
630 		tp->rx_buffers[i].mapping = 0;
631 	}
632 	/* Mark the last entry as wrapping the ring. */
633 	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
634 	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
635 
636 	for (i = 0; i < RX_RING_SIZE; i++) {
637 		dma_addr_t mapping;
638 
639 		/* Note the receive buffer must be longword aligned.
640 		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
641 		   use skb_reserve() to align the IP header! */
642 		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
643 		tp->rx_buffers[i].skb = skb;
644 		if (skb == NULL)
645 			break;
646 		mapping = pci_map_single(tp->pdev, skb->data,
647 					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
648 		tp->rx_buffers[i].mapping = mapping;
649 		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
650 		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
651 	}
652 	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
653 
654 	/* The Tx buffer descriptor is filled in as needed, but we
655 	   do need to clear the ownership bit. */
656 	for (i = 0; i < TX_RING_SIZE; i++) {
657 		tp->tx_buffers[i].skb = NULL;
658 		tp->tx_buffers[i].mapping = 0;
659 		tp->tx_ring[i].status = 0x00000000;
660 		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
661 	}
662 	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
663 }
664 
665 static netdev_tx_t
666 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
667 {
668 	struct tulip_private *tp = netdev_priv(dev);
669 	int entry;
670 	u32 flag;
671 	dma_addr_t mapping;
672 	unsigned long flags;
673 
674 	spin_lock_irqsave(&tp->lock, flags);
675 
676 	/* Calculate the next Tx descriptor entry. */
677 	entry = tp->cur_tx % TX_RING_SIZE;
678 
679 	tp->tx_buffers[entry].skb = skb;
680 	mapping = pci_map_single(tp->pdev, skb->data,
681 				 skb->len, PCI_DMA_TODEVICE);
682 	tp->tx_buffers[entry].mapping = mapping;
683 	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
684 
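	/* Editorial note: the length-field flags below are TDES1 control bits:
	   0x20000000 = first segment, 0x40000000 = last segment, 0x80000000 =
	   interrupt on completion.  Requesting a Tx-done interrupt only at the
	   half-way mark and again when the ring is nearly full batches
	   completions and keeps the interrupt rate down. */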
685 	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
686 		flag = 0x60000000; /* No interrupt */
687 	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
688 		flag = 0xe0000000; /* Tx-done intr. */
689 	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
690 		flag = 0x60000000; /* No Tx-done intr. */
691 	} else {		/* Leave room for set_rx_mode() to fill entries. */
692 		flag = 0xe0000000; /* Tx-done intr. */
693 		netif_stop_queue(dev);
694 	}
695 	if (entry == TX_RING_SIZE-1)
696 		flag = 0xe0000000 | DESC_RING_WRAP;
697 
698 	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
699 	/* if we were using Transmit Automatic Polling, we would need a
700 	 * wmb() here. */
701 	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
702 	wmb();
703 
704 	tp->cur_tx++;
705 
706 	/* Trigger an immediate transmit demand. */
707 	iowrite32(0, tp->base_addr + CSR1);
708 
709 	spin_unlock_irqrestore(&tp->lock, flags);
710 
711 	return NETDEV_TX_OK;
712 }
713 
714 static void tulip_clean_tx_ring(struct tulip_private *tp)
715 {
716 	unsigned int dirty_tx;
717 
718 	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
719 		dirty_tx++) {
720 		int entry = dirty_tx % TX_RING_SIZE;
721 		int status = le32_to_cpu(tp->tx_ring[entry].status);
722 
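		/* Editorial note: a negative status means bit 31 (DescOwned) is
		   still set, i.e. the chip never completed this descriptor, so it
		   is counted as a Tx error. */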
723 		if (status < 0) {
724 			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
725 			tp->tx_ring[entry].status = 0;
726 		}
727 
728 		/* Check for Tx filter setup frames. */
729 		if (tp->tx_buffers[entry].skb == NULL) {
730 			/* test because dummy frames not mapped */
731 			if (tp->tx_buffers[entry].mapping)
732 				pci_unmap_single(tp->pdev,
733 					tp->tx_buffers[entry].mapping,
734 					sizeof(tp->setup_frame),
735 					PCI_DMA_TODEVICE);
736 			continue;
737 		}
738 
739 		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
740 				tp->tx_buffers[entry].skb->len,
741 				PCI_DMA_TODEVICE);
742 
743 		/* Free the original skb. */
744 		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
745 		tp->tx_buffers[entry].skb = NULL;
746 		tp->tx_buffers[entry].mapping = 0;
747 	}
748 }
749 
750 static void tulip_down (struct net_device *dev)
751 {
752 	struct tulip_private *tp = netdev_priv(dev);
753 	void __iomem *ioaddr = tp->base_addr;
754 	unsigned long flags;
755 
756 	cancel_work_sync(&tp->media_work);
757 
758 #ifdef CONFIG_TULIP_NAPI
759 	napi_disable(&tp->napi);
760 #endif
761 
762 	del_timer_sync (&tp->timer);
763 #ifdef CONFIG_TULIP_NAPI
764 	del_timer_sync (&tp->oom_timer);
765 #endif
766 	spin_lock_irqsave (&tp->lock, flags);
767 
768 	/* Disable interrupts by clearing the interrupt mask. */
769 	iowrite32 (0x00000000, ioaddr + CSR7);
770 
771 	/* Stop the Tx and Rx processes. */
772 	tulip_stop_rxtx(tp);
773 
774 	/* prepare receive buffers */
775 	tulip_refill_rx(dev);
776 
777 	/* release any unconsumed transmit buffers */
778 	tulip_clean_tx_ring(tp);
779 
780 	if (ioread32(ioaddr + CSR6) != 0xffffffff)
781 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
782 
783 	spin_unlock_irqrestore (&tp->lock, flags);
784 
785 	init_timer(&tp->timer);
786 	tp->timer.data = (unsigned long)dev;
787 	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
788 
789 	dev->if_port = tp->saved_if_port;
790 
791 	/* Leave the driver in snooze, not sleep, mode. */
792 	tulip_set_power_state (tp, 0, 1);
793 }
794 
795 static void tulip_free_ring (struct net_device *dev)
796 {
797 	struct tulip_private *tp = netdev_priv(dev);
798 	int i;
799 
800 	/* Free all the skbuffs in the Rx queue. */
801 	for (i = 0; i < RX_RING_SIZE; i++) {
802 		struct sk_buff *skb = tp->rx_buffers[i].skb;
803 		dma_addr_t mapping = tp->rx_buffers[i].mapping;
804 
805 		tp->rx_buffers[i].skb = NULL;
806 		tp->rx_buffers[i].mapping = 0;
807 
808 		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
809 		tp->rx_ring[i].length = 0;
810 		/* An invalid address. */
811 		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
812 		if (skb) {
813 			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
814 					 PCI_DMA_FROMDEVICE);
815 			dev_kfree_skb (skb);
816 		}
817 	}
818 
819 	for (i = 0; i < TX_RING_SIZE; i++) {
820 		struct sk_buff *skb = tp->tx_buffers[i].skb;
821 
822 		if (skb != NULL) {
823 			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
824 					 skb->len, PCI_DMA_TODEVICE);
825 			dev_kfree_skb (skb);
826 		}
827 		tp->tx_buffers[i].skb = NULL;
828 		tp->tx_buffers[i].mapping = 0;
829 	}
830 }
831 
832 static int tulip_close (struct net_device *dev)
833 {
834 	struct tulip_private *tp = netdev_priv(dev);
835 	void __iomem *ioaddr = tp->base_addr;
836 
837 	netif_stop_queue (dev);
838 
839 	tulip_down (dev);
840 
841 	if (tulip_debug > 1)
842 		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
843 			   ioread32 (ioaddr + CSR5));
844 
845 	free_irq (tp->pdev->irq, dev);
846 
847 	tulip_free_ring (dev);
848 
849 	return 0;
850 }
851 
852 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
853 {
854 	struct tulip_private *tp = netdev_priv(dev);
855 	void __iomem *ioaddr = tp->base_addr;
856 
857 	if (netif_running(dev)) {
858 		unsigned long flags;
859 
860 		spin_lock_irqsave (&tp->lock, flags);
861 
862 		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
863 
864 		spin_unlock_irqrestore(&tp->lock, flags);
865 	}
866 
867 	return &dev->stats;
868 }
869 
870 
871 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
872 {
873 	struct tulip_private *np = netdev_priv(dev);
874 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
875 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
876 	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
877 }
878 
879 
880 static int tulip_ethtool_set_wol(struct net_device *dev,
881 				 struct ethtool_wolinfo *wolinfo)
882 {
883 	struct tulip_private *tp = netdev_priv(dev);
884 
885 	if (wolinfo->wolopts & (~tp->wolinfo.supported))
886 		   return -EOPNOTSUPP;
887 
888 	tp->wolinfo.wolopts = wolinfo->wolopts;
889 	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
890 	return 0;
891 }
892 
893 static void tulip_ethtool_get_wol(struct net_device *dev,
894 				  struct ethtool_wolinfo *wolinfo)
895 {
896 	struct tulip_private *tp = netdev_priv(dev);
897 
898 	wolinfo->supported = tp->wolinfo.supported;
899 	wolinfo->wolopts = tp->wolinfo.wolopts;
900 	return;
901 }
902 
903 
904 static const struct ethtool_ops ops = {
905 	.get_drvinfo = tulip_get_drvinfo,
906 	.set_wol     = tulip_ethtool_set_wol,
907 	.get_wol     = tulip_ethtool_get_wol,
908 };
909 
910 /* Provide ioctl() calls to examine the MII xcvr state. */
911 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
912 {
913 	struct tulip_private *tp = netdev_priv(dev);
914 	void __iomem *ioaddr = tp->base_addr;
915 	struct mii_ioctl_data *data = if_mii(rq);
916 	const unsigned int phy_idx = 0;
917 	int phy = tp->phys[phy_idx] & 0x1f;
918 	unsigned int regnum = data->reg_num;
919 
920 	switch (cmd) {
921 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
922 		if (tp->mii_cnt)
923 			data->phy_id = phy;
924 		else if (tp->flags & HAS_NWAY)
925 			data->phy_id = 32;
926 		else if (tp->chip_id == COMET)
927 			data->phy_id = 1;
928 		else
929 			return -ENODEV;
930 
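		/* Editorial note: there is no break above, apparently deliberate --
		   SIOCGMIIPHY falls through so the requested register is read as
		   well, matching the usual MII ioctl pattern. */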
931 	case SIOCGMIIREG:		/* Read MII PHY register. */
932 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
933 			int csr12 = ioread32 (ioaddr + CSR12);
934 			int csr14 = ioread32 (ioaddr + CSR14);
935 			switch (regnum) {
936 			case 0:
937                                 if (((csr14<<5) & 0x1000) ||
938                                         (dev->if_port == 5 && tp->nwayset))
939                                         data->val_out = 0x1000;
940                                 else
941                                         data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
942                                                 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
943 				break;
944 			case 1:
945                                 data->val_out =
946 					0x1848 +
947 					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
948 					((csr12&0x06) == 6 ? 0 : 4);
949                                 data->val_out |= 0x6048;
950 				break;
951 			case 4:
952                                 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
953                                 data->val_out =
954 					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
955 					((csr14 >> 1) & 0x20) + 1;
956                                 data->val_out |= ((csr14 >> 9) & 0x03C0);
957 				break;
958 			case 5: data->val_out = tp->lpar; break;
959 			default: data->val_out = 0; break;
960 			}
961 		} else {
962 			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
963 		}
964 		return 0;
965 
966 	case SIOCSMIIREG:		/* Write MII PHY register. */
967 		if (regnum & ~0x1f)
968 			return -EINVAL;
969 		if (data->phy_id == phy) {
970 			u16 value = data->val_in;
971 			switch (regnum) {
972 			case 0:	/* Check for autonegotiation on or reset. */
973 				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
974 				if (tp->full_duplex_lock)
975 					tp->full_duplex = (value & 0x0100) ? 1 : 0;
976 				break;
977 			case 4:
978 				tp->advertising[phy_idx] =
979 				tp->mii_advertise = data->val_in;
980 				break;
981 			}
982 		}
983 		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
984 			u16 value = data->val_in;
985 			if (regnum == 0) {
986 			  if ((value & 0x1200) == 0x1200) {
987 			    if (tp->chip_id == PNIC2) {
988                                    pnic2_start_nway (dev);
989                             } else {
990 				   t21142_start_nway (dev);
991                             }
992 			  }
993 			} else if (regnum == 4)
994 				tp->sym_advertise = value;
995 		} else {
996 			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
997 		}
998 		return 0;
999 	default:
1000 		return -EOPNOTSUPP;
1001 	}
1002 
1003 	return -EOPNOTSUPP;
1004 }
1005 
1006 
1007 /* Set or clear the multicast filter for this adaptor.
1008    Note that we only use exclusion around actually queueing the
1009    new frame, not around filling tp->setup_frame.  This is non-deterministic
1010    when re-entered but still correct. */
1011 
1012 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1013 {
1014 	struct tulip_private *tp = netdev_priv(dev);
1015 	u16 hash_table[32];
1016 	struct netdev_hw_addr *ha;
1017 	int i;
1018 	u16 *eaddrs;
1019 
1020 	memset(hash_table, 0, sizeof(hash_table));
1021 	__set_bit_le(255, hash_table);			/* Broadcast entry */
1022 	/* This should work on big-endian machines as well. */
1023 	netdev_for_each_mc_addr(ha, dev) {
1024 		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
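		/* Editorial note: the low 9 bits of the little-endian CRC-32 index
		   a 512-bit imperfect hash filter (hash_table[] is 32 x 16 bits). */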
1025 
1026 		__set_bit_le(index, hash_table);
1027 	}
1028 	for (i = 0; i < 32; i++) {
1029 		*setup_frm++ = hash_table[i];
1030 		*setup_frm++ = hash_table[i];
1031 	}
1032 	setup_frm = &tp->setup_frame[13*6];
1033 
1034 	/* Fill the final entry with our physical address. */
1035 	eaddrs = (u16 *)dev->dev_addr;
1036 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1037 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1038 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1039 }
1040 
1041 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1042 {
1043 	struct tulip_private *tp = netdev_priv(dev);
1044 	struct netdev_hw_addr *ha;
1045 	u16 *eaddrs;
1046 
1047 	/* We have <= 14 addresses so we can use the wonderful
1048 	   16 address perfect filtering of the Tulip. */
1049 	netdev_for_each_mc_addr(ha, dev) {
1050 		eaddrs = (u16 *) ha->addr;
1051 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1052 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1053 		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1054 	}
1055 	/* Fill the unused entries with the broadcast address. */
1056 	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1057 	setup_frm = &tp->setup_frame[15*6];
1058 
1059 	/* Fill the final entry with our physical address. */
1060 	eaddrs = (u16 *)dev->dev_addr;
1061 	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1062 	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1063 	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1064 }
1065 
1066 
1067 static void set_rx_mode(struct net_device *dev)
1068 {
1069 	struct tulip_private *tp = netdev_priv(dev);
1070 	void __iomem *ioaddr = tp->base_addr;
1071 	int csr6;
1072 
1073 	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1074 
1075 	tp->csr6 &= ~0x00D5;
1076 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1077 		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1078 		csr6 |= AcceptAllMulticast | AcceptAllPhys;
1079 	} else if ((netdev_mc_count(dev) > 1000) ||
1080 		   (dev->flags & IFF_ALLMULTI)) {
1081 		/* Too many to filter well -- accept all multicasts. */
1082 		tp->csr6 |= AcceptAllMulticast;
1083 		csr6 |= AcceptAllMulticast;
1084 	} else	if (tp->flags & MC_HASH_ONLY) {
1085 		/* Some work-alikes have only a 64-entry hash filter table. */
1086 		/* Should verify correctness on big-endian/__powerpc__ */
1087 		struct netdev_hw_addr *ha;
1088 		if (netdev_mc_count(dev) > 64) {
1089 			/* Arbitrary non-effective limit. */
1090 			tp->csr6 |= AcceptAllMulticast;
1091 			csr6 |= AcceptAllMulticast;
1092 		} else {
1093 			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
1094 			int filterbit;
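			/* Editorial note: mc_filter[] forms a 64-bit hash filter; six
			   bits of the address CRC (low bits on Comet, top bits
			   otherwise) select one of 64 positions, split across two
			   32-bit registers when written out below. */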
1095 			netdev_for_each_mc_addr(ha, dev) {
1096 				if (tp->flags & COMET_MAC_ADDR)
1097 					filterbit = ether_crc_le(ETH_ALEN,
1098 								 ha->addr);
1099 				else
1100 					filterbit = ether_crc(ETH_ALEN,
1101 							      ha->addr) >> 26;
1102 				filterbit &= 0x3f;
1103 				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1104 				if (tulip_debug > 2)
1105 					dev_info(&dev->dev,
1106 						 "Added filter for %pM  %08x bit %d\n",
1107 						 ha->addr,
1108 						 ether_crc(ETH_ALEN, ha->addr),
1109 						 filterbit);
1110 			}
1111 			if (mc_filter[0] == tp->mc_filter[0]  &&
1112 				mc_filter[1] == tp->mc_filter[1])
1113 				;				/* No change. */
1114 			else if (tp->flags & IS_ASIX) {
1115 				iowrite32(2, ioaddr + CSR13);
1116 				iowrite32(mc_filter[0], ioaddr + CSR14);
1117 				iowrite32(3, ioaddr + CSR13);
1118 				iowrite32(mc_filter[1], ioaddr + CSR14);
1119 			} else if (tp->flags & COMET_MAC_ADDR) {
1120 				iowrite32(mc_filter[0], ioaddr + CSR27);
1121 				iowrite32(mc_filter[1], ioaddr + CSR28);
1122 			}
1123 			tp->mc_filter[0] = mc_filter[0];
1124 			tp->mc_filter[1] = mc_filter[1];
1125 		}
1126 	} else {
1127 		unsigned long flags;
1128 		u32 tx_flags = 0x08000000 | 192;
1129 
1130 		/* Note that only the low-address shortword of setup_frame is valid!
1131 		   The values are doubled for big-endian architectures. */
1132 		if (netdev_mc_count(dev) > 14) {
1133 			/* Must use a multicast hash table. */
1134 			build_setup_frame_hash(tp->setup_frame, dev);
1135 			tx_flags = 0x08400000 | 192;
1136 		} else {
1137 			build_setup_frame_perfect(tp->setup_frame, dev);
1138 		}
1139 
1140 		spin_lock_irqsave(&tp->lock, flags);
1141 
1142 		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1143 			/* Same setup recently queued, we need not add it. */
1144 		} else {
1145 			unsigned int entry;
1146 			int dummy = -1;
1147 
1148 			/* Now add this frame to the Tx list. */
1149 
1150 			entry = tp->cur_tx++ % TX_RING_SIZE;
1151 
1152 			if (entry != 0) {
1153 				/* Avoid a chip errata by prefixing a dummy entry. */
1154 				tp->tx_buffers[entry].skb = NULL;
1155 				tp->tx_buffers[entry].mapping = 0;
1156 				tp->tx_ring[entry].length =
1157 					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1158 				tp->tx_ring[entry].buffer1 = 0;
1159 				/* Must set DescOwned later to avoid race with chip */
1160 				dummy = entry;
1161 				entry = tp->cur_tx++ % TX_RING_SIZE;
1162 
1163 			}
1164 
1165 			tp->tx_buffers[entry].skb = NULL;
1166 			tp->tx_buffers[entry].mapping =
1167 				pci_map_single(tp->pdev, tp->setup_frame,
1168 					       sizeof(tp->setup_frame),
1169 					       PCI_DMA_TODEVICE);
1170 			/* Put the setup frame on the Tx list. */
1171 			if (entry == TX_RING_SIZE-1)
1172 				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
1173 			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1174 			tp->tx_ring[entry].buffer1 =
1175 				cpu_to_le32(tp->tx_buffers[entry].mapping);
1176 			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1177 			if (dummy >= 0)
1178 				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1179 			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1180 				netif_stop_queue(dev);
1181 
1182 			/* Trigger an immediate transmit demand. */
1183 			iowrite32(0, ioaddr + CSR1);
1184 		}
1185 
1186 		spin_unlock_irqrestore(&tp->lock, flags);
1187 	}
1188 
1189 	iowrite32(csr6, ioaddr + CSR6);
1190 }
1191 
1192 #ifdef CONFIG_TULIP_MWI
1193 static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1194 {
1195 	struct tulip_private *tp = netdev_priv(dev);
1196 	u8 cache;
1197 	u16 pci_command;
1198 	u32 csr0;
1199 
1200 	if (tulip_debug > 3)
1201 		netdev_dbg(dev, "tulip_mwi_config()\n");
1202 
1203 	tp->csr0 = csr0 = 0;
1204 
1205 	/* if we have any cache line size at all, we can do MRM and MWI */
1206 	csr0 |= MRM | MWI;
1207 
1208 	/* Enable MWI in the standard PCI command bit.
1209 	 * Check for the case where MWI is desired but not available
1210 	 */
1211 	pci_try_set_mwi(pdev);
1212 
1213 	/* read result from hardware (in case bit refused to enable) */
1214 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1215 	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1216 		csr0 &= ~MWI;
1217 
1218 	/* if cache line size hardwired to zero, no MWI */
1219 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1220 	if ((csr0 & MWI) && (cache == 0)) {
1221 		csr0 &= ~MWI;
1222 		pci_clear_mwi(pdev);
1223 	}
1224 
1225 	/* assign per-cacheline-size cache alignment and
1226 	 * burst length values
1227 	 */
1228 	switch (cache) {
1229 	case 8:
1230 		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1231 		break;
1232 	case 16:
1233 		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1234 		break;
1235 	case 32:
1236 		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1237 		break;
1238 	default:
1239 		cache = 0;
1240 		break;
1241 	}
1242 
1243 	/* if we have a good cache line size, we by now have a good
1244 	 * csr0, so save it and exit
1245 	 */
1246 	if (cache)
1247 		goto out;
1248 
1249 	/* we don't have a good csr0 or cache line size, disable MWI */
1250 	if (csr0 & MWI) {
1251 		pci_clear_mwi(pdev);
1252 		csr0 &= ~MWI;
1253 	}
1254 
1255 	/* sane defaults for burst length and cache alignment
1256 	 * originally from de4x5 driver
1257 	 */
1258 	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1259 
1260 out:
1261 	tp->csr0 = csr0;
1262 	if (tulip_debug > 2)
1263 		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1264 			   cache, csr0);
1265 }
1266 #endif
1267 
1268 /*
1269  *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
1270  *	is the DM910X and the on chip ULi devices
1271  */
1272 
1273 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1274 {
1275 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1276 		return 1;
1277 	return 0;
1278 }
1279 
1280 static const struct net_device_ops tulip_netdev_ops = {
1281 	.ndo_open		= tulip_open,
1282 	.ndo_start_xmit		= tulip_start_xmit,
1283 	.ndo_tx_timeout		= tulip_tx_timeout,
1284 	.ndo_stop		= tulip_close,
1285 	.ndo_get_stats		= tulip_get_stats,
1286 	.ndo_do_ioctl 		= private_ioctl,
1287 	.ndo_set_rx_mode	= set_rx_mode,
1288 	.ndo_change_mtu		= eth_change_mtu,
1289 	.ndo_set_mac_address	= eth_mac_addr,
1290 	.ndo_validate_addr	= eth_validate_addr,
1291 #ifdef CONFIG_NET_POLL_CONTROLLER
1292 	.ndo_poll_controller	 = poll_tulip,
1293 #endif
1294 };
1295 
1296 const struct pci_device_id early_486_chipsets[] = {
1297 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1298 	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1299 	{ },
1300 };
1301 
1302 static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1303 {
1304 	struct tulip_private *tp;
1305 	/* See note below on the multiport cards. */
1306 	static unsigned char last_phys_addr[ETH_ALEN] = {
1307 		0x00, 'L', 'i', 'n', 'u', 'x'
1308 	};
1309 	static int last_irq;
1310 	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
1311 	int i, irq;
1312 	unsigned short sum;
1313 	unsigned char *ee_data;
1314 	struct net_device *dev;
1315 	void __iomem *ioaddr;
1316 	static int board_idx = -1;
1317 	int chip_idx = ent->driver_data;
1318 	const char *chip_name = tulip_tbl[chip_idx].chip_name;
1319 	unsigned int eeprom_missing = 0;
1320 	unsigned int force_csr0 = 0;
1321 
1322 #ifndef MODULE
1323 	if (tulip_debug > 0)
1324 		printk_once(KERN_INFO "%s", version);
1325 #endif
1326 
1327 	board_idx++;
1328 
1329 	/*
1330 	 *	LAN Media cards wire a Tulip chip to a WAN interface; they need a
1331 	 *	very different driver (the lmc driver).
1332 	 */
1333 
1334         if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1335 		pr_err("skipping LMC card\n");
1336 		return -ENODEV;
1337 	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1338 		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1339 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1340 		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1341 		pr_err("skipping SBE T3E3 port\n");
1342 		return -ENODEV;
1343 	}
1344 
1345 	/*
1346 	 *	DM910x chips should be handled by the dmfe driver, except
1347 	 *	on-board chips on SPARC systems.  Also, early DM9100s need
1348 	 *	software CRC which only the dmfe driver supports.
1349 	 */
1350 
1351 #ifdef CONFIG_TULIP_DM910X
1352 	if (chip_idx == DM910X) {
1353 		struct device_node *dp;
1354 
1355 		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1356 		    pdev->revision < 0x30) {
1357 			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1358 			return -ENODEV;
1359 		}
1360 
1361 		dp = pci_device_to_OF_node(pdev);
1362 		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1363 			pr_info("skipping DM910x expansion card (use dmfe)\n");
1364 			return -ENODEV;
1365 		}
1366 	}
1367 #endif
1368 
1369 	/*
1370 	 *	Looks for early PCI chipsets where people report hangs
1371 	 *	without the workarounds being on.
1372 	 */
1373 
1374 	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1375 	      aligned.  Aries might need this too. The Saturn errata are not
1376 	      pretty reading but thankfully it's an old 486 chipset.
1377 
1378 	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
1379 	      Saturn.
1380 	*/
1381 
1382 	if (pci_dev_present(early_486_chipsets)) {
1383 		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1384 		force_csr0 = 1;
1385 	}
1386 
1387 	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
1388 	if (chip_idx == AX88140) {
1389 		if ((csr0 & 0x3f00) == 0)
1390 			csr0 |= 0x2000;
1391 	}
1392 
1393 	/* PNIC doesn't have MWI/MRL/MRM... */
1394 	if (chip_idx == LC82C168)
1395 		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1396 
1397 	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1398 	if (tulip_uli_dm_quirk(pdev)) {
1399 		csr0 &= ~0x01f100ff;
1400 #if defined(CONFIG_SPARC)
1401                 csr0 = (csr0 & ~0xff00) | 0xe000;
1402 #endif
1403 	}
1404 	/*
1405 	 *	And back to business
1406 	 */
1407 
1408 	i = pci_enable_device(pdev);
1409 	if (i) {
1410 		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1411 		return i;
1412 	}
1413 
1414 	irq = pdev->irq;
1415 
1416 	/* alloc_etherdev ensures aligned and zeroed private structures */
1417 	dev = alloc_etherdev (sizeof (*tp));
1418 	if (!dev)
1419 		return -ENOMEM;
1420 
1421 	SET_NETDEV_DEV(dev, &pdev->dev);
1422 	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1423 		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1424 		       pci_name(pdev),
1425 		       (unsigned long long)pci_resource_len (pdev, 0),
1426 		       (unsigned long long)pci_resource_start (pdev, 0));
1427 		goto err_out_free_netdev;
1428 	}
1429 
1430 	/* grab all resources from both PIO and MMIO regions, as we
1431 	 * don't want anyone else messing around with our hardware */
1432 	if (pci_request_regions (pdev, DRV_NAME))
1433 		goto err_out_free_netdev;
1434 
1435 	ioaddr =  pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1436 
1437 	if (!ioaddr)
1438 		goto err_out_free_res;
1439 
1440 	/*
1441 	 * initialize private data structure 'tp'
1442 	 * it is zeroed and aligned in alloc_etherdev
1443 	 */
1444 	tp = netdev_priv(dev);
1445 	tp->dev = dev;
1446 
1447 	tp->rx_ring = pci_alloc_consistent(pdev,
1448 					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1449 					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1450 					   &tp->rx_ring_dma);
1451 	if (!tp->rx_ring)
1452 		goto err_out_mtable;
1453 	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1454 	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1455 
1456 	tp->chip_id = chip_idx;
1457 	tp->flags = tulip_tbl[chip_idx].flags;
1458 
1459 	tp->wolinfo.supported = 0;
1460 	tp->wolinfo.wolopts = 0;
1461 	/* COMET: Enable power management only for AN983B */
1462 	if (chip_idx == COMET ) {
1463 		u32 sig;
1464 		pci_read_config_dword (pdev, 0x80, &sig);
1465 		if (sig == 0x09811317) {
1466 			tp->flags |= COMET_PM;
1467 			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1468 			pr_info("%s: Enabled WOL support for AN983B\n",
1469 				__func__);
1470 		}
1471 	}
1472 	tp->pdev = pdev;
1473 	tp->base_addr = ioaddr;
1474 	tp->revision = pdev->revision;
1475 	tp->csr0 = csr0;
1476 	spin_lock_init(&tp->lock);
1477 	spin_lock_init(&tp->mii_lock);
1478 	init_timer(&tp->timer);
1479 	tp->timer.data = (unsigned long)dev;
1480 	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1481 
1482 	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1483 
1484 #ifdef CONFIG_TULIP_MWI
1485 	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1486 		tulip_mwi_config (pdev, dev);
1487 #endif
1488 
1489 	/* Stop the chip's Tx and Rx processes. */
1490 	tulip_stop_rxtx(tp);
1491 
1492 	pci_set_master(pdev);
1493 
1494 #ifdef CONFIG_GSC
1495 	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1496 		switch (pdev->subsystem_device) {
1497 		default:
1498 			break;
1499 		case 0x1061:
1500 		case 0x1062:
1501 		case 0x1063:
1502 		case 0x1098:
1503 		case 0x1099:
1504 		case 0x10EE:
1505 			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1506 			chip_name = "GSC DS21140 Tulip";
1507 		}
1508 	}
1509 #endif
1510 
1511 	/* Clear the missed-packet counter. */
1512 	ioread32(ioaddr + CSR8);
1513 
1514 	/* The station address ROM is read byte serially.  The register must
1515 	   be polled, waiting for the value to be read bit serially from the
1516 	   EEPROM.
1517 	   */
1518 	ee_data = tp->eeprom;
1519 	memset(ee_data, 0, sizeof(tp->eeprom));
1520 	sum = 0;
1521 	if (chip_idx == LC82C168) {
1522 		for (i = 0; i < 3; i++) {
1523 			int value, boguscnt = 100000;
1524 			iowrite32(0x600 | i, ioaddr + 0x98);
1525 			do {
1526 				value = ioread32(ioaddr + CSR9);
1527 			} while (value < 0  && --boguscnt > 0);
1528 			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1529 			sum += value & 0xffff;
1530 		}
1531 	} else if (chip_idx == COMET) {
1532 		/* No need to read the EEPROM. */
1533 		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1534 		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1535 		for (i = 0; i < 6; i ++)
1536 			sum += dev->dev_addr[i];
1537 	} else {
1538 		/* A serial EEPROM interface; we read it now and sort it out later. */
1539 		int sa_offset = 0;
1540 		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1541 		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1542 
1543 		if (ee_max_addr > sizeof(tp->eeprom))
1544 			ee_max_addr = sizeof(tp->eeprom);
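		/* Editorial note: the probe above distinguishes serial EEPROMs that
		   take 6 address bits from larger parts that take 8; the byte count
		   is capped to sizeof(tp->eeprom) before the word-by-word read
		   below. */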
1545 
1546 		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1547 			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1548 			ee_data[i] = data & 0xff;
1549 			ee_data[i + 1] = data >> 8;
1550 		}
1551 
1552 		/* DEC now has a specification (see Notes) but early board makers
1553 		   just put the address in the first EEPROM locations. */
1554 		/* This does  memcmp(ee_data, ee_data+16, 8) */
1555 		for (i = 0; i < 8; i ++)
1556 			if (ee_data[i] != ee_data[16+i])
1557 				sa_offset = 20;
1558 		if (chip_idx == CONEXANT) {
1559 			/* Check that the tuple type and length is correct. */
1560 			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
1561 				sa_offset = 0x19A;
1562 		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
1563 				   ee_data[2] == 0) {
1564 			sa_offset = 2;		/* Grrr, damn Matrox boards. */
1565 			multiport_cnt = 4;
1566 		}
1567 #ifdef CONFIG_MIPS_COBALT
1568                if ((pdev->bus->number == 0) &&
1569                    ((PCI_SLOT(pdev->devfn) == 7) ||
1570                     (PCI_SLOT(pdev->devfn) == 12))) {
1571                        /* Cobalt MAC address in first EEPROM locations. */
1572                        sa_offset = 0;
1573 		       /* Ensure our media table fixup gets applied */
1574 		       memcpy(ee_data + 16, ee_data, 8);
1575                }
1576 #endif
1577 #ifdef CONFIG_GSC
1578 		/* Check to see if we have a broken srom */
1579 		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1580 			/* pci_vendor_id and subsystem_id are swapped */
1581 			ee_data[0] = ee_data[2];
1582 			ee_data[1] = ee_data[3];
1583 			ee_data[2] = 0x61;
1584 			ee_data[3] = 0x10;
1585 
1586 			 * HSC-PCI boards need to be byte-swapped and shifted
1587 			 * up 1 word.  This shift needs to happen at the end
1588 			 * of the MAC first because of the 2 byte overlap.
1589 			 */
1590 			for (i = 4; i >= 0; i -= 2) {
1591 				ee_data[17 + i + 3] = ee_data[17 + i];
1592 				ee_data[16 + i + 5] = ee_data[16 + i];
1593 			}
1594 		}
1595 #endif
1596 
1597 		for (i = 0; i < 6; i ++) {
1598 			dev->dev_addr[i] = ee_data[i + sa_offset];
1599 			sum += ee_data[i + sa_offset];
1600 		}
1601 	}
1602 	/* Lite-On boards have the address byte-swapped. */
1603 	if ((dev->dev_addr[0] == 0xA0 ||
1604 	     dev->dev_addr[0] == 0xC0 ||
1605 	     dev->dev_addr[0] == 0x02) &&
1606 	    dev->dev_addr[1] == 0x00)
1607 		for (i = 0; i < 6; i+=2) {
1608 			char tmp = dev->dev_addr[i];
1609 			dev->dev_addr[i] = dev->dev_addr[i+1];
1610 			dev->dev_addr[i+1] = tmp;
1611 		}
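	/* Example of the heuristic above: bytes read back as
	   A0 00 CC 12 34 56 are assumed to be the word-swapped form of a
	   Lite-On style 00:A0:... address, and the pairwise swap restores
	   them to 00 A0 12 CC 56 34. */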
1612 	/* On the Zynx 315 Etherarray and other multiport boards only the
1613 	   first Tulip has an EEPROM; the addresses of the subsequent ports
1614 	   are derived from the first.
1615 	   On Sparc systems the MAC address is held in the OBP property
1616 	   "local-mac-address".
1617 	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1618 	   that here as well. */
1619 	if (sum == 0  || sum == 6*0xff) {
1620 #if defined(CONFIG_SPARC)
1621 		struct device_node *dp = pci_device_to_OF_node(pdev);
1622 		const unsigned char *addr;
1623 		int len;
1624 #endif
1625 		eeprom_missing = 1;
1626 		for (i = 0; i < 5; i++)
1627 			dev->dev_addr[i] = last_phys_addr[i];
1628 		dev->dev_addr[i] = last_phys_addr[i] + 1;
1629 #if defined(CONFIG_SPARC)
1630 		addr = of_get_property(dp, "local-mac-address", &len);
1631 		if (addr && len == ETH_ALEN)
1632 			memcpy(dev->dev_addr, addr, ETH_ALEN);
1633 #endif
1634 #if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1635 		if (last_irq)
1636 			irq = last_irq;
1637 #endif
1638 	}
1639 
1640 	for (i = 0; i < 6; i++)
1641 		last_phys_addr[i] = dev->dev_addr[i];
1642 	last_irq = irq;
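	/* last_phys_addr/last_irq are remembered so that a subsequent port
	   with a blank EEPROM (the sum == 0 / sum == 6*0xff case above) can
	   inherit this port's address with the last byte incremented, e.g.
	   ...:34:56:10 on port 0 yields ...:34:56:11 on port 1. */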
1643 
1644 	/* The lower four bits are the media type. */
1645 	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
1646 		if (options[board_idx] & MEDIA_MASK)
1647 			tp->default_port = options[board_idx] & MEDIA_MASK;
1648 		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1649 			tp->full_duplex = 1;
1650 		if (mtu[board_idx] > 0)
1651 			dev->mtu = mtu[board_idx];
1652 	}
1653 	if (dev->mem_start & MEDIA_MASK)
1654 		tp->default_port = dev->mem_start & MEDIA_MASK;
1655 	if (tp->default_port) {
1656 		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1657 			board_idx, medianame[tp->default_port & MEDIA_MASK]);
1658 		tp->medialock = 1;
1659 		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1660 			tp->full_duplex = 1;
1661 	}
1662 	if (tp->full_duplex)
1663 		tp->full_duplex_lock = 1;
1664 
1665 	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1666 		static const u16 media2advert[] = {
1667 			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1668 		};
1669 		tp->mii_advertise = media2advert[tp->default_port - 9];
1670 		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1671 	}
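	/* media2advert[] above maps the MII entries of medianame[] (indices
	   9..15) onto MII advertisement register bits, e.g. 0x20/0x40 are
	   10baseT half/full duplex, 0x80/0x100 are 100baseTx half/full,
	   0x200 is 100baseT4, and 0x03e0 advertises all of them (plain
	   "MII"). */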
1672 
1673 	if (tp->flags & HAS_MEDIA_TABLE) {
1674 		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
1675 		tulip_parse_eeprom(dev);
1676 		strcpy(dev->name, "eth%d");			/* un-hack */
1677 	}
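	/* The temporary name swap above presumably exists because
	   tulip_parse_eeprom() logs through dev->name, which still holds the
	   "eth%d" template until register_netdev() picks the real name. */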
1678 
1679 	if ((tp->flags & ALWAYS_CHECK_MII) ||
1680 		(tp->mtable  &&  tp->mtable->has_mii) ||
1681 		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
1682 		if (tp->mtable  &&  tp->mtable->has_mii) {
1683 			for (i = 0; i < tp->mtable->leafcount; i++)
1684 				if (tp->mtable->mleaf[i].media == 11) {
1685 					tp->cur_index = i;
1686 					tp->saved_if_port = dev->if_port;
1687 					tulip_select_media(dev, 2);
1688 					dev->if_port = tp->saved_if_port;
1689 					break;
1690 				}
1691 		}
1692 
1693 		/* Find the connected MII xcvrs.
1694 		   Doing this in open() would allow detecting external xcvrs
1695 		   later, but takes much time. */
1696 		tulip_find_mii (dev, board_idx);
1697 	}
1698 
1699 	/* The Tulip-specific entries in the device structure. */
1700 	dev->netdev_ops = &tulip_netdev_ops;
1701 	dev->watchdog_timeo = TX_TIMEOUT;
1702 #ifdef CONFIG_TULIP_NAPI
1703 	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1704 #endif
1705 	dev->ethtool_ops = &ops;
1706 
1707 	if (register_netdev(dev))
1708 		goto err_out_free_ring;
1709 
1710 	pci_set_drvdata(pdev, dev);
1711 
1712 	dev_info(&dev->dev,
1713 #ifdef CONFIG_TULIP_MMIO
1714 		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1715 #else
1716 		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1717 #endif
1718 		 chip_name, pdev->revision,
1719 		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1720 		 eeprom_missing ? " EEPROM not present," : "",
1721 		 dev->dev_addr, irq);
1722 
1723 	if (tp->chip_id == PNIC2)
1724 		tp->link_change = pnic2_lnk_change;
1725 	else if (tp->flags & HAS_NWAY)
1726 		tp->link_change = t21142_lnk_change;
1727 	else if (tp->flags & HAS_PNICNWAY)
1728 		tp->link_change = pnic_lnk_change;
1729 
1730 	/* Reset the xcvr interface and turn on heartbeat. */
1731 	switch (chip_idx) {
1732 	case DC21140:
1733 	case DM910X:
1734 	default:
1735 		if (tp->mtable)
1736 			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1737 		break;
1738 	case DC21142:
1739 		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
1740 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1741 			iowrite32(0x0000, ioaddr + CSR13);
1742 			iowrite32(0x0000, ioaddr + CSR14);
1743 			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1744 		} else
1745 			t21142_start_nway(dev);
1746 		break;
1747 	case PNIC2:
1748 		/* just do a reset for sanity's sake */
1749 		iowrite32(0x0000, ioaddr + CSR13);
1750 		iowrite32(0x0000, ioaddr + CSR14);
1751 		break;
1752 	case LC82C168:
1753 		if ( ! tp->mii_cnt) {
1754 			tp->nway = 1;
1755 			tp->nwayset = 0;
1756 			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1757 			iowrite32(0x30, ioaddr + CSR12);
1758 			iowrite32(0x0001F078, ioaddr + CSR6);
1759 			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1760 		}
1761 		break;
1762 	case MX98713:
1763 	case COMPEX9881:
1764 		iowrite32(0x00000000, ioaddr + CSR6);
1765 		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1766 		iowrite32(0x00000001, ioaddr + CSR13);
1767 		break;
1768 	case MX98715:
1769 	case MX98725:
1770 		iowrite32(0x01a80000, ioaddr + CSR6);
1771 		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1772 		iowrite32(0x00001000, ioaddr + CSR12);
1773 		break;
1774 	case COMET:
1775 		/* No initialization necessary. */
1776 		break;
1777 	}
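	/* The CSR13/CSR14 writes above target what the 2114x documentation
	   calls the SIA connectivity and TX/RX registers; writing zero
	   appears to hold the SIA in reset until the media setup (or NWay
	   autonegotiation) reprograms it when the interface is opened. */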
1778 
1779 	/* put the chip in snooze mode until opened */
1780 	tulip_set_power_state (tp, 0, 1);
1781 
1782 	return 0;
1783 
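/* Error unwind: each label below releases what the corresponding earlier
 * allocation step set up, in reverse order of acquisition. */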
1784 err_out_free_ring:
1785 	pci_free_consistent (pdev,
1786 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1787 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1788 			     tp->rx_ring, tp->rx_ring_dma);
1789 
1790 err_out_mtable:
1791 	kfree (tp->mtable);
1792 	pci_iounmap(pdev, ioaddr);
1793 
1794 err_out_free_res:
1795 	pci_release_regions (pdev);
1796 
1797 err_out_free_netdev:
1798 	free_netdev (dev);
1799 	return -ENODEV;
1800 }
1801 
1802 
1803 /* set the registers according to the given wolopts */
1804 static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1805 {
1806 	struct net_device *dev = pci_get_drvdata(pdev);
1807 	struct tulip_private *tp = netdev_priv(dev);
1808 	void __iomem *ioaddr = tp->base_addr;
1809 
1810 	if (tp->flags & COMET_PM) {
1811 
1812 		unsigned int tmp;
1813 
1814 		tmp = ioread32(ioaddr + CSR18);
1815 		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1816 		tmp |= comet_csr18_pm_mode;
1817 		iowrite32(tmp, ioaddr + CSR18);
1818 
1819 		/* Set the Wake-up Control/Status Register to the given WOL options */
1820 		tmp = ioread32(ioaddr + CSR13);
1821 		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1822 		if (wolopts & WAKE_MAGIC)
1823 			tmp |= comet_csr13_mpre;
1824 		if (wolopts & WAKE_PHY)
1825 			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1826 		/* Clear the event flags */
1827 		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1828 		iowrite32(tmp, ioaddr + CSR13);
1829 	}
1830 }
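/*
 * Usage sketch (the ethtool hook itself sits earlier in this file and is
 * not shown here): a request such as "ethtool -s ethX wol g" presumably
 * ends up setting WAKE_MAGIC in tp->wolinfo.wolopts, which tulip_suspend()
 * below passes to tulip_set_wolopts(); tulip_resume() later calls it with
 * 0 to disarm all wake-up events.
 */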
1831 
1832 #ifdef CONFIG_PM
1833 
1834 
1835 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1836 {
1837 	pci_power_t pstate;
1838 	struct net_device *dev = pci_get_drvdata(pdev);
1839 	struct tulip_private *tp = netdev_priv(dev);
1840 
1841 	if (!dev)
1842 		return -EINVAL;
1843 
1844 	if (!netif_running(dev))
1845 		goto save_state;
1846 
1847 	tulip_down(dev);
1848 
1849 	netif_device_detach(dev);
1850 	/* FIXME: freeing the IRQ here needlessly adds an error path, since
1851 	   resume must re-request it and can fail. */
1851 	free_irq(tp->pdev->irq, dev);
1852 
1853 save_state:
1854 	pci_save_state(pdev);
1855 	pci_disable_device(pdev);
1856 	pstate = pci_choose_state(pdev, state);
1857 	if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1858 		int rc;
1859 
1860 		tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1861 		rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1862 		if (rc)
1863 			pr_err("pci_enable_wake failed (%d)\n", rc);
1864 	}
1865 	pci_set_power_state(pdev, pstate);
1866 
1867 	return 0;
1868 }
1869 
1870 
1871 static int tulip_resume(struct pci_dev *pdev)
1872 {
1873 	struct net_device *dev = pci_get_drvdata(pdev);
1874 	struct tulip_private *tp = netdev_priv(dev);
1875 	void __iomem *ioaddr = tp->base_addr;
1876 	int retval;
1877 	unsigned int tmp;
1878 
1879 	if (!dev)
1880 		return -EINVAL;
1881 
1882 	pci_set_power_state(pdev, PCI_D0);
1883 	pci_restore_state(pdev);
1884 
1885 	if (!netif_running(dev))
1886 		return 0;
1887 
1888 	if ((retval = pci_enable_device(pdev))) {
1889 		pr_err("pci_enable_device failed in resume\n");
1890 		return retval;
1891 	}
1892 
1893 	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1894 			     dev->name, dev);
1895 	if (retval) {
1896 		pr_err("request_irq failed in resume\n");
1897 		return retval;
1898 	}
1899 
1900 	if (tp->flags & COMET_PM) {
1901 		pci_enable_wake(pdev, PCI_D3hot, 0);
1902 		pci_enable_wake(pdev, PCI_D3cold, 0);
1903 
1904 		/* Clear the PMES flag */
1905 		tmp = ioread32(ioaddr + CSR20);
1906 		tmp |= comet_csr20_pmes;
1907 		iowrite32(tmp, ioaddr + CSR20);
1908 
1909 		/* Disable all wake-up events */
1910 		tulip_set_wolopts(pdev, 0);
1911 	}
1912 	netif_device_attach(dev);
1913 
1914 	if (netif_running(dev))
1915 		tulip_up(dev);
1916 
1917 	return 0;
1918 }
1919 
1920 #endif /* CONFIG_PM */
1921 
1922 
1923 static void tulip_remove_one(struct pci_dev *pdev)
1924 {
1925 	struct net_device *dev = pci_get_drvdata (pdev);
1926 	struct tulip_private *tp;
1927 
1928 	if (!dev)
1929 		return;
1930 
1931 	tp = netdev_priv(dev);
1932 	unregister_netdev(dev);
1933 	pci_free_consistent (pdev,
1934 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1935 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1936 			     tp->rx_ring, tp->rx_ring_dma);
1937 	kfree (tp->mtable);
1938 	pci_iounmap(pdev, tp->base_addr);
1939 	free_netdev (dev);
1940 	pci_release_regions (pdev);
1941 	pci_disable_device(pdev);
1942 
1943 	/* pci_power_off (pdev, -1); */
1944 }
1945 
1946 #ifdef CONFIG_NET_POLL_CONTROLLER
1947 /*
1948  * Polling 'interrupt' - used by things like netconsole to send skbs
1949  * without having to re-enable interrupts. It's not called while
1950  * the interrupt routine is executing.
1951  */
1952 
1953 static void poll_tulip (struct net_device *dev)
1954 {
1955 	struct tulip_private *tp = netdev_priv(dev);
1956 	const int irq = tp->pdev->irq;
1957 
1958 	/* disable_irq here is not very nice, but with the lockless
1959 	   interrupt handler we have no other choice. */
1960 	disable_irq(irq);
1961 	tulip_interrupt (irq, dev);
1962 	enable_irq(irq);
1963 }
1964 #endif
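/*
 * poll_tulip() is presumably wired up as the .ndo_poll_controller member of
 * tulip_netdev_ops earlier in this file; netpoll clients such as netconsole
 * call it to drain the rings when normal interrupts cannot be taken.
 */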
1965 
1966 static struct pci_driver tulip_driver = {
1967 	.name		= DRV_NAME,
1968 	.id_table	= tulip_pci_tbl,
1969 	.probe		= tulip_init_one,
1970 	.remove		= tulip_remove_one,
1971 #ifdef CONFIG_PM
1972 	.suspend	= tulip_suspend,
1973 	.resume		= tulip_resume,
1974 #endif /* CONFIG_PM */
1975 };
1976 
1977 
1978 static int __init tulip_init (void)
1979 {
1980 #ifdef MODULE
1981 	pr_info("%s", version);
1982 #endif
1983 
1984 	if (!csr0) {
1985 		pr_warn("unknown CPU architecture, using default csr0\n");
1986 		/* default to 8 longword cache line alignment */
1987 		csr0 = 0x00A00000 | 0x4800;
1988 	}
1989 
1990 	/* copy module parms into globals */
1991 	tulip_rx_copybreak = rx_copybreak;
1992 	tulip_max_interrupt_work = max_interrupt_work;
1993 
1994 	/* probe for and init boards */
1995 	return pci_register_driver(&tulip_driver);
1996 }
1997 
1998 
1999 static void __exit tulip_cleanup (void)
2000 {
2001 	pci_unregister_driver (&tulip_driver);
2002 }
2003 
2004 
2005 module_init(tulip_init);
2006 module_exit(tulip_cleanup);
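/*
 * Loading example (assuming the module_param declarations earlier in this
 * file for options[], full_duplex[] and friends):
 *
 *	modprobe tulip options=5 full_duplex=1
 *
 * would force the first board to 100baseTx-FDX per the medianame[] table
 * and the options[]/full_duplex[] handling in tulip_init_one() above.
 */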
2007