1 /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 Copyright 2000,2001 The Linux Kernel Team
4 Written/copyright 1994-2001 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11
12 #define pr_fmt(fmt) "tulip: " fmt
13
14 #define DRV_NAME "tulip"
15 #ifdef CONFIG_TULIP_NAPI
16 #define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
17 #else
18 #define DRV_VERSION "1.1.15"
19 #endif
20 #define DRV_RELDATE "Feb 27, 2007"
21
22
23 #include <linux/module.h>
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 #include "tulip.h"
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/etherdevice.h>
30 #include <linux/delay.h>
31 #include <linux/mii.h>
32 #include <linux/crc32.h>
33 #include <asm/unaligned.h>
34 #include <asm/uaccess.h>
35
36 #ifdef CONFIG_SPARC
37 #include <asm/prom.h>
38 #endif
39
40 static char version[] =
41 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42
43 /* A few user-configurable values. */
44
45 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
46 static unsigned int max_interrupt_work = 25;
47
48 #define MAX_UNITS 8
49 /* Used to pass the full-duplex flag, etc. */
50 static int full_duplex[MAX_UNITS];
51 static int options[MAX_UNITS];
52 static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
53
54 /* The possible media types that can be set in options[] are: */
55 const char * const medianame[32] = {
56 "10baseT", "10base2", "AUI", "100baseTx",
57 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 "","","","", "","","","", "","","","Transceiver reset",
62 };
63
64 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 defined(CONFIG_SPARC) || defined(__ia64__) || \
67 defined(__sh__) || defined(__mips__)
68 static int rx_copybreak = 1518;
69 #else
70 static int rx_copybreak = 100;
71 #endif
72
73 /*
74 Set the bus performance register.
75 Typical: Set 16 longword cache alignment, no burst limit.
76 Cache alignment bits 15:14          Burst length 13:8
77   0000 No alignment      0x00000000 unlimited      0800  8 longwords
78   4000  8 longwords      0100  1 longword          1000 16 longwords
79   8000 16 longwords      0200  2 longwords         2000 32 longwords
80   C000 32 longwords      0400  4 longwords
81 Warning: many older 486 systems are broken and require setting 0x00A04800
82 8 longword cache alignment, 8 longword burst.
83 ToDo: Non-Intel setting could be better.
84 */
85
86 #if defined(__alpha__) || defined(__ia64__)
87 static int csr0 = 0x01A00000 | 0xE000;
88 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89 static int csr0 = 0x01A00000 | 0x8000;
90 #elif defined(CONFIG_SPARC) || defined(__hppa__)
91 /* The UltraSparc PCI controllers will disconnect at every 64-byte
92 * crossing anyways so it makes no sense to tell Tulip to burst
93 * any more than that.
94 */
95 static int csr0 = 0x01A00000 | 0x9000;
96 #elif defined(__arm__) || defined(__sh__)
97 static int csr0 = 0x01A00000 | 0x4800;
98 #elif defined(__mips__)
99 static int csr0 = 0x00200000 | 0x4000;
100 #else
101 #warning Processor architecture undefined!
102 static int csr0 = 0x00A00000 | 0x4800;
103 #endif
104
105 /* Operational parameters that usually are not changed. */
106 /* Time in jiffies before concluding the transmitter is hung. */
107 #define TX_TIMEOUT (4*HZ)
108
109
110 MODULE_AUTHOR("The Linux Kernel Team");
111 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
112 MODULE_LICENSE("GPL");
113 MODULE_VERSION(DRV_VERSION);
114 module_param(tulip_debug, int, 0);
115 module_param(max_interrupt_work, int, 0);
116 module_param(rx_copybreak, int, 0);
117 module_param(csr0, int, 0);
118 module_param_array(options, int, NULL, 0);
119 module_param_array(full_duplex, int, NULL, 0);
120
121 #ifdef TULIP_DEBUG
122 int tulip_debug = TULIP_DEBUG;
123 #else
124 int tulip_debug = 1;
125 #endif
126
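/* Periodic media timer: runs in timer context, so it only schedules the chip-specific media_work item, which does the real media check in process context. */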
127 static void tulip_timer(unsigned long data)
128 {
129 struct net_device *dev = (struct net_device *)data;
130 struct tulip_private *tp = netdev_priv(dev);
131
132 if (netif_running(dev))
133 schedule_work(&tp->media_work);
134 }
135
136 /*
137 * This table is used during operation for capabilities and the media timer.
138 *
139 * It is indexed via the values in 'enum chips'
140 */
141
142 struct tulip_chip_table tulip_tbl[] = {
143 { }, /* placeholder for array, slot unused currently */
144 { }, /* placeholder for array, slot unused currently */
145
146 /* DC21140 */
147 { "Digital DS21140 Tulip", 128, 0x0001ebef,
148 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
149 tulip_media_task },
150
151 /* DC21142, DC21143 */
152 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
153 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
154 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
155
156 /* LC82C168 */
157 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
158 HAS_MII | HAS_PNICNWAY, pnic_timer, },
159
160 /* MX98713 */
161 { "Macronix 98713 PMAC", 128, 0x0001ebef,
162 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
163
164 /* MX98715 */
165 { "Macronix 98715 PMAC", 256, 0x0001ebef,
166 HAS_MEDIA_TABLE, mxic_timer, },
167
168 /* MX98725 */
169 { "Macronix 98725 PMAC", 256, 0x0001ebef,
170 HAS_MEDIA_TABLE, mxic_timer, },
171
172 /* AX88140 */
173 { "ASIX AX88140", 128, 0x0001fbff,
174 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
175 | IS_ASIX, tulip_timer, tulip_media_task },
176
177 /* PNIC2 */
178 { "Lite-On PNIC-II", 256, 0x0801fbff,
179 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
180
181 /* COMET */
182 { "ADMtek Comet", 256, 0x0001abef,
183 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
184
185 /* COMPEX9881 */
186 { "Compex 9881 PMAC", 128, 0x0001ebef,
187 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
188
189 /* I21145 */
190 { "Intel DS21145 Tulip", 128, 0x0801fbff,
191 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
192 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
193
194 /* DM910X */
195 #ifdef CONFIG_TULIP_DM910X
196 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
197 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
198 tulip_timer, tulip_media_task },
199 #else
200 { NULL },
201 #endif
202
203 /* RS7112 */
204 { "Conexant LANfinity", 256, 0x0001ebef,
205 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
206
207 };
208
209
210 static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
211 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
212 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
213 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
214 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
215 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
216 /* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
217 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
218 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
219 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
230 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
231 #ifdef CONFIG_TULIP_DM910X
232 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
234 #endif
235 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
236 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
237 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
242 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
246 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
247 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
248 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
249 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
250 { } /* terminate list */
251 };
252 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
253
254
255 /* A full-duplex map for media types. */
256 const char tulip_media_cap[32] =
257 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
258
259 static void tulip_tx_timeout(struct net_device *dev);
260 static void tulip_init_ring(struct net_device *dev);
261 static void tulip_free_ring(struct net_device *dev);
262 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
263 struct net_device *dev);
264 static int tulip_open(struct net_device *dev);
265 static int tulip_close(struct net_device *dev);
266 static void tulip_up(struct net_device *dev);
267 static void tulip_down(struct net_device *dev);
268 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
269 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
270 static void set_rx_mode(struct net_device *dev);
271 static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
272 #ifdef CONFIG_NET_POLL_CONTROLLER
273 static void poll_tulip(struct net_device *dev);
274 #endif
275
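/* Move the chip between full-power, snooze, and sleep states by writing the CFDD configuration-space register; a no-op on chips without HAS_ACPI. */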
276 static void tulip_set_power_state (struct tulip_private *tp,
277 int sleep, int snooze)
278 {
279 if (tp->flags & HAS_ACPI) {
280 u32 tmp, newtmp;
281 pci_read_config_dword (tp->pdev, CFDD, &tmp);
282 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
283 if (sleep)
284 newtmp |= CFDD_Sleep;
285 else if (snooze)
286 newtmp |= CFDD_Snooze;
287 if (tmp != newtmp)
288 pci_write_config_dword (tp->pdev, CFDD, newtmp);
289 }
290
291 }
292
293
294 static void tulip_up(struct net_device *dev)
295 {
296 struct tulip_private *tp = netdev_priv(dev);
297 void __iomem *ioaddr = tp->base_addr;
298 int next_tick = 3*HZ;
299 u32 reg;
300 int i;
301
302 #ifdef CONFIG_TULIP_NAPI
303 napi_enable(&tp->napi);
304 #endif
305
306 /* Wake the chip from sleep/snooze mode. */
307 tulip_set_power_state (tp, 0, 0);
308
309 /* Disable all WOL events */
310 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
311 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
312 tulip_set_wolopts(tp->pdev, 0);
313
314 /* On some chip revs we must set the MII/SYM port before the reset!? */
315 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
316 iowrite32(0x00040000, ioaddr + CSR6);
317
318 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
319 iowrite32(0x00000001, ioaddr + CSR0);
320 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
321 udelay(100);
322
323 /* Deassert reset.
324 Wait the specified 50 PCI cycles after a reset by initializing
325 Tx and Rx queues and the address filter list. */
326 iowrite32(tp->csr0, ioaddr + CSR0);
327 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
328 udelay(100);
329
330 if (tulip_debug > 1)
331 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
332
333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
335 tp->cur_rx = tp->cur_tx = 0;
336 tp->dirty_rx = tp->dirty_tx = 0;
337
338 if (tp->flags & MC_HASH_ONLY) {
339 u32 addr_low = get_unaligned_le32(dev->dev_addr);
340 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
341 if (tp->chip_id == AX88140) {
342 iowrite32(0, ioaddr + CSR13);
343 iowrite32(addr_low, ioaddr + CSR14);
344 iowrite32(1, ioaddr + CSR13);
345 iowrite32(addr_high, ioaddr + CSR14);
346 } else if (tp->flags & COMET_MAC_ADDR) {
347 iowrite32(addr_low, ioaddr + 0xA4);
348 iowrite32(addr_high, ioaddr + 0xA8);
349 iowrite32(0, ioaddr + CSR27);
350 iowrite32(0, ioaddr + CSR28);
351 }
352 } else {
353 /* This is set_rx_mode(), but without starting the transmitter. */
354 u16 *eaddrs = (u16 *)dev->dev_addr;
355 u16 *setup_frm = &tp->setup_frame[15*6];
356 dma_addr_t mapping;
357
358 /* 21140 bug: you must add the broadcast address. */
359 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
360 /* Fill the final entry of the table with our physical address. */
361 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
362 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
363 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
364
365 mapping = pci_map_single(tp->pdev, tp->setup_frame,
366 sizeof(tp->setup_frame),
367 PCI_DMA_TODEVICE);
368 tp->tx_buffers[tp->cur_tx].skb = NULL;
369 tp->tx_buffers[tp->cur_tx].mapping = mapping;
370
371 /* Put the setup frame on the Tx list. */
372 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
373 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
374 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
375
376 tp->cur_tx++;
377 }
378
379 tp->saved_if_port = dev->if_port;
380 if (dev->if_port == 0)
381 dev->if_port = tp->default_port;
382
383 /* Allow selecting a default media. */
384 i = 0;
385 if (tp->mtable == NULL)
386 goto media_picked;
387 if (dev->if_port) {
388 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
389 (dev->if_port == 12 ? 0 : dev->if_port);
390 for (i = 0; i < tp->mtable->leafcount; i++)
391 if (tp->mtable->mleaf[i].media == looking_for) {
392 dev_info(&dev->dev,
393 "Using user-specified media %s\n",
394 medianame[dev->if_port]);
395 goto media_picked;
396 }
397 }
398 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
399 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
400 for (i = 0; i < tp->mtable->leafcount; i++)
401 if (tp->mtable->mleaf[i].media == looking_for) {
402 dev_info(&dev->dev,
403 "Using EEPROM-set media %s\n",
404 medianame[looking_for]);
405 goto media_picked;
406 }
407 }
408 /* Start sensing first non-full-duplex media. */
409 for (i = tp->mtable->leafcount - 1;
410 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
411 ;
412 media_picked:
413
414 tp->csr6 = 0;
415 tp->cur_index = i;
416 tp->nwayset = 0;
417
418 if (dev->if_port) {
419 if (tp->chip_id == DC21143 &&
420 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
421 /* We must reset the media CSRs when we force-select MII mode. */
422 iowrite32(0x0000, ioaddr + CSR13);
423 iowrite32(0x0000, ioaddr + CSR14);
424 iowrite32(0x0008, ioaddr + CSR15);
425 }
426 tulip_select_media(dev, 1);
427 } else if (tp->chip_id == DC21142) {
428 if (tp->mii_cnt) {
429 tulip_select_media(dev, 1);
430 if (tulip_debug > 1)
431 dev_info(&dev->dev,
432 "Using MII transceiver %d, status %04x\n",
433 tp->phys[0],
434 tulip_mdio_read(dev, tp->phys[0], 1));
435 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
436 tp->csr6 = csr6_mask_hdcap;
437 dev->if_port = 11;
438 iowrite32(0x0000, ioaddr + CSR13);
439 iowrite32(0x0000, ioaddr + CSR14);
440 } else
441 t21142_start_nway(dev);
442 } else if (tp->chip_id == PNIC2) {
443 /* for initial startup advertise 10/100 Full and Half */
444 tp->sym_advertise = 0x01E0;
445 /* enable autonegotiate end interrupt */
446 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
447 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
448 pnic2_start_nway(dev);
449 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
450 if (tp->mii_cnt) {
451 dev->if_port = 11;
452 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
453 iowrite32(0x0001, ioaddr + CSR15);
454 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
455 pnic_do_nway(dev);
456 else {
457 /* Start with 10mbps to do autonegotiation. */
458 iowrite32(0x32, ioaddr + CSR12);
459 tp->csr6 = 0x00420000;
460 iowrite32(0x0001B078, ioaddr + 0xB8);
461 iowrite32(0x0201B078, ioaddr + 0xB8);
462 next_tick = 1*HZ;
463 }
464 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
465 ! tp->medialock) {
466 dev->if_port = 0;
467 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
468 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
469 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
470 /* Provided by BOLO, Macronix - 12/10/1998. */
471 dev->if_port = 0;
472 tp->csr6 = 0x01a80200;
473 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
474 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
475 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
476 /* Enable automatic Tx underrun recovery. */
477 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
478 dev->if_port = tp->mii_cnt ? 11 : 0;
479 tp->csr6 = 0x00040000;
480 } else if (tp->chip_id == AX88140) {
481 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
482 } else
483 tulip_select_media(dev, 1);
484
485 /* Start the chip's Tx to process setup frame. */
486 tulip_stop_rxtx(tp);
487 barrier();
488 udelay(5);
489 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
490
491 /* Enable interrupts by setting the interrupt mask. */
492 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
493 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
494 tulip_start_rxtx(tp);
495 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
496
497 if (tulip_debug > 2) {
498 netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
499 ioread32(ioaddr + CSR0),
500 ioread32(ioaddr + CSR5),
501 ioread32(ioaddr + CSR6));
502 }
503
504 /* Set the timer to check for link beat and perhaps switch
505 to an alternate media type. */
506 tp->timer.expires = RUN_AT(next_tick);
507 add_timer(&tp->timer);
508 #ifdef CONFIG_TULIP_NAPI
509 init_timer(&tp->oom_timer);
510 tp->oom_timer.data = (unsigned long)dev;
511 tp->oom_timer.function = oom_timer;
512 #endif
513 }
514
515 static int
516 tulip_open(struct net_device *dev)
517 {
518 struct tulip_private *tp = netdev_priv(dev);
519 int retval;
520
521 tulip_init_ring (dev);
522
523 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
524 dev->name, dev);
525 if (retval)
526 goto free_ring;
527
528 tulip_up (dev);
529
530 netif_start_queue (dev);
531
532 return 0;
533
534 free_ring:
535 tulip_free_ring (dev);
536 return retval;
537 }
538
539
540 static void tulip_tx_timeout(struct net_device *dev)
541 {
542 struct tulip_private *tp = netdev_priv(dev);
543 void __iomem *ioaddr = tp->base_addr;
544 unsigned long flags;
545
546 spin_lock_irqsave (&tp->lock, flags);
547
548 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
549 /* Do nothing -- the media monitor should handle this. */
550 if (tulip_debug > 1)
551 dev_warn(&dev->dev,
552 "Transmit timeout using MII device\n");
553 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
554 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
555 tp->chip_id == DM910X) {
556 dev_warn(&dev->dev,
557 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
558 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
559 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
560 ioread32(ioaddr + CSR15));
561 tp->timeout_recovery = 1;
562 schedule_work(&tp->media_work);
563 goto out_unlock;
564 } else if (tp->chip_id == PNIC2) {
565 dev_warn(&dev->dev,
566 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
567 (int)ioread32(ioaddr + CSR5),
568 (int)ioread32(ioaddr + CSR6),
569 (int)ioread32(ioaddr + CSR7),
570 (int)ioread32(ioaddr + CSR12));
571 } else {
572 dev_warn(&dev->dev,
573 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
574 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
575 dev->if_port = 0;
576 }
577
578 #if defined(way_too_many_messages)
579 if (tulip_debug > 3) {
580 int i;
581 for (i = 0; i < RX_RING_SIZE; i++) {
582 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
583 int j;
584 printk(KERN_DEBUG
585 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
586 i,
587 (unsigned int)tp->rx_ring[i].status,
588 (unsigned int)tp->rx_ring[i].length,
589 (unsigned int)tp->rx_ring[i].buffer1,
590 (unsigned int)tp->rx_ring[i].buffer2,
591 buf[0], buf[1], buf[2]);
592 for (j = 0; buf[j] != 0xee && j < 1600; j++)
593 if (j < 100)
594 pr_cont(" %02x", buf[j]);
595 pr_cont(" j=%d\n", j);
596 }
597 printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
598 for (i = 0; i < RX_RING_SIZE; i++)
599 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
600 printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring);
601 for (i = 0; i < TX_RING_SIZE; i++)
602 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
603 pr_cont("\n");
604 }
605 #endif
606
607 tulip_tx_timeout_complete(tp, ioaddr);
608
609 out_unlock:
610 spin_unlock_irqrestore (&tp->lock, flags);
611 dev->trans_start = jiffies; /* prevent tx timeout */
612 netif_wake_queue (dev);
613 }
614
615
616 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
617 static void tulip_init_ring(struct net_device *dev)
618 {
619 struct tulip_private *tp = netdev_priv(dev);
620 int i;
621
622 tp->susp_rx = 0;
623 tp->ttimer = 0;
624 tp->nir = 0;
625
626 for (i = 0; i < RX_RING_SIZE; i++) {
627 tp->rx_ring[i].status = 0x00000000;
628 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
629 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
630 tp->rx_buffers[i].skb = NULL;
631 tp->rx_buffers[i].mapping = 0;
632 }
633 /* Mark the last entry as wrapping the ring. */
634 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
635 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
636
637 for (i = 0; i < RX_RING_SIZE; i++) {
638 dma_addr_t mapping;
639
640 /* Note the receive buffer must be longword aligned.
641 netdev_alloc_skb() provides 16 byte alignment. But do *not*
642 use skb_reserve() to align the IP header! */
643 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
644 tp->rx_buffers[i].skb = skb;
645 if (skb == NULL)
646 break;
647 mapping = pci_map_single(tp->pdev, skb->data,
648 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
649 tp->rx_buffers[i].mapping = mapping;
650 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
651 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
652 }
653 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
654
655 /* The Tx buffer descriptor is filled in as needed, but we
656 do need to clear the ownership bit. */
657 for (i = 0; i < TX_RING_SIZE; i++) {
658 tp->tx_buffers[i].skb = NULL;
659 tp->tx_buffers[i].mapping = 0;
660 tp->tx_ring[i].status = 0x00000000;
661 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
662 }
663 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
664 }
665
666 static netdev_tx_t
667 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
668 {
669 struct tulip_private *tp = netdev_priv(dev);
670 int entry;
671 u32 flag;
672 dma_addr_t mapping;
673 unsigned long flags;
674
675 spin_lock_irqsave(&tp->lock, flags);
676
677 /* Calculate the next Tx descriptor entry. */
678 entry = tp->cur_tx % TX_RING_SIZE;
679
680 tp->tx_buffers[entry].skb = skb;
681 mapping = pci_map_single(tp->pdev, skb->data,
682 skb->len, PCI_DMA_TODEVICE);
683 tp->tx_buffers[entry].mapping = mapping;
684 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
685
686 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
687 flag = 0x60000000; /* No interrupt */
688 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
689 flag = 0xe0000000; /* Tx-done intr. */
690 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
691 flag = 0x60000000; /* No Tx-done intr. */
692 } else { /* Leave room for set_rx_mode() to fill entries. */
693 flag = 0xe0000000; /* Tx-done intr. */
694 netif_stop_queue(dev);
695 }
696 if (entry == TX_RING_SIZE-1)
697 flag = 0xe0000000 | DESC_RING_WRAP;
698
699 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
700 /* if we were using Transmit Automatic Polling, we would need a
701 * wmb() here. */
702 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
703 wmb();
704
705 tp->cur_tx++;
706
707 /* Trigger an immediate transmit demand. */
708 iowrite32(0, tp->base_addr + CSR1);
709
710 spin_unlock_irqrestore(&tp->lock, flags);
711
712 return NETDEV_TX_OK;
713 }
714
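/* Release every Tx buffer still outstanding between dirty_tx and cur_tx: frames the chip never transmitted are counted as errors, DMA mappings are unmapped, and skbs are freed. */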
715 static void tulip_clean_tx_ring(struct tulip_private *tp)
716 {
717 unsigned int dirty_tx;
718
719 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
720 dirty_tx++) {
721 int entry = dirty_tx % TX_RING_SIZE;
722 int status = le32_to_cpu(tp->tx_ring[entry].status);
723
724 if (status < 0) {
725 tp->dev->stats.tx_errors++; /* It wasn't Txed */
726 tp->tx_ring[entry].status = 0;
727 }
728
729 /* Check for Tx filter setup frames. */
730 if (tp->tx_buffers[entry].skb == NULL) {
731 /* test because dummy frames not mapped */
732 if (tp->tx_buffers[entry].mapping)
733 pci_unmap_single(tp->pdev,
734 tp->tx_buffers[entry].mapping,
735 sizeof(tp->setup_frame),
736 PCI_DMA_TODEVICE);
737 continue;
738 }
739
740 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
741 tp->tx_buffers[entry].skb->len,
742 PCI_DMA_TODEVICE);
743
744 /* Free the original skb. */
745 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
746 tp->tx_buffers[entry].skb = NULL;
747 tp->tx_buffers[entry].mapping = 0;
748 }
749 }
750
751 static void tulip_down (struct net_device *dev)
752 {
753 struct tulip_private *tp = netdev_priv(dev);
754 void __iomem *ioaddr = tp->base_addr;
755 unsigned long flags;
756
757 cancel_work_sync(&tp->media_work);
758
759 #ifdef CONFIG_TULIP_NAPI
760 napi_disable(&tp->napi);
761 #endif
762
763 del_timer_sync (&tp->timer);
764 #ifdef CONFIG_TULIP_NAPI
765 del_timer_sync (&tp->oom_timer);
766 #endif
767 spin_lock_irqsave (&tp->lock, flags);
768
769 /* Disable interrupts by clearing the interrupt mask. */
770 iowrite32 (0x00000000, ioaddr + CSR7);
771
772 /* Stop the Tx and Rx processes. */
773 tulip_stop_rxtx(tp);
774
775 /* prepare receive buffers */
776 tulip_refill_rx(dev);
777
778 /* release any unconsumed transmit buffers */
779 tulip_clean_tx_ring(tp);
780
781 if (ioread32(ioaddr + CSR6) != 0xffffffff)
782 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
783
784 spin_unlock_irqrestore (&tp->lock, flags);
785
786 init_timer(&tp->timer);
787 tp->timer.data = (unsigned long)dev;
788 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
789
790 dev->if_port = tp->saved_if_port;
791
792 /* Leave the driver in snooze, not sleep, mode. */
793 tulip_set_power_state (tp, 0, 1);
794 }
795
796 static void tulip_free_ring (struct net_device *dev)
797 {
798 struct tulip_private *tp = netdev_priv(dev);
799 int i;
800
801 /* Free all the skbuffs in the Rx queue. */
802 for (i = 0; i < RX_RING_SIZE; i++) {
803 struct sk_buff *skb = tp->rx_buffers[i].skb;
804 dma_addr_t mapping = tp->rx_buffers[i].mapping;
805
806 tp->rx_buffers[i].skb = NULL;
807 tp->rx_buffers[i].mapping = 0;
808
809 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
810 tp->rx_ring[i].length = 0;
811 /* An invalid address. */
812 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
813 if (skb) {
814 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
815 PCI_DMA_FROMDEVICE);
816 dev_kfree_skb (skb);
817 }
818 }
819
820 for (i = 0; i < TX_RING_SIZE; i++) {
821 struct sk_buff *skb = tp->tx_buffers[i].skb;
822
823 if (skb != NULL) {
824 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
825 skb->len, PCI_DMA_TODEVICE);
826 dev_kfree_skb (skb);
827 }
828 tp->tx_buffers[i].skb = NULL;
829 tp->tx_buffers[i].mapping = 0;
830 }
831 }
832
833 static int tulip_close (struct net_device *dev)
834 {
835 struct tulip_private *tp = netdev_priv(dev);
836 void __iomem *ioaddr = tp->base_addr;
837
838 netif_stop_queue (dev);
839
840 tulip_down (dev);
841
842 if (tulip_debug > 1)
843 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
844 ioread32 (ioaddr + CSR5));
845
846 free_irq (tp->pdev->irq, dev);
847
848 tulip_free_ring (dev);
849
850 return 0;
851 }
852
853 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
854 {
855 struct tulip_private *tp = netdev_priv(dev);
856 void __iomem *ioaddr = tp->base_addr;
857
858 if (netif_running(dev)) {
859 unsigned long flags;
860
861 spin_lock_irqsave (&tp->lock, flags);
862
863 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
864
865 spin_unlock_irqrestore(&tp->lock, flags);
866 }
867
868 return &dev->stats;
869 }
870
871
872 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
873 {
874 struct tulip_private *np = netdev_priv(dev);
875 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
876 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
877 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
878 }
879
880
881 static int tulip_ethtool_set_wol(struct net_device *dev,
882 struct ethtool_wolinfo *wolinfo)
883 {
884 struct tulip_private *tp = netdev_priv(dev);
885
886 if (wolinfo->wolopts & (~tp->wolinfo.supported))
887 return -EOPNOTSUPP;
888
889 tp->wolinfo.wolopts = wolinfo->wolopts;
890 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
891 return 0;
892 }
893
894 static void tulip_ethtool_get_wol(struct net_device *dev,
895 struct ethtool_wolinfo *wolinfo)
896 {
897 struct tulip_private *tp = netdev_priv(dev);
898
899 wolinfo->supported = tp->wolinfo.supported;
900 wolinfo->wolopts = tp->wolinfo.wolopts;
901 return;
902 }
903
904
905 static const struct ethtool_ops ops = {
906 .get_drvinfo = tulip_get_drvinfo,
907 .set_wol = tulip_ethtool_set_wol,
908 .get_wol = tulip_ethtool_get_wol,
909 };
910
911 /* Provide ioctl() calls to examine the MII xcvr state. */
912 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
913 {
914 struct tulip_private *tp = netdev_priv(dev);
915 void __iomem *ioaddr = tp->base_addr;
916 struct mii_ioctl_data *data = if_mii(rq);
917 const unsigned int phy_idx = 0;
918 int phy = tp->phys[phy_idx] & 0x1f;
919 unsigned int regnum = data->reg_num;
920
921 switch (cmd) {
922 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
923 if (tp->mii_cnt)
924 data->phy_id = phy;
925 else if (tp->flags & HAS_NWAY)
926 data->phy_id = 32;
927 else if (tp->chip_id == COMET)
928 data->phy_id = 1;
929 else
930 return -ENODEV;
931
932 case SIOCGMIIREG: /* Read MII PHY register. */
933 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
934 int csr12 = ioread32 (ioaddr + CSR12);
935 int csr14 = ioread32 (ioaddr + CSR14);
936 switch (regnum) {
937 case 0:
938 if (((csr14<<5) & 0x1000) ||
939 (dev->if_port == 5 && tp->nwayset))
940 data->val_out = 0x1000;
941 else
942 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
943 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
944 break;
945 case 1:
946 data->val_out =
947 0x1848 +
948 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
949 ((csr12&0x06) == 6 ? 0 : 4);
950 data->val_out |= 0x6048;
951 break;
952 case 4:
953 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
954 data->val_out =
955 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
956 ((csr14 >> 1) & 0x20) + 1;
957 data->val_out |= ((csr14 >> 9) & 0x03C0);
958 break;
959 case 5: data->val_out = tp->lpar; break;
960 default: data->val_out = 0; break;
961 }
962 } else {
963 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
964 }
965 return 0;
966
967 case SIOCSMIIREG: /* Write MII PHY register. */
968 if (regnum & ~0x1f)
969 return -EINVAL;
970 if (data->phy_id == phy) {
971 u16 value = data->val_in;
972 switch (regnum) {
973 case 0: /* Check for autonegotiation on or reset. */
974 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
975 if (tp->full_duplex_lock)
976 tp->full_duplex = (value & 0x0100) ? 1 : 0;
977 break;
978 case 4:
979 tp->advertising[phy_idx] =
980 tp->mii_advertise = data->val_in;
981 break;
982 }
983 }
984 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
985 u16 value = data->val_in;
986 if (regnum == 0) {
987 if ((value & 0x1200) == 0x1200) {
988 if (tp->chip_id == PNIC2) {
989 pnic2_start_nway (dev);
990 } else {
991 t21142_start_nway (dev);
992 }
993 }
994 } else if (regnum == 4)
995 tp->sym_advertise = value;
996 } else {
997 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
998 }
999 return 0;
1000 default:
1001 return -EOPNOTSUPP;
1002 }
1003
1004 return -EOPNOTSUPP;
1005 }
1006
1007
1008 /* Set or clear the multicast filter for this adaptor.
1009 Note that we only use exclusion around actually queueing the
1010 new frame, not around filling tp->setup_frame. This is non-deterministic
1011 when re-entered but still correct. */
1012
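/* Build the hash-filter form of the setup frame: a 512-bit multicast hash table (bit 255 covers broadcast) with each 16-bit word stored twice, followed by our station address in the dedicated physical-address entry. */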
1013 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1014 {
1015 struct tulip_private *tp = netdev_priv(dev);
1016 u16 hash_table[32];
1017 struct netdev_hw_addr *ha;
1018 int i;
1019 u16 *eaddrs;
1020
1021 memset(hash_table, 0, sizeof(hash_table));
1022 __set_bit_le(255, hash_table); /* Broadcast entry */
1023 /* This should work on big-endian machines as well. */
1024 netdev_for_each_mc_addr(ha, dev) {
1025 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1026
1027 __set_bit_le(index, hash_table);
1028 }
1029 for (i = 0; i < 32; i++) {
1030 *setup_frm++ = hash_table[i];
1031 *setup_frm++ = hash_table[i];
1032 }
1033 setup_frm = &tp->setup_frame[13*6];
1034
1035 /* Fill the final entry with our physical address. */
1036 eaddrs = (u16 *)dev->dev_addr;
1037 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1038 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1039 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1040 }
1041
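/* Build the 16-entry perfect-filter form of the setup frame: the multicast list first, the broadcast address in any unused entries, and our own station address last. */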
1042 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1043 {
1044 struct tulip_private *tp = netdev_priv(dev);
1045 struct netdev_hw_addr *ha;
1046 u16 *eaddrs;
1047
1048 /* We have <= 14 addresses so we can use the wonderful
1049 16 address perfect filtering of the Tulip. */
1050 netdev_for_each_mc_addr(ha, dev) {
1051 eaddrs = (u16 *) ha->addr;
1052 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1053 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1054 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1055 }
1056 /* Fill the unused entries with the broadcast address. */
1057 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1058 setup_frm = &tp->setup_frame[15*6];
1059
1060 /* Fill the final entry with our physical address. */
1061 eaddrs = (u16 *)dev->dev_addr;
1062 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1063 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1064 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1065 }
1066
1067
1068 static void set_rx_mode(struct net_device *dev)
1069 {
1070 struct tulip_private *tp = netdev_priv(dev);
1071 void __iomem *ioaddr = tp->base_addr;
1072 int csr6;
1073
1074 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1075
1076 tp->csr6 &= ~0x00D5;
1077 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1078 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1079 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1080 } else if ((netdev_mc_count(dev) > 1000) ||
1081 (dev->flags & IFF_ALLMULTI)) {
1082 /* Too many to filter well -- accept all multicasts. */
1083 tp->csr6 |= AcceptAllMulticast;
1084 csr6 |= AcceptAllMulticast;
1085 } else if (tp->flags & MC_HASH_ONLY) {
1086 /* Some work-alikes have only a 64-entry hash filter table. */
1087 /* Should verify correctness on big-endian/__powerpc__ */
1088 struct netdev_hw_addr *ha;
1089 if (netdev_mc_count(dev) > 64) {
1090 /* Arbitrary non-effective limit. */
1091 tp->csr6 |= AcceptAllMulticast;
1092 csr6 |= AcceptAllMulticast;
1093 } else {
1094 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1095 int filterbit;
1096 netdev_for_each_mc_addr(ha, dev) {
1097 if (tp->flags & COMET_MAC_ADDR)
1098 filterbit = ether_crc_le(ETH_ALEN,
1099 ha->addr);
1100 else
1101 filterbit = ether_crc(ETH_ALEN,
1102 ha->addr) >> 26;
1103 filterbit &= 0x3f;
1104 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1105 if (tulip_debug > 2)
1106 dev_info(&dev->dev,
1107 "Added filter for %pM %08x bit %d\n",
1108 ha->addr,
1109 ether_crc(ETH_ALEN, ha->addr),
1110 filterbit);
1111 }
1112 if (mc_filter[0] == tp->mc_filter[0] &&
1113 mc_filter[1] == tp->mc_filter[1])
1114 ; /* No change. */
1115 else if (tp->flags & IS_ASIX) {
1116 iowrite32(2, ioaddr + CSR13);
1117 iowrite32(mc_filter[0], ioaddr + CSR14);
1118 iowrite32(3, ioaddr + CSR13);
1119 iowrite32(mc_filter[1], ioaddr + CSR14);
1120 } else if (tp->flags & COMET_MAC_ADDR) {
1121 iowrite32(mc_filter[0], ioaddr + CSR27);
1122 iowrite32(mc_filter[1], ioaddr + CSR28);
1123 }
1124 tp->mc_filter[0] = mc_filter[0];
1125 tp->mc_filter[1] = mc_filter[1];
1126 }
1127 } else {
1128 unsigned long flags;
1129 u32 tx_flags = 0x08000000 | 192;
1130
1131 /* Note that only the low-address shortword of setup_frame is valid!
1132 The values are doubled for big-endian architectures. */
1133 if (netdev_mc_count(dev) > 14) {
1134 /* Must use a multicast hash table. */
1135 build_setup_frame_hash(tp->setup_frame, dev);
1136 tx_flags = 0x08400000 | 192;
1137 } else {
1138 build_setup_frame_perfect(tp->setup_frame, dev);
1139 }
1140
1141 spin_lock_irqsave(&tp->lock, flags);
1142
1143 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1144 /* Same setup recently queued, we need not add it. */
1145 } else {
1146 unsigned int entry;
1147 int dummy = -1;
1148
1149 /* Now add this frame to the Tx list. */
1150
1151 entry = tp->cur_tx++ % TX_RING_SIZE;
1152
1153 if (entry != 0) {
1154 /* Avoid a chip errata by prefixing a dummy entry. */
1155 tp->tx_buffers[entry].skb = NULL;
1156 tp->tx_buffers[entry].mapping = 0;
1157 tp->tx_ring[entry].length =
1158 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1159 tp->tx_ring[entry].buffer1 = 0;
1160 /* Must set DescOwned later to avoid race with chip */
1161 dummy = entry;
1162 entry = tp->cur_tx++ % TX_RING_SIZE;
1163
1164 }
1165
1166 tp->tx_buffers[entry].skb = NULL;
1167 tp->tx_buffers[entry].mapping =
1168 pci_map_single(tp->pdev, tp->setup_frame,
1169 sizeof(tp->setup_frame),
1170 PCI_DMA_TODEVICE);
1171 /* Put the setup frame on the Tx list. */
1172 if (entry == TX_RING_SIZE-1)
1173 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1174 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1175 tp->tx_ring[entry].buffer1 =
1176 cpu_to_le32(tp->tx_buffers[entry].mapping);
1177 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1178 if (dummy >= 0)
1179 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1180 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1181 netif_stop_queue(dev);
1182
1183 /* Trigger an immediate transmit demand. */
1184 iowrite32(0, ioaddr + CSR1);
1185 }
1186
1187 spin_unlock_irqrestore(&tp->lock, flags);
1188 }
1189
1190 iowrite32(csr6, ioaddr + CSR6);
1191 }
1192
1193 #ifdef CONFIG_TULIP_MWI
1194 static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1195 {
1196 struct tulip_private *tp = netdev_priv(dev);
1197 u8 cache;
1198 u16 pci_command;
1199 u32 csr0;
1200
1201 if (tulip_debug > 3)
1202 netdev_dbg(dev, "tulip_mwi_config()\n");
1203
1204 tp->csr0 = csr0 = 0;
1205
1206 /* if we have any cache line size at all, we can do MRM and MWI */
1207 csr0 |= MRM | MWI;
1208
1209 /* Enable MWI in the standard PCI command bit.
1210 * Check for the case where MWI is desired but not available
1211 */
1212 pci_try_set_mwi(pdev);
1213
1214 /* read result from hardware (in case bit refused to enable) */
1215 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1216 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1217 csr0 &= ~MWI;
1218
1219 /* if cache line size hardwired to zero, no MWI */
1220 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1221 if ((csr0 & MWI) && (cache == 0)) {
1222 csr0 &= ~MWI;
1223 pci_clear_mwi(pdev);
1224 }
1225
1226 /* assign per-cacheline-size cache alignment and
1227 * burst length values
1228 */
1229 switch (cache) {
1230 case 8:
1231 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1232 break;
1233 case 16:
1234 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1235 break;
1236 case 32:
1237 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1238 break;
1239 default:
1240 cache = 0;
1241 break;
1242 }
1243
1244 /* if we have a good cache line size, we by now have a good
1245 * csr0, so save it and exit
1246 */
1247 if (cache)
1248 goto out;
1249
1250 /* we don't have a good csr0 or cache line size, disable MWI */
1251 if (csr0 & MWI) {
1252 pci_clear_mwi(pdev);
1253 csr0 &= ~MWI;
1254 }
1255
1256 /* sane defaults for burst length and cache alignment
1257 * originally from de4x5 driver
1258 */
1259 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1260
1261 out:
1262 tp->csr0 = csr0;
1263 if (tulip_debug > 2)
1264 netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1265 cache, csr0);
1266 }
1267 #endif
1268
1269 /*
1270 * Chips that have the MRM/reserved bit quirk and the burst quirk. That
1271 * is the DM910X and the on chip ULi devices
1272 */
1273
1274 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1275 {
1276 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1277 return 1;
1278 return 0;
1279 }
1280
1281 static const struct net_device_ops tulip_netdev_ops = {
1282 .ndo_open = tulip_open,
1283 .ndo_start_xmit = tulip_start_xmit,
1284 .ndo_tx_timeout = tulip_tx_timeout,
1285 .ndo_stop = tulip_close,
1286 .ndo_get_stats = tulip_get_stats,
1287 .ndo_do_ioctl = private_ioctl,
1288 .ndo_set_rx_mode = set_rx_mode,
1289 .ndo_change_mtu = eth_change_mtu,
1290 .ndo_set_mac_address = eth_mac_addr,
1291 .ndo_validate_addr = eth_validate_addr,
1292 #ifdef CONFIG_NET_POLL_CONTROLLER
1293 .ndo_poll_controller = poll_tulip,
1294 #endif
1295 };
1296
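/* Early 486-era host bridges with known PCI bugs; when one is present, tulip_init_one() below forces conservative CSR0 burst and cache-alignment settings. */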
1297 DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
1298 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1299 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1300 { },
1301 };
1302
1303 static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1304 {
1305 struct tulip_private *tp;
1306 /* See note below on the multiport cards. */
1307 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1308 static int last_irq;
1309 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1310 int i, irq;
1311 unsigned short sum;
1312 unsigned char *ee_data;
1313 struct net_device *dev;
1314 void __iomem *ioaddr;
1315 static int board_idx = -1;
1316 int chip_idx = ent->driver_data;
1317 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1318 unsigned int eeprom_missing = 0;
1319 unsigned int force_csr0 = 0;
1320
1321 #ifndef MODULE
1322 if (tulip_debug > 0)
1323 printk_once(KERN_INFO "%s", version);
1324 #endif
1325
1326 board_idx++;
1327
1328 /*
1329 * LanMedia boards wire a Tulip chip to a WAN interface and need a
1330 * very different driver (the lmc driver).
1331 */
1332
1333 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1334 pr_err("skipping LMC card\n");
1335 return -ENODEV;
1336 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1337 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1338 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1339 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1340 pr_err("skipping SBE T3E3 port\n");
1341 return -ENODEV;
1342 }
1343
1344 /*
1345 * DM910x chips should be handled by the dmfe driver, except
1346 * on-board chips on SPARC systems. Also, early DM9100s need
1347 * software CRC which only the dmfe driver supports.
1348 */
1349
1350 #ifdef CONFIG_TULIP_DM910X
1351 if (chip_idx == DM910X) {
1352 struct device_node *dp;
1353
1354 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1355 pdev->revision < 0x30) {
1356 pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1357 return -ENODEV;
1358 }
1359
1360 dp = pci_device_to_OF_node(pdev);
1361 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1362 pr_info("skipping DM910x expansion card (use dmfe)\n");
1363 return -ENODEV;
1364 }
1365 }
1366 #endif
1367
1368 /*
1369 * Looks for early PCI chipsets where people report hangs
1370 * without the workarounds being on.
1371 */
1372
1373 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1374 aligned. Aries might need this too. The Saturn errata are not
1375 pretty reading but thankfully it's an old 486 chipset.
1376
1377 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1378 Saturn.
1379 */
1380
1381 if (pci_dev_present(early_486_chipsets)) {
1382 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1383 force_csr0 = 1;
1384 }
1385
1386 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1387 if (chip_idx == AX88140) {
1388 if ((csr0 & 0x3f00) == 0)
1389 csr0 |= 0x2000;
1390 }
1391
1392 /* PNIC doesn't have MWI/MRL/MRM... */
1393 if (chip_idx == LC82C168)
1394 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1395
1396 /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
1397 if (tulip_uli_dm_quirk(pdev)) {
1398 csr0 &= ~0x01f100ff;
1399 #if defined(CONFIG_SPARC)
1400 csr0 = (csr0 & ~0xff00) | 0xe000;
1401 #endif
1402 }
1403 /*
1404 * And back to business
1405 */
1406
1407 i = pci_enable_device(pdev);
1408 if (i) {
1409 pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1410 return i;
1411 }
1412
1413 /* The chip will fail to enter a low-power state later unless
1414 * first explicitly commanded into D0 */
1415 if (pci_set_power_state(pdev, PCI_D0)) {
1416 pr_notice("Failed to set power state to D0\n");
1417 }
1418
1419 irq = pdev->irq;
1420
1421 /* alloc_etherdev ensures aligned and zeroed private structures */
1422 dev = alloc_etherdev (sizeof (*tp));
1423 if (!dev)
1424 return -ENOMEM;
1425
1426 SET_NETDEV_DEV(dev, &pdev->dev);
1427 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1428 pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1429 pci_name(pdev),
1430 (unsigned long long)pci_resource_len (pdev, 0),
1431 (unsigned long long)pci_resource_start (pdev, 0));
1432 goto err_out_free_netdev;
1433 }
1434
1435 /* grab all resources from both PIO and MMIO regions, as we
1436 * don't want anyone else messing around with our hardware */
1437 if (pci_request_regions (pdev, DRV_NAME))
1438 goto err_out_free_netdev;
1439
1440 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1441
1442 if (!ioaddr)
1443 goto err_out_free_res;
1444
1445 /*
1446 * initialize private data structure 'tp'
1447 * it is zeroed and aligned in alloc_etherdev
1448 */
1449 tp = netdev_priv(dev);
1450 tp->dev = dev;
1451
1452 tp->rx_ring = pci_alloc_consistent(pdev,
1453 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1454 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1455 &tp->rx_ring_dma);
1456 if (!tp->rx_ring)
1457 goto err_out_mtable;
1458 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1459 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1460
1461 tp->chip_id = chip_idx;
1462 tp->flags = tulip_tbl[chip_idx].flags;
1463
1464 tp->wolinfo.supported = 0;
1465 tp->wolinfo.wolopts = 0;
1466 /* COMET: Enable power management only for AN983B */
1467 if (chip_idx == COMET ) {
1468 u32 sig;
1469 pci_read_config_dword (pdev, 0x80, &sig);
1470 if (sig == 0x09811317) {
1471 tp->flags |= COMET_PM;
1472 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1473 pr_info("%s: Enabled WOL support for AN983B\n",
1474 __func__);
1475 }
1476 }
1477 tp->pdev = pdev;
1478 tp->base_addr = ioaddr;
1479 tp->revision = pdev->revision;
1480 tp->csr0 = csr0;
1481 spin_lock_init(&tp->lock);
1482 spin_lock_init(&tp->mii_lock);
1483 init_timer(&tp->timer);
1484 tp->timer.data = (unsigned long)dev;
1485 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1486
1487 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1488
1489 #ifdef CONFIG_TULIP_MWI
1490 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1491 tulip_mwi_config (pdev, dev);
1492 #endif
1493
1494 /* Stop the chip's Tx and Rx processes. */
1495 tulip_stop_rxtx(tp);
1496
1497 pci_set_master(pdev);
1498
1499 #ifdef CONFIG_GSC
1500 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1501 switch (pdev->subsystem_device) {
1502 default:
1503 break;
1504 case 0x1061:
1505 case 0x1062:
1506 case 0x1063:
1507 case 0x1098:
1508 case 0x1099:
1509 case 0x10EE:
1510 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1511 chip_name = "GSC DS21140 Tulip";
1512 }
1513 }
1514 #endif
1515
1516 /* Clear the missed-packet counter. */
1517 ioread32(ioaddr + CSR8);
1518
1519 /* The station address ROM is read byte serially. The register must
1520 be polled, waiting for the value to be read bit serially from the
1521 EEPROM.
1522 */
1523 ee_data = tp->eeprom;
1524 memset(ee_data, 0, sizeof(tp->eeprom));
1525 sum = 0;
1526 if (chip_idx == LC82C168) {
1527 for (i = 0; i < 3; i++) {
1528 int value, boguscnt = 100000;
1529 iowrite32(0x600 | i, ioaddr + 0x98);
1530 do {
1531 value = ioread32(ioaddr + CSR9);
1532 } while (value < 0 && --boguscnt > 0);
1533 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1534 sum += value & 0xffff;
1535 }
1536 } else if (chip_idx == COMET) {
1537 /* No need to read the EEPROM. */
1538 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1539 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1540 for (i = 0; i < 6; i ++)
1541 sum += dev->dev_addr[i];
1542 } else {
1543 /* A serial EEPROM interface, we read now and sort it out later. */
1544 int sa_offset = 0;
1545 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1546 int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1547
1548 if (ee_max_addr > sizeof(tp->eeprom))
1549 ee_max_addr = sizeof(tp->eeprom);
1550
1551 for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1552 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1553 ee_data[i] = data & 0xff;
1554 ee_data[i + 1] = data >> 8;
1555 }
1556
1557 /* DEC now has a specification (see Notes) but early board makers
1558 just put the address in the first EEPROM locations. */
1559 /* This does memcmp(ee_data, ee_data+16, 8) */
1560 for (i = 0; i < 8; i ++)
1561 if (ee_data[i] != ee_data[16+i])
1562 sa_offset = 20;
1563 if (chip_idx == CONEXANT) {
1564 /* Check that the tuple type and length is correct. */
1565 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1566 sa_offset = 0x19A;
1567 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1568 ee_data[2] == 0) {
1569 sa_offset = 2; /* Grrr, damn Matrox boards. */
1570 multiport_cnt = 4;
1571 }
1572 #ifdef CONFIG_MIPS_COBALT
1573 if ((pdev->bus->number == 0) &&
1574 ((PCI_SLOT(pdev->devfn) == 7) ||
1575 (PCI_SLOT(pdev->devfn) == 12))) {
1576 /* Cobalt MAC address in first EEPROM locations. */
1577 sa_offset = 0;
1578 /* Ensure our media table fixup gets applied */
1579 memcpy(ee_data + 16, ee_data, 8);
1580 }
1581 #endif
1582 #ifdef CONFIG_GSC
1583 /* Check to see if we have a broken srom */
1584 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1585 /* pci_vendor_id and subsystem_id are swapped */
1586 ee_data[0] = ee_data[2];
1587 ee_data[1] = ee_data[3];
1588 ee_data[2] = 0x61;
1589 ee_data[3] = 0x10;
1590
1591 /* HSC-PCI boards need to be byte-swapped and shifted
1592 * up 1 word. This shift needs to happen at the end
1593 * of the MAC first because of the 2 byte overlap.
1594 */
1595 for (i = 4; i >= 0; i -= 2) {
1596 ee_data[17 + i + 3] = ee_data[17 + i];
1597 ee_data[16 + i + 5] = ee_data[16 + i];
1598 }
1599 }
1600 #endif
1601
1602 for (i = 0; i < 6; i ++) {
1603 dev->dev_addr[i] = ee_data[i + sa_offset];
1604 sum += ee_data[i + sa_offset];
1605 }
1606 }
1607 /* Lite-On boards have the address byte-swapped. */
1608 if ((dev->dev_addr[0] == 0xA0 ||
1609 dev->dev_addr[0] == 0xC0 ||
1610 dev->dev_addr[0] == 0x02) &&
1611 dev->dev_addr[1] == 0x00)
1612 for (i = 0; i < 6; i+=2) {
1613 char tmp = dev->dev_addr[i];
1614 dev->dev_addr[i] = dev->dev_addr[i+1];
1615 dev->dev_addr[i+1] = tmp;
1616 }
1617 /* On the Zynx 315 Etherarray and other multiport boards only the
1618 first Tulip has an EEPROM.
1619 On Sparc systems the mac address is held in the OBP property
1620 "local-mac-address".
1621 The addresses of the subsequent ports are derived from the first.
1622 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1623 that here as well. */
1624 if (sum == 0 || sum == 6*0xff) {
1625 #if defined(CONFIG_SPARC)
1626 struct device_node *dp = pci_device_to_OF_node(pdev);
1627 const unsigned char *addr;
1628 int len;
1629 #endif
1630 eeprom_missing = 1;
1631 for (i = 0; i < 5; i++)
1632 dev->dev_addr[i] = last_phys_addr[i];
1633 dev->dev_addr[i] = last_phys_addr[i] + 1;
1634 #if defined(CONFIG_SPARC)
1635 addr = of_get_property(dp, "local-mac-address", &len);
1636 if (addr && len == 6)
1637 memcpy(dev->dev_addr, addr, 6);
1638 #endif
1639 #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1640 if (last_irq)
1641 irq = last_irq;
1642 #endif
1643 }
1644
1645 for (i = 0; i < 6; i++)
1646 last_phys_addr[i] = dev->dev_addr[i];
1647 last_irq = irq;
1648
1649 /* The lower four bits are the media type. */
1650 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1651 if (options[board_idx] & MEDIA_MASK)
1652 tp->default_port = options[board_idx] & MEDIA_MASK;
1653 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1654 tp->full_duplex = 1;
1655 if (mtu[board_idx] > 0)
1656 dev->mtu = mtu[board_idx];
1657 }
1658 if (dev->mem_start & MEDIA_MASK)
1659 tp->default_port = dev->mem_start & MEDIA_MASK;
1660 if (tp->default_port) {
1661 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1662 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1663 tp->medialock = 1;
1664 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1665 tp->full_duplex = 1;
1666 }
1667 if (tp->full_duplex)
1668 tp->full_duplex_lock = 1;
1669
1670 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1671 static const u16 media2advert[] = {
1672 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1673 };
1674 tp->mii_advertise = media2advert[tp->default_port - 9];
1675 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1676 }
1677
1678 if (tp->flags & HAS_MEDIA_TABLE) {
1679 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1680 tulip_parse_eeprom(dev);
1681 strcpy(dev->name, "eth%d"); /* un-hack */
1682 }
1683
1684 if ((tp->flags & ALWAYS_CHECK_MII) ||
1685 (tp->mtable && tp->mtable->has_mii) ||
1686 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1687 if (tp->mtable && tp->mtable->has_mii) {
1688 for (i = 0; i < tp->mtable->leafcount; i++)
1689 if (tp->mtable->mleaf[i].media == 11) {
1690 tp->cur_index = i;
1691 tp->saved_if_port = dev->if_port;
1692 tulip_select_media(dev, 2);
1693 dev->if_port = tp->saved_if_port;
1694 break;
1695 }
1696 }
1697
1698 /* Find the connected MII xcvrs.
1699 Doing this in open() would allow detecting external xcvrs
1700 later, but takes much time. */
1701 tulip_find_mii (dev, board_idx);
1702 }
1703
1704 /* The Tulip-specific entries in the device structure. */
1705 dev->netdev_ops = &tulip_netdev_ops;
1706 dev->watchdog_timeo = TX_TIMEOUT;
1707 #ifdef CONFIG_TULIP_NAPI
1708 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1709 #endif
1710 SET_ETHTOOL_OPS(dev, &ops);
1711
1712 if (register_netdev(dev))
1713 goto err_out_free_ring;
1714
1715 pci_set_drvdata(pdev, dev);
1716
1717 dev_info(&dev->dev,
1718 #ifdef CONFIG_TULIP_MMIO
1719 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1720 #else
1721 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1722 #endif
1723 chip_name, pdev->revision,
1724 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1725 eeprom_missing ? " EEPROM not present," : "",
1726 dev->dev_addr, irq);
1727
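	/* Hook up the chip-specific link-change handler; it is invoked from
	   the interrupt and timer paths when the chip reports a link event. */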
1728 if (tp->chip_id == PNIC2)
1729 tp->link_change = pnic2_lnk_change;
1730 else if (tp->flags & HAS_NWAY)
1731 tp->link_change = t21142_lnk_change;
1732 else if (tp->flags & HAS_PNICNWAY)
1733 tp->link_change = pnic_lnk_change;
1734
1735 /* Reset the xcvr interface and turn on heartbeat. */
1736 switch (chip_idx) {
1737 case DC21140:
1738 case DM910X:
1739 default:
1740 if (tp->mtable)
1741 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1742 break;
1743 case DC21142:
1744 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1745 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1746 iowrite32(0x0000, ioaddr + CSR13);
1747 iowrite32(0x0000, ioaddr + CSR14);
1748 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1749 } else
1750 t21142_start_nway(dev);
1751 break;
1752 case PNIC2:
1753 		/* just do a reset for sanity's sake */
1754 iowrite32(0x0000, ioaddr + CSR13);
1755 iowrite32(0x0000, ioaddr + CSR14);
1756 break;
1757 case LC82C168:
1758 if ( ! tp->mii_cnt) {
1759 tp->nway = 1;
1760 tp->nwayset = 0;
1761 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1762 iowrite32(0x30, ioaddr + CSR12);
1763 iowrite32(0x0001F078, ioaddr + CSR6);
1764 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1765 }
1766 break;
1767 case MX98713:
1768 case COMPEX9881:
1769 iowrite32(0x00000000, ioaddr + CSR6);
1770 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1771 iowrite32(0x00000001, ioaddr + CSR13);
1772 break;
1773 case MX98715:
1774 case MX98725:
1775 iowrite32(0x01a80000, ioaddr + CSR6);
1776 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1777 iowrite32(0x00001000, ioaddr + CSR12);
1778 break;
1779 case COMET:
1780 /* No initialization necessary. */
1781 break;
1782 }
1783
1784 /* put the chip in snooze mode until opened */
1785 tulip_set_power_state (tp, 0, 1);
1786
1787 return 0;
1788
1789 err_out_free_ring:
1790 pci_free_consistent (pdev,
1791 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1792 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1793 tp->rx_ring, tp->rx_ring_dma);
1794
1795 err_out_mtable:
1796 kfree (tp->mtable);
1797 pci_iounmap(pdev, ioaddr);
1798
1799 err_out_free_res:
1800 pci_release_regions (pdev);
1801
1802 err_out_free_netdev:
1803 free_netdev (dev);
1804 return -ENODEV;
1805 }
1806
1807
1808 /* set the registers according to the given wolopts */
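/* wolopts carries ethtool WAKE_* flags (set e.g. via "ethtool -s ethN wol g"
 * for magic-packet wake); only Comet-family chips (COMET_PM) act on them here.
 */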
1809 static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1810 {
1811 struct net_device *dev = pci_get_drvdata(pdev);
1812 struct tulip_private *tp = netdev_priv(dev);
1813 void __iomem *ioaddr = tp->base_addr;
1814
1815 if (tp->flags & COMET_PM) {
1816
1817 unsigned int tmp;
1818
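		/* Put the Comet's wake-up logic into PCI power-management mode
		   (rather than the sticky/legacy APM modes) before programming
		   the wake events in CSR13 below; bit names are per tulip.h. */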
1819 tmp = ioread32(ioaddr + CSR18);
1820 tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1821 tmp |= comet_csr18_pm_mode;
1822 iowrite32(tmp, ioaddr + CSR18);
1823
1824 		/* Set the Wake-up Control/Status Register to the given WOL options */
1825 tmp = ioread32(ioaddr + CSR13);
1826 tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1827 if (wolopts & WAKE_MAGIC)
1828 tmp |= comet_csr13_mpre;
1829 if (wolopts & WAKE_PHY)
1830 tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1831 /* Clear the event flags */
1832 tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1833 iowrite32(tmp, ioaddr + CSR13);
1834 }
1835 }
1836
1837 #ifdef CONFIG_PM
1838
1839
1840 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1841 {
1842 pci_power_t pstate;
1843 struct net_device *dev = pci_get_drvdata(pdev);
1844 struct tulip_private *tp = netdev_priv(dev);
1845
1846 if (!dev)
1847 return -EINVAL;
1848
1849 if (!netif_running(dev))
1850 goto save_state;
1851
1852 tulip_down(dev);
1853
1854 netif_device_detach(dev);
1855 	/* FIXME: freeing the IRQ here needlessly adds an error path,
1855 	   since resume must re-request it. */
1856 free_irq(tp->pdev->irq, dev);
1857
1858 save_state:
1859 pci_save_state(pdev);
1860 pci_disable_device(pdev);
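	/* Arm wake-up only for a real suspend that leaves D0; freeze and
	   hibernate events skip the WOL programming below. */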
1861 pstate = pci_choose_state(pdev, state);
1862 if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1863 int rc;
1864
1865 tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1866 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1867 if (rc)
1868 pr_err("pci_enable_wake failed (%d)\n", rc);
1869 }
1870 pci_set_power_state(pdev, pstate);
1871
1872 return 0;
1873 }
1874
1875
1876 static int tulip_resume(struct pci_dev *pdev)
1877 {
1878 struct net_device *dev = pci_get_drvdata(pdev);
1879 struct tulip_private *tp = netdev_priv(dev);
1880 void __iomem *ioaddr = tp->base_addr;
1881 int retval;
1882 unsigned int tmp;
1883
1884 if (!dev)
1885 return -EINVAL;
1886
1887 pci_set_power_state(pdev, PCI_D0);
1888 pci_restore_state(pdev);
1889
1890 if (!netif_running(dev))
1891 return 0;
1892
1893 if ((retval = pci_enable_device(pdev))) {
1894 pr_err("pci_enable_device failed in resume\n");
1895 return retval;
1896 }
1897
1898 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1899 dev->name, dev);
1900 if (retval) {
1901 pr_err("request_irq failed in resume\n");
1902 return retval;
1903 }
1904
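	/* Disarm any wake-up sources that tulip_suspend() may have enabled,
	   now that the device is back in D0. */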
1905 if (tp->flags & COMET_PM) {
1906 pci_enable_wake(pdev, PCI_D3hot, 0);
1907 pci_enable_wake(pdev, PCI_D3cold, 0);
1908
1909 /* Clear the PMES flag */
1910 tmp = ioread32(ioaddr + CSR20);
1911 tmp |= comet_csr20_pmes;
1912 iowrite32(tmp, ioaddr + CSR20);
1913
1914 /* Disable all wake-up events */
1915 tulip_set_wolopts(pdev, 0);
1916 }
1917 netif_device_attach(dev);
1918
1919 if (netif_running(dev))
1920 tulip_up(dev);
1921
1922 return 0;
1923 }
1924
1925 #endif /* CONFIG_PM */
1926
1927
1928 static void tulip_remove_one(struct pci_dev *pdev)
1929 {
1930 struct net_device *dev = pci_get_drvdata (pdev);
1931 struct tulip_private *tp;
1932
1933 if (!dev)
1934 return;
1935
1936 tp = netdev_priv(dev);
1937 unregister_netdev(dev);
1938 pci_free_consistent (pdev,
1939 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1940 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1941 tp->rx_ring, tp->rx_ring_dma);
1942 kfree (tp->mtable);
1943 pci_iounmap(pdev, tp->base_addr);
1944 free_netdev (dev);
1945 pci_release_regions (pdev);
1946 pci_set_drvdata (pdev, NULL);
1947
1948 /* pci_power_off (pdev, -1); */
1949 }
1950
1951 #ifdef CONFIG_NET_POLL_CONTROLLER
1952 /*
1953 * Polling 'interrupt' - used by things like netconsole to send skbs
1954 * without having to re-enable interrupts. It's not called while
1955 * the interrupt routine is executing.
1956 */
1957
1958 static void poll_tulip (struct net_device *dev)
1959 {
1960 struct tulip_private *tp = netdev_priv(dev);
1961 const int irq = tp->pdev->irq;
1962
1963 /* disable_irq here is not very nice, but with the lockless
1964 interrupt handler we have no other choice. */
1965 disable_irq(irq);
1966 tulip_interrupt (irq, dev);
1967 enable_irq(irq);
1968 }
1969 #endif
1970
1971 static struct pci_driver tulip_driver = {
1972 .name = DRV_NAME,
1973 .id_table = tulip_pci_tbl,
1974 .probe = tulip_init_one,
1975 .remove = tulip_remove_one,
1976 #ifdef CONFIG_PM
1977 .suspend = tulip_suspend,
1978 .resume = tulip_resume,
1979 #endif /* CONFIG_PM */
1980 };
1981
1982
1983 static int __init tulip_init (void)
1984 {
1985 #ifdef MODULE
1986 pr_info("%s", version);
1987 #endif
1988
1989 /* copy module parms into globals */
1990 tulip_rx_copybreak = rx_copybreak;
1991 tulip_max_interrupt_work = max_interrupt_work;
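	/* Illustrative module load using these parameters (values are just
	 * examples):  modprobe tulip rx_copybreak=200 max_interrupt_work=30
	 */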
1992
1993 /* probe for and init boards */
1994 return pci_register_driver(&tulip_driver);
1995 }
1996
1997
1998 static void __exit tulip_cleanup (void)
1999 {
2000 pci_unregister_driver (&tulip_driver);
2001 }
2002
2003
2004 module_init(tulip_init);
2005 module_exit(tulip_cleanup);
2006