1 /*
2 sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3
4 Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
5 Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
6 Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
7
8 Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9 genuine driver.
10
11 This software may be used and distributed according to the terms of
12 the GNU General Public License (GPL), incorporated herein by reference.
13 Drivers based on or derived from this code fall under the GPL and must
14 retain the authorship, copyright and license notice. This file is not
15 a complete program and may only be used when the entire operating
16 system is licensed under the GPL.
17
18 See the file COPYING in this distribution for more information.
19
20 */
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/netdevice.h>
25 #include <linux/rtnetlink.h>
26 #include <linux/etherdevice.h>
27 #include <linux/ethtool.h>
28 #include <linux/pci.h>
29 #include <linux/mii.h>
30 #include <linux/delay.h>
31 #include <linux/crc32.h>
32 #include <linux/dma-mapping.h>
33 #include <asm/irq.h>
34
/*
 * Conditional logging helpers: emit via printk() only when the matching
 * netif_msg_* bit is set in tp->msg_enable. Wrapped in do { } while (0)
 * so each expands to a single statement and is safe in unbraced
 * if/else bodies (the original bare-if form risked dangling-else bugs).
 */
#define net_drv(p, arg...)	do { if (netif_msg_drv(p)) \
					printk(arg); } while (0)
#define net_probe(p, arg...)	do { if (netif_msg_probe(p)) \
					printk(arg); } while (0)
#define net_link(p, arg...)	do { if (netif_msg_link(p)) \
					printk(arg); } while (0)
#define net_intr(p, arg...)	do { if (netif_msg_intr(p)) \
					printk(arg); } while (0)
#define net_tx_err(p, arg...)	do { if (netif_msg_tx_err(p)) \
					printk(arg); } while (0)
45
/* MII bus limits and wildcards used by the PHY probe code. */
#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.2"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "

/* Non-NAPI receive path: deliver directly, ignore the quota. */
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define MAC_ADDR_LEN		6

/* Ring geometry; descriptor rings are arrays of 16-byte descriptors. */
#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
/* The chip ignores the low bits of the Rx buffer size (8-byte units). */
#define RX_BUF_MASK		0xfff8

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* Write/read MMIO register (all expect a local `ioaddr` in scope) */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Dummy read to flush posted PCI writes. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
92
/* MMIO register map (offsets into the SIS190_REGS_SIZE window). */
enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c,	// unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c,	// reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented		= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};
127
/* Bit definitions for the registers above. */
enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,	// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,	// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,	// unused
	Cfg9346_Unlock		= 0xc0,	// unused

	/* RxMacControl */
	AcceptErr		= 0x20,	// unused
	AcceptRunt		= 0x10,	// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,	// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */

	LinkStatus		= 0x02,	// unused
	FullDup			= 0x01,	// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};
177
/*
 * Hardware descriptors, shared with the asic via coherent DMA.
 * All fields are little-endian as seen by the chip.
 */
struct TxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;	/* bus address of the packet buffer */
	__le32 size;
};

struct RxDesc {
	__le32 PSize;	/* completion status + received size (see _DescStatusBit) */
	__le32 status;
	__le32 addr;	/* bus address of the receive buffer */
	__le32 size;
};
191
/* Descriptor field bits, grouped by the field they live in. */
enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	// RXOWN/TXOWN
	INTbit		= 0x40000000,	// RXINT/TXINT
	CRCbit		= 0x00020000,	// CRCOFF/CRCEN
	PADbit		= 0x00010000,	// PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000,	// TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,

	WND		= 0x00080000,
	TABRT		= 0x00040000,
	FIFO		= 0x00020000,
	LINK		= 0x00010000,
	ColCountMask	= 0x0000ffff,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000,	// multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publicly
	 * available documentation alas.
	 */
};
250
/* Bits of the ROMInterface register used for EEPROM access. */
enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,
	EEROP	= 0x00000200,
	EEWOP	= 0x00000100	// unused
};

/* EEPROM Addresses */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};

/* Per-board feature flags stored in sis190_private.features. */
enum sis190_feature {
	F_HAS_RGMII	= 1,
	F_PHY_88E1111	= 2,
	F_PHY_BCM5461	= 4
};
274
/* Per-adapter state, stored in netdev_priv(dev). */
struct sis190_private {
	void __iomem *mmio_addr;	/* mapped register window */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;		/* guards register access vs. irq */
	u32 rx_buf_sz;			/* Rx buffer size, multiple of 8 */
	/* Ring cursors: cur_* is the producer index, dirty_* the consumer;
	   both count monotonically and are reduced mod NUM_*_DESC. */
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;		/* bus address of RxDescRing */
	dma_addr_t tx_dma;		/* bus address of TxDescRing */
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;	/* deferred link handling */
	struct timer_list timer;	/* periodic PHY poll */
	u32 msg_enable;
	struct mii_if_info mii_if;
	struct list_head first_phy;	/* PHYs found while probing the bus */
	u32 features;			/* enum sis190_feature bits */
};

/* One entry per PHY discovered on the MII bus. */
struct sis190_phy {
	struct list_head list;
	int phy_id;
	u16 id[2];
	u16 status;
	u8  type;
};

enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};
313
/* Known PHYs, matched against the MII PHYSID1/PHYSID2 pair. */
static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};

/* Indexed by the driver_data field of sis190_pci_tbl below. */
static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};

static struct pci_device_id sis190_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};
341
MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

/* Frames shorter than this are copied into a fresh skb on receive. */
static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

/* The only interrupt sources this driver unmasks and handles. */
static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;
367
/*
 * Latch an MII command into GMIIControl and busy-wait (up to ~100 ms)
 * for the chip to clear the EhnMIInotDone bit. Logs an error on timeout.
 */
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int n;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (n = 0; n < 100; n++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			return;
		msleep(1);
	}

	printk(KERN_ERR PFX "PHY command failed !\n");
}
385
/* Write `val` to PHY register `reg` of the PHY at address `phy_id`. */
static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
{
	u32 cmd = EhnMIIreq | EhnMIIwrite;

	cmd |= ((u32) reg) << EhnMIIregShift;
	cmd |= phy_id << EhnMIIpmdShift;
	cmd |= ((u32) val) << EhnMIIdataShift;

	__mdio_cmd(ioaddr, cmd);
}
392
/* Read PHY register `reg` from the PHY at address `phy_id`. */
static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
{
	u32 cmd = EhnMIIreq | EhnMIIread;

	cmd |= ((u32) reg) << EhnMIIregShift;
	cmd |= phy_id << EhnMIIpmdShift;

	__mdio_cmd(ioaddr, cmd);

	/* The result is latched in the upper 16 bits of GMIIControl. */
	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}
400
__mdio_write(struct net_device * dev,int phy_id,int reg,int val)401 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
402 {
403 struct sis190_private *tp = netdev_priv(dev);
404
405 mdio_write(tp->mmio_addr, phy_id, reg, val);
406 }
407
__mdio_read(struct net_device * dev,int phy_id,int reg)408 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
409 {
410 struct sis190_private *tp = netdev_priv(dev);
411
412 return mdio_read(tp->mmio_addr, phy_id, reg);
413 }
414
/*
 * Read a PHY register twice and return the second value: latched
 * status bits (e.g. in BMSR) reflect events since the previous read,
 * so the first read flushes stale state.
 */
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}
420
/*
 * Read one 16-bit word from the attached EEPROM.
 * Returns 0 when no EEPROM is present, 0xffff on timeout.
 */
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	/* ROMControl bit 1 clear: no EEPROM attached. */
	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	/* Start a read op; the chip clears EEREQ when it completes. */
	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			/* Result lives in the upper half of the register. */
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}
441
/* Mask all interrupts, ack anything pending, and flush posted writes. */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}
448
/* Halt the chip: stop both DMA engines, then mask and ack interrupts. */
static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}
458
/* Flag `desc` as the final ring entry so the asic wraps back to entry 0. */
static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}
463
/*
 * Return an Rx descriptor to the hardware, preserving its RingEnd bit.
 * The wmb() ensures size/PSize are visible before OWNbit transfers
 * ownership to the asic.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}
473
/* Install a freshly mapped buffer address, then hand the slot to the asic. */
static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}
480
/*
 * Neutralize an Rx descriptor that has no buffer: poison the address,
 * zero the size (keeping RingEnd) and clear OWNbit so the asic skips it.
 */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}
489
sis190_alloc_rx_skb(struct sis190_private * tp,struct RxDesc * desc)490 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
491 struct RxDesc *desc)
492 {
493 u32 rx_buf_sz = tp->rx_buf_sz;
494 struct sk_buff *skb;
495
496 skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
497 if (likely(skb)) {
498 dma_addr_t mapping;
499
500 mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
501 PCI_DMA_FROMDEVICE);
502 sis190_map_to_asic(desc, mapping, rx_buf_sz);
503 } else
504 sis190_make_unusable_by_asic(desc);
505
506 return skb;
507 }
508
/*
 * Replenish empty Rx slots in [start, end). Stops at the first
 * allocation failure and returns the number of slots examined before
 * giving up (== end - start on full success).
 */
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur = start;

	while (cur < end) {
		unsigned int idx = cur % NUM_RX_DESC;

		if (!tp->Rx_skbuff[idx]) {
			tp->Rx_skbuff[idx] =
				sis190_alloc_rx_skb(tp, tp->RxDescRing + idx);
			if (!tp->Rx_skbuff[idx])
				break;	/* out of memory: partial fill */
		}
		cur++;
	}
	return cur - start;
}
527
sis190_try_rx_copy(struct sis190_private * tp,struct sk_buff ** sk_buff,int pkt_size,dma_addr_t addr)528 static bool sis190_try_rx_copy(struct sis190_private *tp,
529 struct sk_buff **sk_buff, int pkt_size,
530 dma_addr_t addr)
531 {
532 struct sk_buff *skb;
533 bool done = false;
534
535 if (pkt_size >= rx_copybreak)
536 goto out;
537
538 skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
539 if (!skb)
540 goto out;
541
542 pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size,
543 PCI_DMA_FROMDEVICE);
544 skb_reserve(skb, 2);
545 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
546 *sk_buff = skb;
547 done = true;
548 out:
549 return done;
550 }
551
/*
 * Inspect an Rx completion status word. Returns 0 for a clean frame;
 * otherwise bumps the most specific error counter plus rx_errors and
 * returns -1.
 */
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (!(status & ErrMask))
		return 0;	/* CRC ok and no error bits: good frame */
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}
571
/*
 * Rx processing: walk completed descriptors from cur_rx, deliver good
 * frames (copying small ones), recycle or refill ring slots, and
 * advance the cursors. Returns the number of descriptors consumed.
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		/* OWNbit still set: the asic has not filled this slot yet. */
		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			dma_addr_t addr = le32_to_cpu(desc->addr);
			/* Reported size includes the 4-byte FCS. */
			int pkt_size = (status & RxSizeMask) - 4;
			struct pci_dev *pdev = tp->pci_dev;

			/* Larger than our buffer: a fragment - drop it. */
			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}


			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
				/* Copied: the mapped buffer goes straight
				   back to the hardware. */
				pci_dma_sync_single_for_device(pdev, addr,
					tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
				sis190_give_to_asic(desc, tp->rx_buf_sz);
			} else {
				/* Consumed in place: unmap now, slot is
				   refilled by sis190_rx_fill() below. */
				pci_unmap_single(pdev, addr, tp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}
649
sis190_unmap_tx_skb(struct pci_dev * pdev,struct sk_buff * skb,struct TxDesc * desc)650 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
651 struct TxDesc *desc)
652 {
653 unsigned int len;
654
655 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
656
657 pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
658
659 memset(desc, 0x00, sizeof(*desc));
660 }
661
/*
 * Inspect a Tx completion status word. Returns 0 for a clean transmit;
 * otherwise bumps every matching error counter plus tx_errors and
 * returns -1.
 */
static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define TxErrMask	(WND | TABRT | FIFO | LINK)

	/*
	 * Errors are the rare case. The original `!unlikely(...)` applied
	 * the hint to the wrong expression; `likely(!(...))` marks the
	 * error-free path as expected (same truth value either way).
	 */
	if (likely(!(status & TxErrMask)))
		return 0;

	if (status & WND)
		stats->tx_window_errors++;
	if (status & TABRT)
		stats->tx_aborted_errors++;
	if (status & FIFO)
		stats->tx_fifo_errors++;
	if (status & LINK)
		stats->tx_carrier_errors++;

	stats->tx_errors++;

	return -1;
}
682
/*
 * Tx completion: reclaim descriptors the asic has released, account
 * stats, free the skbs and restart the queue if it had filled up.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	/* Pair with the producer's write barrier before reading cur_tx. */
	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		u32 status = le32_to_cpu(txd->status);
		struct sk_buff *skb;

		/* Still owned by the asic: stop at the first busy slot. */
		if (status & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
			stats->collisions += ((status & ColCountMask) - 1);
		}

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}
727
728 /*
729 * The interrupt handler does all of the Rx thread work and cleans up after
730 * the Tx thread.
731 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff: surprise removal / dead chip; 0: not our interrupt. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	/* Late interrupt while going down: just silence the hardware. */
	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Ack exactly the sources we observed. */
	SIS_W32(IntrStatus, status);

//	net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* Link handling needs to sleep: defer to phy_task. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}
769
770 #ifdef CONFIG_NET_POLL_CONTROLLER
sis190_netpoll(struct net_device * dev)771 static void sis190_netpoll(struct net_device *dev)
772 {
773 struct sis190_private *tp = netdev_priv(dev);
774 struct pci_dev *pdev = tp->pci_dev;
775
776 disable_irq(pdev->irq);
777 sis190_interrupt(pdev->irq, dev);
778 enable_irq(pdev->irq);
779 }
780 #endif
781
sis190_free_rx_skb(struct sis190_private * tp,struct sk_buff ** sk_buff,struct RxDesc * desc)782 static void sis190_free_rx_skb(struct sis190_private *tp,
783 struct sk_buff **sk_buff, struct RxDesc *desc)
784 {
785 struct pci_dev *pdev = tp->pci_dev;
786
787 pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
788 PCI_DMA_FROMDEVICE);
789 dev_kfree_skb(*sk_buff);
790 *sk_buff = NULL;
791 sis190_make_unusable_by_asic(desc);
792 }
793
sis190_rx_clear(struct sis190_private * tp)794 static void sis190_rx_clear(struct sis190_private *tp)
795 {
796 unsigned int i;
797
798 for (i = 0; i < NUM_RX_DESC; i++) {
799 if (!tp->Rx_skbuff[i])
800 continue;
801 sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
802 }
803 }
804
/* Reset all four ring cursors to the empty state. */
static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}
809
sis190_init_ring(struct net_device * dev)810 static int sis190_init_ring(struct net_device *dev)
811 {
812 struct sis190_private *tp = netdev_priv(dev);
813
814 sis190_init_ring_indexes(tp);
815
816 memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
817 memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
818
819 if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
820 goto err_rx_clear;
821
822 sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
823
824 return 0;
825
826 err_rx_clear:
827 sis190_rx_clear(tp);
828 return -ENOMEM;
829 }
830
/*
 * Program the Rx filter: promiscuous, all-multicast, or a 64-bit
 * CRC-based multicast hash, depending on the device flags and the
 * length of the multicast list.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into one of 64 filter bits. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}
872
/* Pulse the soft-reset bit, then bring the asic to a halted state. */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
}
880
/*
 * Bring the hardware up: reset, install the ring base addresses,
 * clear filters/WOL state, program the Rx mode, unmask interrupts and
 * start both DMA engines.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* undocumented register, cleared on init */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}
913
/*
 * Deferred PHY/link handling (scheduled from the irq handler and the
 * PHY timer): wait out PHY reset and autonegotiation, then derive the
 * negotiated speed/duplex and program StationControl. Runs under RTNL.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY still resetting: poll again shortly. */
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		/* Autoneg not finished: report no carrier and retry later. */
		netif_carrier_off(dev);
		net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
			 dev->name);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		/* Map negotiated abilities to StationControl values;
		   ordered fastest first, last entry is the fallback. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
			  "1000 Mbps Full Duplex" },
			{ LPA_1000XHALF | LPA_SLCT, 0x07000c00,
			  "1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
			  "100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
			  "100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
			  "10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
			  "10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p;
		u16 adv;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
			 dev->name, val, adv);

		/* Keep only abilities both link partners advertise. */
		val &= adv;

		for (p = reg31; p->val; p++) {
			if ((val & p->val) == p->val)
				break;
		}

		/* Preserve chip bits outside the speed/duplex fields. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

out_unlock:
	rtnl_unlock();
}
1002
sis190_phy_timer(unsigned long __opaque)1003 static void sis190_phy_timer(unsigned long __opaque)
1004 {
1005 struct net_device *dev = (struct net_device *)__opaque;
1006 struct sis190_private *tp = netdev_priv(dev);
1007
1008 if (likely(netif_running(dev)))
1009 schedule_work(&tp->phy_task);
1010 }
1011
/* Stop the PHY poll timer, waiting for a running handler to finish. */
static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}
1018
/* Arm the periodic PHY poll timer (first expiry after SIS190_PHY_TIMEOUT). */
static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}
1030
/*
 * Derive the Rx buffer size from the MTU, rounded up to the 8-byte
 * granularity the hardware imposes (RxDesc.size low bits are ignored).
 */
static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
	/* RxDesc->size has a licence to kill the lower bits */
	if (tp->rx_buf_sz & 0x07) {
		/* Round up to the next multiple of 8. */
		tp->rx_buf_sz += 8;
		tp->rx_buf_sz &= RX_BUF_MASK;
	}
}
1043
/*
 * net_device open(): allocate the descriptor rings and Rx buffers,
 * start the PHY timer, grab the IRQ and bring the hardware up.
 * Failures unwind in reverse order through the labels below.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
			    tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
			    tp->tx_dma);
	goto out;
}
1089
sis190_tx_clear(struct sis190_private * tp)1090 static void sis190_tx_clear(struct sis190_private *tp)
1091 {
1092 unsigned int i;
1093
1094 for (i = 0; i < NUM_TX_DESC; i++) {
1095 struct sk_buff *skb = tp->Tx_skbuff[i];
1096
1097 if (!skb)
1098 continue;
1099
1100 sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1101 tp->Tx_skbuff[i] = NULL;
1102 dev_kfree_skb(skb);
1103
1104 tp->dev->stats.tx_dropped++;
1105 }
1106 tp->cur_tx = tp->dirty_tx = 0;
1107 }
1108
/*
 * sis190_down - quiesce the chip before close.
 *
 * Deletes the PHY timer, stops the Tx queue, then loops: bring the
 * asic down under the lock, wait out any in-flight interrupt handler,
 * and re-read IntrMask.  NOTE(review): the retry loop presumably
 * guards against the interrupt handler re-enabling interrupts while
 * the asic is being stopped -- confirm against the handler code.
 * Finally drops all queued Tx and Rx buffers.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		/* Wait for a handler already running on another CPU. */
		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}
1138
/*
 * sis190_close - net_device stop (ifdown) handler.
 *
 * Quiesces the hardware via sis190_down(), releases the interrupt
 * line, then frees both DMA descriptor rings and clears the pointers.
 * Always returns 0.
 */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}
1156
/*
 * sis190_start_xmit - queue one skb on the Tx ring (ndo_start_xmit).
 *
 * Pads short frames to ETH_ZLEN, maps the payload for DMA, fills a
 * descriptor, transfers ownership to the hardware and kicks the
 * transmitter.  Stops the queue when the ring becomes full.
 */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		/* skb_padto() frees the skb itself on failure. */
		if (skb_padto(skb, ETH_ZLEN)) {
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* Descriptor still owned by the hardware: the ring is full. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* All descriptor fields must be visible before ownership flips. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	/* Publish cur_tx before poking the chip / racing the completion path. */
	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	/* Ring just became full: stop the queue, then re-check against a
	 * completion that may have advanced dirty_tx concurrently. */
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}
1219
sis190_free_phy(struct list_head * first_phy)1220 static void sis190_free_phy(struct list_head *first_phy)
1221 {
1222 struct sis190_phy *cur, *next;
1223
1224 list_for_each_entry_safe(cur, next, first_phy, list) {
1225 kfree(cur);
1226 }
1227 }
1228
/**
 * sis190_default_phy - Select default PHY for sis190 mac.
 * @dev: the net device to probe for
 *
 * Select first detected PHY with link as default.
 * If no one is link on, select PHY whose types is HOME as default.
 * If HOME doesn't exist, select LAN.
 *
 * Returns the latched BMSR status of the selected PHY.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			/* Park the non-selected PHY: isolate it, keep
			 * autoneg enabled, and remember HOME/LAN
			 * candidates as fallbacks. */
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			/* NOTE(review): list_entry() on the head itself
			 * yields a pseudo-entry, not a real phy; this arm
			 * is only reached when every phy is UNKNOWN with
			 * no link -- verify before relying on it. */
			phy_default = list_entry(&tp->first_phy,
						 struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		net_probe(tp, KERN_INFO
			  "%s: Using transceiver at address %d as default.\n",
			  pci_name(tp->pci_dev), mii_if->phy_id);
	}

	/* Un-isolate the chosen PHY and return its latched link status. */
	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}
1291
/*
 * sis190_init_phy - identify one MII transceiver and record its type.
 *
 * Reads the PHY identifier registers, matches them against
 * mii_chip_table (whose terminating entry has type == 0), then sets
 * phy->type and accumulates the chip's feature bits into tp->features.
 * Unmatched transceivers are typed UNKNOWN.
 */
static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *info;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	/* Walk the table; the low nibble of PHYSID2 is the revision. */
	for (info = mii_chip_table; info->type; info++) {
		if (info->id[0] == phy->id[0] &&
		    info->id[1] == (phy->id[1] & 0xfff0))
			break;
	}

	if (!info->id[1]) {
		/* Hit the table sentinel: no match. */
		phy->type = UNKNOWN;
	} else {
		if (info->type == MIX)
			phy->type = (mii_status &
				     (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME;
		else
			phy->type = info->type;
		tp->features |= info->feature;
	}

	net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
		  pci_name(tp->pci_dev),
		  (phy->type == UNKNOWN) ? "Unknown PHY" : info->name, phy_id);
}
1325
sis190_mii_probe_88e1111_fixup(struct sis190_private * tp)1326 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1327 {
1328 if (tp->features & F_PHY_88E1111) {
1329 void __iomem *ioaddr = tp->mmio_addr;
1330 int phy_id = tp->mii_if.phy_id;
1331 u16 reg[2][2] = {
1332 { 0x808b, 0x0ce1 },
1333 { 0x808f, 0x0c60 }
1334 }, *p;
1335
1336 p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1337
1338 mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1339 udelay(200);
1340 mdio_write(ioaddr, phy_id, 0x14, p[1]);
1341 udelay(200);
1342 }
1343 }
1344
/**
 * sis190_mii_probe - Probe MII PHY for sis190
 * @dev: the net device to probe for
 *
 * Search for total of 32 possible mii phy addresses.
 * Identify and set current phy if found one,
 * return error if it failed to found.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when no
 * transceiver responds.
 */
static int __devinit sis190_mii_probe(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id;
	int rc = 0;

	INIT_LIST_HEAD(&tp->first_phy);

	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
		struct sis190_phy *phy;
		u16 status;

		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

		// Try next mii if the current one is not accessible.
		if (status == 0xffff || status == 0x0000)
			continue;

		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
		if (!phy) {
			/* Drop any phys discovered so far. */
			sis190_free_phy(&tp->first_phy);
			rc = -ENOMEM;
			goto out;
		}

		sis190_init_phy(dev, tp, phy, phy_id, status);

		/* NOTE(review): arguments are the reverse of the usual
		 * list_add(new, head) idiom, yet each phy still ends up
		 * chained from tp->first_phy -- verify before "fixing". */
		list_add(&tp->first_phy, &phy->list);
	}

	if (list_empty(&tp->first_phy)) {
		net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
			  pci_name(tp->pci_dev));
		rc = -EIO;
		goto out;
	}

	/* Select default PHY for mac */
	sis190_default_phy(dev);

	sis190_mii_probe_88e1111_fixup(tp);

	/* Wire up the generic MII library callbacks. */
	mii_if->dev = dev;
	mii_if->mdio_read = __mdio_read;
	mii_if->mdio_write = __mdio_write;
	mii_if->phy_id_mask = PHY_ID_ANY;
	mii_if->reg_num_mask = MII_REG_ANY;
out:
	return rc;
}
1405
/* Free the list of PHYs discovered by sis190_mii_probe(). */
static void sis190_mii_remove(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_free_phy(&tp->first_phy);
}
1412
/*
 * sis190_release_board - undo sis190_init_board().
 *
 * Unmaps the MMIO window, releases the PCI regions, disables the
 * device and frees the net_device (reverse of acquisition order).
 */
static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
1423
/*
 * sis190_init_board - allocate and minimally initialize the board.
 *
 * Allocates the net_device, enables the PCI device, validates and
 * claims BAR 0 (MMIO), sets the 32-bit DMA mask, maps the register
 * window and soft-resets the chip with interrupts masked.
 *
 * Returns the net_device on success or an ERR_PTR() errno; the goto
 * ladder unwinds partial initialization on failure.
 */
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	/* BAR 0 must be a memory-mapped region of at least register size. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	/* Quiet the chip before anyone hooks the interrupt line. */
	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}
1506
/*
 * sis190_tx_timeout - watchdog handler for a stuck transmitter.
 *
 * Disables Tx and interrupts, drops every pending Tx buffer under the
 * lock, then restarts the hardware and wakes the queue.
 */
static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);


	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}
1535
/* Latch the RGMII capability flag from bit 7 of the EEPROM/APC byte. */
static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
{
	if (reg & 0x80)
		tp->features |= F_HAS_RGMII;
}
1540
/*
 * sis190_get_mac_addr_from_eeprom - read the MAC address from EEPROM.
 *
 * Validates the EEPROM signature, copies the 6-byte station address
 * into dev->dev_addr (16 bits at a time, little-endian) and records
 * the RGMII capability from the EEPROM info word.
 *
 * Returns 0 on success, -EIO when the EEPROM looks blank or absent.
 */
static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
						     struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 sig;
	int i;

	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
		  pci_name(pdev));

	/* Check to see if there is a sane EEPROM */
	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);

	if ((sig == 0xffff) || (sig == 0x0000)) {
		net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
			  pci_name(pdev), sig);
		return -EIO;
	}

	/* Get MAC address from EEPROM */
	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
		u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);

		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
	}

	sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));

	return 0;
}
1572
1573 /**
1574 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model
1575 * @pdev: PCI device
1576 * @dev: network device to get address for
1577 *
1578 * SiS96x model, use APC CMOS RAM to store MAC address.
1579 * APC CMOS RAM is accessed through ISA bridge.
1580 * MAC address is read into @net_dev->dev_addr.
1581 */
sis190_get_mac_addr_from_apc(struct pci_dev * pdev,struct net_device * dev)1582 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1583 struct net_device *dev)
1584 {
1585 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
1586 struct sis190_private *tp = netdev_priv(dev);
1587 struct pci_dev *isa_bridge;
1588 u8 reg, tmp8;
1589 unsigned int i;
1590
1591 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
1592 pci_name(pdev));
1593
1594 for (i = 0; i < ARRAY_SIZE(ids); i++) {
1595 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1596 if (isa_bridge)
1597 break;
1598 }
1599
1600 if (!isa_bridge) {
1601 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
1602 pci_name(pdev));
1603 return -EIO;
1604 }
1605
1606 /* Enable port 78h & 79h to access APC Registers. */
1607 pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1608 reg = (tmp8 & ~0x02);
1609 pci_write_config_byte(isa_bridge, 0x48, reg);
1610 udelay(50);
1611 pci_read_config_byte(isa_bridge, 0x48, ®);
1612
1613 for (i = 0; i < MAC_ADDR_LEN; i++) {
1614 outb(0x9 + i, 0x78);
1615 dev->dev_addr[i] = inb(0x79);
1616 }
1617
1618 outb(0x12, 0x78);
1619 reg = inb(0x79);
1620
1621 sis190_set_rgmii(tp, reg);
1622
1623 /* Restore the value to ISA Bridge */
1624 pci_write_config_byte(isa_bridge, 0x48, tmp8);
1625 pci_dev_put(isa_bridge);
1626
1627 return 0;
1628 }
1629
/**
 * sis190_init_rxfilter - Initialize the Rx filter
 * @dev: network device to initialize
 *
 * Set receive filter address to our MAC address
 * and enable packet filtering.
 */
static inline void sis190_init_rxfilter(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 ctl;
	int i;

	ctl = SIS_R16(RxMacControl);
	/*
	 * Disable packet filtering before setting filter.
	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
	 */
	SIS_W16(RxMacControl, ctl & ~0x0f00);

	/* Program the station address one byte at a time. */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);

	/* Restore the original filter control bits and flush posted writes. */
	SIS_W16(RxMacControl, ctl);
	SIS_PCI_COMMIT();
}
1658
sis190_get_mac_addr(struct pci_dev * pdev,struct net_device * dev)1659 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1660 struct net_device *dev)
1661 {
1662 int rc;
1663
1664 rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1665 if (rc < 0) {
1666 u8 reg;
1667
1668 pci_read_config_byte(pdev, 0x73, ®);
1669
1670 if (reg & 0x00000001)
1671 rc = sis190_get_mac_addr_from_apc(pdev, dev);
1672 }
1673 return rc;
1674 }
1675
/*
 * sis190_set_speed_auto - advertise all speeds and restart autoneg.
 *
 * Advertises 10/100 half+full (keeping the selector bits), enables
 * 1000Full, then resets the PHY with autonegotiation enabled.
 */
static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, phy_id, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}
1700
sis190_get_settings(struct net_device * dev,struct ethtool_cmd * cmd)1701 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1702 {
1703 struct sis190_private *tp = netdev_priv(dev);
1704
1705 return mii_ethtool_gset(&tp->mii_if, cmd);
1706 }
1707
sis190_set_settings(struct net_device * dev,struct ethtool_cmd * cmd)1708 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1709 {
1710 struct sis190_private *tp = netdev_priv(dev);
1711
1712 return mii_ethtool_sset(&tp->mii_if, cmd);
1713 }
1714
sis190_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)1715 static void sis190_get_drvinfo(struct net_device *dev,
1716 struct ethtool_drvinfo *info)
1717 {
1718 struct sis190_private *tp = netdev_priv(dev);
1719
1720 strcpy(info->driver, DRV_NAME);
1721 strcpy(info->version, DRV_VERSION);
1722 strcpy(info->bus_info, pci_name(tp->pci_dev));
1723 }
1724
/* ethtool get_regs_len: the full MMIO register window is dumpable. */
static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}
1729
/*
 * sis190_get_regs - ethtool register dump.
 *
 * Copies up to SIS190_REGS_SIZE bytes of the MMIO window into @p,
 * holding the device lock so the snapshot is taken without racing
 * other register accessors.
 */
static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}
1743
sis190_nway_reset(struct net_device * dev)1744 static int sis190_nway_reset(struct net_device *dev)
1745 {
1746 struct sis190_private *tp = netdev_priv(dev);
1747
1748 return mii_nway_restart(&tp->mii_if);
1749 }
1750
sis190_get_msglevel(struct net_device * dev)1751 static u32 sis190_get_msglevel(struct net_device *dev)
1752 {
1753 struct sis190_private *tp = netdev_priv(dev);
1754
1755 return tp->msg_enable;
1756 }
1757
/* ethtool set_msglevel: store the netif message-enable bitmap. */
static void sis190_set_msglevel(struct net_device *dev, u32 value)
{
	struct sis190_private *priv = netdev_priv(dev);

	priv->msg_enable = value;
}
1764
/* ethtool operations table; see the sis190_* handlers above. */
static const struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};
1776
sis190_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)1777 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1778 {
1779 struct sis190_private *tp = netdev_priv(dev);
1780
1781 return !netif_running(dev) ? -EINVAL :
1782 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1783 }
1784
/* net_device operations table; handlers are defined in this file. */
static const struct net_device_ops sis190_netdev_ops = {
	.ndo_open		= sis190_open,
	.ndo_stop		= sis190_close,
	.ndo_do_ioctl		= sis190_ioctl,
	.ndo_start_xmit		= sis190_start_xmit,
	.ndo_tx_timeout		= sis190_tx_timeout,
	.ndo_set_multicast_list = sis190_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sis190_netpoll,
#endif
};
1799
/*
 * sis190_init_one - PCI probe entry point.
 *
 * Sets up the board (BARs, MMIO, reset), reads the MAC address,
 * programs the Rx filter, probes the MII bus, registers the
 * net_device and kicks off autonegotiation.  Unwinds via the goto
 * ladder on failure.
 */
static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	/* Print the banner once, on the first probed device. */
	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	rc = sis190_get_mac_addr(pdev, dev);
	if (rc < 0)
		goto err_release_board;

	sis190_init_rxfilter(dev);

	INIT_WORK(&tp->phy_task, sis190_phy_task);

	dev->netdev_ops = &sis190_netdev_ops;

	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	/* No I/O ports: poison base_addr with a recognizable value. */
	dev->base_addr = (unsigned long) 0xdead;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;

	spin_lock_init(&tp->lock);

	rc = sis190_mii_probe(dev);
	if (rc < 0)
		goto err_release_board;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_remove_mii;

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
		  pci_name(pdev), sis_chip_info[ent->driver_data].name,
		  ioaddr, dev->irq, dev->dev_addr);

	net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
		  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");

	/* No link yet: report carrier off until the PHY negotiates. */
	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;

err_remove_mii:
	sis190_mii_remove(dev);
err_release_board:
	sis190_release_board(pdev);
	goto out;
}
1869
/*
 * sis190_remove_one - PCI remove entry point.
 *
 * Frees the PHY list, flushes the pending phy_task work, then
 * unregisters the net_device and releases the board resources.
 */
static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	sis190_mii_remove(dev);
	flush_scheduled_work();
	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}
1880
/* PCI driver glue: matches sis190_pci_tbl entries to probe/remove. */
static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};
1887
/* Module load: register the PCI driver (probe runs per device). */
static int __init sis190_init_module(void)
{
	return pci_register_driver(&sis190_pci_driver);
}

/* Module unload: unregister the PCI driver (remove runs per device). */
static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}

module_init(sis190_init_module);
module_exit(sis190_cleanup_module);
1900