/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Kevin Brace <kevinbrace@bracecomputerlab.com>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */
static int debug;
#define RHINE_MSG_DEFAULT \
	(0x0000)

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * With BQL support, we can increase TX ring safely.
 * There are no ill effects from too-large receive rings.
 */
#define TX_RING_SIZE	64
#define TX_QUEUE_LEN	(TX_RING_SIZE - 6)	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64
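
/*
 * Illustrative note: because the ring sizes are powers of two, index
 * arithmetic such as the line used in rhine_start_tx() below,
 *
 *	entry = rp->cur_tx % TX_RING_SIZE;
 *
 * compiles to a simple mask, i.e. rp->cur_tx & (TX_RING_SIZE - 1).
 */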

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/dmi.h>

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
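
/*
 * Example module load (values are hypothetical, for illustration only):
 *
 *	modprobe via-rhine debug=0x2000 rx_copybreak=200 avoid_D3=1
 *
 * debug seeds netif_msg_init() in rhine_init_one_common(); the other two
 * parameters are described above.
 */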

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
Theory of Operation

I. Board Compatibility

This driver is designed for the VIA 86c100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Rx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not reliable information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
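
/*
 * Illustrative sketch (not part of the driver): how a free-running index
 * walks one of the rings described above. rhine_rx() polls the descriptor
 * at rp->cur_rx this way; the chip clears DescOwn when it writes a
 * descriptor back:
 *
 *	unsigned int entry = rp->cur_rx % RX_RING_SIZE;
 *	struct rx_desc *desc = rp->rx_ring + entry;
 *	bool done = !(le32_to_cpu(desc->rx_status) & DescOwn);
 *
 * When 'done', the frame is processed, the buffer refilled, and
 * rp->cur_rx incremented.
 */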


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VT8251		= 0x7C,	/* Integrated MAC */
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
	rqIntPHY	= 0x0200,	/* Integrated PHY */
	rqMgmt		= 0x0400,	/* Management adapter */
	rqNeedEnMMIO	= 0x0800,	/* Whether the core needs to be
					 * switched from PIO mode to MMIO
					 * (only applies to PCI)
					 */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
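
/*
 * A read from any chip register forces the PCI bridge to flush writes it
 * may still have posted; StationAddr is used because reading it has no
 * side effects.
 */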

static const struct pci_device_id rhine_pci_tbl[] = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);

/* OpenFirmware identifiers for platform-bus devices
 * The .data field is currently only used to store quirks
 */
static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
static const struct of_device_id rhine_of_tbl[] = {
	{ .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(of, rhine_of_tbl);

/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};
/* Registers we check to verify that MMIO and PIO reads match. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone	= 0x0001,
	IntrTxDone	= 0x0002,
	IntrRxErr	= 0x0004,
	IntrTxError	= 0x0008,
	IntrRxEmpty	= 0x0020,
	IntrPCIErr	= 0x0040,
	IntrStatsMax	= 0x0080,
	IntrRxEarly	= 0x0100,
	IntrTxUnderrun	= 0x0210,
	IntrRxOverflow	= 0x0400,
	IntrRxDropped	= 0x0800,
	IntrRxNoBuf	= 0x1000,
	IntrTxAborted	= 0x2000,
	IntrLinkChange	= 0x4000,
	IntrRxWakeUp	= 0x8000,
	IntrTxDescRace		= 0x080000,	/* mapped from IntrStatus2 */
	IntrNormalSummary	= IntrRxDone | IntrTxDone,
	IntrTxErrSummary	= IntrTxDescRace | IntrTxAborted | IntrTxError |
				  IntrTxUnderrun,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_stats {
	u64		packets;
	u64		bytes;
	struct u64_stats_sync syncp;
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	int irq;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct mutex task_lock;
	bool task_enable;
	struct work_struct slow_event_task;
	struct work_struct reset_task;

	u32 msg_enable;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	unsigned int cur_rx;
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	struct rhine_stats rx_stats;
	struct rhine_stats tx_stats;
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
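
/*
 * Usage example (mirrors a real call later in this file): update the Tx
 * threshold field of TxConfig while leaving the other bits untouched:
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *
 * i.e. read TxConfig, clear the bits in the mask 0x80, OR in the new
 * value, and write the result back.
 */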


static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
static void rhine_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static int rhine_vlan_rx_add_vid(struct net_device *dev,
				 __be16 proto, u16 vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev,
				  __be16 proto, u16 vid);
static void rhine_restart_tx(struct net_device *dev);

static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
{
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 1024; i++) {
		bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

		if (low ^ has_mask_bits)
			break;
		udelay(10);
	}
	if (i > 64) {
		netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
			  "count: %04d\n", low ? "low" : "high", reg, mask, i);
	}
}

static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, false);
}

static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
{
	rhine_wait_bit(rp, reg, mask, true);
}

static u32 rhine_get_events(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}
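
/*
 * Example: bit 3 of IntrStatus2 (0x08) lands at 0x08 << 16 = 0x080000 in
 * the combined event word, which is IntrTxDescRace above.
 */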

static void rhine_ack_events(struct rhine_private *rp, u32 mask)
{
	void __iomem *ioaddr = rp->base;

	if (rp->quirks & rqStatusWBRace)
		iowrite8(mask >> 16, ioaddr + IntrStatus2);
	iowrite16(mask, ioaddr + IntrStatus);
}
/*
 * Get power-related registers into a sane state.
 * Notify the user about a past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u8 cmd1;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
	}

	cmd1 = ioread8(ioaddr + ChipCmd1);
	netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
		   "failed" : "succeeded");
}

static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqNeedEnMMIO) {
		if (quirks & rqRhineI) {
			/* More recent docs say that this bit is reserved */
			n = inb(pioaddr + ConfigA) | 0x20;
			outb(n, pioaddr + ConfigA);
		} else {
			n = inb(pioaddr + ConfigD) | 0x80;
			outb(n, pioaddr + ConfigD);
		}
	}
}

static inline int verify_mmio(struct device *hwdev,
			      long pioaddr,
			      void __iomem *ioaddr,
			      u32 quirks)
{
	if (quirks & rqNeedEnMMIO) {
		int i = 0;

		/* Check that selected MMIO registers match the PIO ones */
		while (mmio_verify_registers[i]) {
			int reg = mmio_verify_registers[i++];
			unsigned char a = inb(pioaddr+reg);
			unsigned char b = readb(ioaddr+reg);

			if (a != b) {
				dev_err(hwdev,
					"MMIO do not match PIO [%02x] (%02x != %02x)\n",
					reg, a, b);
				return -EIO;
			}
		}
	}
	return 0;
}

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	outb(0x20, pioaddr + MACRegEEcsr);
	for (i = 0; i < 1024; i++) {
		if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
			break;
	}
	if (i > 512)
		pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);

	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	const int irq = rp->irq;

	disable_irq(irq);
	rhine_interrupt(irq, dev);
	enable_irq(irq);
}
#endif

static void rhine_kick_tx_threshold(struct rhine_private *rp)
{
	if (rp->tx_thresh < 0xe0) {
		void __iomem *ioaddr = rp->base;

		rp->tx_thresh += 0x20;
		BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
	}
}
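
/*
 * The threshold steps in units of 0x20 from its initial value of 0x20
 * (set in init_registers()) up to at most 0xe0; rhine_tx_err() below
 * bumps it one step on every Tx underrun.
 */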

static void rhine_tx_err(struct rhine_private *rp, u32 status)
{
	struct net_device *dev = rp->dev;

	if (status & IntrTxAborted) {
		netif_info(rp, tx_err, dev,
			   "Abort %08x, frame dropped\n", status);
	}

	if (status & IntrTxUnderrun) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Transmitter underrun, "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	if (status & IntrTxDescRace)
		netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");

	if ((status & IntrTxError) &&
	    (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
		rhine_kick_tx_threshold(rp);
		netif_info(rp, tx_err, dev, "Unspecified error. "
			   "Tx threshold now %02x\n", rp->tx_thresh);
	}

	rhine_restart_tx(dev);
}

static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;
	struct net_device_stats *stats = &rp->dev->stats;

	stats->rx_crc_errors    += ioread16(ioaddr + RxCRCErrs);
	stats->rx_missed_errors += ioread16(ioaddr + RxMissed);

	/*
	 * Clears the "tally counters" for CRC errors and missed frames(?).
	 * It has been reported that some chips need a write of 0 to clear
	 * these, for others the counters are set to 1 when written to and
	 * instead cleared when read. So we clear them both ways ...
	 */
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}

#define RHINE_EVENT_NAPI_RX	(IntrRxDone | \
				 IntrRxErr | \
				 IntrRxEmpty | \
				 IntrRxOverflow | \
				 IntrRxDropped | \
				 IntrRxNoBuf | \
				 IntrRxWakeUp)

#define RHINE_EVENT_NAPI_TX_ERR	(IntrTxError | \
				 IntrTxAborted | \
				 IntrTxUnderrun | \
				 IntrTxDescRace)
#define RHINE_EVENT_NAPI_TX	(IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)

#define RHINE_EVENT_NAPI	(RHINE_EVENT_NAPI_RX | \
				 RHINE_EVENT_NAPI_TX | \
				 IntrStatsMax)
#define RHINE_EVENT_SLOW	(IntrPCIErr | IntrLinkChange)
#define RHINE_EVENT		(RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	u16 enable_mask = RHINE_EVENT & 0xffff;
	int work_done = 0;
	u32 status;

	status = rhine_get_events(rp);
	rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

	if (status & RHINE_EVENT_NAPI_RX)
		work_done += rhine_rx(dev, budget);

	if (status & RHINE_EVENT_NAPI_TX) {
		if (status & RHINE_EVENT_NAPI_TX_ERR) {
			/* Avoid scavenging before Tx engine turned off */
			rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
			if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
				netif_warn(rp, tx_err, dev, "Tx still on\n");
		}

		rhine_tx(dev);

		if (status & RHINE_EVENT_NAPI_TX_ERR)
			rhine_tx_err(rp, status);
	}

	if (status & IntrStatsMax) {
		spin_lock(&rp->lock);
		rhine_update_rx_crc_and_missed_errord(rp);
		spin_unlock(&rp->lock);
	}

	if (status & RHINE_EVENT_SLOW) {
		enable_mask &= ~RHINE_EVENT_SLOW;
		schedule_work(&rp->slow_event_task);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		iowrite16(enable_mask, ioaddr + IntrEnable);
	}
	return work_done;
}

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	if (dev_is_pci(dev->dev.parent))
		rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		 = rhine_open,
	.ndo_stop		 = rhine_close,
	.ndo_start_xmit		 = rhine_start_tx,
	.ndo_get_stats64	 = rhine_get_stats64,
	.ndo_set_rx_mode	 = rhine_set_rx_mode,
	.ndo_validate_addr	 = eth_validate_addr,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_do_ioctl		 = netdev_ioctl,
	.ndo_tx_timeout		 = rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = rhine_poll,
#endif
};

static int rhine_init_one_common(struct device *hwdev, u32 quirks,
				 long pioaddr, void __iomem *ioaddr, int irq)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc, phy_id;
	const char *name;

	/* this should always be supported */
	rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		goto err_out;
	}
	SET_NETDEV_DEV(dev, hwdev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->base = ioaddr;
	rp->irq = irq;
	rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);

	phy_id = rp->quirks & rqIntPHY ? 1 : 0;

	u64_stats_init(&rp->tx_stats.syncp);
	u64_stats_init(&rp->rx_stats.syncp);

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		eth_hw_addr_random(dev);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	spin_lock_init(&rp->lock);
	mutex_init(&rp->task_lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);
	INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (rp->quirks & rqMgmt)
		dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX |
				 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_free_netdev;

	if (rp->quirks & rqRhineI)
		name = "Rhine";
	else if (rp->quirks & rqStatusWBRace)
		name = "Rhine II";
	else if (rp->quirks & rqMgmt)
		name = "Rhine III (Management Adapter)";
	else
		name = "Rhine III";

	netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
		    name, ioaddr, dev->dev_addr, rp->irq);

	dev_set_drvdata(hwdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (avoid_D3)
		netif_info(rp, probe, dev, "No D3 power state at shutdown\n");

	return 0;

err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int rhine_init_one_pci(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	struct device *hwdev = &pdev->dev;
	int rc;
	long pioaddr, memaddr;
	void __iomem *ioaddr;
	int io_size = pdev->revision < VTunknown0 ? 128 : 256;

/* This driver was written to use PCI memory space. Some early versions
 * of the Rhine may only work correctly with I/O space accesses.
 * TODO: determine for which revisions this is true and assign the flag
 *	 in code as opposed to this Kconfig option (???)
 */
#ifdef CONFIG_VIA_RHINE_MMIO
	u32 quirks = rqNeedEnMMIO;
#else
	u32 quirks = 0;
#endif

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	if (pdev->revision < VTunknown0) {
		quirks |= rqRhineI;
	} else if (pdev->revision >= VT6102) {
		quirks |= rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			quirks |= rqStatusWBRace;
		} else {
			quirks |= rqIntPHY;
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision >= VT6105M)
				quirks |= rqMgmt;
		}
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(hwdev, "Insufficient PCI resources, aborting\n");
		goto err_out_pci_disable;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_pci_disable;

	ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(hwdev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			dev_name(hwdev), io_size, memaddr);
		goto err_out_free_res;
	}

	enable_mmio(pioaddr, quirks);

	rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
	if (rc)
		goto err_out_unmap;

	rc = rhine_init_one_common(&pdev->dev, quirks,
				   pioaddr, ioaddr, pdev->irq);
	if (!rc)
		return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_pci_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}

static int rhine_init_one_platform(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const u32 *quirks;
	int irq;
	void __iomem *ioaddr;

	match = of_match_device(rhine_of_tbl, &pdev->dev);
	if (!match)
		return -EINVAL;

	ioaddr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ioaddr))
		return PTR_ERR(ioaddr);

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!irq)
		return -EINVAL;

	quirks = match->data;
	if (!quirks)
		return -EINVAL;

	return rhine_init_one_common(&pdev->dev, *quirks,
				     (long)ioaddr, ioaddr, irq);
}

static int alloc_ring(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void *ring;
	dma_addr_t ring_dma;

	ring = dma_alloc_coherent(hwdev,
				  RX_RING_SIZE * sizeof(struct rx_desc) +
				  TX_RING_SIZE * sizeof(struct tx_desc),
				  &ring_dma,
				  GFP_ATOMIC);
	if (!ring) {
		netdev_err(dev, "Could not allocate DMA memory\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = dma_alloc_coherent(hwdev,
						 PKT_BUF_SZ * TX_RING_SIZE,
						 &rp->tx_bufs_dma,
						 GFP_ATOMIC);
		if (rp->tx_bufs == NULL) {
			dma_free_coherent(hwdev,
					  RX_RING_SIZE * sizeof(struct rx_desc) +
					  TX_RING_SIZE * sizeof(struct tx_desc),
					  ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;

	dma_free_coherent(hwdev,
			  RX_RING_SIZE * sizeof(struct rx_desc) +
			  TX_RING_SIZE * sizeof(struct tx_desc),
			  rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
				  rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

struct rhine_skb_dma {
	struct sk_buff *skb;
	dma_addr_t dma;
};

static inline int rhine_skb_dma_init(struct net_device *dev,
				     struct rhine_skb_dma *sd)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	const int size = rp->rx_buf_sz;

	sd->skb = netdev_alloc_skb(dev, size);
	if (!sd->skb)
		return -ENOMEM;

	sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
		netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
		dev_kfree_skb_any(sd->skb);
		return -EIO;
	}

	return 0;
}

static void rhine_reset_rbufs(struct rhine_private *rp)
{
	int i;

	rp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++)
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
}

static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
					   struct rhine_skb_dma *sd, int entry)
{
	rp->rx_skbuff_dma[entry] = sd->dma;
	rp->rx_skbuff[entry] = sd->skb;

	rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
	dma_wmb();
}

static void free_rbufs(struct net_device *dev);

static int alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int rc, i;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct rhine_skb_dma sd;

		rc = rhine_skb_dma_init(dev, &sd);
		if (rc < 0) {
			free_rbufs(dev);
			goto out;
		}

		rhine_skb_dma_nic_store(rp, &sd, i);
	}

	rhine_reset_rbufs(rp);
out:
	return rc;
}

static void free_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			dma_unmap_single(hwdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

	netdev_reset_queue(dev);
}

static void free_tbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				dma_unmap_single(hwdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 DMA_TO_DEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!rp->mii_if.force_media)
		mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	struct net_device *dev = mii->dev;
	struct rhine_private *rp = netdev_priv(dev);

	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	}

	rhine_check_media(dev, 0);

	netif_info(rp, link, dev, "force_media %d, carrier %d\n",
		   mii->force_media, netif_carrier_ok(dev));
}

/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	int i;

	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (MCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	for (i = 0; i < 6; i++, addr++)
		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
	udelay(10);
	wmb();

	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}

/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
 *
 * Update VLAN CAM filters to match configuration change.
 */
static void rhine_update_vcam(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 vid;
	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
	unsigned int i = 0;

	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
		vCAMmask |= 1 << i;
		if (++i >= VCAM_SIZE)
			break;
	}
	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}

static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	set_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}

static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_bh(&rp->lock);
	clear_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_bh(&rp->lock);
	return 0;
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	if (rp->quirks & rqMgmt)
		rhine_init_cam_filter(dev);

	napi_enable(&rp->napi);

	iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(struct rhine_private *rp)
{
	void __iomem *ioaddr = rp->base;

	iowrite8(0, ioaddr + MIICmd);

	if (rp->quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		rhine_wait_bit_high(rp, MIIRegAddr, 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);	/* Trigger read */
	rhine_wait_bit_low(rp, MIICmd, 0x40);
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(rp);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(rp);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);	/* Trigger write */
	rhine_wait_bit_low(rp, MIICmd, 0x20);

	rhine_enable_linkmon(rp);
}

static void rhine_task_disable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = false;
	mutex_unlock(&rp->task_lock);

	cancel_work_sync(&rp->slow_event_task);
	cancel_work_sync(&rp->reset_task);
}

static void rhine_task_enable(struct rhine_private *rp)
{
	mutex_lock(&rp->task_lock);
	rp->task_enable = true;
	mutex_unlock(&rp->task_lock);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc)
		goto out;

	netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);

	rc = alloc_ring(dev);
	if (rc < 0)
		goto out_free_irq;

	rc = alloc_rbufs(dev);
	if (rc < 0)
		goto out_free_ring;

	alloc_tbufs(dev);
	enable_mmio(rp->pioaddr, rp->quirks);
	rhine_power_init(dev);
	rhine_chip_reset(dev);
	rhine_task_enable(rp);
	init_registers(dev);

	netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
		  __func__, ioread16(ioaddr + ChipCmd),
		  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

out:
	return rc;

out_free_ring:
	free_ring(dev);
out_free_irq:
	free_irq(rp->irq, dev);
	goto out;
}

static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	napi_disable(&rp->napi);
	netif_tx_disable(dev);
	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	alloc_tbufs(dev);

	rhine_reset_rbufs(rp);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);

	netif_trans_update(dev);	/* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

static void rhine_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}

static inline bool rhine_tx_queue_full(struct rhine_private *rp)
{
	return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
}
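
/*
 * Note: cur_tx and dirty_tx are free-running unsigned counters, so the
 * difference above counts in-flight descriptors correctly even after
 * either counter wraps around.
 */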

static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
	     skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb_any(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			dma_map_single(hwdev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
			dev_kfree_skb_any(skb);
			rp->tx_skbuff_dma[entry] = 0;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(skb_vlan_tag_present(skb))) {
		u16 vid_pcp = skb_vlan_tag_get(skb);

		/* drop CFI/DEI bit, register needs VID and PCP */
		vid_pcp = (vid_pcp & VLAN_VID_MASK) |
			  ((vid_pcp & VLAN_PRIO_MASK) >> 1);
		rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	} else
		rp->tx_ring[entry].tx_status = 0;

	netdev_sent_queue(dev, skb->len);
	/* Make the descriptor writes visible to the NIC before granting
	   it ownership. */
	dma_wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;
	/*
	 * Nobody wants the cur_tx write to rot for ages after the NIC has
	 * seen the transmit request, especially as the transmit completion
	 * handler could miss it.
	 */
	smp_wmb();

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (skb_vlan_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	/* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
	if (rhine_tx_queue_full(rp)) {
		netif_stop_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (!rhine_tx_queue_full(rp))
			netif_wake_queue(dev);
	}

	netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
		  rp->cur_tx - 1, entry);

	return NETDEV_TX_OK;
}
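
/*
 * Barrier summary for the transmit path above: dma_wmb() makes the
 * descriptor contents visible to the NIC before DescOwn is set, wmb()
 * orders the DescOwn write ahead of the TxDemand doorbell, and
 * smp_wmb() publishes the cur_tx update to rhine_tx(), pairing with
 * the smp_rmb() there.
 */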

static void rhine_irq_disable(struct rhine_private *rp)
{
	iowrite16(0x0000, rp->base + IntrEnable);
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	u32 status;
	int handled = 0;

	status = rhine_get_events(rp);

	netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);

	if (status & RHINE_EVENT) {
		handled = 1;

		rhine_irq_disable(rp);
		napi_schedule(&rp->napi);
	}

	if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
		netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
			  status);
	}

	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	unsigned int dirty_tx = rp->dirty_tx;
	unsigned int cur_tx;
	struct sk_buff *skb;

	/*
	 * The race with rhine_start_tx does not matter here as long as the
	 * driver enforces a value of cur_tx that was relevant when the
	 * packet was scheduled to the network chipset.
	 * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
	 */
	smp_rmb();
	cur_tx = rp->cur_tx;
	/* find and cleanup dirty tx descriptors */
	while (dirty_tx != cur_tx) {
		unsigned int entry = dirty_tx % TX_RING_SIZE;
		u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);

		netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
			  entry, txstatus);
		if (txstatus & DescOwn)
			break;
		skb = rp->tx_skbuff[entry];
		if (txstatus & 0x8000) {
			netif_dbg(rp, tx_done, dev,
				  "Transmit error, Tx status %08x\n", txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
				  (txstatus >> 3) & 0xF, txstatus & 0xF);

			u64_stats_update_begin(&rp->tx_stats.syncp);
			rp->tx_stats.bytes += skb->len;
			rp->tx_stats.packets++;
			u64_stats_update_end(&rp->tx_stats.syncp);
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			dma_unmap_single(hwdev,
					 rp->tx_skbuff_dma[entry],
					 skb->len,
					 DMA_TO_DEVICE);
		}
		bytes_compl += skb->len;
		pkts_compl++;
		dev_consume_skb_any(skb);
		rp->tx_skbuff[entry] = NULL;
		dirty_tx++;
	}

	rp->dirty_tx = dirty_tx;
	/* Pity we can't rely on the nearby BQL completion implicit barrier. */
	smp_wmb();

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	/* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
	if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
		smp_rmb();
		/* Rejuvenate. */
		if (rhine_tx_queue_full(rp))
			netif_stop_queue(dev);
	}
}
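
/*
 * BQL bookkeeping: the netdev_completed_queue() call above balances
 * netdev_sent_queue() in rhine_start_tx(), letting the stack size the
 * in-flight byte limit on this queue dynamically.
 */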

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;

	return be16_to_cpup((__be16 *)trailer);
}
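
/*
 * Worked example: for data_size == 65 (payload plus CRC), the offset
 * computes to ((65 + 3) & ~3) + 2 == 70 -- the hardware places the
 * 4-byte 802.1Q header at the next 4-byte boundary (68), and the +2
 * skips the TPID so that trailer points at the TCI.
 */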

static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
				     int data_size)
{
	dma_rmb();
	if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
		u16 vlan_tci;

		vlan_tci = rhine_get_vlan_tci(skb, data_size);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}
}

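/*
 * Receive path note: frames shorter than rx_copybreak are copied into
 * a freshly allocated skb so the original DMA buffer can be handed
 * straight back to the NIC; longer frames are passed up as-is and the
 * ring slot is refilled with a new buffer. This trades a small copy
 * for avoiding an unmap/remap cycle on tiny frames.
 */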
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	struct device *hwdev = dev->dev.parent;
	int entry = rp->cur_rx % RX_RING_SIZE;
	int count;

	netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
		  entry, le32_to_cpu(rp->rx_ring[entry].rx_status));

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_ring + entry;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
			  desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				netif_dbg(rp, rx_err, dev,
					  "%s() Rx error %08x\n", __func__,
					  desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak) {
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
				if (unlikely(!skb))
					goto drop;

				dma_sync_single_for_cpu(hwdev,
							rp->rx_skbuff_dma[entry],
							rp->rx_buf_sz,
							DMA_FROM_DEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);

				dma_sync_single_for_device(hwdev,
							   rp->rx_skbuff_dma[entry],
							   rp->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				struct rhine_skb_dma sd;

				if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
					goto drop;

				skb = rp->rx_skbuff[entry];

				dma_unmap_single(hwdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 DMA_FROM_DEVICE);
				rhine_skb_dma_nic_store(rp, &sd, entry);
			}

			skb_put(skb, pkt_len);

			rhine_rx_vlan_tag(skb, desc, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			netif_receive_skb(skb);

			u64_stats_update_begin(&rp->rx_stats.syncp);
			rp->rx_stats.bytes += pkt_len;
			rp->rx_stats.packets++;
			u64_stats_update_end(&rp->rx_stats.syncp);
		}
give_descriptor_to_nic:
		desc->rx_status = cpu_to_le32(DescOwn);
		entry = (++rp->cur_rx) % RX_RING_SIZE;
	}

	return count;

drop:
	dev->stats.rx_dropped++;
	goto give_descriptor_to_nic;
}

static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = rhine_get_events(rp);

	if ((intr_status & IntrTxErrSummary) == 0) {
		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	} else {
		/* This should never happen */
		netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
			   intr_status);
	}
}

static void rhine_slow_event_task(struct work_struct *work)
{
	struct rhine_private *rp =
		container_of(work, struct rhine_private, slow_event_task);
	struct net_device *dev = rp->dev;
	u32 intr_status;

	mutex_lock(&rp->task_lock);

	if (!rp->task_enable)
		goto out_unlock;

	intr_status = rhine_get_events(rp);
	rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);

	if (intr_status & IntrPCIErr)
		netif_warn(rp, hw, dev, "PCI error\n");

	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

out_unlock:
	mutex_unlock(&rp->task_lock);
}

static void
rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rhine_private *rp = netdev_priv(dev);
	unsigned int start;

	spin_lock_bh(&rp->lock);
	rhine_update_rx_crc_and_missed_errord(rp);
	spin_unlock_bh(&rp->lock);

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
		stats->rx_packets = rp->rx_stats.packets;
		stats->rx_bytes = rp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
		stats->tx_packets = rp->tx_stats.packets;
		stats->tx_bytes = rp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
}
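
/*
 * The fetch/retry loops above read the 64-bit counters under a
 * seqcount: if rhine_rx()/rhine_tx() update the counters concurrently,
 * u64_stats_fetch_retry_irq() notices the writer and the read is
 * retried, so 32-bit hosts never observe a torn 64-bit value.
 */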

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->quirks & rqMgmt) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */

		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->quirks & rqMgmt) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
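
/*
 * Hash filter example (non-management chips): the top six bits of the
 * Ethernet CRC of the address select one of 64 filter bits, e.g.
 * bit_nr == 37 sets bit 5 (37 & 31) of MulticastFilter1 (37 >> 5 == 1).
 * The hash can produce false positives; exact filtering is left to the
 * upper layers.
 */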

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct device *hwdev = dev->dev.parent;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);

	mutex_lock(&rp->task_lock);
	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
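
/*
 * These ops back the usual ethtool(8) invocations, e.g. (eth0 is a
 * hypothetical interface name):
 *
 *   ethtool -i eth0                        # get_drvinfo
 *   ethtool -s eth0 speed 100 duplex full  # set_link_ksettings
 *   ethtool -s eth0 wol g                  # set_wol (magic packet)
 *   ethtool -r eth0                        # nway_reset
 */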

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}


static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}

static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}

static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}

static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	enable_mmio(rp->pioaddr, rp->quirks);
	rhine_power_init(dev);
	free_tbufs(dev);
	alloc_tbufs(dev);
	rhine_reset_rbufs(rp);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */

static struct pci_driver rhine_driver_pci = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one_pci,
	.remove		= rhine_remove_one_pci,
	.shutdown	= rhine_shutdown_pci,
	.driver.pm	= RHINE_PM_OPS,
};

static struct platform_driver rhine_driver_platform = {
	.probe		= rhine_init_one_platform,
	.remove		= rhine_remove_one_platform,
	.driver = {
		.name		= DRV_NAME,
		.of_match_table	= rhine_of_tbl,
		.pm		= RHINE_PM_OPS,
	}
};

static const struct dmi_system_id rhine_dmi_table[] __initconst = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
	int ret_pci, ret_platform;

	/* When built as a module, these messages print at load time,
	   whether or not probe finds any device. */
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3)
		pr_info("avoid_D3 set\n");

	ret_pci = pci_register_driver(&rhine_driver_pci);
	ret_platform = platform_driver_register(&rhine_driver_platform);
	if ((ret_pci < 0) && (ret_platform < 0))
		return ret_pci;

	return 0;
}


static void __exit rhine_cleanup(void)
{
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}


module_init(rhine_init);
module_exit(rhine_cleanup);