1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
2 /*
3 	Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
4 
5 	Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 	Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 	Copyright 2001 Manfred Spraul				    [natsemi.c]
8 	Copyright 1999-2001 by Donald Becker.			    [natsemi.c]
9        	Written 1997-2001 by Donald Becker.			    [8139too.c]
10 	Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
11 
12 	This software may be used and distributed according to the terms of
13 	the GNU General Public License (GPL), incorporated herein by reference.
14 	Drivers based on or derived from this code fall under the GPL and must
15 	retain the authorship, copyright and license notice.  This file is not
16 	a complete program and may only be used when the entire operating
17 	system is licensed under the GPL.
18 
19 	See the file COPYING in this distribution for more information.
20 
21 	Contributors:
22 
23 		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
25 		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
26 
27 	TODO:
28 	* Test Tx checksumming thoroughly
29 
30 	Low priority TODO:
31 	* Complete reset on PciErr
32 	* Consider Rx interrupt mitigation using TimerIntr
33 	* Investigate using skb->priority with h/w VLAN priority
34 	* Investigate using High Priority Tx Queue with skb->priority
35 	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 	* Implement Tx software interrupt mitigation via
38 	  Tx descriptor bit
39 	* The real minimum of CP_MIN_MTU is 4 bytes.  However,
40 	  for this to be supported, one must(?) turn on packet padding.
41 	* Support external MII transceivers (patch available)
42 
43 	NOTES:
44 	* TX checksumming is considered experimental.  It is off by
45 	  default; use ethtool to turn it on (see the example below).
46 
47  */
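/* Example of enabling the experimental Tx checksum offload at runtime with
 * ethtool (a sketch; "eth0" is only a placeholder interface name):
 *
 *	ethtool -K eth0 tx on	# turn on Tx checksum offload
 *	ethtool -k eth0		# list the current offload settings
 */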
48 
49 #define DRV_NAME		"8139cp"
50 #define DRV_VERSION		"1.3"
51 #define DRV_RELDATE		"Mar 22, 2004"
52 
53 
54 #include <linux/module.h>
55 #include <linux/moduleparam.h>
56 #include <linux/kernel.h>
57 #include <linux/compiler.h>
58 #include <linux/netdevice.h>
59 #include <linux/etherdevice.h>
60 #include <linux/init.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/delay.h>
64 #include <linux/ethtool.h>
65 #include <linux/mii.h>
66 #include <linux/if_vlan.h>
67 #include <linux/crc32.h>
68 #include <linux/in.h>
69 #include <linux/ip.h>
70 #include <linux/tcp.h>
71 #include <linux/udp.h>
72 #include <linux/cache.h>
73 #include <asm/io.h>
74 #include <asm/irq.h>
75 #include <asm/uaccess.h>
76 
77 /* VLAN tagging feature enable/disable */
78 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
79 #define CP_VLAN_TAG_USED 1
80 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
81 	do { (tx_desc)->opts2 = cpu_to_le32(vlan_tag_value); } while (0)
82 #else
83 #define CP_VLAN_TAG_USED 0
84 #define CP_VLAN_TX_TAG(tx_desc,vlan_tag_value) \
85 	do { (tx_desc)->opts2 = 0; } while (0)
86 #endif
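/* Note: opts2 in the Tx descriptor carries the 802.1Q tag handed to the NIC.
 * When VLAN support is not configured, CP_VLAN_TX_TAG simply clears opts2,
 * so callers may invoke it unconditionally. */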
87 
88 /* These identify the driver base version and may not be removed. */
89 static char version[] =
90 KERN_INFO DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
91 
92 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
93 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
94 MODULE_VERSION(DRV_VERSION);
95 MODULE_LICENSE("GPL");
96 
97 static int debug = -1;
98 module_param(debug, int, 0);
99 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
100 
101 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
102    The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
103 static int multicast_filter_limit = 32;
104 module_param(multicast_filter_limit, int, 0);
105 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
106 
107 #define PFX			DRV_NAME ": "
108 
109 #define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
110 				 NETIF_MSG_PROBE 	| \
111 				 NETIF_MSG_LINK)
112 #define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
113 #define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
114 #define CP_REGS_SIZE		(0xff + 1)
115 #define CP_REGS_VER		1		/* version 1 */
116 #define CP_RX_RING_SIZE		64
117 #define CP_TX_RING_SIZE		64
118 #define CP_RING_BYTES		\
119 		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
120 		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
121 		 CP_STATS_SIZE)
122 #define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
123 #define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
124 #define TX_BUFFS_AVAIL(CP)					\
125 	(((CP)->tx_tail <= (CP)->tx_head) ?			\
126 	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :	\
127 	  (CP)->tx_tail - (CP)->tx_head - 1)
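/* The ring sizes are powers of two, so NEXT_TX/NEXT_RX wrap with a simple
 * mask.  TX_BUFFS_AVAIL deliberately reports one slot less than the ring
 * holds, which keeps head == tail unambiguous as the "ring empty" state. */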
128 
129 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/
130 #define CP_INTERNAL_PHY		32
131 
132 /* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
133 #define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer.  */
134 #define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
135 #define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
136 #define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
137 
138 /* Time in jiffies before concluding the transmitter is hung. */
139 #define TX_TIMEOUT		(6*HZ)
140 
141 /* hardware minimum and maximum for a single frame's data payload */
142 #define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
143 #define CP_MAX_MTU		4096
144 
145 enum {
146 	/* NIC register offsets */
147 	MAC0		= 0x00,	/* Ethernet hardware address. */
148 	MAR0		= 0x08,	/* Multicast filter. */
149 	StatsAddr	= 0x10,	/* 64-bit start addr of 64-byte DMA stats blk */
150 	TxRingAddr	= 0x20, /* 64-bit start addr of Tx ring */
151 	HiTxRingAddr	= 0x28, /* 64-bit start addr of high priority Tx ring */
152 	Cmd		= 0x37, /* Command register */
153 	IntrMask	= 0x3C, /* Interrupt mask */
154 	IntrStatus	= 0x3E, /* Interrupt status */
155 	TxConfig	= 0x40, /* Tx configuration */
156 	ChipVersion	= 0x43, /* 8-bit chip version, inside TxConfig */
157 	RxConfig	= 0x44, /* Rx configuration */
158 	RxMissed	= 0x4C,	/* 24 bits valid, write clears */
159 	Cfg9346		= 0x50, /* EEPROM select/control; Cfg reg [un]lock */
160 	Config1		= 0x52, /* Config1 */
161 	Config3		= 0x59, /* Config3 */
162 	Config4		= 0x5A, /* Config4 */
163 	MultiIntr	= 0x5C, /* Multiple interrupt select */
164 	BasicModeCtrl	= 0x62,	/* MII BMCR */
165 	BasicModeStatus	= 0x64, /* MII BMSR */
166 	NWayAdvert	= 0x66, /* MII ADVERTISE */
167 	NWayLPAR	= 0x68, /* MII LPA */
168 	NWayExpansion	= 0x6A, /* MII Expansion */
169 	Config5		= 0xD8,	/* Config5 */
170 	TxPoll		= 0xD9,	/* Tell chip to check Tx descriptors for work */
171 	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
172 	CpCmd		= 0xE0, /* C+ Command register (C+ mode only) */
173 	IntrMitigate	= 0xE2,	/* rx/tx interrupt mitigation control */
174 	RxRingAddr	= 0xE4, /* 64-bit start addr of Rx ring */
175 	TxThresh	= 0xEC, /* Early Tx threshold */
176 	OldRxBufAddr	= 0x30, /* DMA address of Rx ring buffer (C mode) */
177 	OldTSD0		= 0x10, /* DMA address of first Tx desc (C mode) */
178 
179 	/* Tx and Rx status descriptors */
180 	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
181 	RingEnd		= (1 << 30), /* End of descriptor ring */
182 	FirstFrag	= (1 << 29), /* First segment of a packet */
183 	LastFrag	= (1 << 28), /* Final segment of a packet */
184 	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
185 	MSSShift	= 16,	     /* MSS value position */
186 	MSSMask		= 0xfff,     /* MSS value: 11 bits */
187 	TxError		= (1 << 23), /* Tx error summary */
188 	RxError		= (1 << 20), /* Rx error summary */
189 	IPCS		= (1 << 18), /* Calculate IP checksum */
190 	UDPCS		= (1 << 17), /* Calculate UDP/IP checksum */
191 	TCPCS		= (1 << 16), /* Calculate TCP/IP checksum */
192 	TxVlanTag	= (1 << 17), /* Add VLAN tag */
193 	RxVlanTagged	= (1 << 16), /* Rx VLAN tag available */
194 	IPFail		= (1 << 15), /* IP checksum failed */
195 	UDPFail		= (1 << 14), /* UDP/IP checksum failed */
196 	TCPFail		= (1 << 13), /* TCP/IP checksum failed */
197 	NormalTxPoll	= (1 << 6),  /* One or more normal Tx packets to send */
198 	PID1		= (1 << 17), /* 2 protocol id bits:  0==non-IP, */
199 	PID0		= (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
200 	RxProtoTCP	= 1,
201 	RxProtoUDP	= 2,
202 	RxProtoIP	= 3,
203 	TxFIFOUnder	= (1 << 25), /* Tx FIFO underrun */
204 	TxOWC		= (1 << 22), /* Tx Out-of-window collision */
205 	TxLinkFail	= (1 << 21), /* Link failed during Tx of packet */
206 	TxMaxCol	= (1 << 20), /* Tx aborted due to excessive collisions */
207 	TxColCntShift	= 16,	     /* Shift, to get 4-bit Tx collision cnt */
208 	TxColCntMask	= 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
209 	RxErrFrame	= (1 << 27), /* Rx frame alignment error */
210 	RxMcast		= (1 << 26), /* Rx multicast packet rcv'd */
211 	RxErrCRC	= (1 << 18), /* Rx CRC error */
212 	RxErrRunt	= (1 << 19), /* Rx error, packet < 64 bytes */
213 	RxErrLong	= (1 << 21), /* Rx error, packet > 4096 bytes */
214 	RxErrFIFO	= (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
215 
216 	/* StatsAddr register */
217 	DumpStats	= (1 << 3),  /* Begin stats dump */
218 
219 	/* RxConfig register */
220 	RxCfgFIFOShift	= 13,	     /* Shift, to get Rx FIFO thresh value */
221 	RxCfgDMAShift	= 8,	     /* Shift, to get Rx Max DMA value */
222 	AcceptErr	= 0x20,	     /* Accept packets with CRC errors */
223 	AcceptRunt	= 0x10,	     /* Accept runt (<64 bytes) packets */
224 	AcceptBroadcast	= 0x08,	     /* Accept broadcast packets */
225 	AcceptMulticast	= 0x04,	     /* Accept multicast packets */
226 	AcceptMyPhys	= 0x02,	     /* Accept pkts with our MAC as dest */
227 	AcceptAllPhys	= 0x01,	     /* Accept all pkts w/ physical dest */
228 
229 	/* IntrMask / IntrStatus registers */
230 	PciErr		= (1 << 15), /* System error on the PCI bus */
231 	TimerIntr	= (1 << 14), /* Asserted when TCTR reaches TimerInt value */
232 	LenChg		= (1 << 13), /* Cable length change */
233 	SWInt		= (1 << 8),  /* Software-requested interrupt */
234 	TxEmpty		= (1 << 7),  /* No Tx descriptors available */
235 	RxFIFOOvr	= (1 << 6),  /* Rx FIFO Overflow */
236 	LinkChg		= (1 << 5),  /* Packet underrun, or link change */
237 	RxEmpty		= (1 << 4),  /* No Rx descriptors available */
238 	TxErr		= (1 << 3),  /* Tx error */
239 	TxOK		= (1 << 2),  /* Tx packet sent */
240 	RxErr		= (1 << 1),  /* Rx error */
241 	RxOK		= (1 << 0),  /* Rx packet received */
242 	IntrResvd	= (1 << 10), /* reserved, according to RealTek engineers,
243 					but hardware likes to raise it */
244 
245 	IntrAll		= PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
246 			  RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
247 			  RxErr | RxOK | IntrResvd,
248 
249 	/* C mode command register */
250 	CmdReset	= (1 << 4),  /* Enable to reset; self-clearing */
251 	RxOn		= (1 << 3),  /* Rx mode enable */
252 	TxOn		= (1 << 2),  /* Tx mode enable */
253 
254 	/* C+ mode command register */
255 	RxVlanOn	= (1 << 6),  /* Rx VLAN de-tagging enable */
256 	RxChkSum	= (1 << 5),  /* Rx checksum offload enable */
257 	PCIDAC		= (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
258 	PCIMulRW	= (1 << 3),  /* Enable PCI read/write multiple */
259 	CpRxOn		= (1 << 1),  /* Rx mode enable */
260 	CpTxOn		= (1 << 0),  /* Tx mode enable */
261 
262 	/* Cfg9346 EEPROM control register */
263 	Cfg9346_Lock	= 0x00,	     /* Lock ConfigX/MII register access */
264 	Cfg9346_Unlock	= 0xC0,	     /* Unlock ConfigX/MII register access */
265 
266 	/* TxConfig register */
267 	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
268 	TxDMAShift	= 8,	     /* DMA burst value (0-7) is shift this many bits */
269 
270 	/* Early Tx Threshold register */
271 	TxThreshMask	= 0x3f,	     /* Mask bits 5-0 */
272 	TxThreshMax	= 2048,	     /* Max early Tx threshold */
273 
274 	/* Config1 register */
275 	DriverLoaded	= (1 << 5),  /* Software marker, driver is loaded */
276 	LWACT           = (1 << 4),  /* LWAKE active mode */
277 	PMEnable	= (1 << 0),  /* Enable various PM features of chip */
278 
279 	/* Config3 register */
280 	PARMEnable	= (1 << 6),  /* Enable auto-loading of PHY parms */
281 	MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
282 	LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */
283 
284 	/* Config4 register */
285 	LWPTN           = (1 << 1),  /* LWAKE Pattern */
286 	LWPME           = (1 << 4),  /* LANWAKE vs PMEB */
287 
288 	/* Config5 register */
289 	BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
290 	MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
291 	UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
292 	LANWake         = (1 << 1),  /* Enable LANWake signal */
293 	PMEStatus	= (1 << 0),  /* PME status can be reset by PCI RST# */
294 
295 	cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
296 	cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
297 	cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
298 };
299 
300 static const unsigned int cp_rx_config =
301 	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
302 	  (RX_DMA_BURST << RxCfgDMAShift);
303 
304 struct cp_desc {
305 	__le32		opts1;
306 	__le32		opts2;
307 	__le64		addr;
308 };
309 
310 struct cp_dma_stats {
311 	__le64			tx_ok;
312 	__le64			rx_ok;
313 	__le64			tx_err;
314 	__le32			rx_err;
315 	__le16			rx_fifo;
316 	__le16			frame_align;
317 	__le32			tx_ok_1col;
318 	__le32			tx_ok_mcol;
319 	__le64			rx_ok_phys;
320 	__le64			rx_ok_bcast;
321 	__le32			rx_ok_mcast;
322 	__le16			tx_abort;
323 	__le16			tx_underrun;
324 } __attribute__((packed));
325 
326 struct cp_extra_stats {
327 	unsigned long		rx_frags;
328 };
329 
330 struct cp_private {
331 	void			__iomem *regs;
332 	struct net_device	*dev;
333 	spinlock_t		lock;
334 	u32			msg_enable;
335 
336 	struct napi_struct	napi;
337 
338 	struct pci_dev		*pdev;
339 	u32			rx_config;
340 	u16			cpcmd;
341 
342 	struct cp_extra_stats	cp_stats;
343 
344 	unsigned		rx_head		____cacheline_aligned;
345 	unsigned		rx_tail;
346 	struct cp_desc		*rx_ring;
347 	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
348 
349 	unsigned		tx_head		____cacheline_aligned;
350 	unsigned		tx_tail;
351 	struct cp_desc		*tx_ring;
352 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
353 
354 	unsigned		rx_buf_sz;
355 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
356 
357 #if CP_VLAN_TAG_USED
358 	struct vlan_group	*vlgrp;
359 #endif
360 	dma_addr_t		ring_dma;
361 
362 	struct mii_if_info	mii_if;
363 };
364 
365 #define cpr8(reg)	readb(cp->regs + (reg))
366 #define cpr16(reg)	readw(cp->regs + (reg))
367 #define cpr32(reg)	readl(cp->regs + (reg))
368 #define cpw8(reg,val)	writeb((val), cp->regs + (reg))
369 #define cpw16(reg,val)	writew((val), cp->regs + (reg))
370 #define cpw32(reg,val)	writel((val), cp->regs + (reg))
371 #define cpw8_f(reg,val) do {			\
372 	writeb((val), cp->regs + (reg));	\
373 	readb(cp->regs + (reg));		\
374 	} while (0)
375 #define cpw16_f(reg,val) do {			\
376 	writew((val), cp->regs + (reg));	\
377 	readw(cp->regs + (reg));		\
378 	} while (0)
379 #define cpw32_f(reg,val) do {			\
380 	writel((val), cp->regs + (reg));	\
381 	readl(cp->regs + (reg));		\
382 	} while (0)
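/* The *_f ("flush") variants read the register back after writing it; the
 * read-back forces the posted MMIO write out to the chip before the CPU
 * continues. */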
383 
384 
385 static void __cp_set_rx_mode (struct net_device *dev);
386 static void cp_tx (struct cp_private *cp);
387 static void cp_clean_rings (struct cp_private *cp);
388 #ifdef CONFIG_NET_POLL_CONTROLLER
389 static void cp_poll_controller(struct net_device *dev);
390 #endif
391 static int cp_get_eeprom_len(struct net_device *dev);
392 static int cp_get_eeprom(struct net_device *dev,
393 			 struct ethtool_eeprom *eeprom, u8 *data);
394 static int cp_set_eeprom(struct net_device *dev,
395 			 struct ethtool_eeprom *eeprom, u8 *data);
396 
397 static struct pci_device_id cp_pci_tbl[] = {
398 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	PCI_DEVICE_ID_REALTEK_8139), },
399 	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH,	PCI_DEVICE_ID_TTTECH_MC322), },
400 	{ },
401 };
402 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
403 
404 static struct {
405 	const char str[ETH_GSTRING_LEN];
406 } ethtool_stats_keys[] = {
407 	{ "tx_ok" },
408 	{ "rx_ok" },
409 	{ "tx_err" },
410 	{ "rx_err" },
411 	{ "rx_fifo" },
412 	{ "frame_align" },
413 	{ "tx_ok_1col" },
414 	{ "tx_ok_mcol" },
415 	{ "rx_ok_phys" },
416 	{ "rx_ok_bcast" },
417 	{ "rx_ok_mcast" },
418 	{ "tx_abort" },
419 	{ "tx_underrun" },
420 	{ "rx_frags" },
421 };
422 
423 
424 #if CP_VLAN_TAG_USED
425 static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
426 {
427 	struct cp_private *cp = netdev_priv(dev);
428 	unsigned long flags;
429 
430 	spin_lock_irqsave(&cp->lock, flags);
431 	cp->vlgrp = grp;
432 	if (grp)
433 		cp->cpcmd |= RxVlanOn;
434 	else
435 		cp->cpcmd &= ~RxVlanOn;
436 
437 	cpw16(CpCmd, cp->cpcmd);
438 	spin_unlock_irqrestore(&cp->lock, flags);
439 }
440 #endif /* CP_VLAN_TAG_USED */
441 
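/* Size the Rx buffers for the current MTU: for jumbo MTUs the buffer covers
 * the payload plus the Ethernet header and 8 extra bytes, i.e. the 4-byte FCS
 * and an optional 4-byte VLAN tag. */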
442 static inline void cp_set_rxbufsize (struct cp_private *cp)
443 {
444 	unsigned int mtu = cp->dev->mtu;
445 
446 	if (mtu > ETH_DATA_LEN)
447 		/* MTU + ethernet header + FCS + optional VLAN tag */
448 		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
449 	else
450 		cp->rx_buf_sz = PKT_BUF_SZ;
451 }
452 
453 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
454 			      struct cp_desc *desc)
455 {
456 	skb->protocol = eth_type_trans (skb, cp->dev);
457 
458 	cp->dev->stats.rx_packets++;
459 	cp->dev->stats.rx_bytes += skb->len;
460 
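	/* opts2 holds the VLAN tag stripped by the NIC; the low 16 bits appear
	 * to be in network byte order, hence the swab16() before the tag is
	 * handed to the VLAN layer. */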
461 #if CP_VLAN_TAG_USED
462 	if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
463 		vlan_hwaccel_receive_skb(skb, cp->vlgrp,
464 					 swab16(le32_to_cpu(desc->opts2) & 0xffff));
465 	} else
466 #endif
467 		netif_receive_skb(skb);
468 }
469 
470 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
471 			    u32 status, u32 len)
472 {
473 	if (netif_msg_rx_err (cp))
474 		printk (KERN_DEBUG
475 			"%s: rx err, slot %d status 0x%x len %d\n",
476 			cp->dev->name, rx_tail, status, len);
477 	cp->dev->stats.rx_errors++;
478 	if (status & RxErrFrame)
479 		cp->dev->stats.rx_frame_errors++;
480 	if (status & RxErrCRC)
481 		cp->dev->stats.rx_crc_errors++;
482 	if ((status & RxErrRunt) || (status & RxErrLong))
483 		cp->dev->stats.rx_length_errors++;
484 	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
485 		cp->dev->stats.rx_length_errors++;
486 	if (status & RxErrFIFO)
487 		cp->dev->stats.rx_fifo_errors++;
488 }
489 
490 static inline unsigned int cp_rx_csum_ok (u32 status)
491 {
492 	unsigned int protocol = (status >> 16) & 0x3;
493 
494 	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
495 		return 1;
496 	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
497 		return 1;
498 	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
499 		return 1;
500 	return 0;
501 }
502 
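/* NAPI poll handler: Rx interrupt causes are acknowledged up front, completed
 * descriptors are processed up to the budget, and every consumed buffer is
 * replaced before its descriptor is handed back to the NIC with DescOwn set.
 * If less than the full budget is used, Rx interrupts are re-enabled. */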
503 static int cp_rx_poll(struct napi_struct *napi, int budget)
504 {
505 	struct cp_private *cp = container_of(napi, struct cp_private, napi);
506 	struct net_device *dev = cp->dev;
507 	unsigned int rx_tail = cp->rx_tail;
508 	int rx;
509 
510 rx_status_loop:
511 	rx = 0;
512 	cpw16(IntrStatus, cp_rx_intr_mask);
513 
514 	while (1) {
515 		u32 status, len;
516 		dma_addr_t mapping;
517 		struct sk_buff *skb, *new_skb;
518 		struct cp_desc *desc;
519 		unsigned buflen;
520 
521 		skb = cp->rx_skb[rx_tail];
522 		BUG_ON(!skb);
523 
524 		desc = &cp->rx_ring[rx_tail];
525 		status = le32_to_cpu(desc->opts1);
526 		if (status & DescOwn)
527 			break;
528 
529 		len = (status & 0x1fff) - 4;
530 		mapping = le64_to_cpu(desc->addr);
531 
532 		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
533 			/* we don't support incoming fragmented frames.
534 			 * instead, we attempt to ensure that the
535 			 * pre-allocated RX skbs are properly sized such
536 			 * that RX fragments are never encountered
537 			 */
538 			cp_rx_err_acct(cp, rx_tail, status, len);
539 			dev->stats.rx_dropped++;
540 			cp->cp_stats.rx_frags++;
541 			goto rx_next;
542 		}
543 
544 		if (status & (RxError | RxErrFIFO)) {
545 			cp_rx_err_acct(cp, rx_tail, status, len);
546 			goto rx_next;
547 		}
548 
549 		if (netif_msg_rx_status(cp))
550 			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
551 			       dev->name, rx_tail, status, len);
552 
553 		buflen = cp->rx_buf_sz + NET_IP_ALIGN;
554 		new_skb = netdev_alloc_skb(dev, buflen);
555 		if (!new_skb) {
556 			dev->stats.rx_dropped++;
557 			goto rx_next;
558 		}
559 
560 		skb_reserve(new_skb, NET_IP_ALIGN);
561 
562 		dma_unmap_single(&cp->pdev->dev, mapping,
563 				 buflen, PCI_DMA_FROMDEVICE);
564 
565 		/* Handle checksum offloading for incoming packets. */
566 		if (cp_rx_csum_ok(status))
567 			skb->ip_summed = CHECKSUM_UNNECESSARY;
568 		else
569 			skb->ip_summed = CHECKSUM_NONE;
570 
571 		skb_put(skb, len);
572 
573 		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
574 					 PCI_DMA_FROMDEVICE);
575 		cp->rx_skb[rx_tail] = new_skb;
576 
577 		cp_rx_skb(cp, skb, desc);
578 		rx++;
579 
580 rx_next:
581 		cp->rx_ring[rx_tail].opts2 = 0;
582 		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
583 		if (rx_tail == (CP_RX_RING_SIZE - 1))
584 			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
585 						  cp->rx_buf_sz);
586 		else
587 			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
588 		rx_tail = NEXT_RX(rx_tail);
589 
590 		if (rx >= budget)
591 			break;
592 	}
593 
594 	cp->rx_tail = rx_tail;
595 
596 	/* if we did not reach work limit, then we're done with
597 	 * this round of polling
598 	 */
599 	if (rx < budget) {
600 		unsigned long flags;
601 
602 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
603 			goto rx_status_loop;
604 
605 		spin_lock_irqsave(&cp->lock, flags);
606 		cpw16_f(IntrMask, cp_intr_mask);
607 		__netif_rx_complete(napi);
608 		spin_unlock_irqrestore(&cp->lock, flags);
609 	}
610 
611 	return rx;
612 }
613 
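/* Interrupt handler: non-Rx causes are acknowledged immediately, while Rx
 * work is deferred to NAPI -- the Rx interrupt sources are masked and the
 * poll routine above is scheduled to do the actual receive processing. */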
614 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
615 {
616 	struct net_device *dev = dev_instance;
617 	struct cp_private *cp;
618 	u16 status;
619 
620 	if (unlikely(dev == NULL))
621 		return IRQ_NONE;
622 	cp = netdev_priv(dev);
623 
624 	status = cpr16(IntrStatus);
625 	if (!status || (status == 0xFFFF))
626 		return IRQ_NONE;
627 
628 	if (netif_msg_intr(cp))
629 		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
630 		        dev->name, status, cpr8(Cmd), cpr16(CpCmd));
631 
632 	cpw16(IntrStatus, status & ~cp_rx_intr_mask);
633 
634 	spin_lock(&cp->lock);
635 
636 	/* close possible races with dev_close */
637 	if (unlikely(!netif_running(dev))) {
638 		cpw16(IntrMask, 0);
639 		spin_unlock(&cp->lock);
640 		return IRQ_HANDLED;
641 	}
642 
643 	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
644 		if (netif_rx_schedule_prep(&cp->napi)) {
645 			cpw16_f(IntrMask, cp_norx_intr_mask);
646 			__netif_rx_schedule(&cp->napi);
647 		}
648 
649 	if (status & (TxOK | TxErr | TxEmpty | SWInt))
650 		cp_tx(cp);
651 	if (status & LinkChg)
652 		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
653 
654 	spin_unlock(&cp->lock);
655 
656 	if (status & PciErr) {
657 		u16 pci_status;
658 
659 		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
660 		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
661 		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
662 		       dev->name, status, pci_status);
663 
664 		/* TODO: reset hardware */
665 	}
666 
667 	return IRQ_HANDLED;
668 }
669 
670 #ifdef CONFIG_NET_POLL_CONTROLLER
671 /*
672  * Polling receive - used by netconsole and other diagnostic tools
673  * to allow network i/o with interrupts disabled.
674  */
675 static void cp_poll_controller(struct net_device *dev)
676 {
677 	disable_irq(dev->irq);
678 	cp_interrupt(dev->irq, dev);
679 	enable_irq(dev->irq);
680 }
681 #endif
682 
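/* Reap completed Tx descriptors: walk from tx_tail toward tx_head, stopping
 * at the first descriptor the NIC still owns (DescOwn).  Each buffer is
 * unmapped, the skb is freed on its final (LastFrag) segment, and the queue
 * is woken once enough slots are free again. */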
683 static void cp_tx (struct cp_private *cp)
684 {
685 	unsigned tx_head = cp->tx_head;
686 	unsigned tx_tail = cp->tx_tail;
687 
688 	while (tx_tail != tx_head) {
689 		struct cp_desc *txd = cp->tx_ring + tx_tail;
690 		struct sk_buff *skb;
691 		u32 status;
692 
693 		rmb();
694 		status = le32_to_cpu(txd->opts1);
695 		if (status & DescOwn)
696 			break;
697 
698 		skb = cp->tx_skb[tx_tail];
699 		BUG_ON(!skb);
700 
701 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
702 				 le32_to_cpu(txd->opts1) & 0xffff,
703 				 PCI_DMA_TODEVICE);
704 
705 		if (status & LastFrag) {
706 			if (status & (TxError | TxFIFOUnder)) {
707 				if (netif_msg_tx_err(cp))
708 					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
709 					       cp->dev->name, status);
710 				cp->dev->stats.tx_errors++;
711 				if (status & TxOWC)
712 					cp->dev->stats.tx_window_errors++;
713 				if (status & TxMaxCol)
714 					cp->dev->stats.tx_aborted_errors++;
715 				if (status & TxLinkFail)
716 					cp->dev->stats.tx_carrier_errors++;
717 				if (status & TxFIFOUnder)
718 					cp->dev->stats.tx_fifo_errors++;
719 			} else {
720 				cp->dev->stats.collisions +=
721 					((status >> TxColCntShift) & TxColCntMask);
722 				cp->dev->stats.tx_packets++;
723 				cp->dev->stats.tx_bytes += skb->len;
724 				if (netif_msg_tx_done(cp))
725 					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
726 			}
727 			dev_kfree_skb_irq(skb);
728 		}
729 
730 		cp->tx_skb[tx_tail] = NULL;
731 
732 		tx_tail = NEXT_TX(tx_tail);
733 	}
734 
735 	cp->tx_tail = tx_tail;
736 
737 	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
738 		netif_wake_queue(cp->dev);
739 }
740 
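/* Queue a frame for transmission.  Each descriptor's DMA address is written
 * before its opts1 word, with wmb() in between, so the NIC never sees DescOwn
 * on a half-initialized descriptor; for fragmented skbs the first descriptor
 * is handed over last for the same reason. */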
741 static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
742 {
743 	struct cp_private *cp = netdev_priv(dev);
744 	unsigned entry;
745 	u32 eor, flags;
746 	unsigned long intr_flags;
747 #if CP_VLAN_TAG_USED
748 	u32 vlan_tag = 0;
749 #endif
750 	int mss = 0;
751 
752 	spin_lock_irqsave(&cp->lock, intr_flags);
753 
754 	/* This is a hard error, log it. */
755 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
756 		netif_stop_queue(dev);
757 		spin_unlock_irqrestore(&cp->lock, intr_flags);
758 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
759 		       dev->name);
760 		return 1;
761 	}
762 
763 #if CP_VLAN_TAG_USED
764 	if (cp->vlgrp && vlan_tx_tag_present(skb))
765 		vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
766 #endif
767 
768 	entry = cp->tx_head;
769 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
770 	if (dev->features & NETIF_F_TSO)
771 		mss = skb_shinfo(skb)->gso_size;
772 
773 	if (skb_shinfo(skb)->nr_frags == 0) {
774 		struct cp_desc *txd = &cp->tx_ring[entry];
775 		u32 len;
776 		dma_addr_t mapping;
777 
778 		len = skb->len;
779 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
780 		CP_VLAN_TX_TAG(txd, vlan_tag);
781 		txd->addr = cpu_to_le64(mapping);
782 		wmb();
783 
784 		flags = eor | len | DescOwn | FirstFrag | LastFrag;
785 
786 		if (mss)
787 			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
788 		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
789 			const struct iphdr *ip = ip_hdr(skb);
790 			if (ip->protocol == IPPROTO_TCP)
791 				flags |= IPCS | TCPCS;
792 			else if (ip->protocol == IPPROTO_UDP)
793 				flags |= IPCS | UDPCS;
794 			else
795 				WARN_ON(1);	/* we need a WARN() */
796 		}
797 
798 		txd->opts1 = cpu_to_le32(flags);
799 		wmb();
800 
801 		cp->tx_skb[entry] = skb;
802 		entry = NEXT_TX(entry);
803 	} else {
804 		struct cp_desc *txd;
805 		u32 first_len, first_eor;
806 		dma_addr_t first_mapping;
807 		int frag, first_entry = entry;
808 		const struct iphdr *ip = ip_hdr(skb);
809 
810 		/* We must give this initial chunk to the device last.
811 		 * Otherwise we could race with the device.
812 		 */
813 		first_eor = eor;
814 		first_len = skb_headlen(skb);
815 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
816 					       first_len, PCI_DMA_TODEVICE);
817 		cp->tx_skb[entry] = skb;
818 		entry = NEXT_TX(entry);
819 
820 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
821 			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
822 			u32 len;
823 			u32 ctrl;
824 			dma_addr_t mapping;
825 
826 			len = this_frag->size;
827 			mapping = dma_map_single(&cp->pdev->dev,
828 						 ((void *) page_address(this_frag->page) +
829 						  this_frag->page_offset),
830 						 len, PCI_DMA_TODEVICE);
831 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
832 
833 			ctrl = eor | len | DescOwn;
834 
835 			if (mss)
836 				ctrl |= LargeSend |
837 					((mss & MSSMask) << MSSShift);
838 			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
839 				if (ip->protocol == IPPROTO_TCP)
840 					ctrl |= IPCS | TCPCS;
841 				else if (ip->protocol == IPPROTO_UDP)
842 					ctrl |= IPCS | UDPCS;
843 				else
844 					BUG();
845 			}
846 
847 			if (frag == skb_shinfo(skb)->nr_frags - 1)
848 				ctrl |= LastFrag;
849 
850 			txd = &cp->tx_ring[entry];
851 			CP_VLAN_TX_TAG(txd, vlan_tag);
852 			txd->addr = cpu_to_le64(mapping);
853 			wmb();
854 
855 			txd->opts1 = cpu_to_le32(ctrl);
856 			wmb();
857 
858 			cp->tx_skb[entry] = skb;
859 			entry = NEXT_TX(entry);
860 		}
861 
862 		txd = &cp->tx_ring[first_entry];
863 		CP_VLAN_TX_TAG(txd, vlan_tag);
864 		txd->addr = cpu_to_le64(first_mapping);
865 		wmb();
866 
867 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
868 			if (ip->protocol == IPPROTO_TCP)
869 				txd->opts1 = cpu_to_le32(first_eor | first_len |
870 							 FirstFrag | DescOwn |
871 							 IPCS | TCPCS);
872 			else if (ip->protocol == IPPROTO_UDP)
873 				txd->opts1 = cpu_to_le32(first_eor | first_len |
874 							 FirstFrag | DescOwn |
875 							 IPCS | UDPCS);
876 			else
877 				BUG();
878 		} else
879 			txd->opts1 = cpu_to_le32(first_eor | first_len |
880 						 FirstFrag | DescOwn);
881 		wmb();
882 	}
883 	cp->tx_head = entry;
884 	if (netif_msg_tx_queued(cp))
885 		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
886 		       dev->name, entry, skb->len);
887 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
888 		netif_stop_queue(dev);
889 
890 	spin_unlock_irqrestore(&cp->lock, intr_flags);
891 
892 	cpw8(TxPoll, NormalTxPoll);
893 	dev->trans_start = jiffies;
894 
895 	return 0;
896 }
897 
898 /* Set or clear the multicast filter for this adaptor.
899    This routine is not state sensitive and need not be SMP locked. */
900 
901 static void __cp_set_rx_mode (struct net_device *dev)
902 {
903 	struct cp_private *cp = netdev_priv(dev);
904 	u32 mc_filter[2];	/* Multicast hash filter */
905 	int i, rx_mode;
906 	u32 tmp;
907 
908 	/* Note: do not reorder, GCC is clever about common statements. */
909 	if (dev->flags & IFF_PROMISC) {
910 		/* Unconditionally log net taps. */
911 		rx_mode =
912 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
913 		    AcceptAllPhys;
914 		mc_filter[1] = mc_filter[0] = 0xffffffff;
915 	} else if ((dev->mc_count > multicast_filter_limit)
916 		   || (dev->flags & IFF_ALLMULTI)) {
917 		/* Too many to filter perfectly -- accept all multicasts. */
918 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
919 		mc_filter[1] = mc_filter[0] = 0xffffffff;
920 	} else {
921 		struct dev_mc_list *mclist;
922 		rx_mode = AcceptBroadcast | AcceptMyPhys;
923 		mc_filter[1] = mc_filter[0] = 0;
924 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
925 		     i++, mclist = mclist->next) {
926 			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
927 
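			/* The top 6 bits of the CRC index one of the 64 hash
			 * filter bits spread across the two 32-bit MAR words. */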
928 			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
929 			rx_mode |= AcceptMulticast;
930 		}
931 	}
932 
933 	/* We can safely update without stopping the chip. */
934 	tmp = cp_rx_config | rx_mode;
935 	if (cp->rx_config != tmp) {
936 		cpw32_f (RxConfig, tmp);
937 		cp->rx_config = tmp;
938 	}
939 	cpw32_f (MAR0 + 0, mc_filter[0]);
940 	cpw32_f (MAR0 + 4, mc_filter[1]);
941 }
942 
943 static void cp_set_rx_mode (struct net_device *dev)
944 {
945 	unsigned long flags;
946 	struct cp_private *cp = netdev_priv(dev);
947 
948 	spin_lock_irqsave (&cp->lock, flags);
949 	__cp_set_rx_mode(dev);
950 	spin_unlock_irqrestore (&cp->lock, flags);
951 }
952 
953 static void __cp_get_stats(struct cp_private *cp)
954 {
955 	/* only lower 24 bits valid; write any value to clear */
956 	cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
957 	cpw32 (RxMissed, 0);
958 }
959 
960 static struct net_device_stats *cp_get_stats(struct net_device *dev)
961 {
962 	struct cp_private *cp = netdev_priv(dev);
963 	unsigned long flags;
964 
965 	/* The chip only needs to report frames it silently dropped. */
966 	spin_lock_irqsave(&cp->lock, flags);
967  	if (netif_running(dev) && netif_device_present(dev))
968  		__cp_get_stats(cp);
969 	spin_unlock_irqrestore(&cp->lock, flags);
970 
971 	return &dev->stats;
972 }
973 
974 static void cp_stop_hw (struct cp_private *cp)
975 {
976 	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
977 	cpw16_f(IntrMask, 0);
978 	cpw8(Cmd, 0);
979 	cpw16_f(CpCmd, 0);
980 	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
981 
982 	cp->rx_tail = 0;
983 	cp->tx_head = cp->tx_tail = 0;
984 }
985 
986 static void cp_reset_hw (struct cp_private *cp)
987 {
988 	unsigned work = 1000;
989 
990 	cpw8(Cmd, CmdReset);
991 
992 	while (work--) {
993 		if (!(cpr8(Cmd) & CmdReset))
994 			return;
995 
996 		schedule_timeout_uninterruptible(10);
997 	}
998 
999 	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
1000 }
1001 
1002 static inline void cp_start_hw (struct cp_private *cp)
1003 {
1004 	cpw16(CpCmd, cp->cpcmd);
1005 	cpw8(Cmd, RxOn | TxOn);
1006 }
1007 
1008 static void cp_init_hw (struct cp_private *cp)
1009 {
1010 	struct net_device *dev = cp->dev;
1011 	dma_addr_t ring_dma;
1012 
1013 	cp_reset_hw(cp);
1014 
1015 	cpw8_f (Cfg9346, Cfg9346_Unlock);
1016 
1017 	/* Restore our idea of the MAC address. */
1018 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1019 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1020 
1021 	cp_start_hw(cp);
1022 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
1023 
1024 	__cp_set_rx_mode(dev);
1025 	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1026 
1027 	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1028 	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
1029 	cpw8(Config3, PARMEnable);
1030 	cp->wol_enabled = 0;
1031 
1032 	cpw8(Config5, cpr8(Config5) & PMEStatus);
1033 
1034 	cpw32_f(HiTxRingAddr, 0);
1035 	cpw32_f(HiTxRingAddr + 4, 0);
1036 
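	/* Program the 64-bit ring base addresses; the high dword is computed
	 * as (ring_dma >> 16) >> 16 so the shift stays well-defined when
	 * dma_addr_t is only 32 bits wide. */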
1037 	ring_dma = cp->ring_dma;
1038 	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1039 	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1040 
1041 	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1042 	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1043 	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1044 
1045 	cpw16(MultiIntr, 0);
1046 
1047 	cpw16_f(IntrMask, cp_intr_mask);
1048 
1049 	cpw8_f(Cfg9346, Cfg9346_Lock);
1050 }
1051 
1052 static int cp_refill_rx(struct cp_private *cp)
1053 {
1054 	struct net_device *dev = cp->dev;
1055 	unsigned i;
1056 
1057 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1058 		struct sk_buff *skb;
1059 		dma_addr_t mapping;
1060 
1061 		skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
1062 		if (!skb)
1063 			goto err_out;
1064 
1065 		skb_reserve(skb, NET_IP_ALIGN);
1066 
1067 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
1068 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1069 		cp->rx_skb[i] = skb;
1070 
1071 		cp->rx_ring[i].opts2 = 0;
1072 		cp->rx_ring[i].addr = cpu_to_le64(mapping);
1073 		if (i == (CP_RX_RING_SIZE - 1))
1074 			cp->rx_ring[i].opts1 =
1075 				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1076 		else
1077 			cp->rx_ring[i].opts1 =
1078 				cpu_to_le32(DescOwn | cp->rx_buf_sz);
1079 	}
1080 
1081 	return 0;
1082 
1083 err_out:
1084 	cp_clean_rings(cp);
1085 	return -ENOMEM;
1086 }
1087 
1088 static void cp_init_rings_index (struct cp_private *cp)
1089 {
1090 	cp->rx_tail = 0;
1091 	cp->tx_head = cp->tx_tail = 0;
1092 }
1093 
1094 static int cp_init_rings (struct cp_private *cp)
1095 {
1096 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1097 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1098 
1099 	cp_init_rings_index(cp);
1100 
1101 	return cp_refill_rx (cp);
1102 }
1103 
1104 static int cp_alloc_rings (struct cp_private *cp)
1105 {
1106 	void *mem;
1107 
1108 	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
1109 				 &cp->ring_dma, GFP_KERNEL);
1110 	if (!mem)
1111 		return -ENOMEM;
1112 
1113 	cp->rx_ring = mem;
1114 	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1115 
1116 	return cp_init_rings(cp);
1117 }
1118 
1119 static void cp_clean_rings (struct cp_private *cp)
1120 {
1121 	struct cp_desc *desc;
1122 	unsigned i;
1123 
1124 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
1125 		if (cp->rx_skb[i]) {
1126 			desc = cp->rx_ring + i;
1127 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1128 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1129 			dev_kfree_skb(cp->rx_skb[i]);
1130 		}
1131 	}
1132 
1133 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
1134 		if (cp->tx_skb[i]) {
1135 			struct sk_buff *skb = cp->tx_skb[i];
1136 
1137 			desc = cp->tx_ring + i;
1138 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1139 					 le32_to_cpu(desc->opts1) & 0xffff,
1140 					 PCI_DMA_TODEVICE);
1141 			if (le32_to_cpu(desc->opts1) & LastFrag)
1142 				dev_kfree_skb(skb);
1143 			cp->dev->stats.tx_dropped++;
1144 		}
1145 	}
1146 
1147 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1148 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1149 
1150 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1151 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1152 }
1153 
1154 static void cp_free_rings (struct cp_private *cp)
1155 {
1156 	cp_clean_rings(cp);
1157 	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1158 			  cp->ring_dma);
1159 	cp->rx_ring = NULL;
1160 	cp->tx_ring = NULL;
1161 }
1162 
1163 static int cp_open (struct net_device *dev)
1164 {
1165 	struct cp_private *cp = netdev_priv(dev);
1166 	int rc;
1167 
1168 	if (netif_msg_ifup(cp))
1169 		printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1170 
1171 	rc = cp_alloc_rings(cp);
1172 	if (rc)
1173 		return rc;
1174 
1175 	napi_enable(&cp->napi);
1176 
1177 	cp_init_hw(cp);
1178 
1179 	rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1180 	if (rc)
1181 		goto err_out_hw;
1182 
1183 	netif_carrier_off(dev);
1184 	mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1185 	netif_start_queue(dev);
1186 
1187 	return 0;
1188 
1189 err_out_hw:
1190 	napi_disable(&cp->napi);
1191 	cp_stop_hw(cp);
1192 	cp_free_rings(cp);
1193 	return rc;
1194 }
1195 
1196 static int cp_close (struct net_device *dev)
1197 {
1198 	struct cp_private *cp = netdev_priv(dev);
1199 	unsigned long flags;
1200 
1201 	napi_disable(&cp->napi);
1202 
1203 	if (netif_msg_ifdown(cp))
1204 		printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1205 
1206 	spin_lock_irqsave(&cp->lock, flags);
1207 
1208 	netif_stop_queue(dev);
1209 	netif_carrier_off(dev);
1210 
1211 	cp_stop_hw(cp);
1212 
1213 	spin_unlock_irqrestore(&cp->lock, flags);
1214 
1215 	free_irq(dev->irq, dev);
1216 
1217 	cp_free_rings(cp);
1218 	return 0;
1219 }
1220 
1221 static void cp_tx_timeout(struct net_device *dev)
1222 {
1223 	struct cp_private *cp = netdev_priv(dev);
1224 	unsigned long flags;
1225 	int rc;
1226 
1227 	printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n",
1228 	       dev->name, cpr8(Cmd), cpr16(CpCmd),
1229 	       cpr16(IntrStatus), cpr16(IntrMask));
1230 
1231 	spin_lock_irqsave(&cp->lock, flags);
1232 
1233 	cp_stop_hw(cp);
1234 	cp_clean_rings(cp);
1235 	rc = cp_init_rings(cp);
1236 	cp_start_hw(cp);
1237 
1238 	netif_wake_queue(dev);
1239 
1240 	spin_unlock_irqrestore(&cp->lock, flags);
1241 
1242 	return;
1243 }
1244 
1245 #ifdef BROKEN
1246 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1247 {
1248 	struct cp_private *cp = netdev_priv(dev);
1249 	int rc;
1250 	unsigned long flags;
1251 
1252 	/* check for invalid MTU, according to hardware limits */
1253 	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1254 		return -EINVAL;
1255 
1256 	/* if network interface not up, no need for complexity */
1257 	if (!netif_running(dev)) {
1258 		dev->mtu = new_mtu;
1259 		cp_set_rxbufsize(cp);	/* set new rx buf size */
1260 		return 0;
1261 	}
1262 
1263 	spin_lock_irqsave(&cp->lock, flags);
1264 
1265 	cp_stop_hw(cp);			/* stop h/w and free rings */
1266 	cp_clean_rings(cp);
1267 
1268 	dev->mtu = new_mtu;
1269 	cp_set_rxbufsize(cp);		/* set new rx buf size */
1270 
1271 	rc = cp_init_rings(cp);		/* realloc and restart h/w */
1272 	cp_start_hw(cp);
1273 
1274 	spin_unlock_irqrestore(&cp->lock, flags);
1275 
1276 	return rc;
1277 }
1278 #endif /* BROKEN */
1279 
1280 static const char mii_2_8139_map[8] = {
1281 	BasicModeCtrl,
1282 	BasicModeStatus,
1283 	0,
1284 	0,
1285 	NWayAdvert,
1286 	NWayLPAR,
1287 	NWayExpansion,
1288 	0
1289 };
1290 
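/* The internal PHY is not reached over a real MDIO bus: mii_2_8139_map
 * translates the standard MII register numbers onto the chip's own
 * BasicModeCtrl/BasicModeStatus/NWay* registers, and any unmapped location
 * simply reads back as zero. */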
1291 static int mdio_read(struct net_device *dev, int phy_id, int location)
1292 {
1293 	struct cp_private *cp = netdev_priv(dev);
1294 
1295 	return location < 8 && mii_2_8139_map[location] ?
1296 	       readw(cp->regs + mii_2_8139_map[location]) : 0;
1297 }
1298 
1299 
1300 static void mdio_write(struct net_device *dev, int phy_id, int location,
1301 		       int value)
1302 {
1303 	struct cp_private *cp = netdev_priv(dev);
1304 
1305 	if (location == 0) {
1306 		cpw8(Cfg9346, Cfg9346_Unlock);
1307 		cpw16(BasicModeCtrl, value);
1308 		cpw8(Cfg9346, Cfg9346_Lock);
1309 	} else if (location < 8 && mii_2_8139_map[location])
1310 		cpw16(mii_2_8139_map[location], value);
1311 }
1312 
1313 /* Set the ethtool Wake-on-LAN settings */
1314 static int netdev_set_wol (struct cp_private *cp,
1315 			   const struct ethtool_wolinfo *wol)
1316 {
1317 	u8 options;
1318 
1319 	options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1320 	/* If WOL is being disabled, no need for complexity */
1321 	if (wol->wolopts) {
1322 		if (wol->wolopts & WAKE_PHY)	options |= LinkUp;
1323 		if (wol->wolopts & WAKE_MAGIC)	options |= MagicPacket;
1324 	}
1325 
1326 	cpw8 (Cfg9346, Cfg9346_Unlock);
1327 	cpw8 (Config3, options);
1328 	cpw8 (Cfg9346, Cfg9346_Lock);
1329 
1330 	options = 0; /* Paranoia setting */
1331 	options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1332 	/* If WOL is being disabled, no need for complexity */
1333 	if (wol->wolopts) {
1334 		if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1335 		if (wol->wolopts & WAKE_BCAST)	options |= BWF;
1336 		if (wol->wolopts & WAKE_MCAST)	options |= MWF;
1337 	}
1338 
1339 	cpw8 (Config5, options);
1340 
1341 	cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1342 
1343 	return 0;
1344 }
1345 
1346 /* Get the ethtool Wake-on-LAN settings */
1347 static void netdev_get_wol (struct cp_private *cp,
1348 	             struct ethtool_wolinfo *wol)
1349 {
1350 	u8 options;
1351 
1352 	wol->wolopts   = 0; /* Start from scratch */
1353 	wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1354 		         WAKE_MCAST | WAKE_UCAST;
1355 	/* We don't need to go on if WOL is disabled */
1356 	if (!cp->wol_enabled) return;
1357 
1358 	options        = cpr8 (Config3);
1359 	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1360 	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1361 
1362 	options        = 0; /* Paranoia setting */
1363 	options        = cpr8 (Config5);
1364 	if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1365 	if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1366 	if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1367 }
1368 
1369 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1370 {
1371 	struct cp_private *cp = netdev_priv(dev);
1372 
1373 	strcpy (info->driver, DRV_NAME);
1374 	strcpy (info->version, DRV_VERSION);
1375 	strcpy (info->bus_info, pci_name(cp->pdev));
1376 }
1377 
1378 static int cp_get_regs_len(struct net_device *dev)
1379 {
1380 	return CP_REGS_SIZE;
1381 }
1382 
1383 static int cp_get_sset_count (struct net_device *dev, int sset)
1384 {
1385 	switch (sset) {
1386 	case ETH_SS_STATS:
1387 		return CP_NUM_STATS;
1388 	default:
1389 		return -EOPNOTSUPP;
1390 	}
1391 }
1392 
1393 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1394 {
1395 	struct cp_private *cp = netdev_priv(dev);
1396 	int rc;
1397 	unsigned long flags;
1398 
1399 	spin_lock_irqsave(&cp->lock, flags);
1400 	rc = mii_ethtool_gset(&cp->mii_if, cmd);
1401 	spin_unlock_irqrestore(&cp->lock, flags);
1402 
1403 	return rc;
1404 }
1405 
1406 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1407 {
1408 	struct cp_private *cp = netdev_priv(dev);
1409 	int rc;
1410 	unsigned long flags;
1411 
1412 	spin_lock_irqsave(&cp->lock, flags);
1413 	rc = mii_ethtool_sset(&cp->mii_if, cmd);
1414 	spin_unlock_irqrestore(&cp->lock, flags);
1415 
1416 	return rc;
1417 }
1418 
1419 static int cp_nway_reset(struct net_device *dev)
1420 {
1421 	struct cp_private *cp = netdev_priv(dev);
1422 	return mii_nway_restart(&cp->mii_if);
1423 }
1424 
1425 static u32 cp_get_msglevel(struct net_device *dev)
1426 {
1427 	struct cp_private *cp = netdev_priv(dev);
1428 	return cp->msg_enable;
1429 }
1430 
1431 static void cp_set_msglevel(struct net_device *dev, u32 value)
1432 {
1433 	struct cp_private *cp = netdev_priv(dev);
1434 	cp->msg_enable = value;
1435 }
1436 
1437 static u32 cp_get_rx_csum(struct net_device *dev)
1438 {
1439 	struct cp_private *cp = netdev_priv(dev);
1440 	return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
1441 }
1442 
1443 static int cp_set_rx_csum(struct net_device *dev, u32 data)
1444 {
1445 	struct cp_private *cp = netdev_priv(dev);
1446 	u16 cmd = cp->cpcmd, newcmd;
1447 
1448 	newcmd = cmd;
1449 
1450 	if (data)
1451 		newcmd |= RxChkSum;
1452 	else
1453 		newcmd &= ~RxChkSum;
1454 
1455 	if (newcmd != cmd) {
1456 		unsigned long flags;
1457 
1458 		spin_lock_irqsave(&cp->lock, flags);
1459 		cp->cpcmd = newcmd;
1460 		cpw16_f(CpCmd, newcmd);
1461 		spin_unlock_irqrestore(&cp->lock, flags);
1462 	}
1463 
1464 	return 0;
1465 }
1466 
1467 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1468 		        void *p)
1469 {
1470 	struct cp_private *cp = netdev_priv(dev);
1471 	unsigned long flags;
1472 
1473 	if (regs->len < CP_REGS_SIZE)
1474 		return /* -EINVAL */;
1475 
1476 	regs->version = CP_REGS_VER;
1477 
1478 	spin_lock_irqsave(&cp->lock, flags);
1479 	memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1480 	spin_unlock_irqrestore(&cp->lock, flags);
1481 }
1482 
1483 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1484 {
1485 	struct cp_private *cp = netdev_priv(dev);
1486 	unsigned long flags;
1487 
1488 	spin_lock_irqsave (&cp->lock, flags);
1489 	netdev_get_wol (cp, wol);
1490 	spin_unlock_irqrestore (&cp->lock, flags);
1491 }
1492 
1493 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1494 {
1495 	struct cp_private *cp = netdev_priv(dev);
1496 	unsigned long flags;
1497 	int rc;
1498 
1499 	spin_lock_irqsave (&cp->lock, flags);
1500 	rc = netdev_set_wol (cp, wol);
1501 	spin_unlock_irqrestore (&cp->lock, flags);
1502 
1503 	return rc;
1504 }
1505 
1506 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1507 {
1508 	switch (stringset) {
1509 	case ETH_SS_STATS:
1510 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1511 		break;
1512 	default:
1513 		BUG();
1514 		break;
1515 	}
1516 }
1517 
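/* Hardware statistics dump: once the DumpStats bit is set, the NIC DMAs a
 * struct cp_dma_stats block to the address written into StatsAddr; the driver
 * polls briefly for the bit to clear before copying the counters out. */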
1518 static void cp_get_ethtool_stats (struct net_device *dev,
1519 				  struct ethtool_stats *estats, u64 *tmp_stats)
1520 {
1521 	struct cp_private *cp = netdev_priv(dev);
1522 	struct cp_dma_stats *nic_stats;
1523 	dma_addr_t dma;
1524 	int i;
1525 
1526 	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1527 				       &dma, GFP_KERNEL);
1528 	if (!nic_stats)
1529 		return;
1530 
1531 	/* begin NIC statistics dump */
1532 	cpw32(StatsAddr + 4, (u64)dma >> 32);
1533 	cpw32(StatsAddr, ((u64)dma & DMA_32BIT_MASK) | DumpStats);
1534 	cpr32(StatsAddr);
1535 
1536 	for (i = 0; i < 1000; i++) {
1537 		if ((cpr32(StatsAddr) & DumpStats) == 0)
1538 			break;
1539 		udelay(10);
1540 	}
1541 	cpw32(StatsAddr, 0);
1542 	cpw32(StatsAddr + 4, 0);
1543 	cpr32(StatsAddr);
1544 
1545 	i = 0;
1546 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1547 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1548 	tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1549 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1550 	tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1551 	tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1552 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1553 	tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1554 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1555 	tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1556 	tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1557 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1558 	tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1559 	tmp_stats[i++] = cp->cp_stats.rx_frags;
1560 	BUG_ON(i != CP_NUM_STATS);
1561 
1562 	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1563 }
1564 
1565 static const struct ethtool_ops cp_ethtool_ops = {
1566 	.get_drvinfo		= cp_get_drvinfo,
1567 	.get_regs_len		= cp_get_regs_len,
1568 	.get_sset_count		= cp_get_sset_count,
1569 	.get_settings		= cp_get_settings,
1570 	.set_settings		= cp_set_settings,
1571 	.nway_reset		= cp_nway_reset,
1572 	.get_link		= ethtool_op_get_link,
1573 	.get_msglevel		= cp_get_msglevel,
1574 	.set_msglevel		= cp_set_msglevel,
1575 	.get_rx_csum		= cp_get_rx_csum,
1576 	.set_rx_csum		= cp_set_rx_csum,
1577 	.set_tx_csum		= ethtool_op_set_tx_csum, /* local! */
1578 	.set_sg			= ethtool_op_set_sg,
1579 	.set_tso		= ethtool_op_set_tso,
1580 	.get_regs		= cp_get_regs,
1581 	.get_wol		= cp_get_wol,
1582 	.set_wol		= cp_set_wol,
1583 	.get_strings		= cp_get_strings,
1584 	.get_ethtool_stats	= cp_get_ethtool_stats,
1585 	.get_eeprom_len		= cp_get_eeprom_len,
1586 	.get_eeprom		= cp_get_eeprom,
1587 	.set_eeprom		= cp_set_eeprom,
1588 };
1589 
1590 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1591 {
1592 	struct cp_private *cp = netdev_priv(dev);
1593 	int rc;
1594 	unsigned long flags;
1595 
1596 	if (!netif_running(dev))
1597 		return -EINVAL;
1598 
1599 	spin_lock_irqsave(&cp->lock, flags);
1600 	rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1601 	spin_unlock_irqrestore(&cp->lock, flags);
1602 	return rc;
1603 }
1604 
1605 /* Serial EEPROM section. */
1606 
1607 /*  EEPROM_Ctrl bits. */
1608 #define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
1609 #define EE_CS			0x08	/* EEPROM chip select. */
1610 #define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
1611 #define EE_WRITE_0		0x00
1612 #define EE_WRITE_1		0x02
1613 #define EE_DATA_READ	0x01	/* EEPROM chip data out. */
1614 #define EE_ENB			(0x80 | EE_CS)
1615 
1616 /* Delay between EEPROM clock transitions.
1617    No extra delay is needed with 33MHz PCI, but 66MHz may change this.
1618  */
1619 
1620 #define eeprom_delay()	readl(ee_addr)
1621 
1622 /* The EEPROM commands include the always-set leading bit. */
1623 #define EE_EXTEND_CMD	(4)
1624 #define EE_WRITE_CMD	(5)
1625 #define EE_READ_CMD		(6)
1626 #define EE_ERASE_CMD	(7)
1627 
1628 #define EE_EWDS_ADDR	(0)
1629 #define EE_WRAL_ADDR	(1)
1630 #define EE_ERAL_ADDR	(2)
1631 #define EE_EWEN_ADDR	(3)
1632 
1633 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1634 
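/* The serial EEPROM (a 93C46-style part, as far as this driver is concerned)
 * is bit-banged through the Cfg9346 register: command and address bits are
 * shifted out MSB first on EE_DATA_WRITE, clocked by EE_SHIFT_CLK, and data
 * is read back one bit per clock on EE_DATA_READ. */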
1635 static void eeprom_cmd_start(void __iomem *ee_addr)
1636 {
1637 	writeb (EE_ENB & ~EE_CS, ee_addr);
1638 	writeb (EE_ENB, ee_addr);
1639 	eeprom_delay ();
1640 }
1641 
1642 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1643 {
1644 	int i;
1645 
1646 	/* Shift the command bits out. */
1647 	for (i = cmd_len - 1; i >= 0; i--) {
1648 		int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1649 		writeb (EE_ENB | dataval, ee_addr);
1650 		eeprom_delay ();
1651 		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1652 		eeprom_delay ();
1653 	}
1654 	writeb (EE_ENB, ee_addr);
1655 	eeprom_delay ();
1656 }
1657 
1658 static void eeprom_cmd_end(void __iomem *ee_addr)
1659 {
1660 	writeb (~EE_CS, ee_addr);
1661 	eeprom_delay ();
1662 }
1663 
1664 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1665 			      int addr_len)
1666 {
1667 	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1668 
1669 	eeprom_cmd_start(ee_addr);
1670 	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1671 	eeprom_cmd_end(ee_addr);
1672 }
1673 
1674 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1675 {
1676 	int i;
1677 	u16 retval = 0;
1678 	void __iomem *ee_addr = ioaddr + Cfg9346;
1679 	int read_cmd = location | (EE_READ_CMD << addr_len);
1680 
1681 	eeprom_cmd_start(ee_addr);
1682 	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1683 
1684 	for (i = 16; i > 0; i--) {
1685 		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1686 		eeprom_delay ();
1687 		retval =
1688 		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1689 				     0);
1690 		writeb (EE_ENB, ee_addr);
1691 		eeprom_delay ();
1692 	}
1693 
1694 	eeprom_cmd_end(ee_addr);
1695 
1696 	return retval;
1697 }
1698 
1699 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1700 			 int addr_len)
1701 {
1702 	int i;
1703 	void __iomem *ee_addr = ioaddr + Cfg9346;
1704 	int write_cmd = location | (EE_WRITE_CMD << addr_len);
1705 
1706 	eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1707 
1708 	eeprom_cmd_start(ee_addr);
1709 	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1710 	eeprom_cmd(ee_addr, val, 16);
1711 	eeprom_cmd_end(ee_addr);
1712 
1713 	eeprom_cmd_start(ee_addr);
1714 	for (i = 0; i < 20000; i++)
1715 		if (readb(ee_addr) & EE_DATA_READ)
1716 			break;
1717 	eeprom_cmd_end(ee_addr);
1718 
1719 	eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1720 }
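/*
 * Usage sketch (editorial, not in the original source): write_eeprom()
 * already brackets the operation with the EWEN (write-enable) and EWDS
 * (write-disable) extended commands and polls EE_DATA_READ for write
 * completion, so updating a single 16-bit word only needs the word offset,
 * the value and the address width.  The helper name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_patch_word(void __iomem *regs, int word, u16 val)
{
	int addr_len = read_eeprom(regs, 0, 8) == 0x8129 ? 8 : 6;

	write_eeprom(regs, word, val, addr_len);
}
#endif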
1721 
1722 static int cp_get_eeprom_len(struct net_device *dev)
1723 {
1724 	struct cp_private *cp = netdev_priv(dev);
1725 	int size;
1726 
1727 	spin_lock_irq(&cp->lock);
1728 	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1729 	spin_unlock_irq(&cp->lock);
1730 
1731 	return size;
1732 }
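/*
 * Editorial note (not in the original source): word 0 reading 0x8129 is
 * used throughout this file as the signature of the larger EEPROM that
 * takes an 8-bit word address (256 bytes); anything else is treated as
 * the smaller 6-bit-address part (128 bytes).
 */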
1733 
1734 static int cp_get_eeprom(struct net_device *dev,
1735 			 struct ethtool_eeprom *eeprom, u8 *data)
1736 {
1737 	struct cp_private *cp = netdev_priv(dev);
1738 	unsigned int addr_len;
1739 	u16 val;
1740 	u32 offset = eeprom->offset >> 1;
1741 	u32 len = eeprom->len;
1742 	u32 i = 0;
1743 
1744 	eeprom->magic = CP_EEPROM_MAGIC;
1745 
1746 	spin_lock_irq(&cp->lock);
1747 
1748 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1749 
1750 	if (eeprom->offset & 1) {
1751 		val = read_eeprom(cp->regs, offset, addr_len);
1752 		data[i++] = (u8)(val >> 8);
1753 		offset++;
1754 	}
1755 
1756 	while (i < len - 1) {
1757 		val = read_eeprom(cp->regs, offset, addr_len);
1758 		data[i++] = (u8)val;
1759 		data[i++] = (u8)(val >> 8);
1760 		offset++;
1761 	}
1762 
1763 	if (i < len) {
1764 		val = read_eeprom(cp->regs, offset, addr_len);
1765 		data[i] = (u8)val;
1766 	}
1767 
1768 	spin_unlock_irq(&cp->lock);
1769 	return 0;
1770 }
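/*
 * Worked example (editorial, not in the original source): ethtool offsets
 * are in bytes while the EEPROM is word addressed, hence offset >> 1 above.
 * A request with eeprom->offset == 3 and eeprom->len == 4 reads the high
 * byte of word 1 (the odd leading byte), both bytes of word 2, and the low
 * byte of word 3 -- i.e. bytes 3..6 of the EEPROM image.
 */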
1771 
1772 static int cp_set_eeprom(struct net_device *dev,
1773 			 struct ethtool_eeprom *eeprom, u8 *data)
1774 {
1775 	struct cp_private *cp = netdev_priv(dev);
1776 	unsigned int addr_len;
1777 	u16 val;
1778 	u32 offset = eeprom->offset >> 1;
1779 	u32 len = eeprom->len;
1780 	u32 i = 0;
1781 
1782 	if (eeprom->magic != CP_EEPROM_MAGIC)
1783 		return -EINVAL;
1784 
1785 	spin_lock_irq(&cp->lock);
1786 
1787 	addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1788 
1789 	if (eeprom->offset & 1) {
1790 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1791 		val |= (u16)data[i++] << 8;
1792 		write_eeprom(cp->regs, offset, val, addr_len);
1793 		offset++;
1794 	}
1795 
1796 	while (i < len - 1) {
1797 		val = (u16)data[i++];
1798 		val |= (u16)data[i++] << 8;
1799 		write_eeprom(cp->regs, offset, val, addr_len);
1800 		offset++;
1801 	}
1802 
1803 	if (i < len) {
1804 		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1805 		val |= (u16)data[i];
1806 		write_eeprom(cp->regs, offset, val, addr_len);
1807 	}
1808 
1809 	spin_unlock_irq(&cp->lock);
1810 	return 0;
1811 }
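/*
 * Worked example (editorial, not in the original source): writes with
 * unaligned edges are read-modify-write.  Storing bytes 3..4
 * (eeprom->offset == 3, eeprom->len == 2) rewrites word 1 with its low
 * byte preserved and its high byte replaced, then rewrites word 2 with
 * its low byte replaced and its high byte preserved.
 */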
1812 
1813 /* Put the board into the D3hot low-power state, with PME# enabled to wait for the WakeUp signal */
1814 static void cp_set_d3_state (struct cp_private *cp)
1815 {
1816 	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1817 	pci_set_power_state (cp->pdev, PCI_D3hot);
1818 }
1819 
1820 static const struct net_device_ops cp_netdev_ops = {
1821 	.ndo_open		= cp_open,
1822 	.ndo_stop		= cp_close,
1823 	.ndo_validate_addr	= eth_validate_addr,
1824 	.ndo_set_mac_address 	= eth_mac_addr,
1825 	.ndo_set_multicast_list	= cp_set_rx_mode,
1826 	.ndo_get_stats		= cp_get_stats,
1827 	.ndo_do_ioctl		= cp_ioctl,
1828 	.ndo_start_xmit		= cp_start_xmit,
1829 	.ndo_tx_timeout		= cp_tx_timeout,
1830 #if CP_VLAN_TAG_USED
1831 	.ndo_vlan_rx_register	= cp_vlan_rx_register,
1832 #endif
1833 #ifdef BROKEN
1834 	.ndo_change_mtu		= cp_change_mtu,
1835 #endif
1836 
1837 #ifdef CONFIG_NET_POLL_CONTROLLER
1838 	.ndo_poll_controller	= cp_poll_controller,
1839 #endif
1840 };
1841 
1842 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1843 {
1844 	struct net_device *dev;
1845 	struct cp_private *cp;
1846 	int rc;
1847 	void __iomem *regs;
1848 	resource_size_t pciaddr;
1849 	unsigned int addr_len, i, pci_using_dac;
1850 
1851 #ifndef MODULE
1852 	static int version_printed;
1853 	if (version_printed++ == 0)
1854 		printk("%s", version);
1855 #endif
1856 
1857 	if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1858 	    pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1859 		dev_info(&pdev->dev,
1860 			   "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1861 		           pdev->vendor, pdev->device, pdev->revision);
1862 		return -ENODEV;
1863 	}
1864 
1865 	dev = alloc_etherdev(sizeof(struct cp_private));
1866 	if (!dev)
1867 		return -ENOMEM;
1868 	SET_NETDEV_DEV(dev, &pdev->dev);
1869 
1870 	cp = netdev_priv(dev);
1871 	cp->pdev = pdev;
1872 	cp->dev = dev;
1873 	cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1874 	spin_lock_init (&cp->lock);
1875 	cp->mii_if.dev = dev;
1876 	cp->mii_if.mdio_read = mdio_read;
1877 	cp->mii_if.mdio_write = mdio_write;
1878 	cp->mii_if.phy_id = CP_INTERNAL_PHY;
1879 	cp->mii_if.phy_id_mask = 0x1f;
1880 	cp->mii_if.reg_num_mask = 0x1f;
1881 	cp_set_rxbufsize(cp);
1882 
1883 	rc = pci_enable_device(pdev);
1884 	if (rc)
1885 		goto err_out_free;
1886 
1887 	rc = pci_set_mwi(pdev);
1888 	if (rc)
1889 		goto err_out_disable;
1890 
1891 	rc = pci_request_regions(pdev, DRV_NAME);
1892 	if (rc)
1893 		goto err_out_mwi;
1894 
1895 	pciaddr = pci_resource_start(pdev, 1);
1896 	if (!pciaddr) {
1897 		rc = -EIO;
1898 		dev_err(&pdev->dev, "no MMIO resource\n");
1899 		goto err_out_res;
1900 	}
1901 	if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1902 		rc = -EIO;
1903 		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1904 		       (unsigned long long)pci_resource_len(pdev, 1));
1905 		goto err_out_res;
1906 	}
1907 
1908 	/* Configure DMA attributes. */
1909 	if ((sizeof(dma_addr_t) > 4) &&
1910 	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
1911 	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1912 		pci_using_dac = 1;
1913 	} else {
1914 		pci_using_dac = 0;
1915 
1916 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1917 		if (rc) {
1918 			dev_err(&pdev->dev,
1919 				   "No usable DMA configuration, aborting.\n");
1920 			goto err_out_res;
1921 		}
1922 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1923 		if (rc) {
1924 			dev_err(&pdev->dev,
1925 				   "No usable consistent DMA configuration, "
1926 			           "aborting.\n");
1927 			goto err_out_res;
1928 		}
1929 	}
1930 
1931 	cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1932 		    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1933 
1934 	regs = ioremap(pciaddr, CP_REGS_SIZE);
1935 	if (!regs) {
1936 		rc = -EIO;
1937 		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1938 		       (unsigned long long)pci_resource_len(pdev, 1),
1939 		       (unsigned long long)pciaddr);
1940 		goto err_out_res;
1941 	}
1942 	dev->base_addr = (unsigned long) regs;
1943 	cp->regs = regs;
1944 
1945 	cp_stop_hw(cp);
1946 
1947 	/* read MAC address from EEPROM */
1948 	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1949 	for (i = 0; i < 3; i++)
1950 		((__le16 *) (dev->dev_addr))[i] =
1951 		    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1952 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1953 
1954 	dev->netdev_ops = &cp_netdev_ops;
1955 	netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1956 	dev->ethtool_ops = &cp_ethtool_ops;
1957 	dev->watchdog_timeo = TX_TIMEOUT;
1958 
1959 #if CP_VLAN_TAG_USED
1960 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1961 #endif
1962 
1963 	if (pci_using_dac)
1964 		dev->features |= NETIF_F_HIGHDMA;
1965 
1966 #if 0 /* disabled by default until verified */
1967 	dev->features |= NETIF_F_TSO;
1968 #endif
1969 
1970 	dev->irq = pdev->irq;
1971 
1972 	rc = register_netdev(dev);
1973 	if (rc)
1974 		goto err_out_iomap;
1975 
1976 	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
1977 		"%pM, IRQ %d\n",
1978 		dev->name,
1979 		dev->base_addr,
1980 		dev->dev_addr,
1981 		dev->irq);
1982 
1983 	pci_set_drvdata(pdev, dev);
1984 
1985 	/* enable busmastering and memory-write-invalidate */
1986 	pci_set_master(pdev);
1987 
1988 	if (cp->wol_enabled)
1989 		cp_set_d3_state (cp);
1990 
1991 	return 0;
1992 
1993 err_out_iomap:
1994 	iounmap(regs);
1995 err_out_res:
1996 	pci_release_regions(pdev);
1997 err_out_mwi:
1998 	pci_clear_mwi(pdev);
1999 err_out_disable:
2000 	pci_disable_device(pdev);
2001 err_out_free:
2002 	free_netdev(dev);
2003 	return rc;
2004 }
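/*
 * Editorial note (not in the original source): the err_out_* labels above
 * unwind in the reverse order of acquisition -- iounmap, release of the
 * PCI regions, clearing MWI, disabling the device, and finally freeing the
 * netdev -- so a failure at any step releases only what was already set up.
 */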
2005 
2006 static void cp_remove_one (struct pci_dev *pdev)
2007 {
2008 	struct net_device *dev = pci_get_drvdata(pdev);
2009 	struct cp_private *cp = netdev_priv(dev);
2010 
2011 	unregister_netdev(dev);
2012 	iounmap(cp->regs);
2013 	if (cp->wol_enabled)
2014 		pci_set_power_state (pdev, PCI_D0);
2015 	pci_release_regions(pdev);
2016 	pci_clear_mwi(pdev);
2017 	pci_disable_device(pdev);
2018 	pci_set_drvdata(pdev, NULL);
2019 	free_netdev(dev);
2020 }
2021 
2022 #ifdef CONFIG_PM
2023 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2024 {
2025 	struct net_device *dev = pci_get_drvdata(pdev);
2026 	struct cp_private *cp = netdev_priv(dev);
2027 	unsigned long flags;
2028 
2029 	if (!netif_running(dev))
2030 		return 0;
2031 
2032 	netif_device_detach (dev);
2033 	netif_stop_queue (dev);
2034 
2035 	spin_lock_irqsave (&cp->lock, flags);
2036 
2037 	/* Disable Rx and Tx */
2038 	cpw16 (IntrMask, 0);
2039 	cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));	/* clear both enable bits */
2040 
2041 	spin_unlock_irqrestore (&cp->lock, flags);
2042 
2043 	pci_save_state(pdev);
2044 	pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2045 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2046 
2047 	return 0;
2048 }
2049 
2050 static int cp_resume (struct pci_dev *pdev)
2051 {
2052 	struct net_device *dev = pci_get_drvdata (pdev);
2053 	struct cp_private *cp = netdev_priv(dev);
2054 	unsigned long flags;
2055 
2056 	if (!netif_running(dev))
2057 		return 0;
2058 
2059 	netif_device_attach (dev);
2060 
2061 	pci_set_power_state(pdev, PCI_D0);
2062 	pci_restore_state(pdev);
2063 	pci_enable_wake(pdev, PCI_D0, 0);
2064 
2065 	/* FIXME: the device may misbehave if the Rx ring buffer is depleted */
2066 	cp_init_rings_index (cp);
2067 	cp_init_hw (cp);
2068 	netif_start_queue (dev);
2069 
2070 	spin_lock_irqsave (&cp->lock, flags);
2071 
2072 	mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2073 
2074 	spin_unlock_irqrestore (&cp->lock, flags);
2075 
2076 	return 0;
2077 }
2078 #endif /* CONFIG_PM */
2079 
2080 static struct pci_driver cp_driver = {
2081 	.name         = DRV_NAME,
2082 	.id_table     = cp_pci_tbl,
2083 	.probe        =	cp_init_one,
2084 	.remove       = cp_remove_one,
2085 #ifdef CONFIG_PM
2086 	.resume       = cp_resume,
2087 	.suspend      = cp_suspend,
2088 #endif
2089 };
2090 
2091 static int __init cp_init (void)
2092 {
2093 #ifdef MODULE
2094 	printk("%s", version);
2095 #endif
2096 	return pci_register_driver(&cp_driver);
2097 }
2098 
2099 static void __exit cp_exit (void)
2100 {
2101 	pci_unregister_driver (&cp_driver);
2102 }
2103 
2104 module_init(cp_init);
2105 module_exit(cp_exit);
2106