• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3 	Written 2002-2004 by David Dillow <dave@thedillows.org>
4 	Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 	Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6 
7 	This software may be used and distributed according to the terms of
8 	the GNU General Public License (GPL), incorporated herein by reference.
9 	Drivers based on or derived from this code fall under the GPL and must
10 	retain the authorship, copyright and license notice.  This file is not
11 	a complete program and may only be used when the entire operating
12 	system is licensed under the GPL.
13 
14 	This software is available on a public web site. It may enable
15 	cryptographic capabilities of the 3Com hardware, and may be
16 	exported from the United States under License Exception "TSU"
17 	pursuant to 15 C.F.R. Section 740.13(e).
18 
19 	This work was funded by the National Library of Medicine under
20 	the Department of Energy project number 0274DD06D1 and NLM project
21 	number Y1-LM-2015-01.
22 
23 	This driver is designed for the 3Com 3CR990 Family of cards with the
24 	3XP Processor. It has been tested on x86 and sparc64.
25 
26 	KNOWN ISSUES:
27 	*) The current firmware always strips the VLAN tag off, even if
28 		we tell it not to. You should filter VLANs at the switch
29 		as a workaround (good practice in any event) until we can
30 		get this fixed.
31 	*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 		issue. Hopefully 3Com will fix it.
33 	*) Waiting for a command response takes 8ms due to non-preemptable
34 		polling. Only significant for getting stats and creating
	SAs, but an ugly wart nevertheless.
36 
37 	TODO:
38 	*) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39 	*) Add more support for ethtool (especially for NIC stats)
40 	*) Allow disabling of RX checksum offloading
41 	*) Fix MAC changing to work while the interface is up
42 		(Need to put commands on the TX ring, which changes
43 		the locking)
44 	*) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45 		http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47 
48 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49  * Setting to > 1518 effectively disables this feature.
50  */
51 static int rx_copybreak = 200;
52 
53 /* Should we use MMIO or Port IO?
54  * 0: Port IO
55  * 1: MMIO
56  * 2: Try MMIO, fallback to Port IO
57  */
58 static unsigned int use_mmio = 2;
59 
60 /* end user-configurable values */
61 
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63  */
64 static const int multicast_filter_limit = 32;
65 
66 /* Operational parameters that are set at compile time. */
67 
68 /* Keep the ring sizes a power of two for compile efficiency.
69  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70  * Making the Tx ring too large decreases the effectiveness of channel
71  * bonding and packet priority.
72  * There are no ill effects from too-large receive rings.
73  *
74  * We don't currently use the Hi Tx ring so, don't make it very big.
75  *
76  * Beware that if we start using the Hi Tx ring, we will need to change
77  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78  */
79 #define TXHI_ENTRIES		2
80 #define TXLO_ENTRIES		128
81 #define RX_ENTRIES		32
82 #define COMMAND_ENTRIES		16
83 #define RESPONSE_ENTRIES	32
84 
85 #define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))
87 
88 /* The 3XP will preload and remove 64 entries from the free buffer
89  * list, and we need one entry to keep the ring from wrapping, so
90  * to keep this a power of two, we use 128 entries.
91  */
92 #define RXFREE_ENTRIES		128
93 #define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)
94 
95 /* Operational parameters that usually are not changed. */
96 
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT  (2*HZ)
99 
100 #define PKT_BUF_SZ		1536
101 
102 #define DRV_MODULE_NAME		"typhoon"
103 #define DRV_MODULE_VERSION 	"1.5.8"
104 #define DRV_MODULE_RELDATE	"06/11/09"
105 #define PFX			DRV_MODULE_NAME ": "
106 #define ERR_PFX			KERN_ERR PFX
107 
108 #include <linux/module.h>
109 #include <linux/kernel.h>
110 #include <linux/string.h>
111 #include <linux/timer.h>
112 #include <linux/errno.h>
113 #include <linux/ioport.h>
114 #include <linux/slab.h>
115 #include <linux/interrupt.h>
116 #include <linux/pci.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/skbuff.h>
120 #include <linux/mm.h>
121 #include <linux/init.h>
122 #include <linux/delay.h>
123 #include <linux/ethtool.h>
124 #include <linux/if_vlan.h>
125 #include <linux/crc32.h>
126 #include <linux/bitops.h>
127 #include <asm/processor.h>
128 #include <asm/io.h>
129 #include <asm/uaccess.h>
130 #include <linux/in6.h>
131 #include <linux/dma-mapping.h>
132 
133 #include "typhoon.h"
134 #include "typhoon-firmware.h"
135 
136 static char version[] __devinitdata =
137     "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138 
139 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
140 MODULE_VERSION(DRV_MODULE_VERSION);
141 MODULE_LICENSE("GPL");
142 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
143 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
144 			       "the buffer given back to the NIC. Default "
145 			       "is 200.");
146 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
147 			   "Default is to try MMIO and fallback to PIO.");
148 module_param(rx_copybreak, int, 0);
149 module_param(use_mmio, int, 0);
150 
151 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
152 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
153 #undef NETIF_F_TSO
154 #endif
155 
156 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
157 #error TX ring too small!
158 #endif
159 
160 struct typhoon_card_info {
161 	char *name;
162 	int capabilities;
163 };
164 
165 #define TYPHOON_CRYPTO_NONE		0x00
166 #define TYPHOON_CRYPTO_DES		0x01
167 #define TYPHOON_CRYPTO_3DES		0x02
168 #define	TYPHOON_CRYPTO_VARIABLE		0x04
169 #define TYPHOON_FIBER			0x08
170 #define TYPHOON_WAKEUP_NEEDS_RESET	0x10
171 
172 enum typhoon_cards {
173 	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
174 	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
175 	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
176 	TYPHOON_FXM,
177 };
178 
179 /* directly indexed by enum typhoon_cards, above */
180 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
181 	{ "3Com Typhoon (3C990-TX)",
182 		TYPHOON_CRYPTO_NONE},
183 	{ "3Com Typhoon (3CR990-TX-95)",
184 		TYPHOON_CRYPTO_DES},
185 	{ "3Com Typhoon (3CR990-TX-97)",
186 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
187 	{ "3Com Typhoon (3C990SVR)",
188 		TYPHOON_CRYPTO_NONE},
189 	{ "3Com Typhoon (3CR990SVR95)",
190 		TYPHOON_CRYPTO_DES},
191 	{ "3Com Typhoon (3CR990SVR97)",
192 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
193 	{ "3Com Typhoon2 (3C990B-TX-M)",
194 		TYPHOON_CRYPTO_VARIABLE},
195 	{ "3Com Typhoon2 (3C990BSVR)",
196 		TYPHOON_CRYPTO_VARIABLE},
197 	{ "3Com Typhoon (3CR990-FX-95)",
198 		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
199 	{ "3Com Typhoon (3CR990-FX-97)",
200 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
201 	{ "3Com Typhoon (3CR990-FX-95 Server)",
202 	 	TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
203 	{ "3Com Typhoon (3CR990-FX-97 Server)",
204 	 	TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
205 	{ "3Com Typhoon2 (3C990B-FX-97)",
206 		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
207 };
208 
209 /* Notes on the new subsystem numbering scheme:
210  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
211  * bit 4 indicates if this card has secured firmware (we don't support it)
212  * bit 8 indicates if this is a (0) copper or (1) fiber card
213  * bits 12-16 indicate card type: (0) client and (1) server
214  */
215 static struct pci_device_id typhoon_pci_tbl[] = {
216 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
217 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
218 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
219 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
220 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
221 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
222 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
223 	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
224 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
225 	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
226 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
227 	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
228 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
229 	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
230 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
231 	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
232 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
233 	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
234 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
235 	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
236 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
237 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
238 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
239 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
240 	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
241 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
242 	{ 0, }
243 };
244 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
245 
246 /* Define the shared memory area
247  * Align everything the 3XP will normally be using.
248  * We'll need to move/align txHi if we start using that ring.
249  */
250 #define __3xp_aligned	____cacheline_aligned
251 struct typhoon_shared {
252 	struct typhoon_interface	iface;
253 	struct typhoon_indexes		indexes			__3xp_aligned;
254 	struct tx_desc			txLo[TXLO_ENTRIES] 	__3xp_aligned;
255 	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
256 	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
257 	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
258 	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
259 	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
260 	u32				zeroWord;
261 	struct tx_desc			txHi[TXHI_ENTRIES];
262 } __attribute__ ((packed));
263 
264 struct rxbuff_ent {
265 	struct sk_buff *skb;
266 	dma_addr_t	dma_addr;
267 };
268 
269 struct typhoon {
270 	/* Tx cache line section */
271 	struct transmit_ring 	txLoRing	____cacheline_aligned;
272 	struct pci_dev *	tx_pdev;
273 	void __iomem		*tx_ioaddr;
274 	u32			txlo_dma_addr;
275 
276 	/* Irq/Rx cache line section */
277 	void __iomem		*ioaddr		____cacheline_aligned;
278 	struct typhoon_indexes *indexes;
279 	u8			awaiting_resp;
280 	u8			duplex;
281 	u8			speed;
282 	u8			card_state;
283 	struct basic_ring	rxLoRing;
284 	struct pci_dev *	pdev;
285 	struct net_device *	dev;
286 	struct napi_struct	napi;
287 	spinlock_t		state_lock;
288 	struct vlan_group *	vlgrp;
289 	struct basic_ring	rxHiRing;
290 	struct basic_ring	rxBuffRing;
291 	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];
292 
293 	/* general section */
294 	spinlock_t		command_lock	____cacheline_aligned;
295 	struct basic_ring	cmdRing;
296 	struct basic_ring	respRing;
297 	struct net_device_stats	stats;
298 	struct net_device_stats	stats_saved;
299 	const char *		name;
300 	struct typhoon_shared *	shared;
301 	dma_addr_t		shared_dma;
302 	__le16			xcvr_select;
303 	__le16			wol_events;
304 	__le32			offload;
305 
306 	/* unused stuff (future use) */
307 	int			capabilities;
308 	struct transmit_ring 	txHiRing;
309 };
310 
311 enum completion_wait_values {
312 	NoWait = 0, WaitNoSleep, WaitSleep,
313 };
314 
315 /* These are the values for the typhoon.card_state variable.
316  * These determine where the statistics will come from in get_stats().
317  * The sleep image does not support the statistics we need.
318  */
319 enum state_values {
320 	Sleeping = 0, Running,
321 };
322 
323 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
324  * cannot pass a read, so this forces current writes to post.
325  */
326 #define typhoon_post_pci_writes(x) \
327 	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
328 
329 /* We'll wait up to six seconds for a reset, and half a second normally.
330  */
331 #define TYPHOON_UDELAY			50
332 #define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
333 #define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
334 #define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)
335 
336 #if defined(NETIF_F_TSO)
337 #define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
338 #define TSO_NUM_DESCRIPTORS	2
339 #define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
340 #else
341 #define NETIF_F_TSO 		0
342 #define skb_tso_size(x) 	0
343 #define TSO_NUM_DESCRIPTORS	0
344 #define TSO_OFFLOAD_ON		0
345 #endif
346 
347 static inline void
typhoon_inc_index(u32 * index,const int count,const int num_entries)348 typhoon_inc_index(u32 *index, const int count, const int num_entries)
349 {
350 	/* Increment a ring index -- we can use this for all rings execept
351 	 * the Rx rings, as they use different size descriptors
352 	 * otherwise, everything is the same size as a cmd_desc
353 	 */
354 	*index += count * sizeof(struct cmd_desc);
355 	*index %= num_entries * sizeof(struct cmd_desc);
356 }
357 
358 static inline void
typhoon_inc_cmd_index(u32 * index,const int count)359 typhoon_inc_cmd_index(u32 *index, const int count)
360 {
361 	typhoon_inc_index(index, count, COMMAND_ENTRIES);
362 }
363 
364 static inline void
typhoon_inc_resp_index(u32 * index,const int count)365 typhoon_inc_resp_index(u32 *index, const int count)
366 {
367 	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
368 }
369 
370 static inline void
typhoon_inc_rxfree_index(u32 * index,const int count)371 typhoon_inc_rxfree_index(u32 *index, const int count)
372 {
373 	typhoon_inc_index(index, count, RXFREE_ENTRIES);
374 }
375 
376 static inline void
typhoon_inc_tx_index(u32 * index,const int count)377 typhoon_inc_tx_index(u32 *index, const int count)
378 {
379 	/* if we start using the Hi Tx ring, this needs updateing */
380 	typhoon_inc_index(index, count, TXLO_ENTRIES);
381 }
382 
383 static inline void
typhoon_inc_rx_index(u32 * index,const int count)384 typhoon_inc_rx_index(u32 *index, const int count)
385 {
386 	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
387 	*index += count * sizeof(struct rx_desc);
388 	*index %= RX_ENTRIES * sizeof(struct rx_desc);
389 }
390 
391 static int
typhoon_reset(void __iomem * ioaddr,int wait_type)392 typhoon_reset(void __iomem *ioaddr, int wait_type)
393 {
394 	int i, err = 0;
395 	int timeout;
396 
397 	if(wait_type == WaitNoSleep)
398 		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
399 	else
400 		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
401 
402 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
403 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
404 
405 	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
406 	typhoon_post_pci_writes(ioaddr);
407 	udelay(1);
408 	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
409 
410 	if(wait_type != NoWait) {
411 		for(i = 0; i < timeout; i++) {
412 			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
413 			   TYPHOON_STATUS_WAITING_FOR_HOST)
414 				goto out;
415 
416 			if(wait_type == WaitSleep)
417 				schedule_timeout_uninterruptible(1);
418 			else
419 				udelay(TYPHOON_UDELAY);
420 		}
421 
422 		err = -ETIMEDOUT;
423 	}
424 
425 out:
426 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
427 	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
428 
429 	/* The 3XP seems to need a little extra time to complete the load
430 	 * of the sleep image before we can reliably boot it. Failure to
431 	 * do this occasionally results in a hung adapter after boot in
432 	 * typhoon_init_one() while trying to read the MAC address or
433 	 * putting the card to sleep. 3Com's driver waits 5ms, but
434 	 * that seems to be overkill. However, if we can sleep, we might
435 	 * as well give it that much time. Otherwise, we'll give it 500us,
436 	 * which should be enough (I've see it work well at 100us, but still
437 	 * saw occasional problems.)
438 	 */
439 	if(wait_type == WaitSleep)
440 		msleep(5);
441 	else
442 		udelay(500);
443 	return err;
444 }
445 
446 static int
typhoon_wait_status(void __iomem * ioaddr,u32 wait_value)447 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
448 {
449 	int i, err = 0;
450 
451 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
452 		if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
453 			goto out;
454 		udelay(TYPHOON_UDELAY);
455 	}
456 
457 	err = -ETIMEDOUT;
458 
459 out:
460 	return err;
461 }
462 
463 static inline void
typhoon_media_status(struct net_device * dev,struct resp_desc * resp)464 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
465 {
466 	if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
467 		netif_carrier_off(dev);
468 	else
469 		netif_carrier_on(dev);
470 }
471 
472 static inline void
typhoon_hello(struct typhoon * tp)473 typhoon_hello(struct typhoon *tp)
474 {
475 	struct basic_ring *ring = &tp->cmdRing;
476 	struct cmd_desc *cmd;
477 
478 	/* We only get a hello request if we've not sent anything to the
479 	 * card in a long while. If the lock is held, then we're in the
480 	 * process of issuing a command, so we don't need to respond.
481 	 */
482 	if(spin_trylock(&tp->command_lock)) {
483 		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
484 		typhoon_inc_cmd_index(&ring->lastWrite, 1);
485 
486 		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
487 		smp_wmb();
488 		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
489 		spin_unlock(&tp->command_lock);
490 	}
491 }
492 
493 static int
typhoon_process_response(struct typhoon * tp,int resp_size,struct resp_desc * resp_save)494 typhoon_process_response(struct typhoon *tp, int resp_size,
495 				struct resp_desc *resp_save)
496 {
497 	struct typhoon_indexes *indexes = tp->indexes;
498 	struct resp_desc *resp;
499 	u8 *base = tp->respRing.ringBase;
500 	int count, len, wrap_len;
501 	u32 cleared;
502 	u32 ready;
503 
504 	cleared = le32_to_cpu(indexes->respCleared);
505 	ready = le32_to_cpu(indexes->respReady);
506 	while(cleared != ready) {
507 		resp = (struct resp_desc *)(base + cleared);
508 		count = resp->numDesc + 1;
509 		if(resp_save && resp->seqNo) {
510 			if(count > resp_size) {
511 				resp_save->flags = TYPHOON_RESP_ERROR;
512 				goto cleanup;
513 			}
514 
515 			wrap_len = 0;
516 			len = count * sizeof(*resp);
517 			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
518 				wrap_len = cleared + len - RESPONSE_RING_SIZE;
519 				len = RESPONSE_RING_SIZE - cleared;
520 			}
521 
522 			memcpy(resp_save, resp, len);
523 			if(unlikely(wrap_len)) {
524 				resp_save += len / sizeof(*resp);
525 				memcpy(resp_save, base, wrap_len);
526 			}
527 
528 			resp_save = NULL;
529 		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
530 			typhoon_media_status(tp->dev, resp);
531 		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
532 			typhoon_hello(tp);
533 		} else {
534 			printk(KERN_ERR "%s: dumping unexpected response "
535 			       "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
536 			       tp->name, le16_to_cpu(resp->cmd),
537 			       resp->numDesc, resp->flags,
538 			       le16_to_cpu(resp->parm1),
539 			       le32_to_cpu(resp->parm2),
540 			       le32_to_cpu(resp->parm3));
541 		}
542 
543 cleanup:
544 		typhoon_inc_resp_index(&cleared, count);
545 	}
546 
547 	indexes->respCleared = cpu_to_le32(cleared);
548 	wmb();
549 	return (resp_save == NULL);
550 }
551 
552 static inline int
typhoon_num_free(int lastWrite,int lastRead,int ringSize)553 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
554 {
555 	/* this works for all descriptors but rx_desc, as they are a
556 	 * different size than the cmd_desc -- everyone else is the same
557 	 */
558 	lastWrite /= sizeof(struct cmd_desc);
559 	lastRead /= sizeof(struct cmd_desc);
560 	return (ringSize + lastRead - lastWrite - 1) % ringSize;
561 }
562 
563 static inline int
typhoon_num_free_cmd(struct typhoon * tp)564 typhoon_num_free_cmd(struct typhoon *tp)
565 {
566 	int lastWrite = tp->cmdRing.lastWrite;
567 	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
568 
569 	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
570 }
571 
572 static inline int
typhoon_num_free_resp(struct typhoon * tp)573 typhoon_num_free_resp(struct typhoon *tp)
574 {
575 	int respReady = le32_to_cpu(tp->indexes->respReady);
576 	int respCleared = le32_to_cpu(tp->indexes->respCleared);
577 
578 	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
579 }
580 
581 static inline int
typhoon_num_free_tx(struct transmit_ring * ring)582 typhoon_num_free_tx(struct transmit_ring *ring)
583 {
584 	/* if we start using the Hi Tx ring, this needs updating */
585 	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
586 }
587 
588 static int
typhoon_issue_command(struct typhoon * tp,int num_cmd,struct cmd_desc * cmd,int num_resp,struct resp_desc * resp)589 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
590 		      int num_resp, struct resp_desc *resp)
591 {
592 	struct typhoon_indexes *indexes = tp->indexes;
593 	struct basic_ring *ring = &tp->cmdRing;
594 	struct resp_desc local_resp;
595 	int i, err = 0;
596 	int got_resp;
597 	int freeCmd, freeResp;
598 	int len, wrap_len;
599 
600 	spin_lock(&tp->command_lock);
601 
602 	freeCmd = typhoon_num_free_cmd(tp);
603 	freeResp = typhoon_num_free_resp(tp);
604 
605 	if(freeCmd < num_cmd || freeResp < num_resp) {
606 		printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
607 			"%d (%d) resp\n", tp->name, freeCmd, num_cmd,
608 			freeResp, num_resp);
609 		err = -ENOMEM;
610 		goto out;
611 	}
612 
613 	if(cmd->flags & TYPHOON_CMD_RESPOND) {
614 		/* If we're expecting a response, but the caller hasn't given
615 		 * us a place to put it, we'll provide one.
616 		 */
617 		tp->awaiting_resp = 1;
618 		if(resp == NULL) {
619 			resp = &local_resp;
620 			num_resp = 1;
621 		}
622 	}
623 
624 	wrap_len = 0;
625 	len = num_cmd * sizeof(*cmd);
626 	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
627 		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
628 		len = COMMAND_RING_SIZE - ring->lastWrite;
629 	}
630 
631 	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
632 	if(unlikely(wrap_len)) {
633 		struct cmd_desc *wrap_ptr = cmd;
634 		wrap_ptr += len / sizeof(*cmd);
635 		memcpy(ring->ringBase, wrap_ptr, wrap_len);
636 	}
637 
638 	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
639 
640 	/* "I feel a presence... another warrior is on the mesa."
641 	 */
642 	wmb();
643 	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
644 	typhoon_post_pci_writes(tp->ioaddr);
645 
646 	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
647 		goto out;
648 
649 	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
650 	 * preempt or do anything other than take interrupts. So, don't
651 	 * wait for a response unless you have to.
652 	 *
653 	 * I've thought about trying to sleep here, but we're called
654 	 * from many contexts that don't allow that. Also, given the way
655 	 * 3Com has implemented irq coalescing, we would likely timeout --
656 	 * this has been observed in real life!
657 	 *
658 	 * The big killer is we have to wait to get stats from the card,
659 	 * though we could go to a periodic refresh of those if we don't
660 	 * mind them getting somewhat stale. The rest of the waiting
661 	 * commands occur during open/close/suspend/resume, so they aren't
662 	 * time critical. Creating SAs in the future will also have to
663 	 * wait here.
664 	 */
665 	got_resp = 0;
666 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
667 		if(indexes->respCleared != indexes->respReady)
668 			got_resp = typhoon_process_response(tp, num_resp,
669 								resp);
670 		udelay(TYPHOON_UDELAY);
671 	}
672 
673 	if(!got_resp) {
674 		err = -ETIMEDOUT;
675 		goto out;
676 	}
677 
678 	/* Collect the error response even if we don't care about the
679 	 * rest of the response
680 	 */
681 	if(resp->flags & TYPHOON_RESP_ERROR)
682 		err = -EIO;
683 
684 out:
685 	if(tp->awaiting_resp) {
686 		tp->awaiting_resp = 0;
687 		smp_wmb();
688 
689 		/* Ugh. If a response was added to the ring between
690 		 * the call to typhoon_process_response() and the clearing
691 		 * of tp->awaiting_resp, we could have missed the interrupt
692 		 * and it could hang in the ring an indeterminate amount of
693 		 * time. So, check for it, and interrupt ourselves if this
694 		 * is the case.
695 		 */
696 		if(indexes->respCleared != indexes->respReady)
697 			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
698 	}
699 
700 	spin_unlock(&tp->command_lock);
701 	return err;
702 }
703 
704 static void
typhoon_vlan_rx_register(struct net_device * dev,struct vlan_group * grp)705 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
706 {
707 	struct typhoon *tp = netdev_priv(dev);
708 	struct cmd_desc xp_cmd;
709 	int err;
710 
711 	spin_lock_bh(&tp->state_lock);
712 	if(!tp->vlgrp != !grp) {
713 		/* We've either been turned on for the first time, or we've
714 		 * been turned off. Update the 3XP.
715 		 */
716 		if(grp)
717 			tp->offload |= TYPHOON_OFFLOAD_VLAN;
718 		else
719 			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
720 
721 		/* If the interface is up, the runtime is running -- and we
722 		 * must be up for the vlan core to call us.
723 		 *
724 		 * Do the command outside of the spin lock, as it is slow.
725 		 */
726 		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
727 					TYPHOON_CMD_SET_OFFLOAD_TASKS);
728 		xp_cmd.parm2 = tp->offload;
729 		xp_cmd.parm3 = tp->offload;
730 		spin_unlock_bh(&tp->state_lock);
731 		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
732 		if(err < 0)
733 			printk("%s: vlan offload error %d\n", tp->name, -err);
734 		spin_lock_bh(&tp->state_lock);
735 	}
736 
737 	/* now make the change visible */
738 	tp->vlgrp = grp;
739 	spin_unlock_bh(&tp->state_lock);
740 }
741 
742 static inline void
typhoon_tso_fill(struct sk_buff * skb,struct transmit_ring * txRing,u32 ring_dma)743 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
744 			u32 ring_dma)
745 {
746 	struct tcpopt_desc *tcpd;
747 	u32 tcpd_offset = ring_dma;
748 
749 	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
750 	tcpd_offset += txRing->lastWrite;
751 	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
752 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
753 
754 	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
755 	tcpd->numDesc = 1;
756 	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
757 	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
758 	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
759 	tcpd->bytesTx = cpu_to_le32(skb->len);
760 	tcpd->status = 0;
761 }
762 
763 static int
typhoon_start_tx(struct sk_buff * skb,struct net_device * dev)764 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
765 {
766 	struct typhoon *tp = netdev_priv(dev);
767 	struct transmit_ring *txRing;
768 	struct tx_desc *txd, *first_txd;
769 	dma_addr_t skb_dma;
770 	int numDesc;
771 
772 	/* we have two rings to choose from, but we only use txLo for now
773 	 * If we start using the Hi ring as well, we'll need to update
774 	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
775 	 * and TXHI_ENTRIES to match, as well as update the TSO code below
776 	 * to get the right DMA address
777 	 */
778 	txRing = &tp->txLoRing;
779 
780 	/* We need one descriptor for each fragment of the sk_buff, plus the
781 	 * one for the ->data area of it.
782 	 *
783 	 * The docs say a maximum of 16 fragment descriptors per TCP option
784 	 * descriptor, then make a new packet descriptor and option descriptor
785 	 * for the next 16 fragments. The engineers say just an option
786 	 * descriptor is needed. I've tested up to 26 fragments with a single
787 	 * packet descriptor/option descriptor combo, so I use that for now.
788 	 *
789 	 * If problems develop with TSO, check this first.
790 	 */
791 	numDesc = skb_shinfo(skb)->nr_frags + 1;
792 	if (skb_is_gso(skb))
793 		numDesc++;
794 
795 	/* When checking for free space in the ring, we need to also
796 	 * account for the initial Tx descriptor, and we always must leave
797 	 * at least one descriptor unused in the ring so that it doesn't
798 	 * wrap and look empty.
799 	 *
800 	 * The only time we should loop here is when we hit the race
801 	 * between marking the queue awake and updating the cleared index.
802 	 * Just loop and it will appear. This comes from the acenic driver.
803 	 */
804 	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
805 		smp_rmb();
806 
807 	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
808 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
809 
810 	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
811 	first_txd->numDesc = 0;
812 	first_txd->len = 0;
813 	first_txd->tx_addr = (u64)((unsigned long) skb);
814 	first_txd->processFlags = 0;
815 
816 	if(skb->ip_summed == CHECKSUM_PARTIAL) {
817 		/* The 3XP will figure out if this is UDP/TCP */
818 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
819 		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
820 		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
821 	}
822 
823 	if(vlan_tx_tag_present(skb)) {
824 		first_txd->processFlags |=
825 		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
826 		first_txd->processFlags |=
827 		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
828 				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
829 	}
830 
831 	if (skb_is_gso(skb)) {
832 		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
833 		first_txd->numDesc++;
834 
835 		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
836 	}
837 
838 	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
839 	typhoon_inc_tx_index(&txRing->lastWrite, 1);
840 
841 	/* No need to worry about padding packet -- the firmware pads
842 	 * it with zeros to ETH_ZLEN for us.
843 	 */
844 	if(skb_shinfo(skb)->nr_frags == 0) {
845 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
846 				       PCI_DMA_TODEVICE);
847 		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
848 		txd->len = cpu_to_le16(skb->len);
849 		txd->frag.addr = cpu_to_le32(skb_dma);
850 		txd->frag.addrHi = 0;
851 		first_txd->numDesc++;
852 	} else {
853 		int i, len;
854 
855 		len = skb_headlen(skb);
856 		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
857 				         PCI_DMA_TODEVICE);
858 		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
859 		txd->len = cpu_to_le16(len);
860 		txd->frag.addr = cpu_to_le32(skb_dma);
861 		txd->frag.addrHi = 0;
862 		first_txd->numDesc++;
863 
864 		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
865 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
866 			void *frag_addr;
867 
868 			txd = (struct tx_desc *) (txRing->ringBase +
869 						txRing->lastWrite);
870 			typhoon_inc_tx_index(&txRing->lastWrite, 1);
871 
872 			len = frag->size;
873 			frag_addr = (void *) page_address(frag->page) +
874 						frag->page_offset;
875 			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
876 					 PCI_DMA_TODEVICE);
877 			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
878 			txd->len = cpu_to_le16(len);
879 			txd->frag.addr = cpu_to_le32(skb_dma);
880 			txd->frag.addrHi = 0;
881 			first_txd->numDesc++;
882 		}
883 	}
884 
885 	/* Kick the 3XP
886 	 */
887 	wmb();
888 	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
889 
890 	dev->trans_start = jiffies;
891 
892 	/* If we don't have room to put the worst case packet on the
893 	 * queue, then we must stop the queue. We need 2 extra
894 	 * descriptors -- one to prevent ring wrap, and one for the
895 	 * Tx header.
896 	 */
897 	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
898 
899 	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
900 		netif_stop_queue(dev);
901 
902 		/* A Tx complete IRQ could have gotten inbetween, making
903 		 * the ring free again. Only need to recheck here, since
904 		 * Tx is serialized.
905 		 */
906 		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
907 			netif_wake_queue(dev);
908 	}
909 
910 	return 0;
911 }
912 
/* Program the 3XP receive filter from the device flags and multicast
 * list.  Builds the filter word locally, uploads the multicast hash
 * table first if one is needed, then activates the filter with a
 * single SET_RX_FILTER command.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];	/* 64-bit multicast hash bitmap */
	__le16 filter;

	/* Frames to our station address and broadcasts are always
	 * accepted; everything else depends on the interface flags.
	 */
	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if((dev->mc_count > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if(dev->mc_count) {
		struct dev_mc_list *mclist;
		int i;

		/* Hash each address into the 64-bit table: the low 6
		 * bits of the Ethernet CRC select the bit to set.
		 */
		memset(mc_filter, 0, sizeof(mc_filter));
		for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		    i++, mclist = mclist->next) {
			int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		/* Upload the hash table to the firmware... */
		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		/* ...and make the filter consult it. */
		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
953 
954 static int
typhoon_do_get_stats(struct typhoon * tp)955 typhoon_do_get_stats(struct typhoon *tp)
956 {
957 	struct net_device_stats *stats = &tp->stats;
958 	struct net_device_stats *saved = &tp->stats_saved;
959 	struct cmd_desc xp_cmd;
960 	struct resp_desc xp_resp[7];
961 	struct stats_resp *s = (struct stats_resp *) xp_resp;
962 	int err;
963 
964 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
965 	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
966 	if(err < 0)
967 		return err;
968 
969 	/* 3Com's Linux driver uses txMultipleCollisions as it's
970 	 * collisions value, but there is some other collision info as well...
971 	 *
972 	 * The extra status reported would be a good candidate for
973 	 * ethtool_ops->get_{strings,stats}()
974 	 */
975 	stats->tx_packets = le32_to_cpu(s->txPackets);
976 	stats->tx_bytes = le64_to_cpu(s->txBytes);
977 	stats->tx_errors = le32_to_cpu(s->txCarrierLost);
978 	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
979 	stats->collisions = le32_to_cpu(s->txMultipleCollisions);
980 	stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
981 	stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
982 	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
983 	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
984 			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
985 	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
986 	stats->rx_length_errors = le32_to_cpu(s->rxOversized);
987 	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
988 			SPEED_100 : SPEED_10;
989 	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
990 			DUPLEX_FULL : DUPLEX_HALF;
991 
992 	/* add in the saved statistics
993 	 */
994 	stats->tx_packets += saved->tx_packets;
995 	stats->tx_bytes += saved->tx_bytes;
996 	stats->tx_errors += saved->tx_errors;
997 	stats->collisions += saved->collisions;
998 	stats->rx_packets += saved->rx_packets;
999 	stats->rx_bytes += saved->rx_bytes;
1000 	stats->rx_fifo_errors += saved->rx_fifo_errors;
1001 	stats->rx_errors += saved->rx_errors;
1002 	stats->rx_crc_errors += saved->rx_crc_errors;
1003 	stats->rx_length_errors += saved->rx_length_errors;
1004 
1005 	return 0;
1006 }
1007 
1008 static struct net_device_stats *
typhoon_get_stats(struct net_device * dev)1009 typhoon_get_stats(struct net_device *dev)
1010 {
1011 	struct typhoon *tp = netdev_priv(dev);
1012 	struct net_device_stats *stats = &tp->stats;
1013 	struct net_device_stats *saved = &tp->stats_saved;
1014 
1015 	smp_rmb();
1016 	if(tp->card_state == Sleeping)
1017 		return saved;
1018 
1019 	if(typhoon_do_get_stats(tp) < 0) {
1020 		printk(KERN_ERR "%s: error getting stats\n", dev->name);
1021 		return saved;
1022 	}
1023 
1024 	return stats;
1025 }
1026 
1027 static int
typhoon_set_mac_address(struct net_device * dev,void * addr)1028 typhoon_set_mac_address(struct net_device *dev, void *addr)
1029 {
1030 	struct sockaddr *saddr = (struct sockaddr *) addr;
1031 
1032 	if(netif_running(dev))
1033 		return -EBUSY;
1034 
1035 	memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1036 	return 0;
1037 }
1038 
1039 static void
typhoon_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)1040 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1041 {
1042 	struct typhoon *tp = netdev_priv(dev);
1043 	struct pci_dev *pci_dev = tp->pdev;
1044 	struct cmd_desc xp_cmd;
1045 	struct resp_desc xp_resp[3];
1046 
1047 	smp_rmb();
1048 	if(tp->card_state == Sleeping) {
1049 		strcpy(info->fw_version, "Sleep image");
1050 	} else {
1051 		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1052 		if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1053 			strcpy(info->fw_version, "Unknown runtime");
1054 		} else {
1055 			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1056 			snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1057 				 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1058 				 sleep_ver & 0xfff);
1059 		}
1060 	}
1061 
1062 	strcpy(info->driver, DRV_MODULE_NAME);
1063 	strcpy(info->version, DRV_MODULE_VERSION);
1064 	strcpy(info->bus_info, pci_name(pci_dev));
1065 }
1066 
1067 static int
typhoon_get_settings(struct net_device * dev,struct ethtool_cmd * cmd)1068 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1069 {
1070 	struct typhoon *tp = netdev_priv(dev);
1071 
1072 	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1073 				SUPPORTED_Autoneg;
1074 
1075 	switch (tp->xcvr_select) {
1076 	case TYPHOON_XCVR_10HALF:
1077 		cmd->advertising = ADVERTISED_10baseT_Half;
1078 		break;
1079 	case TYPHOON_XCVR_10FULL:
1080 		cmd->advertising = ADVERTISED_10baseT_Full;
1081 		break;
1082 	case TYPHOON_XCVR_100HALF:
1083 		cmd->advertising = ADVERTISED_100baseT_Half;
1084 		break;
1085 	case TYPHOON_XCVR_100FULL:
1086 		cmd->advertising = ADVERTISED_100baseT_Full;
1087 		break;
1088 	case TYPHOON_XCVR_AUTONEG:
1089 		cmd->advertising = ADVERTISED_10baseT_Half |
1090 					    ADVERTISED_10baseT_Full |
1091 					    ADVERTISED_100baseT_Half |
1092 					    ADVERTISED_100baseT_Full |
1093 					    ADVERTISED_Autoneg;
1094 		break;
1095 	}
1096 
1097 	if(tp->capabilities & TYPHOON_FIBER) {
1098 		cmd->supported |= SUPPORTED_FIBRE;
1099 		cmd->advertising |= ADVERTISED_FIBRE;
1100 		cmd->port = PORT_FIBRE;
1101 	} else {
1102 		cmd->supported |= SUPPORTED_10baseT_Half |
1103 		    			SUPPORTED_10baseT_Full |
1104 					SUPPORTED_TP;
1105 		cmd->advertising |= ADVERTISED_TP;
1106 		cmd->port = PORT_TP;
1107 	}
1108 
1109 	/* need to get stats to make these link speed/duplex valid */
1110 	typhoon_do_get_stats(tp);
1111 	cmd->speed = tp->speed;
1112 	cmd->duplex = tp->duplex;
1113 	cmd->phy_address = 0;
1114 	cmd->transceiver = XCVR_INTERNAL;
1115 	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1116 		cmd->autoneg = AUTONEG_ENABLE;
1117 	else
1118 		cmd->autoneg = AUTONEG_DISABLE;
1119 	cmd->maxtxpkt = 1;
1120 	cmd->maxrxpkt = 1;
1121 
1122 	return 0;
1123 }
1124 
1125 static int
typhoon_set_settings(struct net_device * dev,struct ethtool_cmd * cmd)1126 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1127 {
1128 	struct typhoon *tp = netdev_priv(dev);
1129 	struct cmd_desc xp_cmd;
1130 	__le16 xcvr;
1131 	int err;
1132 
1133 	err = -EINVAL;
1134 	if(cmd->autoneg == AUTONEG_ENABLE) {
1135 		xcvr = TYPHOON_XCVR_AUTONEG;
1136 	} else {
1137 		if(cmd->duplex == DUPLEX_HALF) {
1138 			if(cmd->speed == SPEED_10)
1139 				xcvr = TYPHOON_XCVR_10HALF;
1140 			else if(cmd->speed == SPEED_100)
1141 				xcvr = TYPHOON_XCVR_100HALF;
1142 			else
1143 				goto out;
1144 		} else if(cmd->duplex == DUPLEX_FULL) {
1145 			if(cmd->speed == SPEED_10)
1146 				xcvr = TYPHOON_XCVR_10FULL;
1147 			else if(cmd->speed == SPEED_100)
1148 				xcvr = TYPHOON_XCVR_100FULL;
1149 			else
1150 				goto out;
1151 		} else
1152 			goto out;
1153 	}
1154 
1155 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1156 	xp_cmd.parm1 = xcvr;
1157 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1158 	if(err < 0)
1159 		goto out;
1160 
1161 	tp->xcvr_select = xcvr;
1162 	if(cmd->autoneg == AUTONEG_ENABLE) {
1163 		tp->speed = 0xff;	/* invalid */
1164 		tp->duplex = 0xff;	/* invalid */
1165 	} else {
1166 		tp->speed = cmd->speed;
1167 		tp->duplex = cmd->duplex;
1168 	}
1169 
1170 out:
1171 	return err;
1172 }
1173 
1174 static void
typhoon_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)1175 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1176 {
1177 	struct typhoon *tp = netdev_priv(dev);
1178 
1179 	wol->supported = WAKE_PHY | WAKE_MAGIC;
1180 	wol->wolopts = 0;
1181 	if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1182 		wol->wolopts |= WAKE_PHY;
1183 	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1184 		wol->wolopts |= WAKE_MAGIC;
1185 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1186 }
1187 
1188 static int
typhoon_set_wol(struct net_device * dev,struct ethtool_wolinfo * wol)1189 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1190 {
1191 	struct typhoon *tp = netdev_priv(dev);
1192 
1193 	if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1194 		return -EINVAL;
1195 
1196 	tp->wol_events = 0;
1197 	if(wol->wolopts & WAKE_PHY)
1198 		tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1199 	if(wol->wolopts & WAKE_MAGIC)
1200 		tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1201 
1202 	return 0;
1203 }
1204 
/* ethtool get_rx_csum handler: Rx checksum offload is always enabled
 * (see the TODO at the top of the file about allowing it to be
 * disabled).
 */
static u32
typhoon_get_rx_csum(struct net_device *dev)
{
	/* For now, we don't allow turning off RX checksums.
	 */
	return 1;
}
1212 
1213 static void
typhoon_get_ringparam(struct net_device * dev,struct ethtool_ringparam * ering)1214 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1215 {
1216 	ering->rx_max_pending = RXENT_ENTRIES;
1217 	ering->rx_mini_max_pending = 0;
1218 	ering->rx_jumbo_max_pending = 0;
1219 	ering->tx_max_pending = TXLO_ENTRIES - 1;
1220 
1221 	ering->rx_pending = RXENT_ENTRIES;
1222 	ering->rx_mini_pending = 0;
1223 	ering->rx_jumbo_pending = 0;
1224 	ering->tx_pending = TXLO_ENTRIES - 1;
1225 }
1226 
/* ethtool operations supported by this driver.  Tx checksum, SG, and
 * TSO toggling use the generic helpers; Rx checksum is report-only
 * (typhoon_get_rx_csum always returns enabled).
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1240 
1241 static int
typhoon_wait_interrupt(void __iomem * ioaddr)1242 typhoon_wait_interrupt(void __iomem *ioaddr)
1243 {
1244 	int i, err = 0;
1245 
1246 	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1247 		if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1248 		   TYPHOON_INTR_BOOTCMD)
1249 			goto out;
1250 		udelay(TYPHOON_UDELAY);
1251 	}
1252 
1253 	err = -ETIMEDOUT;
1254 
1255 out:
1256 	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1257 	return err;
1258 }
1259 
/* Byte offset of member @x within the shared host/card memory block. */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* Build the typhoon_interface block -- the card-visible description of
 * every shared ring -- and the matching host-side ring bookkeeping in
 * @tp.  All rings live inside the single tp->shared allocation, so each
 * address handed to the card is tp->shared_dma plus a member offset.
 * Also initializes card state, default offload flags, and the locks.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side views of the same rings. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	/* Publish the initial card state; readers pair with smp_rmb(). */
	tp->card_state = Sleeping;
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1329 
1330 static void
typhoon_init_rings(struct typhoon * tp)1331 typhoon_init_rings(struct typhoon *tp)
1332 {
1333 	memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1334 
1335 	tp->txLoRing.lastWrite = 0;
1336 	tp->txHiRing.lastWrite = 0;
1337 	tp->rxLoRing.lastWrite = 0;
1338 	tp->rxHiRing.lastWrite = 0;
1339 	tp->rxBuffRing.lastWrite = 0;
1340 	tp->cmdRing.lastWrite = 0;
1341 	tp->cmdRing.lastWrite = 0;
1342 
1343 	tp->txLoRing.lastRead = 0;
1344 	tp->txHiRing.lastRead = 0;
1345 }
1346 
/* Download the built-in runtime firmware image to the 3XP.
 *
 * The image is a typhoon_file_header ("TYPHOON" tag, HMAC digest,
 * section count) followed by sections, each with its own load address
 * and length.  Sections are fed to the card in <= PAGE_SIZE chunks
 * through one DMA-coherent bounce page: for each chunk we wait for the
 * card to signal readiness (BOOTCMD interrupt + WAITING_FOR_SEGMENT
 * status), then write length, checksum, destination, and the bounce
 * page's DMA address before issuing SEG_AVAILABLE.
 *
 * Returns 0 on success or a negative errno; interrupt enable/mask
 * registers are restored on all exit paths.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	struct typhoon_file_header *fHdr;
	struct typhoon_section_header *sHdr;
	u8 *image_data;
	void *dpage;		/* bounce page visible to the card */
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;		/* saved INTR_ENABLE, restored on exit */
	u32 irqMasked;		/* saved INTR_MASK, restored on exit */
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	err = -EINVAL;
	fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
	image_data = (u8 *) fHdr;

	/* Sanity-check the image tag (8 bytes: "TYPHOON" + NUL). */
	if(memcmp(fHdr->tag, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
		goto err_out;
	}

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is part of the kernel/module image, so we allocate
	 * some consistent memory to copy the sections into, as it is simpler,
	 * and short-lived. If we ever split out and require a userland
	 * firmware loader, then we can revisit this.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* Enable and unmask only the BOOTCMD interrupt for the duration
	 * of the download; previous settings are restored at the end.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Hand the card the image start address and the five HMAC
	 * digest words, then kick off the runtime-image download.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								  dpage, len,
								  0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* Wait for the card to finish consuming the last segment before
	 * declaring the download complete.
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* Restore the caller's interrupt enable/mask settings. */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1496 
/* Boot the 3XP: wait for it to reach @initial_status, hand it the boot
 * record (the shared memory block at tp->shared_dma), wait for RUNNING,
 * clear the doorbell registers, and issue the final BOOT command.
 *
 * Returns 0 on success or -ETIMEDOUT if either status wait fails.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* High address word is zero -- only 32-bit DMA addressing. */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1532 
/* Reclaim completed Tx descriptors on @txRing, from lastRead up to the
 * card's cleared offset *index.
 *
 * Two descriptor types appear here: TYPHOON_TX_DESC entries carry the
 * skb pointer stashed at submit time (freed here), and
 * TYPHOON_FRAG_DESC entries carry a DMA mapping (unmapped here).
 * Returns the new lastRead offset; the caller stores it back.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			/* _irq variant: we may be called from the ISR path */
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1568 
/* Process Tx completions on @txRing and restart the netdev queue once
 * a worst-case packet (MAX_SKB_FRAGS + 1 descriptors, plus the two
 * slack entries reserved at submit time) is guaranteed to fit again.
 */
static void
typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead;
	int numDesc = MAX_SKB_FRAGS + 1;	/* worst case per packet */

	/* This will need changing if we start to use the Hi Tx ring. */
	lastRead = typhoon_clean_tx(tp, txRing, index);
	if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
				lastRead, TXLO_ENTRIES) > (numDesc + 2))
		netif_wake_queue(tp->dev);

	/* Publish the new read cursor before other CPUs look at it. */
	txRing->lastRead = lastRead;
	smp_wmb();
}
1585 
/* Hand the existing rx buffer at @idx back to the card via the rx free
 * ring, reusing its current DMA mapping.  If the free ring is full the
 * skb is dropped instead (typhoon_alloc_rx_skb() will replace it later
 * when the ring drains).
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* Ring is full when advancing lastWrite would land on the
	 * card's cleared offset.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1612 
/* Allocate a fresh PKT_BUF_SZ skb for rx buffer slot @idx, map it for
 * DMA, and post it on the rx free ring.  Returns 0 on success or
 * -ENOMEM if the free ring is full or the skb allocation fails.
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* Ring is full when advancing lastWrite would land on the
	 * card's cleared offset -- check before allocating.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1659 
/* Drain up to @budget received frames from @rxRing.
 *
 * @ready and @cleared are little-endian ring offsets shared with the
 * card: the card advances *ready as it posts frames, and we advance
 * *cleared as we consume them.  Small frames (< rx_copybreak) are
 * copied into a fresh, IP-aligned skb so the original buffer can be
 * recycled in place; larger ones are passed up directly and the slot
 * is refilled.  Returns the number of packets delivered to the stack.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr is the rxbuffers[] slot we stored at post time */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		/* Errored frame: give the buffer straight back to the card. */
		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* 2-byte reserve IP-aligns the copied payload
			 * (the firmware cannot DMA to a 2-byte-aligned
			 * address -- see KNOWN ISSUES above).
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Claim checksum-unnecessary only when the IP check AND
		 * exactly one of TCP/UDP passed.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock protects tp->vlgrp against concurrent change */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	/* Publish the new cleared offset back to the shared index block. */
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1739 
1740 static void
typhoon_fill_free_ring(struct typhoon * tp)1741 typhoon_fill_free_ring(struct typhoon *tp)
1742 {
1743 	u32 i;
1744 
1745 	for(i = 0; i < RXENT_ENTRIES; i++) {
1746 		struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1747 		if(rxb->skb)
1748 			continue;
1749 		if(typhoon_alloc_rx_skb(tp, i) < 0)
1750 			break;
1751 	}
1752 }
1753 
/* NAPI poll handler.  Reaps pending command responses and Tx
 * completions, processes up to @budget Rx packets (Hi ring first, then
 * Lo with the remaining budget), refills the rx free ring if it
 * drained, and re-enables interrupts once all work is done.
 * Returns the number of Rx packets handled.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Make sure we see the card's latest shared-index updates. */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
			   		&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* All caught up: exit polling and unmask interrupts. */
		netif_rx_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1794 
/* Interrupt handler.  Acknowledges the interrupt, masks all further
 * interrupts, and defers the real work to NAPI (typhoon_poll), which
 * unmasks them again when done.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	/* Shared IRQ line: bail out if this interrupt is not ours. */
	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* Ack exactly the bits we saw. */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (netif_rx_schedule_prep(&tp->napi)) {
		/* Mask everything until the poll loop finishes. */
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__netif_rx_schedule(&tp->napi);
	} else {
		printk(KERN_ERR "%s: Error, poll already scheduled\n",
                       dev->name);
	}
	return IRQ_HANDLED;
}
1819 
1820 static void
typhoon_free_rx_rings(struct typhoon * tp)1821 typhoon_free_rx_rings(struct typhoon *tp)
1822 {
1823 	u32 i;
1824 
1825 	for(i = 0; i < RXENT_ENTRIES; i++) {
1826 		struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1827 		if(rxb->skb) {
1828 			pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1829 				       PCI_DMA_FROMDEVICE);
1830 			dev_kfree_skb(rxb->skb);
1831 			rxb->skb = NULL;
1832 		}
1833 	}
1834 }
1835 
1836 static int
typhoon_sleep(struct typhoon * tp,pci_power_t state,__le16 events)1837 typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1838 {
1839 	struct pci_dev *pdev = tp->pdev;
1840 	void __iomem *ioaddr = tp->ioaddr;
1841 	struct cmd_desc xp_cmd;
1842 	int err;
1843 
1844 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1845 	xp_cmd.parm1 = events;
1846 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1847 	if(err < 0) {
1848 		printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
1849 				tp->name, err);
1850 		return err;
1851 	}
1852 
1853 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1854 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1855 	if(err < 0) {
1856 		printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
1857 				tp->name, err);
1858 		return err;
1859 	}
1860 
1861 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1862 		return -ETIMEDOUT;
1863 
1864 	/* Since we cannot monitor the status of the link while sleeping,
1865 	 * tell the world it went away.
1866 	 */
1867 	netif_carrier_off(tp->dev);
1868 
1869 	pci_enable_wake(tp->pdev, state, 1);
1870 	pci_disable_device(pdev);
1871 	return pci_set_power_state(pdev, state);
1872 }
1873 
1874 static int
typhoon_wakeup(struct typhoon * tp,int wait_type)1875 typhoon_wakeup(struct typhoon *tp, int wait_type)
1876 {
1877 	struct pci_dev *pdev = tp->pdev;
1878 	void __iomem *ioaddr = tp->ioaddr;
1879 
1880 	pci_set_power_state(pdev, PCI_D0);
1881 	pci_restore_state(pdev);
1882 
1883 	/* Post 2.x.x versions of the Sleep Image require a reset before
1884 	 * we can download the Runtime Image. But let's not make users of
1885 	 * the old firmware pay for the reset.
1886 	 */
1887 	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1888 	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1889 			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1890 		return typhoon_reset(ioaddr, wait_type);
1891 
1892 	return 0;
1893 }
1894 
1895 static int
typhoon_start_runtime(struct typhoon * tp)1896 typhoon_start_runtime(struct typhoon *tp)
1897 {
1898 	struct net_device *dev = tp->dev;
1899 	void __iomem *ioaddr = tp->ioaddr;
1900 	struct cmd_desc xp_cmd;
1901 	int err;
1902 
1903 	typhoon_init_rings(tp);
1904 	typhoon_fill_free_ring(tp);
1905 
1906 	err = typhoon_download_firmware(tp);
1907 	if(err < 0) {
1908 		printk("%s: cannot load runtime on 3XP\n", tp->name);
1909 		goto error_out;
1910 	}
1911 
1912 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1913 		printk("%s: cannot boot 3XP\n", tp->name);
1914 		err = -EIO;
1915 		goto error_out;
1916 	}
1917 
1918 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1919 	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1920 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1921 	if(err < 0)
1922 		goto error_out;
1923 
1924 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1925 	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
1926 	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
1927 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1928 	if(err < 0)
1929 		goto error_out;
1930 
1931 	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
1932 	 * us some more information on how to control it.
1933 	 */
1934 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1935 	xp_cmd.parm1 = 0;
1936 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1937 	if(err < 0)
1938 		goto error_out;
1939 
1940 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1941 	xp_cmd.parm1 = tp->xcvr_select;
1942 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1943 	if(err < 0)
1944 		goto error_out;
1945 
1946 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1947 	xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
1948 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1949 	if(err < 0)
1950 		goto error_out;
1951 
1952 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1953 	spin_lock_bh(&tp->state_lock);
1954 	xp_cmd.parm2 = tp->offload;
1955 	xp_cmd.parm3 = tp->offload;
1956 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1957 	spin_unlock_bh(&tp->state_lock);
1958 	if(err < 0)
1959 		goto error_out;
1960 
1961 	typhoon_set_rx_mode(dev);
1962 
1963 	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
1964 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1965 	if(err < 0)
1966 		goto error_out;
1967 
1968 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
1969 	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1970 	if(err < 0)
1971 		goto error_out;
1972 
1973 	tp->card_state = Running;
1974 	smp_wmb();
1975 
1976 	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
1977 	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
1978 	typhoon_post_pci_writes(ioaddr);
1979 
1980 	return 0;
1981 
1982 error_out:
1983 	typhoon_reset(ioaddr, WaitNoSleep);
1984 	typhoon_free_rx_rings(tp);
1985 	typhoon_init_rings(tp);
1986 	return err;
1987 }
1988 
/* Bring a running adapter down to the halted state: stop Rx, drain
 * outstanding Tx, snapshot the hardware statistics, halt the firmware,
 * and reset the 3XP. Returns 0 on success or -ETIMEDOUT if the chip
 * will not reset; other command failures are logged but not fatal.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Stop the receive path first so nothing new lands while we
	 * drain the transmit ring.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	/* A full reset gets the chip back to a known state whether or
	 * not the halt above succeeded.
	 */
	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2052 
2053 static void
typhoon_tx_timeout(struct net_device * dev)2054 typhoon_tx_timeout(struct net_device *dev)
2055 {
2056 	struct typhoon *tp = netdev_priv(dev);
2057 
2058 	if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2059 		printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2060 					dev->name);
2061 		goto truely_dead;
2062 	}
2063 
2064 	/* If we ever start using the Hi ring, it will need cleaning too */
2065 	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2066 	typhoon_free_rx_rings(tp);
2067 
2068 	if(typhoon_start_runtime(tp) < 0) {
2069 		printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2070 					dev->name);
2071 		goto truely_dead;
2072         }
2073 
2074 	netif_wake_queue(dev);
2075 	return;
2076 
2077 truely_dead:
2078 	/* Reset the hardware, and turn off carrier to avoid more timeouts */
2079 	typhoon_reset(tp->ioaddr, NoWait);
2080 	netif_carrier_off(dev);
2081 }
2082 
2083 static int
typhoon_open(struct net_device * dev)2084 typhoon_open(struct net_device *dev)
2085 {
2086 	struct typhoon *tp = netdev_priv(dev);
2087 	int err;
2088 
2089 	err = typhoon_wakeup(tp, WaitSleep);
2090 	if(err < 0) {
2091 		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
2092 		goto out_sleep;
2093 	}
2094 
2095 	err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
2096 				dev->name, dev);
2097 	if(err < 0)
2098 		goto out_sleep;
2099 
2100 	napi_enable(&tp->napi);
2101 
2102 	err = typhoon_start_runtime(tp);
2103 	if(err < 0) {
2104 		napi_disable(&tp->napi);
2105 		goto out_irq;
2106 	}
2107 
2108 	netif_start_queue(dev);
2109 	return 0;
2110 
2111 out_irq:
2112 	free_irq(dev->irq, dev);
2113 
2114 out_sleep:
2115 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2116 		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
2117 				dev->name);
2118 		typhoon_reset(tp->ioaddr, NoWait);
2119 		goto out;
2120 	}
2121 
2122 	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2123 		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
2124 
2125 out:
2126 	return err;
2127 }
2128 
2129 static int
typhoon_close(struct net_device * dev)2130 typhoon_close(struct net_device *dev)
2131 {
2132 	struct typhoon *tp = netdev_priv(dev);
2133 
2134 	netif_stop_queue(dev);
2135 	napi_disable(&tp->napi);
2136 
2137 	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2138 		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
2139 
2140 	/* Make sure there is no irq handler running on a different CPU. */
2141 	free_irq(dev->irq, dev);
2142 
2143 	typhoon_free_rx_rings(tp);
2144 	typhoon_init_rings(tp);
2145 
2146 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2147 		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
2148 
2149 	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
2150 		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
2151 
2152 	return 0;
2153 }
2154 
2155 #ifdef CONFIG_PM
2156 static int
typhoon_resume(struct pci_dev * pdev)2157 typhoon_resume(struct pci_dev *pdev)
2158 {
2159 	struct net_device *dev = pci_get_drvdata(pdev);
2160 	struct typhoon *tp = netdev_priv(dev);
2161 
2162 	/* If we're down, resume when we are upped.
2163 	 */
2164 	if(!netif_running(dev))
2165 		return 0;
2166 
2167 	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2168 		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
2169 				dev->name);
2170 		goto reset;
2171 	}
2172 
2173 	if(typhoon_start_runtime(tp) < 0) {
2174 		printk(KERN_ERR "%s: critical: could not start runtime in "
2175 				"resume\n", dev->name);
2176 		goto reset;
2177 	}
2178 
2179 	netif_device_attach(dev);
2180 	return 0;
2181 
2182 reset:
2183 	typhoon_reset(tp->ioaddr, NoWait);
2184 	return -EBUSY;
2185 }
2186 
/* PM suspend hook: stop the runtime, reboot into the sleep image,
 * program the wake-up MAC/filter, and drop into the requested power
 * state. Returns 0, or -EBUSY on failure (after attempting a resume
 * so the device is left usable).
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* Magic-packet wake is refused while VLANs are registered --
	 * presumably a firmware limitation; see KNOWN ISSUES above.
	 */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	/* The sleep image handles wake events, so it must be booted
	 * and told which address/filter to wake on.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	/* Best effort: bring the device back up before reporting failure. */
	typhoon_resume(pdev);
	return -EBUSY;
}
2251 #endif
2252 
/* Probe whether MMIO works on this board by raising a self-interrupt
 * through BAR 1 and watching for it in the interrupt status register.
 * Returns 1 if MMIO should be used, 0 to fall back to port IO.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	/* Only poke the chip while it is idle, waiting for the host
	 * after reset.
	 */
	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	/* Mask delivery, clear any stale status, then enable all
	 * interrupt sources so a self-interrupt can latch.
	 */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Restore the quiescent state: everything masked and disabled,
	 * status cleared; the final read posts the writes.
	 */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		printk(KERN_INFO PFX "falling back to port IO\n");
	return mode;
}
2298 
/* Hooks connecting the generic net_device layer to this driver. */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2311 
2312 static int __devinit
typhoon_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)2313 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2314 {
2315 	static int did_version = 0;
2316 	struct net_device *dev;
2317 	struct typhoon *tp;
2318 	int card_id = (int) ent->driver_data;
2319 	void __iomem *ioaddr;
2320 	void *shared;
2321 	dma_addr_t shared_dma;
2322 	struct cmd_desc xp_cmd;
2323 	struct resp_desc xp_resp[3];
2324 	int err = 0;
2325 
2326 	if(!did_version++)
2327 		printk(KERN_INFO "%s", version);
2328 
2329 	dev = alloc_etherdev(sizeof(*tp));
2330 	if(dev == NULL) {
2331 		printk(ERR_PFX "%s: unable to alloc new net device\n",
2332 		       pci_name(pdev));
2333 		err = -ENOMEM;
2334 		goto error_out;
2335 	}
2336 	SET_NETDEV_DEV(dev, &pdev->dev);
2337 
2338 	err = pci_enable_device(pdev);
2339 	if(err < 0) {
2340 		printk(ERR_PFX "%s: unable to enable device\n",
2341 		       pci_name(pdev));
2342 		goto error_out_dev;
2343 	}
2344 
2345 	err = pci_set_mwi(pdev);
2346 	if(err < 0) {
2347 		printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2348 		goto error_out_disable;
2349 	}
2350 
2351 	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2352 	if(err < 0) {
2353 		printk(ERR_PFX "%s: No usable DMA configuration\n",
2354 		       pci_name(pdev));
2355 		goto error_out_mwi;
2356 	}
2357 
2358 	/* sanity checks on IO and MMIO BARs
2359 	 */
2360 	if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2361 		printk(ERR_PFX
2362 		       "%s: region #1 not a PCI IO resource, aborting\n",
2363 		       pci_name(pdev));
2364 		err = -ENODEV;
2365 		goto error_out_mwi;
2366 	}
2367 	if(pci_resource_len(pdev, 0) < 128) {
2368 		printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2369 		       pci_name(pdev));
2370 		err = -ENODEV;
2371 		goto error_out_mwi;
2372 	}
2373 	if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2374 		printk(ERR_PFX
2375 		       "%s: region #1 not a PCI MMIO resource, aborting\n",
2376 		       pci_name(pdev));
2377 		err = -ENODEV;
2378 		goto error_out_mwi;
2379 	}
2380 	if(pci_resource_len(pdev, 1) < 128) {
2381 		printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2382 		       pci_name(pdev));
2383 		err = -ENODEV;
2384 		goto error_out_mwi;
2385 	}
2386 
2387 	err = pci_request_regions(pdev, "typhoon");
2388 	if(err < 0) {
2389 		printk(ERR_PFX "%s: could not request regions\n",
2390 		       pci_name(pdev));
2391 		goto error_out_mwi;
2392 	}
2393 
2394 	/* map our registers
2395 	 */
2396 	if(use_mmio != 0 && use_mmio != 1)
2397 		use_mmio = typhoon_test_mmio(pdev);
2398 
2399 	ioaddr = pci_iomap(pdev, use_mmio, 128);
2400 	if (!ioaddr) {
2401 		printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2402 		       pci_name(pdev));
2403 		err = -EIO;
2404 		goto error_out_regions;
2405 	}
2406 
2407 	/* allocate pci dma space for rx and tx descriptor rings
2408 	 */
2409 	shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2410 				      &shared_dma);
2411 	if(!shared) {
2412 		printk(ERR_PFX "%s: could not allocate DMA memory\n",
2413 		       pci_name(pdev));
2414 		err = -ENOMEM;
2415 		goto error_out_remap;
2416 	}
2417 
2418 	dev->irq = pdev->irq;
2419 	tp = netdev_priv(dev);
2420 	tp->shared = (struct typhoon_shared *) shared;
2421 	tp->shared_dma = shared_dma;
2422 	tp->pdev = pdev;
2423 	tp->tx_pdev = pdev;
2424 	tp->ioaddr = ioaddr;
2425 	tp->tx_ioaddr = ioaddr;
2426 	tp->dev = dev;
2427 
2428 	/* Init sequence:
2429 	 * 1) Reset the adapter to clear any bad juju
2430 	 * 2) Reload the sleep image
2431 	 * 3) Boot the sleep image
2432 	 * 4) Get the hardware address.
2433 	 * 5) Put the card to sleep.
2434 	 */
2435 	if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2436 		printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2437 		err = -EIO;
2438 		goto error_out_dma;
2439 	}
2440 
2441 	/* Now that we've reset the 3XP and are sure it's not going to
2442 	 * write all over memory, enable bus mastering, and save our
2443 	 * state for resuming after a suspend.
2444 	 */
2445 	pci_set_master(pdev);
2446 	pci_save_state(pdev);
2447 
2448 	/* dev->name is not valid until we register, but we need to
2449 	 * use some common routines to initialize the card. So that those
2450 	 * routines print the right name, we keep our oun pointer to the name
2451 	 */
2452 	tp->name = pci_name(pdev);
2453 
2454 	typhoon_init_interface(tp);
2455 	typhoon_init_rings(tp);
2456 
2457 	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2458 		printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2459 		       pci_name(pdev));
2460 		err = -EIO;
2461 		goto error_out_reset;
2462 	}
2463 
2464 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2465 	if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2466 		printk(ERR_PFX "%s: cannot read MAC address\n",
2467 		       pci_name(pdev));
2468 		err = -EIO;
2469 		goto error_out_reset;
2470 	}
2471 
2472 	*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2473 	*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2474 
2475 	if(!is_valid_ether_addr(dev->dev_addr)) {
2476 		printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2477 		       "aborting\n", pci_name(pdev));
2478 		goto error_out_reset;
2479 	}
2480 
2481 	/* Read the Sleep Image version last, so the response is valid
2482 	 * later when we print out the version reported.
2483 	 */
2484 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2485 	if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2486 		printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2487 			pci_name(pdev));
2488 		goto error_out_reset;
2489 	}
2490 
2491 	tp->capabilities = typhoon_card_info[card_id].capabilities;
2492 	tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2493 
2494 	/* Typhoon 1.0 Sleep Images return one response descriptor to the
2495 	 * READ_VERSIONS command. Those versions are OK after waking up
2496 	 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2497 	 * seem to need a little extra help to get started. Since we don't
2498 	 * know how to nudge it along, just kick it.
2499 	 */
2500 	if(xp_resp[0].numDesc != 0)
2501 		tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2502 
2503 	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2504 		printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2505 		       pci_name(pdev));
2506 		err = -EIO;
2507 		goto error_out_reset;
2508 	}
2509 
2510 	/* The chip-specific entries in the device structure. */
2511 	dev->netdev_ops		= &typhoon_netdev_ops;
2512 	netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2513 	dev->watchdog_timeo	= TX_TIMEOUT;
2514 
2515 	SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2516 
2517 	/* We can handle scatter gather, up to 16 entries, and
2518 	 * we can do IP checksumming (only version 4, doh...)
2519 	 */
2520 	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2521 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2522 	dev->features |= NETIF_F_TSO;
2523 
2524 	if(register_netdev(dev) < 0)
2525 		goto error_out_reset;
2526 
2527 	/* fixup our local name */
2528 	tp->name = dev->name;
2529 
2530 	pci_set_drvdata(pdev, dev);
2531 
2532 	printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
2533 	       dev->name, typhoon_card_info[card_id].name,
2534 	       use_mmio ? "MMIO" : "IO",
2535 	       (unsigned long long)pci_resource_start(pdev, use_mmio),
2536 	       dev->dev_addr);
2537 
2538 	/* xp_resp still contains the response to the READ_VERSIONS command.
2539 	 * For debugging, let the user know what version he has.
2540 	 */
2541 	if(xp_resp[0].numDesc == 0) {
2542 		/* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2543 		 * of version is Month/Day of build.
2544 		 */
2545 		u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2546 		printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2547 			"%02u/%02u/2000\n", dev->name, monthday >> 8,
2548 			monthday & 0xff);
2549 	} else if(xp_resp[0].numDesc == 2) {
2550 		/* This is the Typhoon 1.1+ type Sleep Image
2551 		 */
2552 		u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2553 		u8 *ver_string = (u8 *) &xp_resp[1];
2554 		ver_string[25] = 0;
2555 		printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2556 			"%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2557 			(sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2558 			ver_string);
2559 	} else {
2560 		printk(KERN_WARNING "%s: Unknown Sleep Image version "
2561 			"(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2562 			le32_to_cpu(xp_resp[0].parm2));
2563 	}
2564 
2565 	return 0;
2566 
2567 error_out_reset:
2568 	typhoon_reset(ioaddr, NoWait);
2569 
2570 error_out_dma:
2571 	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2572 			    shared, shared_dma);
2573 error_out_remap:
2574 	pci_iounmap(pdev, ioaddr);
2575 error_out_regions:
2576 	pci_release_regions(pdev);
2577 error_out_mwi:
2578 	pci_clear_mwi(pdev);
2579 error_out_disable:
2580 	pci_disable_device(pdev);
2581 error_out_dev:
2582 	free_netdev(dev);
2583 error_out:
2584 	return err;
2585 }
2586 
2587 static void __devexit
typhoon_remove_one(struct pci_dev * pdev)2588 typhoon_remove_one(struct pci_dev *pdev)
2589 {
2590 	struct net_device *dev = pci_get_drvdata(pdev);
2591 	struct typhoon *tp = netdev_priv(dev);
2592 
2593 	unregister_netdev(dev);
2594 	pci_set_power_state(pdev, PCI_D0);
2595 	pci_restore_state(pdev);
2596 	typhoon_reset(tp->ioaddr, NoWait);
2597 	pci_iounmap(pdev, tp->ioaddr);
2598 	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2599 			    tp->shared, tp->shared_dma);
2600 	pci_release_regions(pdev);
2601 	pci_clear_mwi(pdev);
2602 	pci_disable_device(pdev);
2603 	pci_set_drvdata(pdev, NULL);
2604 	free_netdev(dev);
2605 }
2606 
/* PCI glue: probe/remove plus optional power-management entry points. */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2617 
/* Module entry point: register the PCI driver. */
static int __init
typhoon_init(void)
{
	return pci_register_driver(&typhoon_driver);
}
2623 
/* Module exit point: unregister the PCI driver. */
static void __exit
typhoon_cleanup(void)
{
	pci_unregister_driver(&typhoon_driver);
}
2629 
2630 module_init(typhoon_init);
2631 module_exit(typhoon_cleanup);
2632