1 /*
2  * This code is derived from the VIA reference driver (copyright message
3  * below) provided to Red Hat by VIA Networking Technologies, Inc. for
4  * addition to the Linux kernel.
5  *
6  * The code has been merged into one source file, cleaned up to follow
7  * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned
8  * up for 64-bit hardware platforms.
9  *
10  * TODO
11  *	rx_copybreak/alignment
12  *	Scatter gather
13  *	More testing
14  *
15  * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
16  * Additional fixes and clean up: Francois Romieu
17  *
18  * This source has not been verified for use in safety critical systems.
19  *
20  * Please direct queries about the revamped driver to the linux-kernel
21  * list not VIA.
22  *
23  * Original code:
24  *
25  * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
26  * All rights reserved.
27  *
28  * This software may be redistributed and/or modified under
29  * the terms of the GNU General Public License as published by the Free
30  * Software Foundation; either version 2 of the License, or
31  * any later version.
32  *
33  * This program is distributed in the hope that it will be useful, but
34  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
35  * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
36  * for more details.
37  *
38  * Author: Chuang Liang-Shing, AJ Jiang
39  *
40  * Date: Jan 24, 2003
41  *
42  * MODULE_LICENSE("GPL");
43  *
44  */
45 
46 
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/init.h>
50 #include <linux/mm.h>
51 #include <linux/errno.h>
52 #include <linux/ioport.h>
53 #include <linux/pci.h>
54 #include <linux/kernel.h>
55 #include <linux/netdevice.h>
56 #include <linux/etherdevice.h>
57 #include <linux/skbuff.h>
58 #include <linux/delay.h>
59 #include <linux/timer.h>
60 #include <linux/slab.h>
61 #include <linux/interrupt.h>
62 #include <linux/string.h>
63 #include <linux/wait.h>
64 #include <asm/io.h>
65 #include <linux/if.h>
66 #include <asm/uaccess.h>
67 #include <linux/proc_fs.h>
68 #include <linux/inetdevice.h>
69 #include <linux/reboot.h>
70 #include <linux/ethtool.h>
71 #include <linux/mii.h>
72 #include <linux/in.h>
73 #include <linux/if_arp.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <linux/udp.h>
78 #include <linux/crc-ccitt.h>
79 #include <linux/crc32.h>
80 
81 #include "via-velocity.h"
82 
83 
84 static int velocity_nics = 0;
85 static int msglevel = MSG_LEVEL_INFO;
86 
87 /**
88  *	mac_get_cam_mask	-	Read a CAM mask
89  *	@regs: register block for this velocity
90  *	@mask: buffer to store mask
91  *
92  *	Fetch the mask bits of the selected CAM and store them into the
93  *	provided mask buffer.
94  */
95 
96 static void mac_get_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
97 {
98 	int i;
99 
100 	/* Select CAM mask */
101 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
102 
103 	writeb(0, &regs->CAMADDR);
104 
105 	/* read mask */
106 	for (i = 0; i < 8; i++)
107 		*mask++ = readb(&(regs->MARCAM[i]));
108 
109 	/* disable CAMEN */
110 	writeb(0, &regs->CAMADDR);
111 
112 	/* Select mar */
113 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
114 
115 }
116 
117 
118 /**
119  *	mac_set_cam_mask	-	Set a CAM mask
120  *	@regs: register block for this velocity
121  *	@mask: CAM mask to load
122  *
123  *	Store a new mask into a CAM
124  */
125 
126 static void mac_set_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
127 {
128 	int i;
129 	/* Select CAM mask */
130 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
131 
132 	writeb(CAMADDR_CAMEN, &regs->CAMADDR);
133 
134 	for (i = 0; i < 8; i++) {
135 		writeb(*mask++, &(regs->MARCAM[i]));
136 	}
137 	/* disable CAMEN */
138 	writeb(0, &regs->CAMADDR);
139 
140 	/* Select mar */
141 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
142 }
143 
144 static void mac_set_vlan_cam_mask(struct mac_regs __iomem * regs, u8 * mask)
145 {
146 	int i;
147 	/* Select CAM mask */
148 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
149 
150 	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
151 
152 	for (i = 0; i < 8; i++) {
153 		writeb(*mask++, &(regs->MARCAM[i]));
154 	}
155 	/* disable CAMEN */
156 	writeb(0, &regs->CAMADDR);
157 
158 	/* Select mar */
159 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
160 }
161 
162 /**
163  *	mac_set_cam	-	set CAM data
164  *	@regs: register block of this velocity
165  *	@idx: Cam index
166  *	@addr: 2 or 6 bytes of CAM data
167  *
168  *	Load an address or vlan tag into a CAM
169  */
170 
171 static void mac_set_cam(struct mac_regs __iomem * regs, int idx, const u8 *addr)
172 {
173 	int i;
174 
175 	/* Select CAM mask */
176 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
177 
178 	idx &= (64 - 1);
179 
180 	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
181 
182 	for (i = 0; i < 6; i++) {
183 		writeb(*addr++, &(regs->MARCAM[i]));
184 	}
185 	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
186 
187 	udelay(10);
188 
189 	writeb(0, &regs->CAMADDR);
190 
191 	/* Select mar */
192 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
193 }
194 
195 static void mac_set_vlan_cam(struct mac_regs __iomem * regs, int idx,
196 			     const u8 *addr)
197 {
198 
199 	/* Select CAM mask */
200 	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
201 
202 	idx &= (64 - 1);
203 
204 	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
205 	writew(*((u16 *) addr), &regs->MARCAM[0]);
206 
207 	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
208 
209 	udelay(10);
210 
211 	writeb(0, &regs->CAMADDR);
212 
213 	/* Select mar */
214 	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
215 }
216 
217 
218 /**
219  *	mac_wol_reset	-	reset WOL after exiting low power
220  *	@regs: register block of this velocity
221  *
222  *	Called after we drop out of wake on lan mode in order to
223  *	reset the Wake on lan features. This function doesn't restore
224  *	the rest of the logic after a sleep/wakeup cycle.
225  */
226 
227 static void mac_wol_reset(struct mac_regs __iomem * regs)
228 {
229 
230 	/* Turn off SWPTAG right after leaving power mode */
231 	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
232 	/* clear sticky bits */
233 	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
234 
235 	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
236 	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
237 	/* disable force PME-enable */
238 	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
239 	/* disable power-event config bit */
240 	writew(0xFFFF, &regs->WOLCRClr);
241 	/* clear power status */
242 	writew(0xFFFF, &regs->WOLSRClr);
243 }
244 
245 static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
246 static const struct ethtool_ops velocity_ethtool_ops;
247 
248 /*
249     Define module options
250 */
251 
252 MODULE_AUTHOR("VIA Networking Technologies, Inc.");
253 MODULE_LICENSE("GPL");
254 MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
255 
256 #define VELOCITY_PARAM(N,D) \
257         static int N[MAX_UNITS]=OPTION_DEFAULT;\
258 	module_param_array(N, int, NULL, 0); \
259         MODULE_PARM_DESC(N, D);
260 
261 #define RX_DESC_MIN     64
262 #define RX_DESC_MAX     255
263 #define RX_DESC_DEF     64
264 VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
265 
266 #define TX_DESC_MIN     16
267 #define TX_DESC_MAX     256
268 #define TX_DESC_DEF     64
269 VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
270 
271 #define RX_THRESH_MIN   0
272 #define RX_THRESH_MAX   3
273 #define RX_THRESH_DEF   0
274 /* rx_thresh[] is used for controlling the receive fifo threshold.
275    0: the RX FIFO threshold is 128 bytes.
276    1: the RX FIFO threshold is 512 bytes.
277    2: the RX FIFO threshold is 1024 bytes.
278    3: the RX FIFO operates in store & forward mode.
279 */
280 VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
281 
282 #define DMA_LENGTH_MIN  0
283 #define DMA_LENGTH_MAX  7
284 #define DMA_LENGTH_DEF  0
285 
286 /* DMA_length[] is used for controlling the DMA length
287    0: 8 DWORDs
288    1: 16 DWORDs
289    2: 32 DWORDs
290    3: 64 DWORDs
291    4: 128 DWORDs
292    5: 256 DWORDs
293    6: SF (flush till empty)
294    7: SF (flush till empty)
295 */
296 VELOCITY_PARAM(DMA_length, "DMA length");
297 
298 #define IP_ALIG_DEF     0
299 /* IP_byte_align[] is used to control DWORD alignment of the IP header.
300    0: the IP header won't be DWORD byte aligned. (Default)
301    1: the IP header will be DWORD byte aligned.
302       In some environments the IP header should be DWORD byte aligned,
303       or the packet will be dropped when we receive it. (e.g. IPVS)
304 */
305 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
306 
307 #define TX_CSUM_DEF     1
308 /* txcsum_offload[] is used for setting the checksum offload ability of NIC.
309    (We only support RX checksum offload now)
310    0: disable checksum offload.
311    1: enable checksum offload. (Default)
312 */
313 VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
314 
315 #define FLOW_CNTL_DEF   1
316 #define FLOW_CNTL_MIN   1
317 #define FLOW_CNTL_MAX   5
318 
319 /* flow_control[] is used for setting the flow control ability of NIC.
320    1: hardware default - AUTO (default). Use the hardware default value in ANAR.
321    2: enable TX flow control.
322    3: enable RX flow control.
323    4: enable RX/TX flow control.
324    5: disable
325 */
326 VELOCITY_PARAM(flow_control, "Enable flow control ability");
327 
328 #define MED_LNK_DEF 0
329 #define MED_LNK_MIN 0
330 #define MED_LNK_MAX 4
331 /* speed_duplex[] is used for setting the speed and duplex mode of NIC.
332    0: autonegotiation for both speed and duplex mode
333    1: 100Mbps half duplex mode
334    2: 100Mbps full duplex mode
335    3: 10Mbps half duplex mode
336    4: 10Mbps full duplex mode
337 
338    Note:
339         if the EEPROM has been set to a forced mode, this option is
340             ignored by the driver.
341 */
342 VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
343 
344 #define VAL_PKT_LEN_DEF     0
345 /* ValPktLen[] is used to control the handling of frames with an invalid layer 2 length.
346    0: Receive frames with invalid layer 2 length (Default)
347    1: Drop frames with invalid layer 2 length
348 */
349 VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");
350 
351 #define WOL_OPT_DEF     0
352 #define WOL_OPT_MIN     0
353 #define WOL_OPT_MAX     7
354 /* wol_opts[] is used for controlling wake on lan behavior.
355    0: Wake up on receipt of a magic packet. (Default)
356    1: Wake up if the link status changes (on/off).
357    2: Wake up on receipt of an ARP packet.
358    4: Wake up on receipt of any unicast packet.
359    These values can be summed to enable more than one option.
360 */
361 VELOCITY_PARAM(wol_opts, "Wake On Lan options");
362 
363 #define INT_WORKS_DEF   20
364 #define INT_WORKS_MIN   10
365 #define INT_WORKS_MAX   64
366 
367 VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
368 
369 static int rx_copybreak = 200;
370 module_param(rx_copybreak, int, 0644);
371 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
372 
373 static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
374 			       const struct velocity_info_tbl *info);
375 static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev);
376 static void velocity_print_info(struct velocity_info *vptr);
377 static int velocity_open(struct net_device *dev);
378 static int velocity_change_mtu(struct net_device *dev, int mtu);
379 static int velocity_xmit(struct sk_buff *skb, struct net_device *dev);
380 static int velocity_intr(int irq, void *dev_instance);
381 static void velocity_set_multi(struct net_device *dev);
382 static struct net_device_stats *velocity_get_stats(struct net_device *dev);
383 static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
384 static int velocity_close(struct net_device *dev);
385 static int velocity_receive_frame(struct velocity_info *, int idx);
386 static int velocity_alloc_rx_buf(struct velocity_info *, int idx);
387 static void velocity_free_rd_ring(struct velocity_info *vptr);
388 static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *);
389 static int velocity_soft_reset(struct velocity_info *vptr);
390 static void mii_init(struct velocity_info *vptr, u32 mii_status);
391 static u32 velocity_get_link(struct net_device *dev);
392 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
393 static void velocity_print_link_status(struct velocity_info *vptr);
394 static void safe_disable_mii_autopoll(struct mac_regs __iomem * regs);
395 static void velocity_shutdown(struct velocity_info *vptr);
396 static void enable_flow_control_ability(struct velocity_info *vptr);
397 static void enable_mii_autopoll(struct mac_regs __iomem * regs);
398 static int velocity_mii_read(struct mac_regs __iomem *, u8 byIdx, u16 * pdata);
399 static int velocity_mii_write(struct mac_regs __iomem *, u8 byMiiAddr, u16 data);
400 static u32 mii_check_media_mode(struct mac_regs __iomem * regs);
401 static u32 check_connection_type(struct mac_regs __iomem * regs);
402 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status);
403 
404 #ifdef CONFIG_PM
405 
406 static int velocity_suspend(struct pci_dev *pdev, pm_message_t state);
407 static int velocity_resume(struct pci_dev *pdev);
408 
409 static DEFINE_SPINLOCK(velocity_dev_list_lock);
410 static LIST_HEAD(velocity_dev_list);
411 
412 #endif
413 
414 #if defined(CONFIG_PM) && defined(CONFIG_INET)
415 
416 static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr);
417 
418 static struct notifier_block velocity_inetaddr_notifier = {
419       .notifier_call	= velocity_netdev_event,
420 };
421 
422 static void velocity_register_notifier(void)
423 {
424 	register_inetaddr_notifier(&velocity_inetaddr_notifier);
425 }
426 
427 static void velocity_unregister_notifier(void)
428 {
429 	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
430 }
431 
432 #else
433 
434 #define velocity_register_notifier()	do {} while (0)
435 #define velocity_unregister_notifier()	do {} while (0)
436 
437 #endif
438 
439 /*
440  *	Internal board variants. At the moment we have only one
441  */
442 
443 static struct velocity_info_tbl chip_info_table[] = {
444 	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
445 	{ }
446 };
447 
448 /*
449  *	Describe the PCI device identifiers that we support in this
450  *	device driver. Used for hotplug autoloading.
451  */
452 
453 static const struct pci_device_id velocity_id_table[] __devinitdata = {
454 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
455 	{ }
456 };
457 
458 MODULE_DEVICE_TABLE(pci, velocity_id_table);
459 
460 /**
461  *	get_chip_name	- 	identifier to name
462  *	@id: chip identifier
463  *
464  *	Given a chip identifier return a suitable description. Returns
465  *	a pointer to a static string valid while the driver is loaded.
466  */
467 
468 static const char __devinit *get_chip_name(enum chip_type chip_id)
469 {
470 	int i;
471 	for (i = 0; chip_info_table[i].name != NULL; i++)
472 		if (chip_info_table[i].chip_id == chip_id)
473 			break;
474 	return chip_info_table[i].name;
475 }
476 
477 /**
478  *	velocity_remove1	-	device unplug
479  *	@pdev: PCI device being removed
480  *
481  *	Device unload callback. Called on an unplug or on module
482  *	unload for each active device that is present. Disconnects
483  *	the device from the network layer and frees all the resources
484  */
485 
486 static void __devexit velocity_remove1(struct pci_dev *pdev)
487 {
488 	struct net_device *dev = pci_get_drvdata(pdev);
489 	struct velocity_info *vptr = netdev_priv(dev);
490 
491 #ifdef CONFIG_PM
492 	unsigned long flags;
493 
494 	spin_lock_irqsave(&velocity_dev_list_lock, flags);
495 	if (!list_empty(&velocity_dev_list))
496 		list_del(&vptr->list);
497 	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
498 #endif
499 	unregister_netdev(dev);
500 	iounmap(vptr->mac_regs);
501 	pci_release_regions(pdev);
502 	pci_disable_device(pdev);
503 	pci_set_drvdata(pdev, NULL);
504 	free_netdev(dev);
505 
506 	velocity_nics--;
507 }
508 
509 /**
510  *	velocity_set_int_opt	-	parser for integer options
511  *	@opt: pointer to option value
512  *	@val: value the user requested (or -1 for default)
513  *	@min: lowest value allowed
514  *	@max: highest value allowed
515  *	@def: default value
516  *	@name: property name
517  *	@devname: device name
518  *
519  *	Set an integer property in the module options. This function does
520  *	all the verification and checking as well as reporting so that
521  *	we don't duplicate code for each option.
522  */
523 
524 static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
525 {
526 	if (val == -1)
527 		*opt = def;
528 	else if (val < min || val > max) {
529 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
530 					devname, name, min, max);
531 		*opt = def;
532 	} else {
533 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
534 					devname, name, val);
535 		*opt = val;
536 	}
537 }
538 
539 /**
540  *	velocity_set_bool_opt	-	parser for boolean options
541  *	@opt: pointer to option value
542  *	@val: value the user requested (or -1 for default)
543  *	@def: default value (yes/no)
544  *	@flag: numeric value to set for true.
545  *	@name: property name
546  *	@devname: device name
547  *
548  *	Set a boolean property in the module options. This function does
549  *	all the verification and checking as well as reporting so that
550  *	we don't duplicate code for each option.
551  */
552 
553 static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, const char *devname)
554 {
555 	(*opt) &= (~flag);
556 	if (val == -1)
557 		*opt |= (def ? flag : 0);
558 	else if (val < 0 || val > 1) {
559 		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
560 			devname, name);
561 		*opt |= (def ? flag : 0);
562 	} else {
563 		printk(KERN_INFO "%s: set parameter %s to %s\n",
564 			devname, name, val ? "TRUE" : "FALSE");
565 		*opt |= (val ? flag : 0);
566 	}
567 }
568 
569 /**
570  *	velocity_get_options	-	set options on device
571  *	@opts: option structure for the device
572  *	@index: index of option to use in module options array
573  *	@devname: device name
574  *
575  *	Turn the module and command options into a single structure
576  *	for the current device
577  */
578 
579 static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
580 {
581 
582 	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
583 	velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
584 	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
585 	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
586 
587 	velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
588 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
589 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
590 	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
591 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
592 	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
593 	velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
594 	opts->numrx = (opts->numrx & ~3);
595 }
596 
597 /**
598  *	velocity_init_cam_filter	-	initialise CAM
599  *	@vptr: velocity to program
600  *
601  *	Initialize the content addressable memory used for filters. Load
602  *	appropriately according to the presence of VLAN
603  */
604 
605 static void velocity_init_cam_filter(struct velocity_info *vptr)
606 {
607 	struct mac_regs __iomem * regs = vptr->mac_regs;
608 
609 	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
610 	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
611 	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
612 
613 	/* Disable all CAMs */
614 	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
615 	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
616 	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
617 	mac_set_cam_mask(regs, vptr->mCAMmask);
618 
619 	/* Enable VCAMs */
620 	if (vptr->vlgrp) {
621 		unsigned int vid, i = 0;
622 
623 		if (!vlan_group_get_device(vptr->vlgrp, 0))
624 			WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
625 
626 		for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
627 			if (vlan_group_get_device(vptr->vlgrp, vid)) {
628 				mac_set_vlan_cam(regs, i, (u8 *) &vid);
629 				vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
630 				if (++i >= VCAM_SIZE)
631 					break;
632 			}
633 		}
634 		mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
635 	}
636 }
637 
638 static void velocity_vlan_rx_register(struct net_device *dev,
639 				      struct vlan_group *grp)
640 {
641 	struct velocity_info *vptr = netdev_priv(dev);
642 
643 	vptr->vlgrp = grp;
644 }
645 
646 static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
647 {
648 	struct velocity_info *vptr = netdev_priv(dev);
649 
650         spin_lock_irq(&vptr->lock);
651 	velocity_init_cam_filter(vptr);
652         spin_unlock_irq(&vptr->lock);
653 }
654 
655 static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
656 {
657 	struct velocity_info *vptr = netdev_priv(dev);
658 
659         spin_lock_irq(&vptr->lock);
660 	vlan_group_set_device(vptr->vlgrp, vid, NULL);
661 	velocity_init_cam_filter(vptr);
662         spin_unlock_irq(&vptr->lock);
663 }
664 
665 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
666 {
667 	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
668 }
669 
670 /**
671  *	velocity_rx_reset	-	handle a receive reset
672  *	@vptr: velocity we are resetting
673  *
674  *	Reset the ownership and status for the receive ring side.
675  *	Hand all the receive queue to the NIC.
676  */
677 
678 static void velocity_rx_reset(struct velocity_info *vptr)
679 {
680 
681 	struct mac_regs __iomem * regs = vptr->mac_regs;
682 	int i;
683 
684 	velocity_init_rx_ring_indexes(vptr);
685 
686 	/*
687 	 *	Init state, all RD entries belong to the NIC
688 	 */
689 	for (i = 0; i < vptr->options.numrx; ++i)
690 		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
691 
692 	writew(vptr->options.numrx, &regs->RBRDU);
693 	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
694 	writew(0, &regs->RDIdx);
695 	writew(vptr->options.numrx - 1, &regs->RDCSize);
696 }
697 
698 /**
699  *	velocity_init_registers	-	initialise MAC registers
700  *	@vptr: velocity to init
701  *	@type: type of initialisation (hot or cold)
702  *
703  *	Initialise the MAC on a reset or on first set up on the
704  *	hardware.
705  */
706 
707 static void velocity_init_registers(struct velocity_info *vptr,
708 				    enum velocity_init_type type)
709 {
710 	struct mac_regs __iomem * regs = vptr->mac_regs;
711 	int i, mii_status;
712 
713 	mac_wol_reset(regs);
714 
715 	switch (type) {
716 	case VELOCITY_INIT_RESET:
717 	case VELOCITY_INIT_WOL:
718 
719 		netif_stop_queue(vptr->dev);
720 
721 		/*
722 		 *	Reset RX to keep the RX descriptor pointer on a multiple-of-4 (4X) boundary
723 		 */
724 		velocity_rx_reset(vptr);
725 		mac_rx_queue_run(regs);
726 		mac_rx_queue_wake(regs);
727 
728 		mii_status = velocity_get_opt_media_mode(vptr);
729 		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
730 			velocity_print_link_status(vptr);
731 			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
732 				netif_wake_queue(vptr->dev);
733 		}
734 
735 		enable_flow_control_ability(vptr);
736 
737 		mac_clear_isr(regs);
738 		writel(CR0_STOP, &regs->CR0Clr);
739 		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
740 							&regs->CR0Set);
741 
742 		break;
743 
744 	case VELOCITY_INIT_COLD:
745 	default:
746 		/*
747 		 *	Do reset
748 		 */
749 		velocity_soft_reset(vptr);
750 		mdelay(5);
751 
752 		mac_eeprom_reload(regs);
753 		for (i = 0; i < 6; i++) {
754 			writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
755 		}
756 		/*
757 		 *	clear Pre_ACPI bit.
758 		 */
759 		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
760 		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
761 		mac_set_dma_length(regs, vptr->options.DMA_length);
762 
763 		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
764 		/*
765 		 *	Back off algorithm uses the original IEEE standard
766 		 */
767 		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
768 
769 		/*
770 		 *	Init CAM filter
771 		 */
772 		velocity_init_cam_filter(vptr);
773 
774 		/*
775 		 *	Set packet filter: Receive directed and broadcast address
776 		 */
777 		velocity_set_multi(vptr->dev);
778 
779 		/*
780 		 *	Enable MII auto-polling
781 		 */
782 		enable_mii_autopoll(regs);
783 
784 		vptr->int_mask = INT_MASK_DEF;
785 
786 		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
787 		writew(vptr->options.numrx - 1, &regs->RDCSize);
788 		mac_rx_queue_run(regs);
789 		mac_rx_queue_wake(regs);
790 
791 		writew(vptr->options.numtx - 1, &regs->TDCSize);
792 
793 		for (i = 0; i < vptr->tx.numq; i++) {
794 			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
795 			mac_tx_queue_run(regs, i);
796 		}
797 
798 		init_flow_control_register(vptr);
799 
800 		writel(CR0_STOP, &regs->CR0Clr);
801 		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
802 
803 		mii_status = velocity_get_opt_media_mode(vptr);
804 		netif_stop_queue(vptr->dev);
805 
806 		mii_init(vptr, mii_status);
807 
808 		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
809 			velocity_print_link_status(vptr);
810 			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
811 				netif_wake_queue(vptr->dev);
812 		}
813 
814 		enable_flow_control_ability(vptr);
815 		mac_hw_mibs_init(regs);
816 		mac_write_int_mask(vptr->int_mask, regs);
817 		mac_clear_isr(regs);
818 
819 	}
820 }
821 
822 /**
823  *	velocity_soft_reset	-	soft reset
824  *	@vptr: velocity to reset
825  *
826  *	Kick off a soft reset of the velocity adapter and then poll
827  *	until the reset sequence has completed before returning.
828  */
829 
830 static int velocity_soft_reset(struct velocity_info *vptr)
831 {
832 	struct mac_regs __iomem * regs = vptr->mac_regs;
833 	int i = 0;
834 
835 	writel(CR0_SFRST, &regs->CR0Set);
836 
837 	for (i = 0; i < W_MAX_TIMEOUT; i++) {
838 		udelay(5);
839 		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
840 			break;
841 	}
842 
843 	if (i == W_MAX_TIMEOUT) {
844 		writel(CR0_FORSRST, &regs->CR0Set);
845 		/* FIXME: PCI POSTING */
846 		/* delay 2ms */
847 		mdelay(2);
848 	}
849 	return 0;
850 }
851 
852 static const struct net_device_ops velocity_netdev_ops = {
853 	.ndo_open		= velocity_open,
854 	.ndo_stop		= velocity_close,
855 	.ndo_start_xmit		= velocity_xmit,
856 	.ndo_get_stats		= velocity_get_stats,
857 	.ndo_validate_addr	= eth_validate_addr,
858 	.ndo_set_mac_address 	= eth_mac_addr,
859 	.ndo_set_multicast_list	= velocity_set_multi,
860 	.ndo_change_mtu		= velocity_change_mtu,
861 	.ndo_do_ioctl		= velocity_ioctl,
862 	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
863 	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
864 	.ndo_vlan_rx_register	= velocity_vlan_rx_register,
865 };
866 
867 /**
868  *	velocity_found1		-	set up discovered velocity card
869  *	@pdev: PCI device
870  *	@ent: PCI device table entry that matched
871  *
872  *	Configure a discovered adapter from scratch. Return a negative
873  *	errno error code on failure paths.
874  */
875 
876 static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
877 {
878 	static int first = 1;
879 	struct net_device *dev;
880 	int i;
881 	const char *drv_string;
882 	const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
883 	struct velocity_info *vptr;
884 	struct mac_regs __iomem * regs;
885 	int ret = -ENOMEM;
886 
887 	/* FIXME: this driver, like almost all other ethernet drivers,
888 	 * can support more than MAX_UNITS.
889 	 */
890 	if (velocity_nics >= MAX_UNITS) {
891 		dev_notice(&pdev->dev, "already found %d NICs.\n",
892 			   velocity_nics);
893 		return -ENODEV;
894 	}
895 
896 	dev = alloc_etherdev(sizeof(struct velocity_info));
897 	if (!dev) {
898 		dev_err(&pdev->dev, "allocate net device failed.\n");
899 		goto out;
900 	}
901 
902 	/* Chain it all together */
903 
904 	SET_NETDEV_DEV(dev, &pdev->dev);
905 	vptr = netdev_priv(dev);
906 
907 
908 	if (first) {
909 		printk(KERN_INFO "%s Ver. %s\n",
910 			VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
911 		printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
912 		printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
913 		first = 0;
914 	}
915 
916 	velocity_init_info(pdev, vptr, info);
917 
918 	vptr->dev = dev;
919 
920 	dev->irq = pdev->irq;
921 
922 	ret = pci_enable_device(pdev);
923 	if (ret < 0)
924 		goto err_free_dev;
925 
926 	ret = velocity_get_pci_info(vptr, pdev);
927 	if (ret < 0) {
928 		/* error message already printed */
929 		goto err_disable;
930 	}
931 
932 	ret = pci_request_regions(pdev, VELOCITY_NAME);
933 	if (ret < 0) {
934 		dev_err(&pdev->dev, "No PCI resources.\n");
935 		goto err_disable;
936 	}
937 
938 	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
939 	if (regs == NULL) {
940 		ret = -EIO;
941 		goto err_release_res;
942 	}
943 
944 	vptr->mac_regs = regs;
945 
946 	mac_wol_reset(regs);
947 
948 	dev->base_addr = vptr->ioaddr;
949 
950 	for (i = 0; i < 6; i++)
951 		dev->dev_addr[i] = readb(&regs->PAR[i]);
952 
953 
954 	drv_string = dev_driver_string(&pdev->dev);
955 
956 	velocity_get_options(&vptr->options, velocity_nics, drv_string);
957 
958 	/*
959 	 *	Mask out the options that cannot be set on this chip
960 	 */
961 
962 	vptr->options.flags &= info->flags;
963 
964 	/*
965 	 *	Enable the chip specified capabilities
966 	 */
967 
968 	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
969 
970 	vptr->wol_opts = vptr->options.wol_opts;
971 	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
972 
973 	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
974 
975 	dev->irq = pdev->irq;
976 	dev->netdev_ops = &velocity_netdev_ops;
977 	dev->ethtool_ops = &velocity_ethtool_ops;
978 
979 #ifdef  VELOCITY_ZERO_COPY_SUPPORT
980 	dev->features |= NETIF_F_SG;
981 #endif
982 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
983 		NETIF_F_HW_VLAN_RX;
984 
985 	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
986 		dev->features |= NETIF_F_IP_CSUM;
987 
988 	ret = register_netdev(dev);
989 	if (ret < 0)
990 		goto err_iounmap;
991 
992 	if (velocity_get_link(dev))
993 		netif_carrier_off(dev);
994 
995 	velocity_print_info(vptr);
996 	pci_set_drvdata(pdev, dev);
997 
998 	/* and leave the chip powered down */
999 
1000 	pci_set_power_state(pdev, PCI_D3hot);
1001 #ifdef CONFIG_PM
1002 	{
1003 		unsigned long flags;
1004 
1005 		spin_lock_irqsave(&velocity_dev_list_lock, flags);
1006 		list_add(&vptr->list, &velocity_dev_list);
1007 		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
1008 	}
1009 #endif
1010 	velocity_nics++;
1011 out:
1012 	return ret;
1013 
1014 err_iounmap:
1015 	iounmap(regs);
1016 err_release_res:
1017 	pci_release_regions(pdev);
1018 err_disable:
1019 	pci_disable_device(pdev);
1020 err_free_dev:
1021 	free_netdev(dev);
1022 	goto out;
1023 }
1024 
1025 /**
1026  *	velocity_print_info	-	per driver data
1027  *	@vptr: velocity
1028  *
1029  *	Print per driver data as the kernel driver finds Velocity
1030  *	hardware
1031  */
1032 
1033 static void __devinit velocity_print_info(struct velocity_info *vptr)
1034 {
1035 	struct net_device *dev = vptr->dev;
1036 
1037 	printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
1038 	printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
1039 		dev->name,
1040 		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
1041 		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
1042 }
1043 
1044 /**
1045  *	velocity_init_info	-	init private data
1046  *	@pdev: PCI device
1047  *	@vptr: Velocity info
1048  *	@info: Board type
1049  *
1050  *	Set up the initial velocity_info struct for the device that has been
1051  *	discovered.
1052  */
1053 
1054 static void __devinit velocity_init_info(struct pci_dev *pdev,
1055 					 struct velocity_info *vptr,
1056 					 const struct velocity_info_tbl *info)
1057 {
1058 	memset(vptr, 0, sizeof(struct velocity_info));
1059 
1060 	vptr->pdev = pdev;
1061 	vptr->chip_id = info->chip_id;
1062 	vptr->tx.numq = info->txqueue;
1063 	vptr->multicast_limit = MCAM_SIZE;
1064 	spin_lock_init(&vptr->lock);
1065 	INIT_LIST_HEAD(&vptr->list);
1066 }
1067 
1068 /**
1069  *	velocity_get_pci_info	-	retrieve PCI info for device
1070  *	@vptr: velocity device
1071  *	@pdev: PCI device it matches
1072  *
1073  *	Retrieve the PCI configuration space data that interests us from
1074  *	the kernel PCI layer
1075  */
1076 
1077 static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
1078 {
1079 	vptr->rev_id = pdev->revision;
1080 
1081 	pci_set_master(pdev);
1082 
1083 	vptr->ioaddr = pci_resource_start(pdev, 0);
1084 	vptr->memaddr = pci_resource_start(pdev, 1);
1085 
1086 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
1087 		dev_err(&pdev->dev,
1088 			   "region #0 is not an I/O resource, aborting.\n");
1089 		return -EINVAL;
1090 	}
1091 
1092 	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
1093 		dev_err(&pdev->dev,
1094 			   "region #1 is an I/O resource, aborting.\n");
1095 		return -EINVAL;
1096 	}
1097 
1098 	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
1099 		dev_err(&pdev->dev, "region #1 is too small.\n");
1100 		return -EINVAL;
1101 	}
1102 	vptr->pdev = pdev;
1103 
1104 	return 0;
1105 }
1106 
1107 /**
1108  *	velocity_init_dma_rings	-	set up DMA rings
1109  *	@vptr: Velocity to set up
1110  *
1111  *	Allocate PCI mapped DMA rings for the receive and transmit layer
1112  *	to use.
1113  */
1114 
1115 static int velocity_init_dma_rings(struct velocity_info *vptr)
1116 {
1117 	struct velocity_opt *opt = &vptr->options;
1118 	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1119 	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1120 	struct pci_dev *pdev = vptr->pdev;
1121 	dma_addr_t pool_dma;
1122 	void *pool;
1123 	unsigned int i;
1124 
1125 	/*
1126 	 * Allocate all RD/TD rings as a single pool.
1127 	 *
1128 	 * pci_alloc_consistent() fulfills the requirement for 64-byte
1129 	 * alignment.
1130 	 */
1131 	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1132 				    rx_ring_size, &pool_dma);
1133 	if (!pool) {
1134 		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
1135 			vptr->dev->name);
1136 		return -ENOMEM;
1137 	}
1138 
1139 	vptr->rx.ring = pool;
1140 	vptr->rx.pool_dma = pool_dma;
1141 
1142 	pool += rx_ring_size;
1143 	pool_dma += rx_ring_size;
1144 
1145 	for (i = 0; i < vptr->tx.numq; i++) {
1146 		vptr->tx.rings[i] = pool;
1147 		vptr->tx.pool_dma[i] = pool_dma;
1148 		pool += tx_ring_size;
1149 		pool_dma += tx_ring_size;
1150 	}
1151 
1152 	return 0;
1153 }
1154 
1155 /**
1156  *	velocity_free_dma_rings	-	free PCI ring pointers
1157  *	@vptr: Velocity to free from
1158  *
1159  *	Clean up the PCI ring buffers allocated to this velocity.
1160  */
1161 
1162 static void velocity_free_dma_rings(struct velocity_info *vptr)
1163 {
1164 	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1165 		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1166 
1167 	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1168 }
1169 
1170 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1171 {
1172 	struct mac_regs __iomem *regs = vptr->mac_regs;
1173 	int avail, dirty, unusable;
1174 
1175 	/*
1176 	 * The RD count handed back to the NIC must be a multiple of 4 (4X) per the hardware spec
1177 	 * (programming guide rev 1.20, p.13)
1178 	 */
1179 	if (vptr->rx.filled < 4)
1180 		return;
1181 
1182 	wmb();
1183 
1184 	unusable = vptr->rx.filled & 0x0003;
1185 	dirty = vptr->rx.dirty - unusable;
1186 	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1187 		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1188 		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1189 	}
1190 
1191 	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1192 	vptr->rx.filled = unusable;
1193 }
1194 
1195 static int velocity_rx_refill(struct velocity_info *vptr)
1196 {
1197 	int dirty = vptr->rx.dirty, done = 0;
1198 
1199 	do {
1200 		struct rx_desc *rd = vptr->rx.ring + dirty;
1201 
1202 		/* Fine for an all zero Rx desc at init time as well */
1203 		if (rd->rdesc0.len & OWNED_BY_NIC)
1204 			break;
1205 
1206 		if (!vptr->rx.info[dirty].skb) {
1207 			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1208 				break;
1209 		}
1210 		done++;
1211 		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1212 	} while (dirty != vptr->rx.curr);
1213 
1214 	if (done) {
1215 		vptr->rx.dirty = dirty;
1216 		vptr->rx.filled += done;
1217 	}
1218 
1219 	return done;
1220 }
1221 
1222 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1223 {
1224 	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1225 }
1226 
1227 /**
1228  *	velocity_init_rd_ring	-	set up receive ring
1229  *	@vptr: velocity to configure
1230  *
1231  *	Allocate and set up the receive buffers for each ring slot and
1232  *	assign them to the network adapter.
1233  */
1234 
1235 static int velocity_init_rd_ring(struct velocity_info *vptr)
1236 {
1237 	int ret = -ENOMEM;
1238 
1239 	vptr->rx.info = kcalloc(vptr->options.numrx,
1240 				sizeof(struct velocity_rd_info), GFP_KERNEL);
1241 	if (!vptr->rx.info)
1242 		goto out;
1243 
1244 	velocity_init_rx_ring_indexes(vptr);
1245 
1246 	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1247 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1248 			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
1249 		velocity_free_rd_ring(vptr);
1250 		goto out;
1251 	}
1252 
1253 	ret = 0;
1254 out:
1255 	return ret;
1256 }
1257 
1258 /**
1259  *	velocity_free_rd_ring	-	free receive ring
1260  *	@vptr: velocity to clean up
1261  *
1262  *	Free the receive buffers for each ring slot and any
1263  *	attached socket buffers that need to go away.
1264  */
1265 
1266 static void velocity_free_rd_ring(struct velocity_info *vptr)
1267 {
1268 	int i;
1269 
1270 	if (vptr->rx.info == NULL)
1271 		return;
1272 
1273 	for (i = 0; i < vptr->options.numrx; i++) {
1274 		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1275 		struct rx_desc *rd = vptr->rx.ring + i;
1276 
1277 		memset(rd, 0, sizeof(*rd));
1278 
1279 		if (!rd_info->skb)
1280 			continue;
1281 		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1282 				 PCI_DMA_FROMDEVICE);
1283 		rd_info->skb_dma = 0;
1284 
1285 		dev_kfree_skb(rd_info->skb);
1286 		rd_info->skb = NULL;
1287 	}
1288 
1289 	kfree(vptr->rx.info);
1290 	vptr->rx.info = NULL;
1291 }
1292 
1293 /**
1294  *	velocity_init_td_ring	-	set up transmit ring
1295  *	@vptr:	velocity
1296  *
1297  *	Set up the transmit ring and chain the ring pointers together.
1298  *	Returns zero on success or a negative posix errno code for
1299  *	failure.
1300  */
1301 
1302 static int velocity_init_td_ring(struct velocity_info *vptr)
1303 {
1304 	dma_addr_t curr;
1305 	int j;
1306 
1307 	/* Init the TD ring entries */
1308 	for (j = 0; j < vptr->tx.numq; j++) {
1309 		curr = vptr->tx.pool_dma[j];
1310 
1311 		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1312 					    sizeof(struct velocity_td_info),
1313 					    GFP_KERNEL);
1314 		if (!vptr->tx.infos[j])	{
1315 			while (--j >= 0)
1316 				kfree(vptr->tx.infos[j]);
1317 			return -ENOMEM;
1318 		}
1319 
1320 		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1321 	}
1322 	return 0;
1323 }
1324 
1325 /*
1326  *	FIXME: could we merge this with velocity_free_tx_buf ?
1327  */
1328 
1329 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1330 							 int q, int n)
1331 {
1332 	struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
1333 	int i;
1334 
1335 	if (td_info == NULL)
1336 		return;
1337 
1338 	if (td_info->skb) {
1339 		for (i = 0; i < td_info->nskb_dma; i++)
1340 		{
1341 			if (td_info->skb_dma[i]) {
1342 				pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1343 					td_info->skb->len, PCI_DMA_TODEVICE);
1344 				td_info->skb_dma[i] = 0;
1345 			}
1346 		}
1347 		dev_kfree_skb(td_info->skb);
1348 		td_info->skb = NULL;
1349 	}
1350 }
1351 
1352 /**
1353  *	velocity_free_td_ring	-	free td ring
1354  *	@vptr: velocity
1355  *
1356  *	Free up the transmit ring for this particular velocity adapter.
1357  *	We free the ring contents but not the ring itself.
1358  */
1359 
1360 static void velocity_free_td_ring(struct velocity_info *vptr)
1361 {
1362 	int i, j;
1363 
1364 	for (j = 0; j < vptr->tx.numq; j++) {
1365 		if (vptr->tx.infos[j] == NULL)
1366 			continue;
1367 		for (i = 0; i < vptr->options.numtx; i++) {
1368 			velocity_free_td_ring_entry(vptr, j, i);
1369 
1370 		}
1371 		kfree(vptr->tx.infos[j]);
1372 		vptr->tx.infos[j] = NULL;
1373 	}
1374 }
1375 
1376 /**
1377  *	velocity_rx_srv		-	service RX interrupt
1378  *	@vptr: velocity
1379  *	@status: adapter status (unused)
1380  *
1381  *	Walk the receive ring of the velocity adapter and remove
1382  *	any received packets from the receive queue. Hand the ring
1383  *	slots back to the adapter for reuse.
1384  */
1385 
1386 static int velocity_rx_srv(struct velocity_info *vptr, int status)
1387 {
1388 	struct net_device_stats *stats = &vptr->stats;
1389 	int rd_curr = vptr->rx.curr;
1390 	int works = 0;
1391 
1392 	do {
1393 		struct rx_desc *rd = vptr->rx.ring + rd_curr;
1394 
1395 		if (!vptr->rx.info[rd_curr].skb)
1396 			break;
1397 
1398 		if (rd->rdesc0.len & OWNED_BY_NIC)
1399 			break;
1400 
1401 		rmb();
1402 
1403 		/*
1404 		 *	Don't drop CE or RL error frame although RXOK is off
1405 		 */
1406 		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
1407 			if (velocity_receive_frame(vptr, rd_curr) < 0)
1408 				stats->rx_dropped++;
1409 		} else {
1410 			if (rd->rdesc0.RSR & RSR_CRC)
1411 				stats->rx_crc_errors++;
1412 			if (rd->rdesc0.RSR & RSR_FAE)
1413 				stats->rx_frame_errors++;
1414 
1415 			stats->rx_dropped++;
1416 		}
1417 
1418 		rd->size |= RX_INTEN;
1419 
1420 		rd_curr++;
1421 		if (rd_curr >= vptr->options.numrx)
1422 			rd_curr = 0;
1423 	} while (++works <= 15);
1424 
1425 	vptr->rx.curr = rd_curr;
1426 
1427 	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
1428 		velocity_give_many_rx_descs(vptr);
1429 
1430 	VAR_USED(stats);
1431 	return works;
1432 }
1433 
1434 /**
1435  *	velocity_rx_csum	-	checksum process
1436  *	@rd: receive packet descriptor
1437  *	@skb: network layer packet buffer
1438  *
1439  *	Process the status bits for the received packet and determine
1440  *	if the checksum was computed and verified by the hardware
1441  */
1442 
1443 static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1444 {
1445 	skb->ip_summed = CHECKSUM_NONE;
1446 
1447 	if (rd->rdesc1.CSM & CSM_IPKT) {
1448 		if (rd->rdesc1.CSM & CSM_IPOK) {
1449 			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1450 					(rd->rdesc1.CSM & CSM_UDPKT)) {
1451 				if (!(rd->rdesc1.CSM & CSM_TUPOK)) {
1452 					return;
1453 				}
1454 			}
1455 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1456 		}
1457 	}
1458 }
1459 
1460 /**
1461  *	velocity_rx_copy	-	in place Rx copy for small packets
1462  *	@rx_skb: network layer packet buffer candidate
1463  *	@pkt_size: received data size
1464  *	@vptr: velocity adapter that received the packet
1466  *
1467  *	Replace the current skb that is scheduled for Rx processing by a
1468  *	shorter, immediately allocated skb, if the received packet is small
1469  *	enough. This function returns a negative value if the received
1470  *	packet is too big or if memory is exhausted.
1471  */
1472 static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1473 			    struct velocity_info *vptr)
1474 {
1475 	int ret = -1;
1476 	if (pkt_size < rx_copybreak) {
1477 		struct sk_buff *new_skb;
1478 
1479 		new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2);
1480 		if (new_skb) {
1481 			new_skb->ip_summed = rx_skb[0]->ip_summed;
1482 			skb_reserve(new_skb, 2);
1483 			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1484 			*rx_skb = new_skb;
1485 			ret = 0;
1486 		}
1487 
1488 	}
1489 	return ret;
1490 }
1491 
1492 /**
1493  *	velocity_iph_realign	-	IP header alignment
1494  *	@vptr: velocity we are handling
1495  *	@skb: network layer packet buffer
1496  *	@pkt_size: received data size
1497  *
1498  *	Align the IP header on a 2-byte boundary. This behavior can be
1499  *	configured by the user.
1500  */
1501 static inline void velocity_iph_realign(struct velocity_info *vptr,
1502 					struct sk_buff *skb, int pkt_size)
1503 {
1504 	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
1505 		memmove(skb->data + 2, skb->data, pkt_size);
1506 		skb_reserve(skb, 2);
1507 	}
1508 }
1509 
1510 /**
1511  *	velocity_receive_frame	-	received packet processor
1512  *	@vptr: velocity we are handling
1513  *	@idx: ring index
1514  *
1515  *	A packet has arrived. We process the packet and if appropriate
1516  *	pass the frame up the network stack
1517  */
1518 
1519 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
1520 {
1521 	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
1522 	struct net_device_stats *stats = &vptr->stats;
1523 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1524 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
1525 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
1526 	struct sk_buff *skb;
1527 
1528 	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
1529 		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
1530 		stats->rx_length_errors++;
1531 		return -EINVAL;
1532 	}
1533 
1534 	if (rd->rdesc0.RSR & RSR_MAR)
1535 		vptr->stats.multicast++;
1536 
1537 	skb = rd_info->skb;
1538 
1539 	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
1540 				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1541 
1542 	/*
1543 	 *	Drop frame not meeting IEEE 802.3
1544 	 */
1545 
1546 	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
1547 		if (rd->rdesc0.RSR & RSR_RL) {
1548 			stats->rx_length_errors++;
1549 			return -EINVAL;
1550 		}
1551 	}
1552 
1553 	pci_action = pci_dma_sync_single_for_device;
1554 
1555 	velocity_rx_csum(rd, skb);
1556 
1557 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
1558 		velocity_iph_realign(vptr, skb, pkt_len);
1559 		pci_action = pci_unmap_single;
1560 		rd_info->skb = NULL;
1561 	}
1562 
1563 	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1564 		   PCI_DMA_FROMDEVICE);
1565 
1566 	skb_put(skb, pkt_len - 4);
1567 	skb->protocol = eth_type_trans(skb, vptr->dev);
1568 
1569 	if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
1570 		vlan_hwaccel_rx(skb, vptr->vlgrp,
1571 				swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
1572 	} else
1573 		netif_rx(skb);
1574 
1575 	stats->rx_bytes += pkt_len;
1576 
1577 	return 0;
1578 }
1579 
1580 /**
1581  *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
1582  *	@vptr: velocity
1583  *	@idx: ring index
1584  *
1585  *	Allocate a new full sized buffer for the reception of a frame and
1586  *	map it into PCI space for the hardware to use. The hardware
1587  *	requires *64* byte alignment of the buffer which makes life
1588  *	less fun than would be ideal.
1589  */
1590 
1591 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1592 {
1593 	struct rx_desc *rd = &(vptr->rx.ring[idx]);
1594 	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1595 
1596 	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1597 	if (rd_info->skb == NULL)
1598 		return -ENOMEM;
1599 
1600 	/*
1601 	 *	Do the gymnastics to get the buffer head for data at
1602 	 *	64byte alignment.
1603 	 */
1604 	skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
1605 	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1606 					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1607 
1608 	/*
1609 	 *	Fill in the descriptor to match
1610 	 */
1611 
1612 	*((u32 *) & (rd->rdesc0)) = 0;
1613 	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1614 	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1615 	rd->pa_high = 0;
1616 	return 0;
1617 }
1618 
1619 /**
1620  *	velocity_tx_srv		-	transmit interrupt service
1621  *	@vptr: velocity
1622  *	@status: adapter status (unused)
1623  *
1624  *	Scan the queues looking for transmitted packets that
1625  *	we can complete and clean up. Update any statistics as
1626  *	necessary.
1627  */
1628 
1629 static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1630 {
1631 	struct tx_desc *td;
1632 	int qnum;
1633 	int full = 0;
1634 	int idx;
1635 	int works = 0;
1636 	struct velocity_td_info *tdinfo;
1637 	struct net_device_stats *stats = &vptr->stats;
1638 
1639 	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1640 		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1641 			idx = (idx + 1) % vptr->options.numtx) {
1642 
1643 			/*
1644 			 *	Get Tx Descriptor
1645 			 */
1646 			td = &(vptr->tx.rings[qnum][idx]);
1647 			tdinfo = &(vptr->tx.infos[qnum][idx]);
1648 
1649 			if (td->tdesc0.len & OWNED_BY_NIC)
1650 				break;
1651 
1652 			if ((works++ > 15))
1653 				break;
1654 
1655 			if (td->tdesc0.TSR & TSR0_TERR) {
1656 				stats->tx_errors++;
1657 				stats->tx_dropped++;
1658 				if (td->tdesc0.TSR & TSR0_CDH)
1659 					stats->tx_heartbeat_errors++;
1660 				if (td->tdesc0.TSR & TSR0_CRS)
1661 					stats->tx_carrier_errors++;
1662 				if (td->tdesc0.TSR & TSR0_ABT)
1663 					stats->tx_aborted_errors++;
1664 				if (td->tdesc0.TSR & TSR0_OWC)
1665 					stats->tx_window_errors++;
1666 			} else {
1667 				stats->tx_packets++;
1668 				stats->tx_bytes += tdinfo->skb->len;
1669 			}
1670 			velocity_free_tx_buf(vptr, tdinfo);
1671 			vptr->tx.used[qnum]--;
1672 		}
1673 		vptr->tx.tail[qnum] = idx;
1674 
1675 		if (AVAIL_TD(vptr, qnum) < 1) {
1676 			full = 1;
1677 		}
1678 	}
1679 	/*
1680 	 *	Look to see if we should kick the transmit network
1681 	 *	layer for more work.
1682 	 */
1683 	if (netif_queue_stopped(vptr->dev) && (full == 0)
1684 	    && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1685 		netif_wake_queue(vptr->dev);
1686 	}
1687 	return works;
1688 }
1689 
1690 /**
1691  *	velocity_print_link_status	-	link status reporting
1692  *	@vptr: velocity to report on
1693  *
1694  *	Turn the link status of the velocity card into a kernel log
1695  *	description of the new link state, detailing speed and duplex
1696  *	status
1697  */
1698 
1699 static void velocity_print_link_status(struct velocity_info *vptr)
1700 {
1701 
1702 	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
1703 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
1704 	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1705 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
1706 
1707 		if (vptr->mii_status & VELOCITY_SPEED_1000)
1708 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
1709 		else if (vptr->mii_status & VELOCITY_SPEED_100)
1710 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
1711 		else
1712 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
1713 
1714 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1715 			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
1716 		else
1717 			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1718 	} else {
1719 		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1720 		switch (vptr->options.spd_dpx) {
1721 		case SPD_DPX_100_HALF:
1722 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1723 			break;
1724 		case SPD_DPX_100_FULL:
1725 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
1726 			break;
1727 		case SPD_DPX_10_HALF:
1728 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
1729 			break;
1730 		case SPD_DPX_10_FULL:
1731 			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
1732 			break;
1733 		default:
1734 			break;
1735 		}
1736 	}
1737 }
1738 
1739 /**
1740  *	velocity_error	-	handle error from controller
1741  *	@vptr: velocity
1742  *	@status: card status
1743  *
1744  *	Process an error report from the hardware and attempt to recover
1745  *	the card itself. At the moment we cannot recover from some
1746  *	theoretically impossible errors but this could be fixed using
1747  *	the pci_device_failed logic to bounce the hardware
1748  *
1749  */
1750 
velocity_error(struct velocity_info * vptr,int status)1751 static void velocity_error(struct velocity_info *vptr, int status)
1752 {
1753 
1754 	if (status & ISR_TXSTLI) {
1755 		struct mac_regs __iomem * regs = vptr->mac_regs;
1756 
1757 		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1758 		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1759 		writew(TRDCSR_RUN, &regs->TDCSRClr);
1760 		netif_stop_queue(vptr->dev);
1761 
1762 		/* FIXME: port over the pci_device_failed code and use it
1763 		   here */
1764 	}
1765 
1766 	if (status & ISR_SRCI) {
1767 		struct mac_regs __iomem * regs = vptr->mac_regs;
1768 		int linked;
1769 
1770 		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1771 			vptr->mii_status = check_connection_type(regs);
1772 
1773 			/*
1774 			 *	If it is a 3119, disable frame bursting in
1775 			 *	halfduplex mode and enable it in fullduplex
1776 			 *	 mode
1777 			 */
1778 			if (vptr->rev_id < REV_ID_VT3216_A0) {
1779 				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1780 					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1781 				else
1782 					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1783 			}
1784 			/*
1785 			 *	Only enable CD heart beat counter in 10HD mode
1786 			 */
1787 			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) {
1788 				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1789 			} else {
1790 				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1791 			}
1792 		}
1793 		/*
1794 		 *	Get link status from PHYSR0
1795 		 */
1796 		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1797 
1798 		if (linked) {
1799 			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1800 			netif_carrier_on(vptr->dev);
1801 		} else {
1802 			vptr->mii_status |= VELOCITY_LINK_FAIL;
1803 			netif_carrier_off(vptr->dev);
1804 		}
1805 
1806 		velocity_print_link_status(vptr);
1807 		enable_flow_control_ability(vptr);
1808 
1809 		/*
1810 		 *	Re-enable auto-polling because SRCI will disable
1811 		 *	auto-polling
1812 		 */
1813 
1814 		enable_mii_autopoll(regs);
1815 
1816 		if (vptr->mii_status & VELOCITY_LINK_FAIL)
1817 			netif_stop_queue(vptr->dev);
1818 		else
1819 			netif_wake_queue(vptr->dev);
1820 
1821 	}
1822 	if (status & ISR_MIBFI)
1823 		velocity_update_hw_mibs(vptr);
1824 	if (status & ISR_LSTEI)
1825 		mac_rx_queue_wake(vptr->mac_regs);
1826 }
1827 
1828 /**
1829  *	velocity_free_tx_buf	-	free transmit buffer
1830  *	@vptr: velocity
1831  *	@tdinfo: buffer
1832  *
1833  *	Release a transmit buffer. If the buffer was preallocated then
1834  *	recycle it, if not then unmap the buffer.
1835  */
1836 
velocity_free_tx_buf(struct velocity_info * vptr,struct velocity_td_info * tdinfo)1837 static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo)
1838 {
1839 	struct sk_buff *skb = tdinfo->skb;
1840 	int i;
1841 	int pktlen;
1842 
1843 	/*
1844 	 *	Don't unmap the pre-allocated tx_bufs
1845 	 */
1846 	if (tdinfo->skb_dma) {
1847 
1848 		pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
1849 		for (i = 0; i < tdinfo->nskb_dma; i++) {
1850 #ifdef VELOCITY_ZERO_COPY_SUPPORT
1851 			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
1852 #else
1853 			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
1854 #endif
1855 			tdinfo->skb_dma[i] = 0;
1856 		}
1857 	}
1858 	dev_kfree_skb_irq(skb);
1859 	tdinfo->skb = NULL;
1860 }
1861 
velocity_init_rings(struct velocity_info * vptr,int mtu)1862 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1863 {
1864 	int ret;
1865 
1866 	velocity_set_rxbufsize(vptr, mtu);
1867 
1868 	ret = velocity_init_dma_rings(vptr);
1869 	if (ret < 0)
1870 		goto out;
1871 
1872 	ret = velocity_init_rd_ring(vptr);
1873 	if (ret < 0)
1874 		goto err_free_dma_rings_0;
1875 
1876 	ret = velocity_init_td_ring(vptr);
1877 	if (ret < 0)
1878 		goto err_free_rd_ring_1;
1879 out:
1880 	return ret;
1881 
1882 err_free_rd_ring_1:
1883 	velocity_free_rd_ring(vptr);
1884 err_free_dma_rings_0:
1885 	velocity_free_dma_rings(vptr);
1886 	goto out;
1887 }
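
/*
 * velocity_init_rings() unwinds in strict reverse order of allocation via
 * the error gotos above, so a caller only ever has to pair it with
 * velocity_free_rings().  Hedged usage sketch (mirrors velocity_open()
 * below; error handling elided):
 *
 *	ret = velocity_init_rings(vptr, dev->mtu);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	velocity_free_rings(vptr);
 */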
1888 
velocity_free_rings(struct velocity_info * vptr)1889 static void velocity_free_rings(struct velocity_info *vptr)
1890 {
1891 	velocity_free_td_ring(vptr);
1892 	velocity_free_rd_ring(vptr);
1893 	velocity_free_dma_rings(vptr);
1894 }
1895 
1896 /**
1897  *	velocity_open		-	interface activation callback
1898  *	@dev: network layer device to open
1899  *
1900  *	Called when the network layer brings the interface up. Returns
1901  *	a negative posix error code on failure, or zero on success.
1902  *
1903  *	All the ring allocation and set up is done on open for this
1904  *	adapter to minimise memory usage when inactive
1905  */
1906 
velocity_open(struct net_device * dev)1907 static int velocity_open(struct net_device *dev)
1908 {
1909 	struct velocity_info *vptr = netdev_priv(dev);
1910 	int ret;
1911 
1912 	ret = velocity_init_rings(vptr, dev->mtu);
1913 	if (ret < 0)
1914 		goto out;
1915 
1916 	/* Ensure chip is running */
1917 	pci_set_power_state(vptr->pdev, PCI_D0);
1918 
1919 	velocity_give_many_rx_descs(vptr);
1920 
1921 	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
1922 
1923 	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
1924 			  dev->name, dev);
1925 	if (ret < 0) {
1926 		/* Power down the chip */
1927 		pci_set_power_state(vptr->pdev, PCI_D3hot);
1928 		velocity_free_rings(vptr);
1929 		goto out;
1930 	}
1931 
1932 	mac_enable_int(vptr->mac_regs);
1933 	netif_start_queue(dev);
1934 	vptr->flags |= VELOCITY_FLAGS_OPENED;
1935 out:
1936 	return ret;
1937 }
1938 
1939 /**
1940  *	velocity_change_mtu	-	MTU change callback
1941  *	@dev: network device
1942  *	@new_mtu: desired MTU
1943  *
1944  *	Handle requests from the networking layer for MTU change on
1945  *	this interface. It gets called on a change by the network layer.
1946  *	Return zero for success or negative posix error code.
1947  */
1948 
velocity_change_mtu(struct net_device * dev,int new_mtu)1949 static int velocity_change_mtu(struct net_device *dev, int new_mtu)
1950 {
1951 	struct velocity_info *vptr = netdev_priv(dev);
1952 	int ret = 0;
1953 
1954 	if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
1955 		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
1956 				vptr->dev->name);
1957 		ret = -EINVAL;
1958 		goto out_0;
1959 	}
1960 
1961 	if (!netif_running(dev)) {
1962 		dev->mtu = new_mtu;
1963 		goto out_0;
1964 	}
1965 
1966 	if (dev->mtu != new_mtu) {
1967 		struct velocity_info *tmp_vptr;
1968 		unsigned long flags;
1969 		struct rx_info rx;
1970 		struct tx_info tx;
1971 
1972 		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
1973 		if (!tmp_vptr) {
1974 			ret = -ENOMEM;
1975 			goto out_0;
1976 		}
1977 
1978 		tmp_vptr->dev = dev;
1979 		tmp_vptr->pdev = vptr->pdev;
1980 		tmp_vptr->options = vptr->options;
1981 		tmp_vptr->tx.numq = vptr->tx.numq;
1982 
1983 		ret = velocity_init_rings(tmp_vptr, new_mtu);
1984 		if (ret < 0)
1985 			goto out_free_tmp_vptr_1;
1986 
1987 		spin_lock_irqsave(&vptr->lock, flags);
1988 
1989 		netif_stop_queue(dev);
1990 		velocity_shutdown(vptr);
1991 
1992 		rx = vptr->rx;
1993 		tx = vptr->tx;
1994 
1995 		vptr->rx = tmp_vptr->rx;
1996 		vptr->tx = tmp_vptr->tx;
1997 
1998 		tmp_vptr->rx = rx;
1999 		tmp_vptr->tx = tx;
2000 
2001 		dev->mtu = new_mtu;
2002 
2003 		velocity_give_many_rx_descs(vptr);
2004 
2005 		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2006 
2007 		mac_enable_int(vptr->mac_regs);
2008 		netif_start_queue(dev);
2009 
2010 		spin_unlock_irqrestore(&vptr->lock, flags);
2011 
2012 		velocity_free_rings(tmp_vptr);
2013 
2014 out_free_tmp_vptr_1:
2015 		kfree(tmp_vptr);
2016 	}
2017 out_0:
2018 	return ret;
2019 }
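
/*
 * The MTU path above uses a swap-under-lock pattern: the replacement rings
 * are built in a throwaway velocity_info while the interface keeps running,
 * and only the brief exchange of rx/tx state happens with the device
 * quiesced and vptr->lock held.  Sketch of the critical section (names as
 * in the function above, reinitialisation details elided):
 *
 *	spin_lock_irqsave(&vptr->lock, flags);
 *	netif_stop_queue(dev);
 *	velocity_shutdown(vptr);
 *	swap(vptr->rx, tmp_vptr->rx);	// conceptually; done field by field above
 *	swap(vptr->tx, tmp_vptr->tx);
 *	dev->mtu = new_mtu;
 *	...bring the hardware back up...
 *	spin_unlock_irqrestore(&vptr->lock, flags);
 *
 * The old rings are then freed outside the lock via velocity_free_rings().
 */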
2020 
2021 /**
2022  *	velocity_shutdown	-	shut down the chip
2023  *	@vptr: velocity to deactivate
2024  *
2025  *	Shuts down the internal operations of the velocity and
2026  *	disables interrupts, autopolling, transmit and receive
2027  */
2028 
velocity_shutdown(struct velocity_info * vptr)2029 static void velocity_shutdown(struct velocity_info *vptr)
2030 {
2031 	struct mac_regs __iomem * regs = vptr->mac_regs;
2032 	mac_disable_int(regs);
2033 	writel(CR0_STOP, &regs->CR0Set);
2034 	writew(0xFFFF, &regs->TDCSRClr);
2035 	writeb(0xFF, &regs->RDCSRClr);
2036 	safe_disable_mii_autopoll(regs);
2037 	mac_clear_isr(regs);
2038 }
2039 
2040 /**
2041  *	velocity_close		-	close adapter callback
2042  *	@dev: network device
2043  *
2044  *	Callback from the network layer when the velocity interface
2045  *	is being deactivated.
2046  */
2047 
velocity_close(struct net_device * dev)2048 static int velocity_close(struct net_device *dev)
2049 {
2050 	struct velocity_info *vptr = netdev_priv(dev);
2051 
2052 	netif_stop_queue(dev);
2053 	velocity_shutdown(vptr);
2054 
2055 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2056 		velocity_get_ip(vptr);
2057 	if (dev->irq != 0)
2058 		free_irq(dev->irq, dev);
2059 
2060 	/* Power down the chip */
2061 	pci_set_power_state(vptr->pdev, PCI_D3hot);
2062 
2063 	velocity_free_rings(vptr);
2064 
2065 	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2066 	return 0;
2067 }
2068 
2069 /**
2070  *	velocity_xmit		-	transmit packet callback
2071  *	@skb: buffer to transmit
2072  *	@dev: network device
2073  *
2074  *	Called by the network layer to request that a packet be queued to
2075  *	the velocity. Returns zero on success.
2076  */
2077 
velocity_xmit(struct sk_buff * skb,struct net_device * dev)2078 static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
2079 {
2080 	struct velocity_info *vptr = netdev_priv(dev);
2081 	int qnum = 0;
2082 	struct tx_desc *td_ptr;
2083 	struct velocity_td_info *tdinfo;
2084 	unsigned long flags;
2085 	int pktlen;
2086 	__le16 len;
2087 	int index;
2088 
2089 
2090 	if (skb_padto(skb, ETH_ZLEN))
2091 		goto out;
2092 	pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
2093 
2094 	len = cpu_to_le16(pktlen);
2095 
2096 #ifdef VELOCITY_ZERO_COPY_SUPPORT
2097 	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2098 		kfree_skb(skb);
2099 		return 0;
2100 	}
2101 #endif
2102 
2103 	spin_lock_irqsave(&vptr->lock, flags);
2104 
2105 	index = vptr->tx.curr[qnum];
2106 	td_ptr = &(vptr->tx.rings[qnum][index]);
2107 	tdinfo = &(vptr->tx.infos[qnum][index]);
2108 
2109 	td_ptr->tdesc1.TCR = TCR0_TIC;
2110 	td_ptr->td_buf[0].size &= ~TD_QUEUE;
2111 
2112 #ifdef VELOCITY_ZERO_COPY_SUPPORT
2113 	if (skb_shinfo(skb)->nr_frags > 0) {
2114 		int nfrags = skb_shinfo(skb)->nr_frags;
2115 		tdinfo->skb = skb;
2116 		if (nfrags > 6) {
2117 			skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
2118 			tdinfo->skb_dma[0] = tdinfo->buf_dma;
2119 			td_ptr->tdesc0.len = len;
2120 			td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2121 			td_ptr->tx.buf[0].pa_high = 0;
2122 			td_ptr->tx.buf[0].size = len;	/* queue is 0 anyway */
2123 			tdinfo->nskb_dma = 1;
2124 		} else {
2125 			int i = 0;
2126 			tdinfo->nskb_dma = 0;
2127 			tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
2128 						skb_headlen(skb), PCI_DMA_TODEVICE);
2129 
2130 			td_ptr->tdesc0.len = len;
2131 
2132 			/* FIXME: support 48bit DMA later */
2133 			td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma[i]);
2134 			td_ptr->tx.buf[i].pa_high = 0;
2135 			td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
2136 
2137 			for (i = 0; i < nfrags; i++) {
2138 				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2139 				void *addr = (void *)page_address(frag->page) + frag->page_offset;
2140 
2141 				tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
2142 
2143 				td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2144 				td_ptr->tx.buf[i + 1].pa_high = 0;
2145 				td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
2146 			}
2147 			tdinfo->nskb_dma = i + 1;
2148 		}
2149 
2150 	} else
2151 #endif
2152 	{
2153 		/*
2154 		 *	Map the linear network buffer into PCI space and
2155 		 *	add it to the transmit ring.
2156 		 */
2157 		tdinfo->skb = skb;
2158 		tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2159 		td_ptr->tdesc0.len = len;
2160 		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2161 		td_ptr->td_buf[0].pa_high = 0;
2162 		td_ptr->td_buf[0].size = len;
2163 		tdinfo->nskb_dma = 1;
2164 	}
2165 	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2166 
2167 	if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
2168 		td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2169 		td_ptr->tdesc1.TCR |= TCR0_VETAG;
2170 	}
2171 
2172 	/*
2173 	 *	Handle hardware checksum
2174 	 */
2175 	if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
2176 				 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2177 		const struct iphdr *ip = ip_hdr(skb);
2178 		if (ip->protocol == IPPROTO_TCP)
2179 			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2180 		else if (ip->protocol == IPPROTO_UDP)
2181 			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2182 		td_ptr->tdesc1.TCR |= TCR0_IPCK;
2183 	}
2184 	{
2185 
2186 		int prev = index - 1;
2187 
2188 		if (prev < 0)
2189 			prev = vptr->options.numtx - 1;
2190 		td_ptr->tdesc0.len |= OWNED_BY_NIC;
2191 		vptr->tx.used[qnum]++;
2192 		vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2193 
2194 		if (AVAIL_TD(vptr, qnum) < 1)
2195 			netif_stop_queue(dev);
2196 
2197 		td_ptr = &(vptr->tx.rings[qnum][prev]);
2198 		td_ptr->td_buf[0].size |= TD_QUEUE;
2199 		mac_tx_queue_wake(vptr->mac_regs, qnum);
2200 	}
2201 	dev->trans_start = jiffies;
2202 	spin_unlock_irqrestore(&vptr->lock, flags);
2203 out:
2204 	return NETDEV_TX_OK;
2205 }
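
/*
 * Descriptor hand-off in velocity_xmit(): the current descriptor is filled
 * first, then OWNED_BY_NIC is set on it, and only afterwards is TD_QUEUE
 * set on the *previous* descriptor before the queue is kicked, so the NIC
 * never chains onto a half-built entry.  Condensed from the block above
 * (illustrative only; prev_td stands for the previous ring entry):
 *
 *	td_ptr->tdesc0.len |= OWNED_BY_NIC;	 // publish current descriptor
 *	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
 *	prev_td->td_buf[0].size |= TD_QUEUE;	 // chain from the previous one
 *	mac_tx_queue_wake(vptr->mac_regs, qnum); // tell the MAC to look again
 */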
2206 
2207 /**
2208  *	velocity_intr		-	interrupt callback
2209  *	@irq: interrupt number
2210  *	@dev_instance: interrupting device
2211  *
2212  *	Called whenever an interrupt is generated by the velocity
2213  *	adapter IRQ line. We may not be the source of the interrupt
2214  *	and need to identify initially if we are, and if not exit as
2215  *	efficiently as possible.
2216  */
2217 
velocity_intr(int irq,void * dev_instance)2218 static int velocity_intr(int irq, void *dev_instance)
2219 {
2220 	struct net_device *dev = dev_instance;
2221 	struct velocity_info *vptr = netdev_priv(dev);
2222 	u32 isr_status;
2223 	int max_count = 0;
2224 
2225 
2226 	spin_lock(&vptr->lock);
2227 	isr_status = mac_read_isr(vptr->mac_regs);
2228 
2229 	/* Not us ? */
2230 	if (isr_status == 0) {
2231 		spin_unlock(&vptr->lock);
2232 		return IRQ_NONE;
2233 	}
2234 
2235 	mac_disable_int(vptr->mac_regs);
2236 
2237 	/*
2238 	 *	Keep processing the ISR until we have completed
2239 	 *	processing and the isr_status becomes zero
2240 	 */
2241 
2242 	while (isr_status != 0) {
2243 		mac_write_isr(vptr->mac_regs, isr_status);
2244 		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2245 			velocity_error(vptr, isr_status);
2246 		if (isr_status & (ISR_PRXI | ISR_PPRXI))
2247 			max_count += velocity_rx_srv(vptr, isr_status);
2248 		if (isr_status & (ISR_PTXI | ISR_PPTXI))
2249 			max_count += velocity_tx_srv(vptr, isr_status);
2250 		isr_status = mac_read_isr(vptr->mac_regs);
2251 		if (max_count > vptr->options.int_works)
2252 		{
2253 			printk(KERN_WARNING "%s: excessive work at interrupt.\n",
2254 				dev->name);
2255 			max_count = 0;
2256 		}
2257 	}
2258 	spin_unlock(&vptr->lock);
2259 	mac_enable_int(vptr->mac_regs);
2260 	return IRQ_HANDLED;
2261 
2262 }
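
/*
 * The interrupt handler above follows the usual "ack, service, re-read"
 * loop: each pass writes the latched status back (mac_write_isr), services
 * the RX/TX rings, then re-reads the ISR until it is clear, with
 * options.int_works acting as a soft cap on work per interrupt.  Shape of
 * the loop, stripped to its skeleton (illustrative only; service_rx_tx is
 * a hypothetical stand-in for the velocity_rx_srv/velocity_tx_srv calls):
 *
 *	while ((isr_status = mac_read_isr(regs)) != 0) {
 *		mac_write_isr(regs, isr_status);   // acknowledge what we saw
 *		service_rx_tx(isr_status);
 *	}
 */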
2263 
2264 
2265 /**
2266  *	velocity_set_multi	-	filter list change callback
2267  *	@dev: network device
2268  *
2269  *	Called by the network layer when the filter lists need to change
2270  *	for a velocity adapter. Reload the CAMs with the new address
2271  *	filter ruleset.
2272  */
2273 
velocity_set_multi(struct net_device * dev)2274 static void velocity_set_multi(struct net_device *dev)
2275 {
2276 	struct velocity_info *vptr = netdev_priv(dev);
2277 	struct mac_regs __iomem * regs = vptr->mac_regs;
2278 	u8 rx_mode;
2279 	int i;
2280 	struct dev_mc_list *mclist;
2281 
2282 	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
2283 		writel(0xffffffff, &regs->MARCAM[0]);
2284 		writel(0xffffffff, &regs->MARCAM[4]);
2285 		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
2286 	} else if ((dev->mc_count > vptr->multicast_limit)
2287 		   || (dev->flags & IFF_ALLMULTI)) {
2288 		writel(0xffffffff, &regs->MARCAM[0]);
2289 		writel(0xffffffff, &regs->MARCAM[4]);
2290 		rx_mode = (RCR_AM | RCR_AB);
2291 	} else {
2292 		int offset = MCAM_SIZE - vptr->multicast_limit;
2293 		mac_get_cam_mask(regs, vptr->mCAMmask);
2294 
2295 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
2296 			mac_set_cam(regs, i + offset, mclist->dmi_addr);
2297 			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
2298 		}
2299 
2300 		mac_set_cam_mask(regs, vptr->mCAMmask);
2301 		rx_mode = RCR_AM | RCR_AB | RCR_AP;
2302 	}
2303 	if (dev->mtu > 1500)
2304 		rx_mode |= RCR_AL;
2305 
2306 	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
2307 
2308 }
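
/*
 * The multicast path above parks the multicast CAM entries at the top of
 * the CAM (offset = MCAM_SIZE - multicast_limit) and marks each used entry
 * in the per-bit mask.  Worked example of the bit arithmetic, assuming
 * MCAM_SIZE is 64 and multicast_limit is 32 (values are illustrative):
 *
 *	offset = 64 - 32 = 32
 *	entry i = 3  ->  CAM index 35
 *	mCAMmask[35 / 8] |= 1 << (35 & 7)	// byte 4, bit 3
 */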
2309 
2310 /**
2311  *	velocity_get_stats	-	statistics callback
2312  *	@dev: network device
2313  *
2314  *	Callback from the network layer to allow driver statistics
2315  *	to be resynchronized with hardware collected state. In the
2316  *	case of the velocity we need to pull the MIB counters from
2317  *	the hardware into the counters before letting the network
2318  *	layer display them.
2319  */
2320 
velocity_get_stats(struct net_device * dev)2321 static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2322 {
2323 	struct velocity_info *vptr = netdev_priv(dev);
2324 
2325 	/* If the hardware is down, don't touch MII */
2326 	if(!netif_running(dev))
2327 		return &vptr->stats;
2328 
2329 	spin_lock_irq(&vptr->lock);
2330 	velocity_update_hw_mibs(vptr);
2331 	spin_unlock_irq(&vptr->lock);
2332 
2333 	vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2334 	vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2335 	vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2336 
2337 //  unsigned long   rx_dropped;     /* no space in linux buffers    */
2338 	vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2339 	/* detailed rx_errors: */
2340 //  unsigned long   rx_length_errors;
2341 //  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
2342 	vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2343 //  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
2344 //  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
2345 //  unsigned long   rx_missed_errors;   /* receiver missed packet   */
2346 
2347 	/* detailed tx_errors */
2348 //  unsigned long   tx_fifo_errors;
2349 
2350 	return &vptr->stats;
2351 }
2352 
2353 
2354 /**
2355  *	velocity_ioctl		-	ioctl entry point
2356  *	@dev: network device
2357  *	@rq: interface request ioctl
2358  *	@cmd: command code
2359  *
2360  *	Called when the user issues an ioctl request to the network
2361  *	device in question. The velocity interface supports MII.
2362  */
2363 
velocity_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)2364 static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2365 {
2366 	struct velocity_info *vptr = netdev_priv(dev);
2367 	int ret;
2368 
2369 	/* If we are asked for information and the device is power
2370 	   saving then we need to bring the device back up to talk to it */
2371 
2372 	if (!netif_running(dev))
2373 		pci_set_power_state(vptr->pdev, PCI_D0);
2374 
2375 	switch (cmd) {
2376 	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
2377 	case SIOCGMIIREG:	/* Read MII PHY register. */
2378 	case SIOCSMIIREG:	/* Write to MII PHY register. */
2379 		ret = velocity_mii_ioctl(dev, rq, cmd);
2380 		break;
2381 
2382 	default:
2383 		ret = -EOPNOTSUPP;
2384 	}
2385 	if (!netif_running(dev))
2386 		pci_set_power_state(vptr->pdev, PCI_D3hot);
2387 
2388 
2389 	return ret;
2390 }
2391 
2392 /*
2393  *	Definition for our device driver. The PCI layer interface
2394  *	uses this to handle all our card discovery and plugging
2395  */
2396 
2397 static struct pci_driver velocity_driver = {
2398       .name	= VELOCITY_NAME,
2399       .id_table	= velocity_id_table,
2400       .probe	= velocity_found1,
2401       .remove	= __devexit_p(velocity_remove1),
2402 #ifdef CONFIG_PM
2403       .suspend	= velocity_suspend,
2404       .resume	= velocity_resume,
2405 #endif
2406 };
2407 
2408 /**
2409  *	velocity_init_module	-	load time function
2410  *
2411  *	Called when the velocity module is loaded. The PCI driver
2412  *	is registered with the PCI layer, and in turn will call
2413  *	the probe functions for each velocity adapter installed
2414  *	in the system.
2415  */
2416 
velocity_init_module(void)2417 static int __init velocity_init_module(void)
2418 {
2419 	int ret;
2420 
2421 	velocity_register_notifier();
2422 	ret = pci_register_driver(&velocity_driver);
2423 	if (ret < 0)
2424 		velocity_unregister_notifier();
2425 	return ret;
2426 }
2427 
2428 /**
2429  *	velocity_cleanup_module	-	module unload
2430  *
2431  *	When the velocity module is unloaded this function is called.
2432  *	It will clean up the notifiers and unregister the PCI
2433  *	driver interface for this hardware. This in turn cleans up
2434  *	all discovered interfaces before returning from the function.
2435  */
2436 
velocity_cleanup_module(void)2437 static void __exit velocity_cleanup_module(void)
2438 {
2439 	velocity_unregister_notifier();
2440 	pci_unregister_driver(&velocity_driver);
2441 }
2442 
2443 module_init(velocity_init_module);
2444 module_exit(velocity_cleanup_module);
2445 
2446 
2447 /*
2448  * MII access , media link mode setting functions
2449  */
2450 
2451 
2452 /**
2453  *	mii_init	-	set up MII
2454  *	@vptr: velocity adapter
2455  *	@mii_status: link status
2456  *
2457  *	Set up the PHY for the current link state.
2458  */
2459 
mii_init(struct velocity_info * vptr,u32 mii_status)2460 static void mii_init(struct velocity_info *vptr, u32 mii_status)
2461 {
2462 	u16 BMCR;
2463 
2464 	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
2465 	case PHYID_CICADA_CS8201:
2466 		/*
2467 		 *	Reset to hardware default
2468 		 */
2469 		MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2470 		/*
2471 		 *	Turn on the ECHODIS bit in NWay-forced full mode and
2472 		 *	turn it off in NWay-forced half mode, to work around the
2473 		 *	NWay-forced vs. legacy-forced issue.
2474 		 */
2475 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
2476 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2477 		else
2478 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2479 		/*
2480 		 *	Turn on Link/Activity LED enable bit for CIS8201
2481 		 */
2482 		MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
2483 		break;
2484 	case PHYID_VT3216_32BIT:
2485 	case PHYID_VT3216_64BIT:
2486 		/*
2487 		 *	Reset to hardware default
2488 		 */
2489 		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2490 		/*
2491 		 *	Turn on the ECHODIS bit in NWay-forced full mode and
2492 		 *	turn it off in NWay-forced half mode, to work around the
2493 		 *	NWay-forced vs. legacy-forced issue
2494 		 */
2495 		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
2496 			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2497 		else
2498 			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
2499 		break;
2500 
2501 	case PHYID_MARVELL_1000:
2502 	case PHYID_MARVELL_1000S:
2503 		/*
2504 		 *	Assert CRS on Transmit
2505 		 */
2506 		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
2507 		/*
2508 		 *	Reset to hardware default
2509 		 */
2510 		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
2511 		break;
2512 	default:
2513 		;
2514 	}
2515 	velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
2516 	if (BMCR & BMCR_ISO) {
2517 		BMCR &= ~BMCR_ISO;
2518 		velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
2519 	}
2520 }
2521 
2522 /**
2523  *	safe_disable_mii_autopoll	-	autopoll off
2524  *	@regs: velocity registers
2525  *
2526  *	Turn off the autopoll and wait for it to disable on the chip
2527  */
2528 
safe_disable_mii_autopoll(struct mac_regs __iomem * regs)2529 static void safe_disable_mii_autopoll(struct mac_regs __iomem * regs)
2530 {
2531 	u16 ww;
2532 
2533 	/*  turn off MAUTO */
2534 	writeb(0, &regs->MIICR);
2535 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
2536 		udelay(1);
2537 		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2538 			break;
2539 	}
2540 }
2541 
2542 /**
2543  *	enable_mii_autopoll	-	turn on autopolling
2544  *	@regs: velocity registers
2545  *
2546  *	Enable the MII link status autopoll feature on the Velocity
2547  *	hardware. Wait for it to enable.
2548  */
2549 
enable_mii_autopoll(struct mac_regs __iomem * regs)2550 static void enable_mii_autopoll(struct mac_regs __iomem * regs)
2551 {
2552 	int ii;
2553 
2554 	writeb(0, &(regs->MIICR));
2555 	writeb(MIIADR_SWMPL, &regs->MIIADR);
2556 
2557 	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
2558 		udelay(1);
2559 		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2560 			break;
2561 	}
2562 
2563 	writeb(MIICR_MAUTO, &regs->MIICR);
2564 
2565 	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
2566 		udelay(1);
2567 		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
2568 			break;
2569 	}
2570 
2571 }
2572 
2573 /**
2574  *	velocity_mii_read	-	read MII data
2575  *	@regs: velocity registers
2576  *	@index: MII register index
2577  *	@data: buffer for received data
2578  *
2579  *	Perform a single read of an MII 16bit register. Returns zero
2580  *	on success or -ETIMEDOUT if the PHY did not respond.
2581  */
2582 
velocity_mii_read(struct mac_regs __iomem * regs,u8 index,u16 * data)2583 static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
2584 {
2585 	u16 ww;
2586 
2587 	/*
2588 	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
2589 	 */
2590 	safe_disable_mii_autopoll(regs);
2591 
2592 	writeb(index, &regs->MIIADR);
2593 
2594 	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
2595 
2596 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
2597 		if (!(readb(&regs->MIICR) & MIICR_RCMD))
2598 			break;
2599 	}
2600 
2601 	*data = readw(&regs->MIIDATA);
2602 
2603 	enable_mii_autopoll(regs);
2604 	if (ww == W_MAX_TIMEOUT)
2605 		return -ETIMEDOUT;
2606 	return 0;
2607 }
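
/*
 * Hedged usage sketch for velocity_mii_read(): poll the PHY's BMSR for the
 * link bit, the same pattern mii_check_media_mode() applies further down.
 * BMSR_LNK and MII_REG_BMSR are the constants this file already uses;
 * error handling kept minimal:
 *
 *	u16 bmsr;
 *
 *	if (velocity_mii_read(vptr->mac_regs, MII_REG_BMSR, &bmsr) == 0 &&
 *	    (bmsr & BMSR_LNK))
 *		;	// link is up
 */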
2608 
2609 /**
2610  *	velocity_mii_write	-	write MII data
2611  *	@regs: velocity registers
2612  *	@index: MII register index
2613  *	@data: 16bit data for the MII register
2614  *
2615  *	Perform a single write to an MII 16bit register. Returns zero
2616  *	on success or -ETIMEDOUT if the PHY did not respond.
2617  */
2618 
velocity_mii_write(struct mac_regs __iomem * regs,u8 mii_addr,u16 data)2619 static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
2620 {
2621 	u16 ww;
2622 
2623 	/*
2624 	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
2625 	 */
2626 	safe_disable_mii_autopoll(regs);
2627 
2628 	/* MII reg offset */
2629 	writeb(mii_addr, &regs->MIIADR);
2630 	/* set MII data */
2631 	writew(data, &regs->MIIDATA);
2632 
2633 	/* turn on MIICR_WCMD */
2634 	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
2635 
2636 	/* W_MAX_TIMEOUT is the timeout period */
2637 	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
2638 		udelay(5);
2639 		if (!(readb(&regs->MIICR) & MIICR_WCMD))
2640 			break;
2641 	}
2642 	enable_mii_autopoll(regs);
2643 
2644 	if (ww == W_MAX_TIMEOUT)
2645 		return -ETIMEDOUT;
2646 	return 0;
2647 }
2648 
2649 /**
2650  *	velocity_get_opt_media_mode	-	get media selection
2651  *	@vptr: velocity adapter
2652  *
2653  *	Get the media mode stored in EEPROM or module options and load
2654  *	mii_status accordingly. The requested link state information
2655  *	is also returned.
2656  */
2657 
velocity_get_opt_media_mode(struct velocity_info * vptr)2658 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
2659 {
2660 	u32 status = 0;
2661 
2662 	switch (vptr->options.spd_dpx) {
2663 	case SPD_DPX_AUTO:
2664 		status = VELOCITY_AUTONEG_ENABLE;
2665 		break;
2666 	case SPD_DPX_100_FULL:
2667 		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
2668 		break;
2669 	case SPD_DPX_10_FULL:
2670 		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
2671 		break;
2672 	case SPD_DPX_100_HALF:
2673 		status = VELOCITY_SPEED_100;
2674 		break;
2675 	case SPD_DPX_10_HALF:
2676 		status = VELOCITY_SPEED_10;
2677 		break;
2678 	}
2679 	vptr->mii_status = status;
2680 	return status;
2681 }
2682 
2683 /**
2684  *	mii_set_auto_on		-	autonegotiate on
2685  *	@vptr: velocity
2686  *
2687  *	Enable autonegotiation on this interface.
2688  */
2689 
mii_set_auto_on(struct velocity_info * vptr)2690 static void mii_set_auto_on(struct velocity_info *vptr)
2691 {
2692 	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
2693 		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
2694 	else
2695 		MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
2696 }
2697 
2698 
2699 /*
2700 static void mii_set_auto_off(struct velocity_info * vptr)
2701 {
2702     MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
2703 }
2704 */
2705 
2706 /**
2707  *	set_mii_flow_control	-	flow control setup
2708  *	@vptr: velocity interface
2709  *
2710  *	Set up the flow control on this interface according to
2711  *	the supplied user/eeprom options.
2712  */
2713 
set_mii_flow_control(struct velocity_info * vptr)2714 static void set_mii_flow_control(struct velocity_info *vptr)
2715 {
2716 	/*Enable or Disable PAUSE in ANAR */
2717 	switch (vptr->options.flow_cntl) {
2718 	case FLOW_CNTL_TX:
2719 		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2720 		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2721 		break;
2722 
2723 	case FLOW_CNTL_RX:
2724 		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2725 		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2726 		break;
2727 
2728 	case FLOW_CNTL_TX_RX:
2729 		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2730 		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2731 		break;
2732 
2733 	case FLOW_CNTL_DISABLE:
2734 		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
2735 		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
2736 		break;
2737 	default:
2738 		break;
2739 	}
2740 }
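
/*
 * Summary of the ANAR pause advertisement set above.  The PAUSE/ASMDIR
 * bits follow the IEEE 802.3 Annex 28B conventions; the interpretation
 * column is the usual reading of those encodings, not something this
 * driver spells out itself:
 *
 *	FLOW_CNTL_TX       PAUSE=0  ASMDIR=1   asymmetric, TX pause only
 *	FLOW_CNTL_RX       PAUSE=1  ASMDIR=1   pause reception (asymmetric allowed)
 *	FLOW_CNTL_TX_RX    PAUSE=1  ASMDIR=1   symmetric pause
 *	FLOW_CNTL_DISABLE  PAUSE=0  ASMDIR=0   no pause advertised
 */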
2741 
2742 /**
2743  *	velocity_set_media_mode		-	set media mode
2744  *	@mii_status: old MII link state
2745  *
2746  *	Check the media link state and configure the flow control,
2747  *	the PHY and the velocity hardware accordingly. In particular
2748  *	we need to set up CD polling and frame bursting.
2749  */
2750 
velocity_set_media_mode(struct velocity_info * vptr,u32 mii_status)2751 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
2752 {
2753 	u32 curr_status;
2754 	struct mac_regs __iomem * regs = vptr->mac_regs;
2755 
2756 	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
2757 	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
2758 
2759 	/* Set mii link status */
2760 	set_mii_flow_control(vptr);
2761 
2762 	/*
2763 	   Check if new status is consistent with current status
2764 	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
2765 	   || (mii_status==curr_status)) {
2766 	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
2767 	   vptr->mii_status=check_connection_type(vptr->mac_regs);
2768 	   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
2769 	   return 0;
2770 	   }
2771 	 */
2772 
2773 	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) {
2774 		MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
2775 	}
2776 
2777 	/*
2778 	 *	If connection type is AUTO
2779 	 */
2780 	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
2781 		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
2782 		/* clear force MAC mode bit */
2783 		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
2784 		/* set duplex mode of MAC according to duplex mode of MII */
2785 		MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
2786 		MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2787 		MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
2788 
2789 		/* enable AUTO-NEGO mode */
2790 		mii_set_auto_on(vptr);
2791 	} else {
2792 		u16 ANAR;
2793 		u8 CHIPGCR;
2794 
2795 		/*
2796 		 * 1. if it's 3119, disable frame bursting in halfduplex mode
2797 		 *    and enable it in fullduplex mode
2798 		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
2799 		 * 3. only enable CD heart beat counter in 10HD mode
2800 		 */
2801 
2802 		/* set force MAC mode bit */
2803 		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2804 
2805 		CHIPGCR = readb(&regs->CHIPGCR);
2806 		CHIPGCR &= ~CHIPGCR_FCGMII;
2807 
2808 		if (mii_status & VELOCITY_DUPLEX_FULL) {
2809 			CHIPGCR |= CHIPGCR_FCFDX;
2810 			writeb(CHIPGCR, &regs->CHIPGCR);
2811 			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
2812 			if (vptr->rev_id < REV_ID_VT3216_A0)
2813 				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
2814 		} else {
2815 			CHIPGCR &= ~CHIPGCR_FCFDX;
2816 			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
2817 			writeb(CHIPGCR, &regs->CHIPGCR);
2818 			if (vptr->rev_id < REV_ID_VT3216_A0)
2819 				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
2820 		}
2821 
2822 		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
2823 
2824 		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) {
2825 			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
2826 		} else {
2827 			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
2828 		}
2829 		/* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
2830 		velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
2831 		ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
2832 		if (mii_status & VELOCITY_SPEED_100) {
2833 			if (mii_status & VELOCITY_DUPLEX_FULL)
2834 				ANAR |= ANAR_TXFD;
2835 			else
2836 				ANAR |= ANAR_TX;
2837 		} else {
2838 			if (mii_status & VELOCITY_DUPLEX_FULL)
2839 				ANAR |= ANAR_10FD;
2840 			else
2841 				ANAR |= ANAR_10;
2842 		}
2843 		velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
2844 		/* enable AUTO-NEGO mode */
2845 		mii_set_auto_on(vptr);
2846 		/* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
2847 	}
2848 	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
2849 	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
2850 	return VELOCITY_LINK_CHANGE;
2851 }
2852 
2853 /**
2854  *	mii_check_media_mode	-	check media state
2855  *	@regs: velocity registers
2856  *
2857  *	Check the current MII status and determine the link status
2858  *	accordingly
2859  */
2860 
mii_check_media_mode(struct mac_regs __iomem * regs)2861 static u32 mii_check_media_mode(struct mac_regs __iomem * regs)
2862 {
2863 	u32 status = 0;
2864 	u16 ANAR;
2865 
2866 	if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
2867 		status |= VELOCITY_LINK_FAIL;
2868 
2869 	if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
2870 		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
2871 	else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
2872 		status |= (VELOCITY_SPEED_1000);
2873 	else {
2874 		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
2875 		if (ANAR & ANAR_TXFD)
2876 			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
2877 		else if (ANAR & ANAR_TX)
2878 			status |= VELOCITY_SPEED_100;
2879 		else if (ANAR & ANAR_10FD)
2880 			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
2881 		else
2882 			status |= (VELOCITY_SPEED_10);
2883 	}
2884 
2885 	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
2886 		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
2887 		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
2888 		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
2889 			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
2890 				status |= VELOCITY_AUTONEG_ENABLE;
2891 		}
2892 	}
2893 
2894 	return status;
2895 }
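
/*
 * The status word built above is a bitmask of VELOCITY_* flags rather than
 * an enum, so callers test individual bits.  Minimal consumer sketch
 * (mirrors what velocity_print_link_status() does earlier in this file):
 *
 *	u32 status = mii_check_media_mode(vptr->mac_regs);
 *
 *	if (status & VELOCITY_LINK_FAIL)
 *		;	// no link
 *	else if ((status & VELOCITY_SPEED_100) &&
 *		 (status & VELOCITY_DUPLEX_FULL))
 *		;	// 100 Mbit full duplex
 */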
2896 
check_connection_type(struct mac_regs __iomem * regs)2897 static u32 check_connection_type(struct mac_regs __iomem * regs)
2898 {
2899 	u32 status = 0;
2900 	u8 PHYSR0;
2901 	u16 ANAR;
2902 	PHYSR0 = readb(&regs->PHYSR0);
2903 
2904 	/*
2905 	   if (!(PHYSR0 & PHYSR0_LINKGD))
2906 	   status|=VELOCITY_LINK_FAIL;
2907 	 */
2908 
2909 	if (PHYSR0 & PHYSR0_FDPX)
2910 		status |= VELOCITY_DUPLEX_FULL;
2911 
2912 	if (PHYSR0 & PHYSR0_SPDG)
2913 		status |= VELOCITY_SPEED_1000;
2914 	else if (PHYSR0 & PHYSR0_SPD10)
2915 		status |= VELOCITY_SPEED_10;
2916 	else
2917 		status |= VELOCITY_SPEED_100;
2918 
2919 	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
2920 		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
2921 		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
2922 		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
2923 			if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
2924 				status |= VELOCITY_AUTONEG_ENABLE;
2925 		}
2926 	}
2927 
2928 	return status;
2929 }
2930 
2931 /**
2932  *	enable_flow_control_ability	-	flow control
2933  *	@vptr: velocity to configure
2934  *
2935  *	Set up flow control according to the flow control options
2936  *	determined by the eeprom/configuration.
2937  */
2938 
enable_flow_control_ability(struct velocity_info * vptr)2939 static void enable_flow_control_ability(struct velocity_info *vptr)
2940 {
2941 
2942 	struct mac_regs __iomem * regs = vptr->mac_regs;
2943 
2944 	switch (vptr->options.flow_cntl) {
2945 
2946 	case FLOW_CNTL_DEFAULT:
2947 		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
2948 			writel(CR0_FDXRFCEN, &regs->CR0Set);
2949 		else
2950 			writel(CR0_FDXRFCEN, &regs->CR0Clr);
2951 
2952 		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
2953 			writel(CR0_FDXTFCEN, &regs->CR0Set);
2954 		else
2955 			writel(CR0_FDXTFCEN, &regs->CR0Clr);
2956 		break;
2957 
2958 	case FLOW_CNTL_TX:
2959 		writel(CR0_FDXTFCEN, &regs->CR0Set);
2960 		writel(CR0_FDXRFCEN, &regs->CR0Clr);
2961 		break;
2962 
2963 	case FLOW_CNTL_RX:
2964 		writel(CR0_FDXRFCEN, &regs->CR0Set);
2965 		writel(CR0_FDXTFCEN, &regs->CR0Clr);
2966 		break;
2967 
2968 	case FLOW_CNTL_TX_RX:
2969 		writel(CR0_FDXTFCEN, &regs->CR0Set);
2970 		writel(CR0_FDXRFCEN, &regs->CR0Set);
2971 		break;
2972 
2973 	case FLOW_CNTL_DISABLE:
2974 		writel(CR0_FDXRFCEN, &regs->CR0Clr);
2975 		writel(CR0_FDXTFCEN, &regs->CR0Clr);
2976 		break;
2977 
2978 	default:
2979 		break;
2980 	}
2981 
2982 }
2983 
2984 
2985 /**
2986  *	velocity_ethtool_up	-	pre hook for ethtool
2987  *	@dev: network device
2988  *
2989  *	Called before an ethtool operation. We need to make sure the
2990  *	chip is out of D3 state before we poke at it.
2991  */
2992 
velocity_ethtool_up(struct net_device * dev)2993 static int velocity_ethtool_up(struct net_device *dev)
2994 {
2995 	struct velocity_info *vptr = netdev_priv(dev);
2996 	if (!netif_running(dev))
2997 		pci_set_power_state(vptr->pdev, PCI_D0);
2998 	return 0;
2999 }
3000 
3001 /**
3002  *	velocity_ethtool_down	-	post hook for ethtool
3003  *	@dev: network device
3004  *
3005  *	Called after an ethtool operation. Restore the chip back to D3
3006  *	state if it isn't running.
3007  */
3008 
velocity_ethtool_down(struct net_device * dev)3009 static void velocity_ethtool_down(struct net_device *dev)
3010 {
3011 	struct velocity_info *vptr = netdev_priv(dev);
3012 	if (!netif_running(dev))
3013 		pci_set_power_state(vptr->pdev, PCI_D3hot);
3014 }
3015 
velocity_get_settings(struct net_device * dev,struct ethtool_cmd * cmd)3016 static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3017 {
3018 	struct velocity_info *vptr = netdev_priv(dev);
3019 	struct mac_regs __iomem * regs = vptr->mac_regs;
3020 	u32 status;
3021 	status = check_connection_type(vptr->mac_regs);
3022 
3023 	cmd->supported = SUPPORTED_TP |
3024 			SUPPORTED_Autoneg |
3025 			SUPPORTED_10baseT_Half |
3026 			SUPPORTED_10baseT_Full |
3027 			SUPPORTED_100baseT_Half |
3028 			SUPPORTED_100baseT_Full |
3029 			SUPPORTED_1000baseT_Half |
3030 			SUPPORTED_1000baseT_Full;
3031 	if (status & VELOCITY_SPEED_1000)
3032 		cmd->speed = SPEED_1000;
3033 	else if (status & VELOCITY_SPEED_100)
3034 		cmd->speed = SPEED_100;
3035 	else
3036 		cmd->speed = SPEED_10;
3037 	cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3038 	cmd->port = PORT_TP;
3039 	cmd->transceiver = XCVR_INTERNAL;
3040 	cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3041 
3042 	if (status & VELOCITY_DUPLEX_FULL)
3043 		cmd->duplex = DUPLEX_FULL;
3044 	else
3045 		cmd->duplex = DUPLEX_HALF;
3046 
3047 	return 0;
3048 }
3049 
velocity_set_settings(struct net_device * dev,struct ethtool_cmd * cmd)3050 static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3051 {
3052 	struct velocity_info *vptr = netdev_priv(dev);
3053 	u32 curr_status;
3054 	u32 new_status = 0;
3055 	int ret = 0;
3056 
3057 	curr_status = check_connection_type(vptr->mac_regs);
3058 	curr_status &= (~VELOCITY_LINK_FAIL);
3059 
3060 	new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3061 	new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3062 	new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3063 	new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3064 
3065 	if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
3066 		ret = -EINVAL;
3067 	else
3068 		velocity_set_media_mode(vptr, new_status);
3069 
3070 	return ret;
3071 }
3072 
velocity_get_link(struct net_device * dev)3073 static u32 velocity_get_link(struct net_device *dev)
3074 {
3075 	struct velocity_info *vptr = netdev_priv(dev);
3076 	struct mac_regs __iomem * regs = vptr->mac_regs;
3077 	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
3078 }
3079 
velocity_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)3080 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3081 {
3082 	struct velocity_info *vptr = netdev_priv(dev);
3083 	strcpy(info->driver, VELOCITY_NAME);
3084 	strcpy(info->version, VELOCITY_VERSION);
3085 	strcpy(info->bus_info, pci_name(vptr->pdev));
3086 }
3087 
velocity_ethtool_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)3088 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3089 {
3090 	struct velocity_info *vptr = netdev_priv(dev);
3091 	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3092 	wol->wolopts |= WAKE_MAGIC;
3093 	/*
3094 	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3095 		   wol.wolopts|=WAKE_PHY;
3096 			 */
3097 	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3098 		wol->wolopts |= WAKE_UCAST;
3099 	if (vptr->wol_opts & VELOCITY_WOL_ARP)
3100 		wol->wolopts |= WAKE_ARP;
3101 	memcpy(&wol->sopass, vptr->wol_passwd, 6);
3102 }
3103 
velocity_ethtool_set_wol(struct net_device * dev,struct ethtool_wolinfo * wol)3104 static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3105 {
3106 	struct velocity_info *vptr = netdev_priv(dev);
3107 
3108 	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3109 		return -EFAULT;
3110 	vptr->wol_opts = VELOCITY_WOL_MAGIC;
3111 
3112 	/*
3113 	   if (wol.wolopts & WAKE_PHY) {
3114 	   vptr->wol_opts|=VELOCITY_WOL_PHY;
3115 	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3116 	   }
3117 	 */
3118 
3119 	if (wol->wolopts & WAKE_MAGIC) {
3120 		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3121 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3122 	}
3123 	if (wol->wolopts & WAKE_UCAST) {
3124 		vptr->wol_opts |= VELOCITY_WOL_UCAST;
3125 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3126 	}
3127 	if (wol->wolopts & WAKE_ARP) {
3128 		vptr->wol_opts |= VELOCITY_WOL_ARP;
3129 		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3130 	}
3131 	memcpy(vptr->wol_passwd, wol->sopass, 6);
3132 	return 0;
3133 }
3134 
velocity_get_msglevel(struct net_device * dev)3135 static u32 velocity_get_msglevel(struct net_device *dev)
3136 {
3137 	return msglevel;
3138 }
3139 
velocity_set_msglevel(struct net_device * dev,u32 value)3140 static void velocity_set_msglevel(struct net_device *dev, u32 value)
3141 {
3142 	 msglevel = value;
3143 }
3144 
3145 static const struct ethtool_ops velocity_ethtool_ops = {
3146 	.get_settings	=	velocity_get_settings,
3147 	.set_settings	=	velocity_set_settings,
3148 	.get_drvinfo	=	velocity_get_drvinfo,
3149 	.get_wol	=	velocity_ethtool_get_wol,
3150 	.set_wol	=	velocity_ethtool_set_wol,
3151 	.get_msglevel	=	velocity_get_msglevel,
3152 	.set_msglevel	=	velocity_set_msglevel,
3153 	.get_link	=	velocity_get_link,
3154 	.begin		=	velocity_ethtool_up,
3155 	.complete	=	velocity_ethtool_down
3156 };
3157 
3158 /**
3159  *	velocity_mii_ioctl		-	MII ioctl handler
3160  *	@dev: network device
3161  *	@ifr: the ifreq block for the ioctl
3162  *	@cmd: the command
3163  *
3164  *	Process MII requests made via ioctl from the network layer. These
3165  *	are used by tools like kudzu to interrogate the link state of the
3166  *	hardware
3167  */
3168 
velocity_mii_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)3169 static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3170 {
3171 	struct velocity_info *vptr = netdev_priv(dev);
3172 	struct mac_regs __iomem * regs = vptr->mac_regs;
3173 	unsigned long flags;
3174 	struct mii_ioctl_data *miidata = if_mii(ifr);
3175 	int err;
3176 
3177 	switch (cmd) {
3178 	case SIOCGMIIPHY:
3179 		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
3180 		break;
3181 	case SIOCGMIIREG:
3182 		if (!capable(CAP_NET_ADMIN))
3183 			return -EPERM;
3184 		if(velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
3185 			return -ETIMEDOUT;
3186 		break;
3187 	case SIOCSMIIREG:
3188 		if (!capable(CAP_NET_ADMIN))
3189 			return -EPERM;
3190 		spin_lock_irqsave(&vptr->lock, flags);
3191 		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
3192 		spin_unlock_irqrestore(&vptr->lock, flags);
3193 		check_connection_type(vptr->mac_regs);
3194 		if(err)
3195 			return err;
3196 		break;
3197 	default:
3198 		return -EOPNOTSUPP;
3199 	}
3200 	return 0;
3201 }
3202 
3203 #ifdef CONFIG_PM
3204 
3205 /**
3206  *	velocity_save_context	-	save registers
3207  *	@vptr: velocity
3208  *	@context: buffer for stored context
3209  *
3210  *	Retrieve the current configuration from the velocity hardware
3211  *	and stash it in the context structure, for use by the context
3212  *	restore functions. This allows us to save things we need across
3213  *	power down states
3214  */
3215 
velocity_save_context(struct velocity_info * vptr,struct velocity_context * context)3216 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context * context)
3217 {
3218 	struct mac_regs __iomem * regs = vptr->mac_regs;
3219 	u16 i;
3220 	u8 __iomem *ptr = (u8 __iomem *)regs;
3221 
3222 	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3223 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3224 
3225 	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3226 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3227 
3228 	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3229 		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3230 
3231 }
3232 
3233 /**
3234  *	velocity_restore_context	-	restore registers
3235  *	@vptr: velocity
3236  *	@context: buffer for stored context
3237  *
3238  *	Reload the register configuration from the velocity context
3239  *	created by velocity_save_context.
3240  */
3241 
velocity_restore_context(struct velocity_info * vptr,struct velocity_context * context)3242 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3243 {
3244 	struct mac_regs __iomem * regs = vptr->mac_regs;
3245 	int i;
3246 	u8 __iomem *ptr = (u8 __iomem *)regs;
3247 
3248 	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) {
3249 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3250 	}
3251 
3252 	/* Just skip cr0 */
3253 	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3254 		/* Clear */
3255 		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3256 		/* Set */
3257 		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3258 	}
3259 
3260 	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) {
3261 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3262 	}
3263 
3264 	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) {
3265 		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3266 	}
3267 
3268 	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) {
3269 		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3270 	}
3271 
3272 }
3273 
3274 /**
3275  *	wol_calc_crc		-	WOL CRC
3276  *	@pattern: data pattern
3277  *	@mask_pattern: mask
3278  *
3279  *	Compute the wake on lan crc hashes for the packet header
3280  *	we are interested in.
3281  */
3282 
wol_calc_crc(int size,u8 * pattern,u8 * mask_pattern)3283 static u16 wol_calc_crc(int size, u8 * pattern, u8 *mask_pattern)
3284 {
3285 	u16 crc = 0xFFFF;
3286 	u8 mask;
3287 	int i, j;
3288 
3289 	for (i = 0; i < size; i++) {
3290 		mask = mask_pattern[i];
3291 
3292 		/* Skip this loop if the mask equals to zero */
3293 		if (mask == 0x00)
3294 			continue;
3295 
3296 		for (j = 0; j < 8; j++) {
3297 			if ((mask & 0x01) == 0) {
3298 				mask >>= 1;
3299 				continue;
3300 			}
3301 			mask >>= 1;
3302 			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
3303 		}
3304 	}
3305 	/*	Finally, invert the result once to get the correct data */
3306 	crc = ~crc;
3307 	return bitrev32(crc) >> 16;
3308 }
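
/*
 * wol_calc_crc() feeds only the pattern bytes whose mask bit is set into
 * crc_ccitt(), one byte at a time.  Worked example with illustrative
 * values: a mask byte of 0x05 at index 0 selects pattern bytes 0 and 2, so
 * the loop reduces to:
 *
 *	crc = crc_ccitt(0xFFFF, &pattern[0], 1);
 *	crc = crc_ccitt(crc,    &pattern[2], 1);
 *	crc = ~crc;
 *	return bitrev32(crc) >> 16;	// the 16-bit CRC, bit-reversed for the chip
 */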
3309 
3310 /**
3311  *	velocity_set_wol	-	set up for wake on lan
3312  *	@vptr: velocity to set WOL status on
3313  *
3314  *	Set a card up for wake on lan either by unicast or by
3315  *	ARP packet.
3316  *
3317  *	FIXME: check static buffer is safe here
3318  */
3319 
velocity_set_wol(struct velocity_info * vptr)3320 static int velocity_set_wol(struct velocity_info *vptr)
3321 {
3322 	struct mac_regs __iomem * regs = vptr->mac_regs;
3323 	static u8 buf[256];
3324 	int i;
3325 
3326 	static u32 mask_pattern[2][4] = {
3327 		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
3328 		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
3329 	};
3330 
3331 	writew(0xFFFF, &regs->WOLCRClr);
3332 	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
3333 	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
3334 
3335 	/*
3336 	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3337 	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
3338 	 */
3339 
3340 	if (vptr->wol_opts & VELOCITY_WOL_UCAST) {
3341 		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
3342 	}
3343 
3344 	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3345 		struct arp_packet *arp = (struct arp_packet *) buf;
3346 		u16 crc;
3347 		memset(buf, 0, sizeof(struct arp_packet) + 7);
3348 
3349 		for (i = 0; i < 4; i++)
3350 			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
3351 
3352 		arp->type = htons(ETH_P_ARP);
3353 		arp->ar_op = htons(1);
3354 
3355 		memcpy(arp->ar_tip, vptr->ip_addr, 4);
3356 
3357 		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3358 				(u8 *) & mask_pattern[0][0]);
3359 
3360 		writew(crc, &regs->PatternCRC[0]);
3361 		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
3362 	}
3363 
3364 	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
3365 	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
3366 
3367 	writew(0x0FFF, &regs->WOLSRClr);
3368 
3369 	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3370 		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3371 			MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
3372 
3373 		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
3374 	}
3375 
3376 	if (vptr->mii_status & VELOCITY_SPEED_1000)
3377 		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
3378 
3379 	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
3380 
3381 	{
3382 		u8 GCR;
3383 		GCR = readb(&regs->CHIPGCR);
3384 		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3385 		writeb(GCR, &regs->CHIPGCR);
3386 	}
3387 
3388 	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
3389 	/* Turn on SWPTAG just before entering power mode */
3390 	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
3391 	/* Go to bed ..... */
3392 	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
3393 
3394 	return 0;
3395 }
3396 
velocity_suspend(struct pci_dev * pdev,pm_message_t state)3397 static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
3398 {
3399 	struct net_device *dev = pci_get_drvdata(pdev);
3400 	struct velocity_info *vptr = netdev_priv(dev);
3401 	unsigned long flags;
3402 
3403 	if(!netif_running(vptr->dev))
3404 		return 0;
3405 
3406 	netif_device_detach(vptr->dev);
3407 
3408 	spin_lock_irqsave(&vptr->lock, flags);
3409 	pci_save_state(pdev);
3410 #ifdef ETHTOOL_GWOL
3411 	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3412 		velocity_get_ip(vptr);
3413 		velocity_save_context(vptr, &vptr->context);
3414 		velocity_shutdown(vptr);
3415 		velocity_set_wol(vptr);
3416 		pci_enable_wake(pdev, PCI_D3hot, 1);
3417 		pci_set_power_state(pdev, PCI_D3hot);
3418 	} else {
3419 		velocity_save_context(vptr, &vptr->context);
3420 		velocity_shutdown(vptr);
3421 		pci_disable_device(pdev);
3422 		pci_set_power_state(pdev, pci_choose_state(pdev, state));
3423 	}
3424 #else
3425 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3426 #endif
3427 	spin_unlock_irqrestore(&vptr->lock, flags);
3428 	return 0;
3429 }
3430 
velocity_resume(struct pci_dev * pdev)3431 static int velocity_resume(struct pci_dev *pdev)
3432 {
3433 	struct net_device *dev = pci_get_drvdata(pdev);
3434 	struct velocity_info *vptr = netdev_priv(dev);
3435 	unsigned long flags;
3436 	int i;
3437 
3438 	if(!netif_running(vptr->dev))
3439 		return 0;
3440 
3441 	pci_set_power_state(pdev, PCI_D0);
3442 	pci_enable_wake(pdev, 0, 0);
3443 	pci_restore_state(pdev);
3444 
3445 	mac_wol_reset(vptr->mac_regs);
3446 
3447 	spin_lock_irqsave(&vptr->lock, flags);
3448 	velocity_restore_context(vptr, &vptr->context);
3449 	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3450 	mac_disable_int(vptr->mac_regs);
3451 
3452 	velocity_tx_srv(vptr, 0);
3453 
3454 	for (i = 0; i < vptr->tx.numq; i++) {
3455 		if (vptr->tx.used[i]) {
3456 			mac_tx_queue_wake(vptr->mac_regs, i);
3457 		}
3458 	}
3459 
3460 	mac_enable_int(vptr->mac_regs);
3461 	spin_unlock_irqrestore(&vptr->lock, flags);
3462 	netif_device_attach(vptr->dev);
3463 
3464 	return 0;
3465 }
3466 
3467 #ifdef CONFIG_INET
3468 
velocity_netdev_event(struct notifier_block * nb,unsigned long notification,void * ptr)3469 static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3470 {
3471 	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3472 	struct net_device *dev = ifa->ifa_dev->dev;
3473 	struct velocity_info *vptr;
3474 	unsigned long flags;
3475 
3476 	if (dev_net(dev) != &init_net)
3477 		return NOTIFY_DONE;
3478 
3479 	spin_lock_irqsave(&velocity_dev_list_lock, flags);
3480 	list_for_each_entry(vptr, &velocity_dev_list, list) {
3481 		if (vptr->dev == dev) {
3482 			velocity_get_ip(vptr);
3483 			break;
3484 		}
3485 	}
3486 	spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
3487 
3488 	return NOTIFY_DONE;
3489 }
3490 
3491 #endif
3492 #endif
3493