/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG							\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static const struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 *  These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
   PHY_TYPE_UNKNOWN   = 0,
   PHY_VITESSE_VSC8211,
   PHY_AGERE_ET1011C,
   MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE	phyDevice;
	const u32		phyIdOUI;
	const u16		phyIdModel;
	const char		*name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			    u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		mdelay(1000);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	do {
		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		mdelay(1000);
	} while (++i < 10);

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}
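
/*
 * Register access helpers. Most port registers live on banked pages
 * selected through ispControlStatus; the *_page0/1/2 variants switch
 * pages on demand and the "_l" variants take hw_lock around the access.
 * Each write is followed by a read so the posted PCI write is flushed
 * to the chip before we continue.
 */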

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
			&port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			    (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			    ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
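
/*
 * Receive-buffer free list handling. Completed large buffers go back on
 * a singly linked free list; if the skb allocation below fails,
 * lrg_buf_skb_check counts the shortfall so that
 * ql_populate_free_queue() can retry the allocation later.
 */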

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from the
			 * first buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);
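
/*
 * The FM93C56A serial EEPROM is bit-banged through the serial port
 * interface register: the helpers below toggle chip select, clock
 * command and address bits out on the DO line, and clock data back in
 * on the DI line.
 */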
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;

	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}
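
/*
 * Pull the configuration image out of NVRAM. Per the check below, the
 * image is considered valid only when its 16-bit words sum to zero, so
 * a nonzero checksum means the contents cannot be trusted.
 */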

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};
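
/*
 * MII management access. Reads and writes go through the macMIIMgmt*
 * registers and must wait for MAC_MII_STATUS_BSY to clear. The MAC can
 * also autonomously scan a PHY register ("scan mode"); that has to be
 * turned off around any manual MII access and restored afterwards.
 */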

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
					qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
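
/*
 * PETBI helpers. The PETBI (presumably the on-chip ten-bit interface
 * used on fiber/SerDes ports, per ql_is_fiber() below) is programmed
 * through the same MII helpers; the *_ex variants address an explicit
 * PHY via PHYAddr[] rather than the adapter's default address.
 */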

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32   oui;
	u16   model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}
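
/*
 * PHY detection. As the retry below shows, the Agere ET1011C does not
 * answer at the default MDIO address, so an all-ones ID read is retried
 * at the Agere-specific address for this function before giving up.
 */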

static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16   reg1;
	u16   reg2;
	bool  agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/*  Determine the PHY we are using by reading the IDs */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/*  Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address to use, based on the
		   index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/*  We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/*  Determine the particular PHY we have on board to apply
	    PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
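
/*
 * Link sensing. The per-port link, auto-negotiation and media status
 * bits live in the page-0 portStatus register; each helper below just
 * picks the bit that corresponds to this function's mac_index.
 */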

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;

	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	/* turn off external loopback */
	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/*  Some HBAs in the field are set to 0 and need to be
	    reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       ql_get_link_speed(qdev) == SPEED_1000);
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
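
/*
 * Periodic link state machine. Each pass samples the link state under
 * hw_lock, finishes MAC configuration once auto-negotiation completes,
 * and rearms adapter_timer for the next pass one second out.
 */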

static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 1 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 1 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
			&port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}
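
/*
 * ethtool accessors. These query the PHY, so each one takes hw_lock
 * plus the PHY GIO semaphore before touching the MII bus, and reports
 * a safe default if the semaphore cannot be acquired.
 */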

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ql3xxx_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg  = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * the first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}
1833 
1834 /*
1835  * Caller holds hw_lock.
1836  */
1837 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1838 {
1839 	struct ql3xxx_port_registers __iomem *port_regs =
1840 		qdev->mem_map_registers;
1841 
1842 	if (qdev->small_buf_release_cnt >= 16) {
1843 		while (qdev->small_buf_release_cnt >= 16) {
1844 			qdev->small_buf_q_producer_index++;
1845 
1846 			if (qdev->small_buf_q_producer_index ==
1847 			    NUM_SBUFQ_ENTRIES)
1848 				qdev->small_buf_q_producer_index = 0;
1849 			qdev->small_buf_release_cnt -= 8;
1850 		}
1851 		wmb();
1852 		writel(qdev->small_buf_q_producer_index,
1853 			&port_regs->CommonRegs.rxSmallQProducerIndex);
1854 	}
1855 }
1856 
1857 /*
1858  * Caller holds hw_lock.
1859  */
1860 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1861 {
1862 	struct bufq_addr_element *lrg_buf_q_ele;
1863 	int i;
1864 	struct ql_rcv_buf_cb *lrg_buf_cb;
1865 	struct ql3xxx_port_registers __iomem *port_regs =
1866 		qdev->mem_map_registers;
1867 
1868 	if ((qdev->lrg_buf_free_count >= 8) &&
1869 	    (qdev->lrg_buf_release_cnt >= 16)) {
1870 
1871 		if (qdev->lrg_buf_skb_check)
1872 			if (!ql_populate_free_queue(qdev))
1873 				return;
1874 
1875 		lrg_buf_q_ele = qdev->lrg_buf_next_free;
1876 
1877 		while ((qdev->lrg_buf_release_cnt >= 16) &&
1878 		       (qdev->lrg_buf_free_count >= 8)) {
1879 
1880 			for (i = 0; i < 8; i++) {
1881 				lrg_buf_cb =
1882 				    ql_get_from_lrg_buf_free_list(qdev);
1883 				lrg_buf_q_ele->addr_high =
1884 				    lrg_buf_cb->buf_phy_addr_high;
1885 				lrg_buf_q_ele->addr_low =
1886 				    lrg_buf_cb->buf_phy_addr_low;
1887 				lrg_buf_q_ele++;
1888 
1889 				qdev->lrg_buf_release_cnt--;
1890 			}
1891 
1892 			qdev->lrg_buf_q_producer_index++;
1893 
1894 			if (qdev->lrg_buf_q_producer_index ==
1895 			    qdev->num_lbufq_entries)
1896 				qdev->lrg_buf_q_producer_index = 0;
1897 
1898 			if (qdev->lrg_buf_q_producer_index ==
1899 			    (qdev->num_lbufq_entries - 1)) {
1900 				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1901 			}
1902 		}
1903 		wmb();
1904 		qdev->lrg_buf_next_free = lrg_buf_q_ele;
1905 		writel(qdev->lrg_buf_q_producer_index,
1906 			&port_regs->CommonRegs.rxLargeQProducerIndex);
1907 	}
1908 }
1909 
1910 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1911 				   struct ob_mac_iocb_rsp *mac_rsp)
1912 {
1913 	struct ql_tx_buf_cb *tx_cb;
1914 	int i;
1915 
1916 	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1917 		netdev_warn(qdev->ndev,
1918 			    "Frame too short but it was padded and sent\n");
1919 	}
1920 
1921 	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1922 
1923 	/*  Check the transmit response flags for any errors */
1924 	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1925 		netdev_err(qdev->ndev,
1926 			   "Frame too short to be legal, frame not sent\n");
1927 
1928 		qdev->ndev->stats.tx_errors++;
1929 		goto frame_not_sent;
1930 	}
1931 
1932 	if (tx_cb->seg_count == 0) {
1933 		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
1934 			   mac_rsp->transaction_id);
1935 
1936 		qdev->ndev->stats.tx_errors++;
1937 		goto invalid_seg_count;
1938 	}
1939 
1940 	pci_unmap_single(qdev->pdev,
1941 			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
1942 			 dma_unmap_len(&tx_cb->map[0], maplen),
1943 			 PCI_DMA_TODEVICE);
1944 	tx_cb->seg_count--;
1945 	if (tx_cb->seg_count) {
1946 		for (i = 1; i < tx_cb->seg_count; i++) {
1947 			pci_unmap_page(qdev->pdev,
1948 				       dma_unmap_addr(&tx_cb->map[i],
1949 						      mapaddr),
1950 				       dma_unmap_len(&tx_cb->map[i], maplen),
1951 				       PCI_DMA_TODEVICE);
1952 		}
1953 	}
1954 	qdev->ndev->stats.tx_packets++;
1955 	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
1956 
1957 frame_not_sent:
1958 	dev_kfree_skb_irq(tx_cb->skb);
1959 	tx_cb->skb = NULL;
1960 
1961 invalid_seg_count:
1962 	atomic_inc(&qdev->tx_count);
1963 }
1964 
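/*
 * Consume one entry from the small-buffer ring: advance the index with
 * wraparound and count the release for the next producer-index update.
 */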
1965 static void ql_get_sbuf(struct ql3_adapter *qdev)
1966 {
1967 	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1968 		qdev->small_buf_index = 0;
1969 	qdev->small_buf_release_cnt++;
1970 }
1971 
1972 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
1973 {
1974 	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
1975 	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
1976 	qdev->lrg_buf_release_cnt++;
1977 	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
1978 		qdev->lrg_buf_index = 0;
1979 	return lrg_buf_cb;
1980 }
1981 
1982 /*
1983  * The difference between 3022 and 3032 for inbound completions:
1984  * 3022 uses two buffers per completion.  The first buffer contains
1985  * (some) header info, the second the remainder of the headers plus
1986  * the data.  For this chip we reserve some space at the top of the
1987  * receive buffer so that the header info in buffer one can be
1988  * prepended to buffer two.  Buffer two is then sent up while
1989  * buffer one is returned to the hardware to be reused.
1990  * 3032 receives all of its data and headers in one buffer for a
1991  * simpler process.  3032 also supports checksum verification as
1992  * can be seen in ql_process_macip_rx_intr().
1993  */
1994 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1995 				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1996 {
1997 	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1998 	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1999 	struct sk_buff *skb;
2000 	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2001 
2002 	/*
2003 	 * Get the inbound address list (small buffer).
2004 	 */
2005 	ql_get_sbuf(qdev);
2006 
2007 	if (qdev->device_id == QL3022_DEVICE_ID)
2008 		lrg_buf_cb1 = ql_get_lbuf(qdev);
2009 
2010 	/* start of second buffer */
2011 	lrg_buf_cb2 = ql_get_lbuf(qdev);
2012 	skb = lrg_buf_cb2->skb;
2013 
2014 	qdev->ndev->stats.rx_packets++;
2015 	qdev->ndev->stats.rx_bytes += length;
2016 
2017 	skb_put(skb, length);
2018 	pci_unmap_single(qdev->pdev,
2019 			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
2020 			 dma_unmap_len(lrg_buf_cb2, maplen),
2021 			 PCI_DMA_FROMDEVICE);
2022 	prefetch(skb->data);
2023 	skb_checksum_none_assert(skb);
2024 	skb->protocol = eth_type_trans(skb, qdev->ndev);
2025 
2026 	netif_receive_skb(skb);
2027 	lrg_buf_cb2->skb = NULL;
2028 
2029 	if (qdev->device_id == QL3022_DEVICE_ID)
2030 		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2031 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2032 }
2033 
2034 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2035 				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2036 {
2037 	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2038 	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
2039 	struct sk_buff *skb1 = NULL, *skb2;
2040 	struct net_device *ndev = qdev->ndev;
2041 	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2042 	u16 size = 0;
2043 
2044 	/*
2045 	 * Get the inbound address list (small buffer).
2046 	 */
2047 
2048 	ql_get_sbuf(qdev);
2049 
2050 	if (qdev->device_id == QL3022_DEVICE_ID) {
2051 		/* start of first buffer on 3022 */
2052 		lrg_buf_cb1 = ql_get_lbuf(qdev);
2053 		skb1 = lrg_buf_cb1->skb;
2054 		size = ETH_HLEN;
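		/*
		 * A leading 0xFFFF apparently marks an untagged frame;
		 * anything else means a VLAN tag is present, so copy the
		 * larger VLAN header instead.
		 */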
2055 		if (*((u16 *) skb1->data) != 0xFFFF)
2056 			size += VLAN_ETH_HLEN - ETH_HLEN;
2057 	}
2058 
2059 	/* start of second buffer */
2060 	lrg_buf_cb2 = ql_get_lbuf(qdev);
2061 	skb2 = lrg_buf_cb2->skb;
2062 
2063 	skb_put(skb2, length);	/* Just the second buffer length here. */
2064 	pci_unmap_single(qdev->pdev,
2065 			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
2066 			 dma_unmap_len(lrg_buf_cb2, maplen),
2067 			 PCI_DMA_FROMDEVICE);
2068 	prefetch(skb2->data);
2069 
2070 	skb_checksum_none_assert(skb2);
2071 	if (qdev->device_id == QL3022_DEVICE_ID) {
2072 		/*
2073 		 * Copy the ethhdr from first buffer to second. This
2074 		 * is necessary for 3022 IP completions.
2075 		 */
2076 		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2077 						 skb_push(skb2, size), size);
2078 	} else {
2079 		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2080 		if (checksum &
2081 			(IB_IP_IOCB_RSP_3032_ICE |
2082 			 IB_IP_IOCB_RSP_3032_CE)) {
2083 			netdev_err(ndev,
2084 				   "%s: Bad checksum for this %s packet, checksum = %x\n",
2085 				   __func__,
2086 				   ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
2087 				    "TCP" : "UDP"), checksum);
2088 		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2089 				(checksum & IB_IP_IOCB_RSP_3032_UDP &&
2090 				!(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
2091 			skb2->ip_summed = CHECKSUM_UNNECESSARY;
2092 		}
2093 	}
2094 	skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2095 
2096 	netif_receive_skb(skb2);
2097 	ndev->stats.rx_packets++;
2098 	ndev->stats.rx_bytes += length;
2099 	lrg_buf_cb2->skb = NULL;
2100 
2101 	if (qdev->device_id == QL3022_DEVICE_ID)
2102 		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
2103 	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2104 }
2105 
2106 static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2107 			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
2108 {
2109 	struct net_rsp_iocb *net_rsp;
2110 	struct net_device *ndev = qdev->ndev;
2111 	int work_done = 0;
2112 
2113 	/* While there are entries in the completion queue. */
2114 	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
2115 		qdev->rsp_consumer_index) && (work_done < work_to_do)) {
2116 
2117 		net_rsp = qdev->rsp_current;
2118 		rmb();
2119 		/*
2120 		 * Fix the 3032 chip's undocumented "feature" where bit-8 is set
2121 		 * if the inbound completion is for a VLAN.
2122 		 */
2123 		if (qdev->device_id == QL3032_DEVICE_ID)
2124 			net_rsp->opcode &= 0x7f;
2125 		switch (net_rsp->opcode) {
2126 
2127 		case OPCODE_OB_MAC_IOCB_FN0:
2128 		case OPCODE_OB_MAC_IOCB_FN2:
2129 			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2130 					       net_rsp);
2131 			(*tx_cleaned)++;
2132 			break;
2133 
2134 		case OPCODE_IB_MAC_IOCB:
2135 		case OPCODE_IB_3032_MAC_IOCB:
2136 			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2137 					       net_rsp);
2138 			(*rx_cleaned)++;
2139 			break;
2140 
2141 		case OPCODE_IB_IP_IOCB:
2142 		case OPCODE_IB_3032_IP_IOCB:
2143 			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2144 						 net_rsp);
2145 			(*rx_cleaned)++;
2146 			break;
2147 		default: {
2148 			u32 *tmp = (u32 *)net_rsp;
2149 			netdev_err(ndev,
2150 				   "Hit default case, not handled!\n"
2151 				   "	dropping the packet, opcode = %x\n"
2152 				   "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2153 				   net_rsp->opcode,
2154 				   (unsigned long int)tmp[0],
2155 				   (unsigned long int)tmp[1],
2156 				   (unsigned long int)tmp[2],
2157 				   (unsigned long int)tmp[3]);
2158 		}
2159 		}
2160 
2161 		qdev->rsp_consumer_index++;
2162 
2163 		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2164 			qdev->rsp_consumer_index = 0;
2165 			qdev->rsp_current = qdev->rsp_q_virt_addr;
2166 		} else {
2167 			qdev->rsp_current++;
2168 		}
2169 
2170 		work_done = *tx_cleaned + *rx_cleaned;
2171 	}
2172 
2173 	return work_done;
2174 }
2175 
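/*
 * NAPI poll routine: service up to "budget" completions.  If the whole
 * budget was not consumed, complete NAPI, push the buffer-queue producer
 * indexes and the response-queue consumer index to the chip under
 * hw_lock, and re-enable interrupts.
 */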
2176 static int ql_poll(struct napi_struct *napi, int budget)
2177 {
2178 	struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2179 	int rx_cleaned = 0, tx_cleaned = 0;
2180 	unsigned long hw_flags;
2181 	struct ql3xxx_port_registers __iomem *port_regs =
2182 		qdev->mem_map_registers;
2183 
2184 	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2185 
2186 	if (tx_cleaned + rx_cleaned != budget) {
2187 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2188 		__napi_complete(napi);
2189 		ql_update_small_bufq_prod_index(qdev);
2190 		ql_update_lrg_bufq_prod_index(qdev);
2191 		writel(qdev->rsp_consumer_index,
2192 			    &port_regs->CommonRegs.rspQConsumerIndex);
2193 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2194 
2195 		ql_enable_interrupts(qdev);
2196 	}
2197 	return tx_cleaned + rx_cleaned;
2198 }
2199 
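/*
 * Interrupt handler.  Fatal-error and soft-reset events schedule the
 * reset worker with interrupts left disabled; normal completion
 * interrupts are deferred to NAPI, which re-enables interrupts in
 * ql_poll() when it finishes.
 */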
2200 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2201 {
2202 
2203 	struct net_device *ndev = dev_id;
2204 	struct ql3_adapter *qdev = netdev_priv(ndev);
2205 	struct ql3xxx_port_registers __iomem *port_regs =
2206 		qdev->mem_map_registers;
2207 	u32 value;
2208 	int handled = 1;
2209 	u32 var;
2210 
2211 	value = ql_read_common_reg_l(qdev,
2212 				     &port_regs->CommonRegs.ispControlStatus);
2213 
2214 	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2215 		spin_lock(&qdev->adapter_lock);
2216 		netif_stop_queue(qdev->ndev);
2217 		netif_carrier_off(qdev->ndev);
2218 		ql_disable_interrupts(qdev);
2219 		qdev->port_link_state = LS_DOWN;
2220 		set_bit(QL_RESET_ACTIVE, &qdev->flags);
2221 
2222 		if (value & ISP_CONTROL_FE) {
2223 			/*
2224 			 * Chip Fatal Error.
2225 			 */
2226 			var =
2227 			    ql_read_page0_reg_l(qdev,
2228 					      &port_regs->PortFatalErrStatus);
2229 			netdev_warn(ndev,
2230 				    "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2231 				    var);
2232 			set_bit(QL_RESET_START, &qdev->flags);
2233 		} else {
2234 			/*
2235 			 * Soft Reset Requested.
2236 			 */
2237 			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2238 			netdev_err(ndev,
2239 				   "Another function issued a reset to the chip. ISR value = %x\n",
2240 				   value);
2241 		}
2242 		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
2243 		spin_unlock(&qdev->adapter_lock);
2244 	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2245 		ql_disable_interrupts(qdev);
2246 		if (likely(napi_schedule_prep(&qdev->napi)))
2247 			__napi_schedule(&qdev->napi);
2248 	} else
2249 		return IRQ_NONE;
2250 
2251 	return IRQ_RETVAL(handled);
2252 }
2253 
2254 /*
2255  * Get the total number of segments needed for the given number of fragments.
2256  * This is necessary because outbound address lists (OAL) will be used when
2257  * more than two frags are given.  Each address list has 5 addr/len pairs.
2258  * The 5th pair in each OAL is used to point to the next OAL if more frags
2259  * are coming.  That is why the frags:segment count ratio is not linear.
2260  */
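/*
 * Worked example (3032 only): an skb with 4 fragments needs the headlen
 * segment plus 4 fragment segments plus 1 continuation entry chaining
 * into the first OAL, i.e. 4 + 2 = 6 segments, matching the table below.
 */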
2261 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
2262 {
2263 	if (qdev->device_id == QL3022_DEVICE_ID)
2264 		return 1;
2265 
2266 	if (frags <= 2)
2267 		return frags + 1;
2268 	else if (frags <= 6)
2269 		return frags + 2;
2270 	else if (frags <= 10)
2271 		return frags + 3;
2272 	else if (frags <= 14)
2273 		return frags + 4;
2274 	else if (frags <= 18)
2275 		return frags + 5;
2276 	return -1;
2277 }
2278 
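/*
 * Fill in the Tx checksum-offload fields of the outbound IOCB.  Only
 * the 3032 supports this; ql3xxx_send() calls it solely for that chip
 * when the stack requests CHECKSUM_PARTIAL.
 */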
2279 static void ql_hw_csum_setup(const struct sk_buff *skb,
2280 			     struct ob_mac_iocb_req *mac_iocb_ptr)
2281 {
2282 	const struct iphdr *ip = ip_hdr(skb);
2283 
2284 	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2285 	mac_iocb_ptr->ip_hdr_len = ip->ihl;
2286 
2287 	if (ip->protocol == IPPROTO_TCP) {
2288 		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2289 			OB_3032MAC_IOCB_REQ_IC;
2290 	} else {
2291 		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2292 			OB_3032MAC_IOCB_REQ_IC;
2293 	}
2294 
2295 }
2296 
2297 /*
2298  * Map the buffers for this transmit.
2299  * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
2300  */
2301 static int ql_send_map(struct ql3_adapter *qdev,
2302 				struct ob_mac_iocb_req *mac_iocb_ptr,
2303 				struct ql_tx_buf_cb *tx_cb,
2304 				struct sk_buff *skb)
2305 {
2306 	struct oal *oal;
2307 	struct oal_entry *oal_entry;
2308 	int len = skb_headlen(skb);
2309 	dma_addr_t map;
2310 	int err;
2311 	int completed_segs, i;
2312 	int seg_cnt, seg = 0;
2313 	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
2314 
2315 	seg_cnt = tx_cb->seg_count;
2316 	/*
2317 	 * Map the skb buffer first.
2318 	 */
2319 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
2320 
2321 	err = pci_dma_mapping_error(qdev->pdev, map);
2322 	if (err) {
2323 		netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
2324 			   err);
2325 
2326 		return NETDEV_TX_BUSY;
2327 	}
2328 
2329 	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2330 	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2331 	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2332 	oal_entry->len = cpu_to_le32(len);
2333 	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2334 	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
2335 	seg++;
2336 
2337 	if (seg_cnt == 1) {
2338 		/* Terminate the last segment. */
2339 		oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2340 		return NETDEV_TX_OK;
2341 	}
2342 	oal = tx_cb->oal;
2343 	for (completed_segs = 0;
2344 	     completed_segs < frag_cnt;
2345 	     completed_segs++, seg++) {
2346 		skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
2347 		oal_entry++;
2348 		/*
2349 		 * Check for continuation requirements.
2350 		 * It's strange but necessary.
2351 		 * Continuation entry points to outbound address list.
2352 		 */
2353 		if ((seg == 2 && seg_cnt > 3) ||
2354 		    (seg == 7 && seg_cnt > 8) ||
2355 		    (seg == 12 && seg_cnt > 13) ||
2356 		    (seg == 17 && seg_cnt > 18)) {
2357 			map = pci_map_single(qdev->pdev, oal,
2358 					     sizeof(struct oal),
2359 					     PCI_DMA_TODEVICE);
2360 
2361 			err = pci_dma_mapping_error(qdev->pdev, map);
2362 			if (err) {
2363 				netdev_err(qdev->ndev,
2364 					   "PCI mapping outbound address list with error: %d\n",
2365 					   err);
2366 				goto map_error;
2367 			}
2368 
2369 			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2370 			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2371 			oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2372 						     OAL_CONT_ENTRY);
2373 			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2374 			dma_unmap_len_set(&tx_cb->map[seg], maplen,
2375 					  sizeof(struct oal));
2376 			oal_entry = (struct oal_entry *)oal;
2377 			oal++;
2378 			seg++;
2379 		}
2380 
2381 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2382 				       DMA_TO_DEVICE);
2383 
2384 		err = dma_mapping_error(&qdev->pdev->dev, map);
2385 		if (err) {
2386 			netdev_err(qdev->ndev,
2387 				   "PCI mapping frags failed with error: %d\n",
2388 				   err);
2389 			goto map_error;
2390 		}
2391 
2392 		oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2393 		oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2394 		oal_entry->len = cpu_to_le32(skb_frag_size(frag));
2395 		dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2396 		dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
2397 	}
2398 	/* Terminate the last segment. */
2399 	oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2400 	return NETDEV_TX_OK;
2401 
2402 map_error:
2403 	/* A PCI mapping failed, so back out: walk the OAL entries and
2404 	 * pages that have already been mapped and unmap them to clean
2405 	 * up properly.
2406 	 */
2407 
2408 	seg = 1;
2409 	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2410 	oal = tx_cb->oal;
2411 	for (i = 0; i < completed_segs; i++, seg++) {
2412 		oal_entry++;
2413 
2414 		/*
2415 		 * Check for continuation requirements.
2416 		 * It's strange but necessary.
2417 		 */
2418 
2419 		if ((seg == 2 && seg_cnt > 3) ||
2420 		    (seg == 7 && seg_cnt > 8) ||
2421 		    (seg == 12 && seg_cnt > 13) ||
2422 		    (seg == 17 && seg_cnt > 18)) {
2423 			pci_unmap_single(qdev->pdev,
2424 				dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2425 				dma_unmap_len(&tx_cb->map[seg], maplen),
2426 				 PCI_DMA_TODEVICE);
2427 			oal++;
2428 			seg++;
2429 		}
2430 
2431 		pci_unmap_page(qdev->pdev,
2432 			       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2433 			       dma_unmap_len(&tx_cb->map[seg], maplen),
2434 			       PCI_DMA_TODEVICE);
2435 	}
2436 
2437 	pci_unmap_single(qdev->pdev,
2438 			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2439 			 dma_unmap_len(&tx_cb->map[0], maplen),
2440 			 PCI_DMA_TODEVICE);
2441 
2442 	return NETDEV_TX_BUSY;
2443 
2444 }
2445 
2446 /*
2447  * The difference between 3022 and 3032 sends:
2448  * 3022 only supports a simple single segment transmission.
2449  * 3032 supports checksumming and scatter/gather lists (fragments).
2450  * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2451  * in the IOCB plus a chain of outbound address lists (OAL) that
2452  * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
2453  * will be used to point to an OAL when more ALP entries are required.
2454  * The IOCB is always the top of the chain followed by one or more
2455  * OALs (when necessary).
2456  */
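/*
 * Worked example: a 10-fragment skb maps to 13 segments (headlen + 10
 * fragments + 2 continuations): 2 data ALPs plus a continuation in the
 * IOCB, then 4 data ALPs plus a continuation in the first OAL, then the
 * last 5 data ALPs in the second OAL.
 */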
2457 static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2458 			       struct net_device *ndev)
2459 {
2460 	struct ql3_adapter *qdev = netdev_priv(ndev);
2461 	struct ql3xxx_port_registers __iomem *port_regs =
2462 			qdev->mem_map_registers;
2463 	struct ql_tx_buf_cb *tx_cb;
2464 	u32 tot_len = skb->len;
2465 	struct ob_mac_iocb_req *mac_iocb_ptr;
2466 
2467 	if (unlikely(atomic_read(&qdev->tx_count) < 2))
2468 		return NETDEV_TX_BUSY;
2469 
2470 	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2471 	tx_cb->seg_count = ql_get_seg_count(qdev,
2472 					     skb_shinfo(skb)->nr_frags);
2473 	if (tx_cb->seg_count == -1) {
2474 		netdev_err(ndev, "%s: invalid segment count!\n", __func__);
2475 		return NETDEV_TX_OK;
2476 	}
2477 
2478 	mac_iocb_ptr = tx_cb->queue_entry;
2479 	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2480 	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2481 	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2482 	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2483 	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2484 	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2485 	tx_cb->skb = skb;
2486 	if (qdev->device_id == QL3032_DEVICE_ID &&
2487 	    skb->ip_summed == CHECKSUM_PARTIAL)
2488 		ql_hw_csum_setup(skb, mac_iocb_ptr);
2489 
2490 	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
2491 		netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
2492 		return NETDEV_TX_BUSY;
2493 	}
2494 
2495 	wmb();
2496 	qdev->req_producer_index++;
2497 	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2498 		qdev->req_producer_index = 0;
2499 	wmb();
2500 	ql_write_common_reg_l(qdev,
2501 			    &port_regs->CommonRegs.reqQProducerIndex,
2502 			    qdev->req_producer_index);
2503 
2504 	netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
2505 		     "tx queued, slot %d, len %d\n",
2506 		     qdev->req_producer_index, skb->len);
2507 
2508 	atomic_dec(&qdev->tx_count);
2509 	return NETDEV_TX_OK;
2510 }
2511 
2512 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2513 {
2514 	qdev->req_q_size =
2515 	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2516 
2517 	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2518 
2519 	/* The barrier is required to ensure that the request and response
2520 	 * queue address writes reach the registers.
2521 	 */
2522 	wmb();
2523 
2524 	qdev->req_q_virt_addr =
2525 	    pci_alloc_consistent(qdev->pdev,
2526 				 (size_t) qdev->req_q_size,
2527 				 &qdev->req_q_phy_addr);
2528 
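	/*
	 * Sanity check: the low address bits must not overlap the queue
	 * size, i.e. the queue is assumed to require natural alignment.
	 */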
2529 	if ((qdev->req_q_virt_addr == NULL) ||
2530 	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2531 		netdev_err(qdev->ndev, "reqQ failed\n");
2532 		return -ENOMEM;
2533 	}
2534 
2535 	qdev->rsp_q_virt_addr =
2536 	    pci_alloc_consistent(qdev->pdev,
2537 				 (size_t) qdev->rsp_q_size,
2538 				 &qdev->rsp_q_phy_addr);
2539 
2540 	if ((qdev->rsp_q_virt_addr == NULL) ||
2541 	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2542 		netdev_err(qdev->ndev, "rspQ allocation failed\n");
2543 		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2544 				    qdev->req_q_virt_addr,
2545 				    qdev->req_q_phy_addr);
2546 		return -ENOMEM;
2547 	}
2548 
2549 	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2550 
2551 	return 0;
2552 }
2553 
2554 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2555 {
2556 	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
2557 		netdev_info(qdev->ndev, "Already done\n");
2558 		return;
2559 	}
2560 
2561 	pci_free_consistent(qdev->pdev,
2562 			    qdev->req_q_size,
2563 			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2564 
2565 	qdev->req_q_virt_addr = NULL;
2566 
2567 	pci_free_consistent(qdev->pdev,
2568 			    qdev->rsp_q_size,
2569 			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2570 
2571 	qdev->rsp_q_virt_addr = NULL;
2572 
2573 	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2574 }
2575 
2576 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2577 {
2578 	/* Create Large Buffer Queue */
2579 	qdev->lrg_buf_q_size =
2580 		qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2581 	if (qdev->lrg_buf_q_size < PAGE_SIZE)
2582 		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2583 	else
2584 		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2585 
2586 	qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
2587 				      sizeof(struct ql_rcv_buf_cb),
2588 				      GFP_KERNEL);
2589 	if (qdev->lrg_buf == NULL)
2590 		return -ENOMEM;
2591 
2592 	qdev->lrg_buf_q_alloc_virt_addr =
2593 		pci_alloc_consistent(qdev->pdev,
2594 				     qdev->lrg_buf_q_alloc_size,
2595 				     &qdev->lrg_buf_q_alloc_phy_addr);
2596 
2597 	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2598 		netdev_err(qdev->ndev, "lBufQ failed\n");
2599 		return -ENOMEM;
2600 	}
2601 	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2602 	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2603 
2604 	/* Create Small Buffer Queue */
2605 	qdev->small_buf_q_size =
2606 		NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2607 	if (qdev->small_buf_q_size < PAGE_SIZE)
2608 		qdev->small_buf_q_alloc_size = PAGE_SIZE;
2609 	else
2610 		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2611 
2612 	qdev->small_buf_q_alloc_virt_addr =
2613 		pci_alloc_consistent(qdev->pdev,
2614 				     qdev->small_buf_q_alloc_size,
2615 				     &qdev->small_buf_q_alloc_phy_addr);
2616 
2617 	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2618 		netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2619 		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2620 				    qdev->lrg_buf_q_alloc_virt_addr,
2621 				    qdev->lrg_buf_q_alloc_phy_addr);
2622 		return -ENOMEM;
2623 	}
2624 
2625 	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2626 	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2627 	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2628 	return 0;
2629 }
2630 
2631 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2632 {
2633 	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2634 		netdev_info(qdev->ndev, "Already done\n");
2635 		return;
2636 	}
2637 	kfree(qdev->lrg_buf);
2638 	pci_free_consistent(qdev->pdev,
2639 			    qdev->lrg_buf_q_alloc_size,
2640 			    qdev->lrg_buf_q_alloc_virt_addr,
2641 			    qdev->lrg_buf_q_alloc_phy_addr);
2642 
2643 	qdev->lrg_buf_q_virt_addr = NULL;
2644 
2645 	pci_free_consistent(qdev->pdev,
2646 			    qdev->small_buf_q_alloc_size,
2647 			    qdev->small_buf_q_alloc_virt_addr,
2648 			    qdev->small_buf_q_alloc_phy_addr);
2649 
2650 	qdev->small_buf_q_virt_addr = NULL;
2651 
2652 	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2653 }
2654 
2655 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2656 {
2657 	int i;
2658 	struct bufq_addr_element *small_buf_q_entry;
2659 
2660 	/* Currently we allocate one chunk of memory and use it for all small buffers */
2661 	qdev->small_buf_total_size =
2662 		(QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2663 		 QL_SMALL_BUFFER_SIZE);
2664 
2665 	qdev->small_buf_virt_addr =
2666 		pci_alloc_consistent(qdev->pdev,
2667 				     qdev->small_buf_total_size,
2668 				     &qdev->small_buf_phy_addr);
2669 
2670 	if (qdev->small_buf_virt_addr == NULL) {
2671 		netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2672 		return -ENOMEM;
2673 	}
2674 
2675 	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2676 	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2677 
2678 	small_buf_q_entry = qdev->small_buf_q_virt_addr;
2679 
2680 	/* Initialize the small buffer queue. */
2681 	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2682 		small_buf_q_entry->addr_high =
2683 		    cpu_to_le32(qdev->small_buf_phy_addr_high);
2684 		small_buf_q_entry->addr_low =
2685 		    cpu_to_le32(qdev->small_buf_phy_addr_low +
2686 				(i * QL_SMALL_BUFFER_SIZE));
2687 		small_buf_q_entry++;
2688 	}
2689 	qdev->small_buf_index = 0;
2690 	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
2691 	return 0;
2692 }
2693 
2694 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2695 {
2696 	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
2697 		netdev_info(qdev->ndev, "Already done\n");
2698 		return;
2699 	}
2700 	if (qdev->small_buf_virt_addr != NULL) {
2701 		pci_free_consistent(qdev->pdev,
2702 				    qdev->small_buf_total_size,
2703 				    qdev->small_buf_virt_addr,
2704 				    qdev->small_buf_phy_addr);
2705 
2706 		qdev->small_buf_virt_addr = NULL;
2707 	}
2708 }
2709 
2710 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2711 {
2712 	int i = 0;
2713 	struct ql_rcv_buf_cb *lrg_buf_cb;
2714 
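	/*
	 * Buffers are populated in index order by ql_alloc_large_buffers(),
	 * so the first entry without an skb marks the end of the used range.
	 */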
2715 	for (i = 0; i < qdev->num_large_buffers; i++) {
2716 		lrg_buf_cb = &qdev->lrg_buf[i];
2717 		if (lrg_buf_cb->skb) {
2718 			dev_kfree_skb(lrg_buf_cb->skb);
2719 			pci_unmap_single(qdev->pdev,
2720 					 dma_unmap_addr(lrg_buf_cb, mapaddr),
2721 					 dma_unmap_len(lrg_buf_cb, maplen),
2722 					 PCI_DMA_FROMDEVICE);
2723 			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2724 		} else {
2725 			break;
2726 		}
2727 	}
2728 }
2729 
2730 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2731 {
2732 	int i;
2733 	struct ql_rcv_buf_cb *lrg_buf_cb;
2734 	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2735 
2736 	for (i = 0; i < qdev->num_large_buffers; i++) {
2737 		lrg_buf_cb = &qdev->lrg_buf[i];
2738 		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2739 		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2740 		buf_addr_ele++;
2741 	}
2742 	qdev->lrg_buf_index = 0;
2743 	qdev->lrg_buf_skb_check = 0;
2744 }
2745 
2746 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2747 {
2748 	int i;
2749 	struct ql_rcv_buf_cb *lrg_buf_cb;
2750 	struct sk_buff *skb;
2751 	dma_addr_t map;
2752 	int err;
2753 
2754 	for (i = 0; i < qdev->num_large_buffers; i++) {
2755 		lrg_buf_cb = &qdev->lrg_buf[i];
2756 		memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2757 
2758 		skb = netdev_alloc_skb(qdev->ndev,
2759 				       qdev->lrg_buffer_len);
2760 		if (unlikely(!skb)) {
2761 			/* Better luck next round */
2762 			netdev_err(qdev->ndev,
2763 				   "large buff alloc failed for %d bytes at index %d\n",
2764 				   qdev->lrg_buffer_len, i);
2765 			ql_free_large_buffers(qdev);
2766 			return -ENOMEM;
2767 		} else {
2768 			lrg_buf_cb->index = i;
2769 			/*
2770 			 * We save some space to copy the ethhdr from the
2771 			 * first buffer.
2772 			 */
2773 			skb_reserve(skb, QL_HEADER_SPACE);
2774 			map = pci_map_single(qdev->pdev,
2775 					     skb->data,
2776 					     qdev->lrg_buffer_len -
2777 					     QL_HEADER_SPACE,
2778 					     PCI_DMA_FROMDEVICE);
2779 
2780 			err = pci_dma_mapping_error(qdev->pdev, map);
2781 			if (err) {
2782 				netdev_err(qdev->ndev,
2783 					   "PCI mapping failed with error: %d\n",
2784 					   err);
2785 				dev_kfree_skb_irq(skb);
2786 				ql_free_large_buffers(qdev);
2787 				return -ENOMEM;
2788 			}
2789 
2790 			lrg_buf_cb->skb = skb;
2791 			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2792 			dma_unmap_len_set(lrg_buf_cb, maplen,
2793 					  qdev->lrg_buffer_len -
2794 					  QL_HEADER_SPACE);
2795 			lrg_buf_cb->buf_phy_addr_low =
2796 			    cpu_to_le32(LS_64BITS(map));
2797 			lrg_buf_cb->buf_phy_addr_high =
2798 			    cpu_to_le32(MS_64BITS(map));
2799 		}
2800 	}
2801 	return 0;
2802 }
2803 
2804 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2805 {
2806 	struct ql_tx_buf_cb *tx_cb;
2807 	int i;
2808 
2809 	tx_cb = &qdev->tx_buf[0];
2810 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2811 		kfree(tx_cb->oal);
2812 		tx_cb->oal = NULL;
2813 		tx_cb++;
2814 	}
2815 }
2816 
2817 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2818 {
2819 	struct ql_tx_buf_cb *tx_cb;
2820 	int i;
2821 	struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
2822 
2823 	/* Create free list of transmit buffers */
2824 	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2825 
2826 		tx_cb = &qdev->tx_buf[i];
2827 		tx_cb->skb = NULL;
2828 		tx_cb->queue_entry = req_q_curr;
2829 		req_q_curr++;
2830 		tx_cb->oal = kmalloc(512, GFP_KERNEL);
2831 		if (tx_cb->oal == NULL)
2832 			return -ENOMEM;
2833 	}
2834 	return 0;
2835 }
2836 
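/*
 * Allocate all Rx/Tx memory resources in dependency order: shadow
 * registers, request/response queues, buffer queues, small and large
 * buffers, then the transmit free list.  On failure, unwind whatever
 * was allocated via the error labels at the bottom.
 */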
2837 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2838 {
2839 	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2840 		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2841 		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2842 	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2843 		/*
2844 		 * Bigger buffers, so fewer of them.
2845 		 */
2846 		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2847 		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2848 	} else {
2849 		netdev_err(qdev->ndev, "Invalid mtu size: %d.  Only %d and %d are accepted.\n",
2850 			   qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
2851 		return -ENOMEM;
2852 	}
2853 	qdev->num_large_buffers =
2854 		qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2855 	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2856 	qdev->max_frame_size =
2857 		(qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2858 
2859 	/*
2860 	 * First allocate a page of shared memory and use it for shadow
2861 	 * locations of Network Request Queue Consumer Address Register and
2862 	 * Network Completion Queue Producer Index Register
2863 	 */
2864 	qdev->shadow_reg_virt_addr =
2865 		pci_alloc_consistent(qdev->pdev,
2866 				     PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2867 
2868 	if (qdev->shadow_reg_virt_addr != NULL) {
2869 		qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
2870 		qdev->req_consumer_index_phy_addr_high =
2871 			MS_64BITS(qdev->shadow_reg_phy_addr);
2872 		qdev->req_consumer_index_phy_addr_low =
2873 			LS_64BITS(qdev->shadow_reg_phy_addr);
2874 
2875 		qdev->prsp_producer_index =
2876 			(__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2877 		qdev->rsp_producer_index_phy_addr_high =
2878 			qdev->req_consumer_index_phy_addr_high;
2879 		qdev->rsp_producer_index_phy_addr_low =
2880 			qdev->req_consumer_index_phy_addr_low + 8;
2881 	} else {
2882 		netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
2883 		return -ENOMEM;
2884 	}
2885 
2886 	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2887 		netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
2888 		goto err_req_rsp;
2889 	}
2890 
2891 	if (ql_alloc_buffer_queues(qdev) != 0) {
2892 		netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
2893 		goto err_buffer_queues;
2894 	}
2895 
2896 	if (ql_alloc_small_buffers(qdev) != 0) {
2897 		netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
2898 		goto err_small_buffers;
2899 	}
2900 
2901 	if (ql_alloc_large_buffers(qdev) != 0) {
2902 		netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
2903 		goto err_small_buffers;
2904 	}
2905 
2906 	/* Initialize the large buffer queue. */
2907 	ql_init_large_buffers(qdev);
2908 	if (ql_create_send_free_list(qdev))
2909 		goto err_free_list;
2910 
2911 	qdev->rsp_current = qdev->rsp_q_virt_addr;
2912 
2913 	return 0;
2914 err_free_list:
2915 	ql_free_send_free_list(qdev);
2916 err_small_buffers:
2917 	ql_free_buffer_queues(qdev);
2918 err_buffer_queues:
2919 	ql_free_net_req_rsp_queues(qdev);
2920 err_req_rsp:
2921 	pci_free_consistent(qdev->pdev,
2922 			    PAGE_SIZE,
2923 			    qdev->shadow_reg_virt_addr,
2924 			    qdev->shadow_reg_phy_addr);
2925 
2926 	return -ENOMEM;
2927 }
2928 
2929 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2930 {
2931 	ql_free_send_free_list(qdev);
2932 	ql_free_large_buffers(qdev);
2933 	ql_free_small_buffers(qdev);
2934 	ql_free_buffer_queues(qdev);
2935 	ql_free_net_req_rsp_queues(qdev);
2936 	if (qdev->shadow_reg_virt_addr != NULL) {
2937 		pci_free_consistent(qdev->pdev,
2938 				    PAGE_SIZE,
2939 				    qdev->shadow_reg_virt_addr,
2940 				    qdev->shadow_reg_phy_addr);
2941 		qdev->shadow_reg_virt_addr = NULL;
2942 	}
2943 }
2944 
2945 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2946 {
2947 	struct ql3xxx_local_ram_registers __iomem *local_ram =
2948 	    (void __iomem *)qdev->mem_map_registers;
2949 
2950 	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2951 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2952 			 2) << 4))
2953 		return -1;
2954 
2955 	ql_write_page2_reg(qdev,
2956 			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2957 
2958 	ql_write_page2_reg(qdev,
2959 			   &local_ram->maxBufletCount,
2960 			   qdev->nvram_data.bufletCount);
2961 
2962 	ql_write_page2_reg(qdev,
2963 			   &local_ram->freeBufletThresholdLow,
2964 			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2965 			   (qdev->nvram_data.tcpWindowThreshold0));
2966 
2967 	ql_write_page2_reg(qdev,
2968 			   &local_ram->freeBufletThresholdHigh,
2969 			   qdev->nvram_data.tcpWindowThreshold50);
2970 
2971 	ql_write_page2_reg(qdev,
2972 			   &local_ram->ipHashTableBase,
2973 			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
2974 			   qdev->nvram_data.ipHashTableBaseLo);
2975 	ql_write_page2_reg(qdev,
2976 			   &local_ram->ipHashTableCount,
2977 			   qdev->nvram_data.ipHashTableSize);
2978 	ql_write_page2_reg(qdev,
2979 			   &local_ram->tcpHashTableBase,
2980 			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2981 			   qdev->nvram_data.tcpHashTableBaseLo);
2982 	ql_write_page2_reg(qdev,
2983 			   &local_ram->tcpHashTableCount,
2984 			   qdev->nvram_data.tcpHashTableSize);
2985 	ql_write_page2_reg(qdev,
2986 			   &local_ram->ncbBase,
2987 			   (qdev->nvram_data.ncbTableBaseHi << 16) |
2988 			   qdev->nvram_data.ncbTableBaseLo);
2989 	ql_write_page2_reg(qdev,
2990 			   &local_ram->maxNcbCount,
2991 			   qdev->nvram_data.ncbTableSize);
2992 	ql_write_page2_reg(qdev,
2993 			   &local_ram->drbBase,
2994 			   (qdev->nvram_data.drbTableBaseHi << 16) |
2995 			   qdev->nvram_data.drbTableBaseLo);
2996 	ql_write_page2_reg(qdev,
2997 			   &local_ram->maxDrbCount,
2998 			   qdev->nvram_data.drbTableSize);
2999 	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3000 	return 0;
3001 }
3002 
3003 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3004 {
3005 	u32 value;
3006 	struct ql3xxx_port_registers __iomem *port_regs =
3007 		qdev->mem_map_registers;
3008 	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3009 	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3010 		(void __iomem *)port_regs;
3011 	u32 delay = 10;
3012 	int status = 0;
3013 
3014 	if (ql_mii_setup(qdev))
3015 		return -1;
3016 
3017 	/* Bring the PHY out of reset */
3018 	ql_write_common_reg(qdev, spir,
3019 			    (ISP_SERIAL_PORT_IF_WE |
3020 			     (ISP_SERIAL_PORT_IF_WE << 16)));
3021 	/* Give the PHY time to come out of reset. */
3022 	mdelay(100);
3023 	qdev->port_link_state = LS_DOWN;
3024 	netif_carrier_off(qdev->ndev);
3025 
3026 	/* V2 chip fix for ARS-39168. */
3027 	ql_write_common_reg(qdev, spir,
3028 			    (ISP_SERIAL_PORT_IF_SDE |
3029 			     (ISP_SERIAL_PORT_IF_SDE << 16)));
3030 
3031 	/* Request Queue Registers */
3032 	*((u32 *)(qdev->preq_consumer_index)) = 0;
3033 	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
3034 	qdev->req_producer_index = 0;
3035 
3036 	ql_write_page1_reg(qdev,
3037 			   &hmem_regs->reqConsumerIndexAddrHigh,
3038 			   qdev->req_consumer_index_phy_addr_high);
3039 	ql_write_page1_reg(qdev,
3040 			   &hmem_regs->reqConsumerIndexAddrLow,
3041 			   qdev->req_consumer_index_phy_addr_low);
3042 
3043 	ql_write_page1_reg(qdev,
3044 			   &hmem_regs->reqBaseAddrHigh,
3045 			   MS_64BITS(qdev->req_q_phy_addr));
3046 	ql_write_page1_reg(qdev,
3047 			   &hmem_regs->reqBaseAddrLow,
3048 			   LS_64BITS(qdev->req_q_phy_addr));
3049 	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3050 
3051 	/* Response Queue Registers */
3052 	*((__le16 *) (qdev->prsp_producer_index)) = 0;
3053 	qdev->rsp_consumer_index = 0;
3054 	qdev->rsp_current = qdev->rsp_q_virt_addr;
3055 
3056 	ql_write_page1_reg(qdev,
3057 			   &hmem_regs->rspProducerIndexAddrHigh,
3058 			   qdev->rsp_producer_index_phy_addr_high);
3059 
3060 	ql_write_page1_reg(qdev,
3061 			   &hmem_regs->rspProducerIndexAddrLow,
3062 			   qdev->rsp_producer_index_phy_addr_low);
3063 
3064 	ql_write_page1_reg(qdev,
3065 			   &hmem_regs->rspBaseAddrHigh,
3066 			   MS_64BITS(qdev->rsp_q_phy_addr));
3067 
3068 	ql_write_page1_reg(qdev,
3069 			   &hmem_regs->rspBaseAddrLow,
3070 			   LS_64BITS(qdev->rsp_q_phy_addr));
3071 
3072 	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3073 
3074 	/* Large Buffer Queue */
3075 	ql_write_page1_reg(qdev,
3076 			   &hmem_regs->rxLargeQBaseAddrHigh,
3077 			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
3078 
3079 	ql_write_page1_reg(qdev,
3080 			   &hmem_regs->rxLargeQBaseAddrLow,
3081 			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
3082 
3083 	ql_write_page1_reg(qdev,
3084 			   &hmem_regs->rxLargeQLength,
3085 			   qdev->num_lbufq_entries);
3086 
3087 	ql_write_page1_reg(qdev,
3088 			   &hmem_regs->rxLargeBufferLength,
3089 			   qdev->lrg_buffer_len);
3090 
3091 	/* Small Buffer Queue */
3092 	ql_write_page1_reg(qdev,
3093 			   &hmem_regs->rxSmallQBaseAddrHigh,
3094 			   MS_64BITS(qdev->small_buf_q_phy_addr));
3095 
3096 	ql_write_page1_reg(qdev,
3097 			   &hmem_regs->rxSmallQBaseAddrLow,
3098 			   LS_64BITS(qdev->small_buf_q_phy_addr));
3099 
3100 	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3101 	ql_write_page1_reg(qdev,
3102 			   &hmem_regs->rxSmallBufferLength,
3103 			   QL_SMALL_BUFFER_SIZE);
3104 
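	/*
	 * Prime the buffer-queue bookkeeping before handing the rings to
	 * the chip: the producer indexes start one entry behind the end
	 * (seemingly so the first refill wraps to entry 0) and the release
	 * counters start at 8, half the 16-buffer refill threshold.
	 */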
3105 	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3106 	qdev->small_buf_release_cnt = 8;
3107 	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3108 	qdev->lrg_buf_release_cnt = 8;
3109 	qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
3110 	qdev->small_buf_index = 0;
3111 	qdev->lrg_buf_index = 0;
3112 	qdev->lrg_buf_free_count = 0;
3113 	qdev->lrg_buf_free_head = NULL;
3114 	qdev->lrg_buf_free_tail = NULL;
3115 
3116 	ql_write_common_reg(qdev,
3117 			    &port_regs->CommonRegs.
3118 			    rxSmallQProducerIndex,
3119 			    qdev->small_buf_q_producer_index);
3120 	ql_write_common_reg(qdev,
3121 			    &port_regs->CommonRegs.
3122 			    rxLargeQProducerIndex,
3123 			    qdev->lrg_buf_q_producer_index);
3124 
3125 	/*
3126 	 * Find out if the chip has already been initialized.  If it has, then
3127 	 * we skip some of the initialization.
3128 	 */
3129 	clear_bit(QL_LINK_MASTER, &qdev->flags);
3130 	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3131 	if ((value & PORT_STATUS_IC) == 0) {
3132 
3133 		/* Chip has not been configured yet, so let it rip. */
3134 		if (ql_init_misc_registers(qdev)) {
3135 			status = -1;
3136 			goto out;
3137 		}
3138 
3139 		value = qdev->nvram_data.tcpMaxWindowSize;
3140 		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3141 
3142 		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3143 
3144 		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3145 				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3146 				 * 2) << 13)) {
3147 			status = -1;
3148 			goto out;
3149 		}
3150 		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3151 		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3152 				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3153 				     16) | (INTERNAL_CHIP_SD |
3154 					    INTERNAL_CHIP_WE)));
3155 		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3156 	}
3157 
3158 	if (qdev->mac_index)
3159 		ql_write_page0_reg(qdev,
3160 				   &port_regs->mac1MaxFrameLengthReg,
3161 				   qdev->max_frame_size);
3162 	else
3163 		ql_write_page0_reg(qdev,
3164 				   &port_regs->mac0MaxFrameLengthReg,
3165 				   qdev->max_frame_size);
3166 
3167 	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3168 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3169 			 2) << 7)) {
3170 		status = -1;
3171 		goto out;
3172 	}
3173 
3174 	PHY_Setup(qdev);
3175 	ql_init_scan_mode(qdev);
3176 	ql_get_phy_owner(qdev);
3177 
3178 	/* Load the MAC Configuration */
3179 
3180 	/* Program lower 32 bits of the MAC address */
3181 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3182 			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3183 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3184 			   ((qdev->ndev->dev_addr[2] << 24)
3185 			    | (qdev->ndev->dev_addr[3] << 16)
3186 			    | (qdev->ndev->dev_addr[4] << 8)
3187 			    | qdev->ndev->dev_addr[5]));
3188 
3189 	/* Program top 16 bits of the MAC address */
3190 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3191 			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3192 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3193 			   ((qdev->ndev->dev_addr[0] << 8)
3194 			    | qdev->ndev->dev_addr[1]));
3195 
3196 	/* Enable Primary MAC */
3197 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3198 			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3199 			    MAC_ADDR_INDIRECT_PTR_REG_PE));
3200 
3201 	/* Clear Primary and Secondary IP addresses */
3202 	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3203 			   ((IP_ADDR_INDEX_REG_MASK << 16) |
3204 			    (qdev->mac_index << 2)));
3205 	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3206 
3207 	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3208 			   ((IP_ADDR_INDEX_REG_MASK << 16) |
3209 			    ((qdev->mac_index << 2) + 1)));
3210 	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3211 
3212 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3213 
3214 	/* Indicate Configuration Complete */
3215 	ql_write_page0_reg(qdev,
3216 			   &port_regs->portControl,
3217 			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3218 
3219 	do {
3220 		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3221 		if (value & PORT_STATUS_IC)
3222 			break;
3223 		spin_unlock_irq(&qdev->hw_lock);
3224 		msleep(500);
3225 		spin_lock_irq(&qdev->hw_lock);
3226 	} while (--delay);
3227 
3228 	if (delay == 0) {
3229 		netdev_err(qdev->ndev, "Hw Initialization timeout\n");
3230 		status = -1;
3231 		goto out;
3232 	}
3233 
3234 	/* Enable Ethernet Function */
3235 	if (qdev->device_id == QL3032_DEVICE_ID) {
3236 		value =
3237 		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3238 		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3239 			QL3032_PORT_CONTROL_ET);
3240 		ql_write_page0_reg(qdev, &port_regs->functionControl,
3241 				   ((value << 16) | value));
3242 	} else {
3243 		value =
3244 		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3245 		     PORT_CONTROL_HH);
3246 		ql_write_page0_reg(qdev, &port_regs->portControl,
3247 				   ((value << 16) | value));
3248 	}
3249 
3250 
3251 out:
3252 	return status;
3253 }
3254 
3255 /*
3256  * Caller holds hw_lock.
3257  */
3258 static int ql_adapter_reset(struct ql3_adapter *qdev)
3259 {
3260 	struct ql3xxx_port_registers __iomem *port_regs =
3261 		qdev->mem_map_registers;
3262 	int status = 0;
3263 	u16 value;
3264 	int max_wait_time;
3265 
3266 	set_bit(QL_RESET_ACTIVE, &qdev->flags);
3267 	clear_bit(QL_RESET_DONE, &qdev->flags);
3268 
3269 	/*
3270 	 * Issue soft reset to chip.
3271 	 */
3272 	netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3273 	ql_write_common_reg(qdev,
3274 			    &port_regs->CommonRegs.ispControlStatus,
3275 			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3276 
3277 	/* Wait up to 5 seconds for the reset to complete. */
3278 	netdev_printk(KERN_DEBUG, qdev->ndev,
3279 		      "Waiting up to 5 seconds for reset to complete\n");
3280 
3281 	/* Wait until the firmware tells us the Soft Reset is done */
3282 	max_wait_time = 5;
3283 	do {
3284 		value =
3285 		    ql_read_common_reg(qdev,
3286 				       &port_regs->CommonRegs.ispControlStatus);
3287 		if ((value & ISP_CONTROL_SR) == 0)
3288 			break;
3289 
3290 		mdelay(1000);
3291 	} while ((--max_wait_time));
3292 
3293 	/*
3294 	 * Also, make sure that the Network Reset Interrupt bit has been
3295 	 * cleared after the soft reset has taken place.
3296 	 */
3297 	value =
3298 	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3299 	if (value & ISP_CONTROL_RI) {
3300 		netdev_printk(KERN_DEBUG, qdev->ndev,
3301 			      "clearing RI after reset\n");
3302 		ql_write_common_reg(qdev,
3303 				    &port_regs->CommonRegs.
3304 				    ispControlStatus,
3305 				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3306 	}
3307 
3308 	if (max_wait_time == 0) {
3309 		/* Issue Force Soft Reset */
3310 		ql_write_common_reg(qdev,
3311 				    &port_regs->CommonRegs.
3312 				    ispControlStatus,
3313 				    ((ISP_CONTROL_FSR << 16) |
3314 				     ISP_CONTROL_FSR));
3315 		/*
3316 		 * Wait until the firmware tells us the Force Soft Reset is
3317 		 * done
3318 		 */
3319 		max_wait_time = 5;
3320 		do {
3321 			value = ql_read_common_reg(qdev,
3322 						   &port_regs->CommonRegs.
3323 						   ispControlStatus);
3324 			if ((value & ISP_CONTROL_FSR) == 0)
3325 				break;
3326 			mdelay(1000);
3327 		} while ((--max_wait_time));
3328 	}
3329 	if (max_wait_time == 0)
3330 		status = 1;
3331 
3332 	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3333 	set_bit(QL_RESET_DONE, &qdev->flags);
3334 	return status;
3335 }
3336 
3337 static void ql_set_mac_info(struct ql3_adapter *qdev)
3338 {
3339 	struct ql3xxx_port_registers __iomem *port_regs =
3340 		qdev->mem_map_registers;
3341 	u32 value, port_status;
3342 	u8 func_number;
3343 
3344 	/* Get the function number */
3345 	value =
3346 	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3347 	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3348 	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3349 	switch (value & ISP_CONTROL_FN_MASK) {
3350 	case ISP_CONTROL_FN0_NET:
3351 		qdev->mac_index = 0;
3352 		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3353 		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3354 		qdev->PHYAddr = PORT0_PHY_ADDRESS;
3355 		if (port_status & PORT_STATUS_SM0)
3356 			set_bit(QL_LINK_OPTICAL, &qdev->flags);
3357 		else
3358 			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3359 		break;
3360 
3361 	case ISP_CONTROL_FN1_NET:
3362 		qdev->mac_index = 1;
3363 		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3364 		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3365 		qdev->PHYAddr = PORT1_PHY_ADDRESS;
3366 		if (port_status & PORT_STATUS_SM1)
3367 			set_bit(QL_LINK_OPTICAL, &qdev->flags);
3368 		else
3369 			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
3370 		break;
3371 
3372 	case ISP_CONTROL_FN0_SCSI:
3373 	case ISP_CONTROL_FN1_SCSI:
3374 	default:
3375 		netdev_printk(KERN_DEBUG, qdev->ndev,
3376 			      "Invalid function number, ispControlStatus = 0x%x\n",
3377 			      value);
3378 		break;
3379 	}
3380 	qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
3381 }
3382 
3383 static void ql_display_dev_info(struct net_device *ndev)
3384 {
3385 	struct ql3_adapter *qdev = netdev_priv(ndev);
3386 	struct pci_dev *pdev = qdev->pdev;
3387 
3388 	netdev_info(ndev,
3389 		    "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
3390 		    DRV_NAME, qdev->index, qdev->chip_rev_id,
3391 		    qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
3392 		    qdev->pci_slot);
3393 	netdev_info(ndev, "%s Interface\n",
3394 		test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
3395 
3396 	/*
3397 	 * Print PCI bus width/type.
3398 	 */
3399 	netdev_info(ndev, "Bus interface is %s %s\n",
3400 		    ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3401 		    ((qdev->pci_x) ? "PCI-X" : "PCI"));
3402 
3403 	netdev_info(ndev, "mem  IO base address adjusted = 0x%p\n",
3404 		    qdev->mem_map_registers);
3405 	netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
3406 
3407 	netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
3408 }
3409 
3410 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3411 {
3412 	struct net_device *ndev = qdev->ndev;
3413 	int retval = 0;
3414 
3415 	netif_stop_queue(ndev);
3416 	netif_carrier_off(ndev);
3417 
3418 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3419 	clear_bit(QL_LINK_MASTER, &qdev->flags);
3420 
3421 	ql_disable_interrupts(qdev);
3422 
3423 	free_irq(qdev->pdev->irq, ndev);
3424 
3425 	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3426 		netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
3427 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3428 		pci_disable_msi(qdev->pdev);
3429 	}
3430 
3431 	del_timer_sync(&qdev->adapter_timer);
3432 
3433 	napi_disable(&qdev->napi);
3434 
3435 	if (do_reset) {
3436 		int soft_reset;
3437 		unsigned long hw_flags;
3438 
3439 		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3440 		if (ql_wait_for_drvr_lock(qdev)) {
3441 			soft_reset = ql_adapter_reset(qdev);
3442 			if (soft_reset) {
3443 				netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
3444 					   qdev->index);
3445 			}
3446 			netdev_err(ndev,
3447 				   "Releasing driver lock via chip reset\n");
3448 		} else {
3449 			netdev_err(ndev,
3450 				   "Could not acquire driver lock to do reset!\n");
3451 			retval = -1;
3452 		}
3453 		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3454 	}
3455 	ql_free_mem_resources(qdev);
3456 	return retval;
3457 }
3458 
3459 static int ql_adapter_up(struct ql3_adapter *qdev)
3460 {
3461 	struct net_device *ndev = qdev->ndev;
3462 	int err;
3463 	unsigned long irq_flags = IRQF_SHARED;
3464 	unsigned long hw_flags;
3465 
3466 	if (ql_alloc_mem_resources(qdev)) {
3467 		netdev_err(ndev, "Unable to  allocate buffers\n");
3468 		return -ENOMEM;
3469 	}
3470 
3471 	if (qdev->msi) {
3472 		if (pci_enable_msi(qdev->pdev)) {
3473 			netdev_err(ndev,
3474 				   "User requested MSI, but MSI failed to initialize.  Continuing without MSI.\n");
3475 			qdev->msi = 0;
3476 		} else {
3477 			netdev_info(ndev, "MSI Enabled...\n");
3478 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3479 			irq_flags &= ~IRQF_SHARED;
3480 		}
3481 	}
3482 
3483 	err = request_irq(qdev->pdev->irq, ql3xxx_isr,
3484 			  irq_flags, ndev->name, ndev);
3485 	if (err) {
3486 		netdev_err(ndev,
3487 			   "Failed to reserve interrupt %d - already in use\n",
3488 			   qdev->pdev->irq);
3489 		goto err_irq;
3490 	}
3491 
3492 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3493 
3494 	if (!ql_wait_for_drvr_lock(qdev)) {
3495 		netdev_err(ndev, "Could not acquire driver lock\n");
3496 		err = -ENODEV;
3497 		goto err_lock;
3498 	}
3499 
3500 	err = ql_adapter_initialize(qdev);
3501 	if (err) {
3502 		netdev_err(ndev, "Unable to initialize adapter\n");
3503 		goto err_init;
3504 	}
3505 	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3506 
3507 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3508 
3509 	set_bit(QL_ADAPTER_UP, &qdev->flags);
3510 
3511 	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3512 
3513 	napi_enable(&qdev->napi);
3514 	ql_enable_interrupts(qdev);
3515 	return 0;
3516 
3517 err_init:
3518 	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3519 err_lock:
3520 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3521 	free_irq(qdev->pdev->irq, ndev);
3522 err_irq:
3523 	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3524 		netdev_info(ndev, "calling pci_disable_msi()\n");
3525 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3526 		pci_disable_msi(qdev->pdev);
3527 	}
3528 	return err;
3529 }
3530 
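/*
 * Bounce the interface (down, then up). If either half fails the
 * device is closed under the RTNL lock rather than left half-running.
 */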
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		netdev_err(qdev->ndev,
			   "Driver up/down cycle failed, closing device\n");
		rtnl_lock();
		dev_close(qdev->ndev);
		rtnl_unlock();
		return -1;
	}
	return 0;
}

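/*
 * ndo_stop handler. If a reset is still in flight, poll until the
 * adapter is marked up again before taking it down for good.
 */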
static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

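/* ndo_open handler; all of the real work is done in ql_adapter_up(). */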
static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	return ql_adapter_up(qdev);
}

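/*
 * ndo_set_mac_address handler. The address can only be changed while
 * the interface is down; it is written to the chip through the page-0
 * indirect pointer/data register pair (bytes 2-5 at pointer index 0,
 * bytes 0-1 at index 1), all under hw_lock.
 */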
static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24) |
			    (ndev->dev_addr[3] << 16) |
			    (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}

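/*
 * ndo_tx_timeout handler. Runs in timer (softirq) context, so it only
 * stops the queue and defers the actual reset to ql_tx_timeout_work().
 */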
static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	netdev_err(ndev, "Resetting...\n");
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}

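/*
 * Deferred reset handler. When a reset has been requested, reclaim and
 * unmap any skbs still owned by the hardware, then poll ispControlStatus
 * for up to ten seconds for the soft reset to finish, re-arming the RI
 * bit as needed. On success the adapter is cycled without another
 * reset; on timeout it is cycled with a full reset.
 */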
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	unsigned long hw_flags;

	/*
	 * test_bit() takes a bit number, not a mask; OR-ing the two bit
	 * numbers together would test an unrelated bit, so check each
	 * reset flag individually.
	 */
	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				netdev_printk(KERN_DEBUG, ndev,
					      "Freeing lost SKB\n");
				pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_cb->map[0],
							mapaddr),
					 dma_unmap_len(&tx_cb->map[0], maplen),
					 PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
					       dma_unmap_addr(&tx_cb->map[j],
							      mapaddr),
					       dma_unmap_len(&tx_cb->map[j],
							     maplen),
					       PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		netdev_err(ndev, "Clearing NRI after reset\n");
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the soft reset to complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				netdev_printk(KERN_DEBUG, ndev,
					      "reset completed\n");
				break;
			}

			if (value & ISP_CONTROL_RI) {
				netdev_printk(KERN_DEBUG, ndev,
					      "clearing NRI after reset\n");
				ql_write_common_reg(qdev,
						    &port_regs->
						    CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI <<
						      16) | ISP_CONTROL_RI));
			}

			spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
			ssleep(1);
			spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			netdev_err(ndev,
				   "Timed out waiting for reset to complete\n");
			netdev_err(ndev, "Do a reset\n");
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}

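/* Worker scheduled by ql3xxx_tx_timeout(); cycles the adapter with a reset. */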
static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}

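/*
 * Cache the chip revision, bus width (32/64-bit), bus type (PCI vs
 * PCI-X) and slot number from the port status register.
 */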
static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}

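/*
 * Adapter timer callback. Runs in timer context, so it just kicks the
 * link state machine over to the workqueue where it can sleep.
 */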
static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;

	queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}

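/*
 * Driver entry points; MTU changes and address validation use the
 * generic ethernet helpers.
 */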
static const struct net_device_ops ql3xxx_netdev_ops = {
	.ndo_open		= ql3xxx_open,
	.ndo_start_xmit		= ql3xxx_send,
	.ndo_stop		= ql3xxx_close,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ql3xxx_set_mac_address,
	.ndo_tx_timeout		= ql3xxx_tx_timeout,
};

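/*
 * Per-device initialization: enable the PCI device, claim its regions,
 * select a 64-bit DMA mask if available (32-bit otherwise), map the
 * register BAR, pull the MTU and MAC address from NVRAM, register the
 * net device, and set up the workqueue, work items and watchdog timer.
 */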
static int ql3xxx_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found;
	int uninitialized_var(pci_using_dac), err;

	err = pci_enable_device(pdev);
	if (err) {
		pr_err("%s cannot enable PCI device\n", pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		pr_err("%s no usable DMA configuration\n", pci_name(pdev));
		goto err_out_free_regions;
	}

	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

	qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
	if (!qdev->mem_map_registers) {
		pr_err("%s: cannot map device registers\n", pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->netdev_ops = &ql3xxx_netdev_ops;
	ndev->ethtool_ops = &ql3xxx_ethtool_ops;
	ndev->watchdog_timeo = 5 * HZ;

	netif_napi_add(ndev, &qdev->napi, ql_poll, 64);

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
			 __func__, qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
	}

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x)
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);

	err = register_netdev(ndev);
	if (err) {
		pr_err("%s: cannot register net device\n", pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	/* create_singlethread_workqueue() can fail; unwind if it does */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	if (!qdev->workqueue) {
		unregister_netdev(ndev);
		err = -ENOMEM;
		goto err_out_iounmap;
	}

	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
	INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		pr_alert("%s\n", DRV_STRING);
		pr_alert("Driver name: %s, Version: %s\n",
			 DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return err;
}

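/*
 * Undo ql3xxx_probe(): unregister the net device, mask chip interrupts,
 * flush and destroy the workqueue, then release the mapped registers
 * and PCI resources.
 */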
static void ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	free_netdev(ndev);
	/* Balance the pci_enable_device() done in ql3xxx_probe(). */
	pci_disable_device(pdev);
}

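/*
 * Glue for the PCI core; module_pci_driver() below expands to the usual
 * module init/exit boilerplate that registers and unregisters this driver.
 */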
static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = ql3xxx_remove,
};

module_pci_driver(ql3xxx_driver);