1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44 
45 #include "qlge.h"
46 
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49 
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54 
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |	*/
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 
67 static int debug = -1;	/* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70 
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77 
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 		"Option to enable MPI firmware dump. "
82 		"Default is OFF - Do Not allocate memory. ");
83 
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 		"Option to allow force of firmware core dump. "
88 		"Default is OFF - Do not allow.");
89 
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 	/* required last entry */
94 	{0,}
95 };
96 
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98 
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101 
102 /* This hardware semaphore provides exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
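/*
 * Usage sketch (based on callers later in this file): acquire with
 * ql_sem_spinlock(), touch the shared resource, then release with
 * ql_sem_unlock(), e.g.
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */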
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108 	u32 sem_bits = 0;
109 
110 	switch (sem_mask) {
111 	case SEM_XGMAC0_MASK:
112 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113 		break;
114 	case SEM_XGMAC1_MASK:
115 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 		break;
117 	case SEM_ICB_MASK:
118 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
119 		break;
120 	case SEM_MAC_ADDR_MASK:
121 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 		break;
123 	case SEM_FLASH_MASK:
124 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 		break;
126 	case SEM_PROBE_MASK:
127 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128 		break;
129 	case SEM_RT_IDX_MASK:
130 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131 		break;
132 	case SEM_PROC_REG_MASK:
133 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 		break;
135 	default:
136 		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137 		return -EINVAL;
138 	}
139 
140 	ql_write32(qdev, SEM, sem_bits | sem_mask);
141 	return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143 
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146 	unsigned int wait_count = 30;
147 	do {
148 		if (!ql_sem_trylock(qdev, sem_mask))
149 			return 0;
150 		udelay(100);
151 	} while (--wait_count);
152 	return -ETIMEDOUT;
153 }
154 
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157 	ql_write32(qdev, SEM, sem_mask);
158 	ql_read32(qdev, SEM);	/* flush */
159 }
160 
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialization
163  * process, but is also used by kernel thread APIs such as
164  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165  */
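/* Return semantics (derived from the code below): 0 once the bit is set,
 * -EIO if err_bit is seen first, or -ETIMEDOUT after UDELAY_COUNT polls.
 */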
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168 	u32 temp;
169 	int count = UDELAY_COUNT;
170 
171 	while (count) {
172 		temp = ql_read32(qdev, reg);
173 
174 		/* check for errors */
175 		if (temp & err_bit) {
176 			netif_alert(qdev, probe, qdev->ndev,
177 				    "register 0x%.08x access error, value = 0x%.08x!.\n",
178 				    reg, temp);
179 			return -EIO;
180 		} else if (temp & bit)
181 			return 0;
182 		udelay(UDELAY_DELAY);
183 		count--;
184 	}
185 	netif_alert(qdev, probe, qdev->ndev,
186 		    "Timed out waiting for reg %x to come ready.\n", reg);
187 	return -ETIMEDOUT;
188 }
189 
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195 	int count = UDELAY_COUNT;
196 	u32 temp;
197 
198 	while (count) {
199 		temp = ql_read32(qdev, CFG);
200 		if (temp & CFG_LE)
201 			return -EIO;
202 		if (!(temp & bit))
203 			return 0;
204 		udelay(UDELAY_DELAY);
205 		count--;
206 	}
207 	return -ETIMEDOUT;
208 }
209 
210 
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215 		 u16 q_id)
216 {
217 	u64 map;
218 	int status = 0;
219 	int direction;
220 	u32 mask;
221 	u32 value;
222 
223 	direction =
224 	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 	    PCI_DMA_FROMDEVICE;
226 
227 	map = pci_map_single(qdev->pdev, ptr, size, direction);
228 	if (pci_dma_mapping_error(qdev->pdev, map)) {
229 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230 		return -ENOMEM;
231 	}
232 
233 	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234 	if (status)
235 		return status;
236 
237 	status = ql_wait_cfg(qdev, bit);
238 	if (status) {
239 		netif_err(qdev, ifup, qdev->ndev,
240 			  "Timed out waiting for CFG to come ready.\n");
241 		goto exit;
242 	}
243 
244 	ql_write32(qdev, ICB_L, (u32) map);
245 	ql_write32(qdev, ICB_H, (u32) (map >> 32));
246 
247 	mask = CFG_Q_MASK | (bit << 16);
248 	value = bit | (q_id << CFG_Q_SHIFT);
249 	ql_write32(qdev, CFG, (mask | value));
250 
251 	/*
252 	 * Wait for the bit to clear after signaling hw.
253 	 */
254 	status = ql_wait_cfg(qdev, bit);
255 exit:
256 	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
257 	pci_unmap_single(qdev->pdev, map, size, direction);
258 	return status;
259 }
260 
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263 			u32 *value)
264 {
265 	u32 offset = 0;
266 	int status;
267 
268 	switch (type) {
269 	case MAC_ADDR_TYPE_MULTI_MAC:
270 	case MAC_ADDR_TYPE_CAM_MAC:
271 		{
272 			status =
273 			    ql_wait_reg_rdy(qdev,
274 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275 			if (status)
276 				goto exit;
277 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280 			status =
281 			    ql_wait_reg_rdy(qdev,
282 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283 			if (status)
284 				goto exit;
285 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 			status =
287 			    ql_wait_reg_rdy(qdev,
288 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 			if (status)
290 				goto exit;
291 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294 			status =
295 			    ql_wait_reg_rdy(qdev,
296 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297 			if (status)
298 				goto exit;
299 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 			if (type == MAC_ADDR_TYPE_CAM_MAC) {
301 				status =
302 				    ql_wait_reg_rdy(qdev,
303 					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304 				if (status)
305 					goto exit;
306 				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309 				status =
310 				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311 						    MAC_ADDR_MR, 0);
312 				if (status)
313 					goto exit;
314 				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
315 			}
316 			break;
317 		}
318 	case MAC_ADDR_TYPE_VLAN:
319 	case MAC_ADDR_TYPE_MULTI_FLTR:
320 	default:
321 		netif_crit(qdev, ifup, qdev->ndev,
322 			   "Address type %d not yet supported.\n", type);
323 		status = -EPERM;
324 	}
325 exit:
326 	return status;
327 }
328 
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333 			       u16 index)
334 {
335 	u32 offset = 0;
336 	int status = 0;
337 
338 	switch (type) {
339 	case MAC_ADDR_TYPE_MULTI_MAC:
340 		{
341 			u32 upper = (addr[0] << 8) | addr[1];
342 			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 					(addr[4] << 8) | (addr[5]);
344 
345 			status =
346 				ql_wait_reg_rdy(qdev,
347 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 			if (status)
349 				goto exit;
350 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 				(index << MAC_ADDR_IDX_SHIFT) |
352 				type | MAC_ADDR_E);
353 			ql_write32(qdev, MAC_ADDR_DATA, lower);
354 			status =
355 				ql_wait_reg_rdy(qdev,
356 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 			if (status)
358 				goto exit;
359 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 				(index << MAC_ADDR_IDX_SHIFT) |
361 				type | MAC_ADDR_E);
362 
363 			ql_write32(qdev, MAC_ADDR_DATA, upper);
364 			status =
365 				ql_wait_reg_rdy(qdev,
366 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367 			if (status)
368 				goto exit;
369 			break;
370 		}
371 	case MAC_ADDR_TYPE_CAM_MAC:
372 		{
373 			u32 cam_output;
374 			u32 upper = (addr[0] << 8) | addr[1];
375 			u32 lower =
376 			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 			    (addr[5]);
378 			status =
379 			    ql_wait_reg_rdy(qdev,
380 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381 			if (status)
382 				goto exit;
383 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
385 				   type);	/* type */
386 			ql_write32(qdev, MAC_ADDR_DATA, lower);
387 			status =
388 			    ql_wait_reg_rdy(qdev,
389 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 			if (status)
391 				goto exit;
392 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
394 				   type);	/* type */
395 			ql_write32(qdev, MAC_ADDR_DATA, upper);
396 			status =
397 			    ql_wait_reg_rdy(qdev,
398 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399 			if (status)
400 				goto exit;
401 			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
402 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
403 				   type);	/* type */
404 			/* This field should also include the queue id
405 			   and possibly the function id.  Right now we hardcode
406 			   the route field to NIC core.
407 			 */
408 			cam_output = (CAM_OUT_ROUTE_NIC |
409 				      (qdev->
410 				       func << CAM_OUT_FUNC_SHIFT) |
411 					(0 << CAM_OUT_CQ_ID_SHIFT));
412 			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413 				cam_output |= CAM_OUT_RV;
414 			/* route to NIC core */
415 			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
416 			break;
417 		}
418 	case MAC_ADDR_TYPE_VLAN:
419 		{
420 			u32 enable_bit = *((u32 *) &addr[0]);
421 			/* For VLAN, the addr actually holds a bit that
422 			 * either enables or disables the vlan id we are
423 			 * addressing. It's either MAC_ADDR_E on or off.
424 			 * That's bit-27 we're talking about.
425 			 */
426 			status =
427 			    ql_wait_reg_rdy(qdev,
428 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
429 			if (status)
430 				goto exit;
431 			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
432 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
433 				   type |	/* type */
434 				   enable_bit);	/* enable/disable */
435 			break;
436 		}
437 	case MAC_ADDR_TYPE_MULTI_FLTR:
438 	default:
439 		netif_crit(qdev, ifup, qdev->ndev,
440 			   "Address type %d not yet supported.\n", type);
441 		status = -EPERM;
442 	}
443 exit:
444 	return status;
445 }
446 
447 /* Set or clear MAC address in hardware. We sometimes
448  * have to clear it to prevent wrong frame routing
449  * especially in a bonding environment.
450  */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 {
453 	int status;
454 	char zero_mac_addr[ETH_ALEN];
455 	char *addr;
456 
457 	if (set) {
458 		addr = &qdev->current_mac_addr[0];
459 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460 			     "Set Mac addr %pM\n", addr);
461 	} else {
462 		memset(zero_mac_addr, 0, ETH_ALEN);
463 		addr = &zero_mac_addr[0];
464 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465 			     "Clearing MAC address\n");
466 	}
467 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
468 	if (status)
469 		return status;
470 	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
473 	if (status)
474 		netif_err(qdev, ifup, qdev->ndev,
475 			  "Failed to init mac address.\n");
476 	return status;
477 }
478 
479 void ql_link_on(struct ql_adapter *qdev)
480 {
481 	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482 	netif_carrier_on(qdev->ndev);
483 	ql_set_mac_addr(qdev, 1);
484 }
485 
486 void ql_link_off(struct ql_adapter *qdev)
487 {
488 	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489 	netif_carrier_off(qdev->ndev);
490 	ql_set_mac_addr(qdev, 0);
491 }
492 
493 /* Get a specific frame routing value from the CAM.
494  * Used for debug and reg dump.
495  */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498 	int status = 0;
499 
500 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501 	if (status)
502 		goto exit;
503 
504 	ql_write32(qdev, RT_IDX,
505 		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507 	if (status)
508 		goto exit;
509 	*value = ql_read32(qdev, RT_DATA);
510 exit:
511 	return status;
512 }
513 
514 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
515  * to route different frame types to various inbound queues.  We send broadcast/
516  * multicast/error frames to the default queue for slow handling,
517  * and CAM hit/RSS frames to the fast handling queues.
518  */
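/* Each routing entry written below is composed as
 * (destination | RT_IDX_TYPE_NICQ | slot << RT_IDX_IDX_SHIFT), with the
 * RT_IDX_E bit OR'd in when the entry is being enabled.
 */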
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 			      int enable)
521 {
522 	int status = -EINVAL; /* Return error if no mask match. */
523 	u32 value = 0;
524 
525 	switch (mask) {
526 	case RT_IDX_CAM_HIT:
527 		{
528 			value = RT_IDX_DST_CAM_Q |	/* dest */
529 			    RT_IDX_TYPE_NICQ |	/* type */
530 			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531 			break;
532 		}
533 	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
534 		{
535 			value = RT_IDX_DST_DFLT_Q |	/* dest */
536 			    RT_IDX_TYPE_NICQ |	/* type */
537 			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538 			break;
539 		}
540 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
541 		{
542 			value = RT_IDX_DST_DFLT_Q |	/* dest */
543 			    RT_IDX_TYPE_NICQ |	/* type */
544 			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545 			break;
546 		}
547 	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548 		{
549 			value = RT_IDX_DST_DFLT_Q | /* dest */
550 				RT_IDX_TYPE_NICQ | /* type */
551 				(RT_IDX_IP_CSUM_ERR_SLOT <<
552 				RT_IDX_IDX_SHIFT); /* index */
553 			break;
554 		}
555 	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556 		{
557 			value = RT_IDX_DST_DFLT_Q | /* dest */
558 				RT_IDX_TYPE_NICQ | /* type */
559 				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560 				RT_IDX_IDX_SHIFT); /* index */
561 			break;
562 		}
563 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
564 		{
565 			value = RT_IDX_DST_DFLT_Q |	/* dest */
566 			    RT_IDX_TYPE_NICQ |	/* type */
567 			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 			break;
569 		}
570 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
571 		{
572 			value = RT_IDX_DST_DFLT_Q |	/* dest */
573 			    RT_IDX_TYPE_NICQ |	/* type */
574 			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 			break;
576 		}
577 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
578 		{
579 			value = RT_IDX_DST_DFLT_Q |	/* dest */
580 			    RT_IDX_TYPE_NICQ |	/* type */
581 			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 			break;
583 		}
584 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
585 		{
586 			value = RT_IDX_DST_RSS |	/* dest */
587 			    RT_IDX_TYPE_NICQ |	/* type */
588 			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589 			break;
590 		}
591 	case 0:		/* Clear the E-bit on an entry. */
592 		{
593 			value = RT_IDX_DST_DFLT_Q |	/* dest */
594 			    RT_IDX_TYPE_NICQ |	/* type */
595 			    (index << RT_IDX_IDX_SHIFT);/* index */
596 			break;
597 		}
598 	default:
599 		netif_err(qdev, ifup, qdev->ndev,
600 			  "Mask type %d not yet supported.\n", mask);
601 		status = -EPERM;
602 		goto exit;
603 	}
604 
605 	if (value) {
606 		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607 		if (status)
608 			goto exit;
609 		value |= (enable ? RT_IDX_E : 0);
610 		ql_write32(qdev, RT_IDX, value);
611 		ql_write32(qdev, RT_DATA, enable ? mask : 0);
612 	}
613 exit:
614 	return status;
615 }
616 
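/* Global interrupt enable/disable.  The (bit << 16) | bit write pattern
 * matches the mask-in-upper-16-bits convention seen elsewhere in this file
 * (e.g. the CFG and STS writes): the high half selects which low bits are
 * updated.
 */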
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
618 {
619 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
620 }
621 
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
623 {
624 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
625 }
626 
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628  * Otherwise, we may have multiple outstanding workers and don't want to
629  * enable until the last one finishes. In this case, the irq_cnt gets
630  * incremented every time we queue a worker and decremented every time
631  * a worker finishes.  Once it hits zero we enable the interrupt.
632  */
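/* Note: ql_disable_completion_interrupt() below increments irq_cnt, so the
 * atomic_dec_and_test() here only re-enables the interrupt once every
 * outstanding disable has been balanced.
 */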
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635 	u32 var = 0;
636 	unsigned long hw_flags = 0;
637 	struct intr_context *ctx = qdev->intr_context + intr;
638 
639 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640 		/* Always enable if we're in MSI-X multi-interrupt mode and
641 		 * it's not the default (zeroth) interrupt.
642 		 */
643 		ql_write32(qdev, INTR_EN,
644 			   ctx->intr_en_mask);
645 		var = ql_read32(qdev, STS);
646 		return var;
647 	}
648 
649 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650 	if (atomic_dec_and_test(&ctx->irq_cnt)) {
651 		ql_write32(qdev, INTR_EN,
652 			   ctx->intr_en_mask);
653 		var = ql_read32(qdev, STS);
654 	}
655 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
656 	return var;
657 }
658 
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
660 {
661 	u32 var = 0;
662 	struct intr_context *ctx;
663 
664 	/* HW disables for us if we're in MSI-X multi-interrupt mode and
665 	 * it's not the default (zeroth) interrupt.
666 	 */
667 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
668 		return 0;
669 
670 	ctx = qdev->intr_context + intr;
671 	spin_lock(&qdev->hw_lock);
672 	if (!atomic_read(&ctx->irq_cnt)) {
673 		ql_write32(qdev, INTR_EN,
674 		ctx->intr_dis_mask);
675 		var = ql_read32(qdev, STS);
676 	}
677 	atomic_inc(&ctx->irq_cnt);
678 	spin_unlock(&qdev->hw_lock);
679 	return var;
680 }
681 
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
683 {
684 	int i;
685 	for (i = 0; i < qdev->intr_count; i++) {
686 		/* The enable call does an atomic_dec_and_test
687 		 * and enables only if the result is zero.
688 		 * So we precharge it here.
689 		 */
690 		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
691 			i == 0))
692 			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693 		ql_enable_completion_interrupt(qdev, i);
694 	}
695 
696 }
697 
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
699 {
700 	int status, i;
701 	u16 csum = 0;
702 	__le16 *flash = (__le16 *)&qdev->flash;
703 
704 	status = strncmp((char *)&qdev->flash, str, 4);
705 	if (status) {
706 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
707 		return	status;
708 	}
709 
710 	for (i = 0; i < size; i++)
711 		csum += le16_to_cpu(*flash++);
712 
713 	if (csum)
714 		netif_err(qdev, ifup, qdev->ndev,
715 			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
716 
717 	return csum;
718 }
719 
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
721 {
722 	int status = 0;
723 	/* wait for reg to come ready */
724 	status = ql_wait_reg_rdy(qdev,
725 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
726 	if (status)
727 		goto exit;
728 	/* set up for reg read */
729 	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730 	/* wait for reg to come ready */
731 	status = ql_wait_reg_rdy(qdev,
732 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
733 	if (status)
734 		goto exit;
735 	 /* This data is stored on flash as an array of
736 	 * __le32.  Since ql_read32() returns cpu endian
737 	 * we need to swap it back.
738 	 */
739 	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
740 exit:
741 	return status;
742 }
743 
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
745 {
746 	u32 i, size;
747 	int status;
748 	__le32 *p = (__le32 *)&qdev->flash;
749 	u32 offset;
750 	u8 mac_addr[6];
751 
752 	/* Get flash offset for function and adjust
753 	 * for dword access.
754 	 */
755 	if (!qdev->port)
756 		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
757 	else
758 		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
759 
760 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
761 		return -ETIMEDOUT;
762 
763 	size = sizeof(struct flash_params_8000) / sizeof(u32);
764 	for (i = 0; i < size; i++, p++) {
765 		status = ql_read_flash_word(qdev, i+offset, p);
766 		if (status) {
767 			netif_err(qdev, ifup, qdev->ndev,
768 				  "Error reading flash.\n");
769 			goto exit;
770 		}
771 	}
772 
773 	status = ql_validate_flash(qdev,
774 			sizeof(struct flash_params_8000) / sizeof(u16),
775 			"8000");
776 	if (status) {
777 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
778 		status = -EINVAL;
779 		goto exit;
780 	}
781 
782 	/* Extract either manufacturer or BOFM modified
783 	 * MAC address.
784 	 */
785 	if (qdev->flash.flash_params_8000.data_type1 == 2)
786 		memcpy(mac_addr,
787 			qdev->flash.flash_params_8000.mac_addr1,
788 			qdev->ndev->addr_len);
789 	else
790 		memcpy(mac_addr,
791 			qdev->flash.flash_params_8000.mac_addr,
792 			qdev->ndev->addr_len);
793 
794 	if (!is_valid_ether_addr(mac_addr)) {
795 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
796 		status = -EINVAL;
797 		goto exit;
798 	}
799 
800 	memcpy(qdev->ndev->dev_addr,
801 		mac_addr,
802 		qdev->ndev->addr_len);
803 
804 exit:
805 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
806 	return status;
807 }
808 
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
810 {
811 	int i;
812 	int status;
813 	__le32 *p = (__le32 *)&qdev->flash;
814 	u32 offset = 0;
815 	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
816 
817 	/* Second function's parameters follow the first
818 	 * function's.
819 	 */
820 	if (qdev->port)
821 		offset = size;
822 
823 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
824 		return -ETIMEDOUT;
825 
826 	for (i = 0; i < size; i++, p++) {
827 		status = ql_read_flash_word(qdev, i+offset, p);
828 		if (status) {
829 			netif_err(qdev, ifup, qdev->ndev,
830 				  "Error reading flash.\n");
831 			goto exit;
832 		}
833 
834 	}
835 
836 	status = ql_validate_flash(qdev,
837 			sizeof(struct flash_params_8012) / sizeof(u16),
838 			"8012");
839 	if (status) {
840 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
841 		status = -EINVAL;
842 		goto exit;
843 	}
844 
845 	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
846 		status = -EINVAL;
847 		goto exit;
848 	}
849 
850 	memcpy(qdev->ndev->dev_addr,
851 		qdev->flash.flash_params_8012.mac_addr,
852 		qdev->ndev->addr_len);
853 
854 exit:
855 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
856 	return status;
857 }
858 
859 /* xgmac registers are located behind the xgmac_addr and xgmac_data
860  * register pair.  Each read/write requires us to wait for the ready
861  * bit before reading/writing the data.
862  */
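/* Access pattern (as implemented below): wait for XGMAC_ADDR_RDY, write
 * XGMAC_DATA then XGMAC_ADDR to trigger a write; for a read, write the
 * register offset with XGMAC_ADDR_R set, wait again, then read XGMAC_DATA.
 */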
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
864 {
865 	int status;
866 	/* wait for reg to come ready */
867 	status = ql_wait_reg_rdy(qdev,
868 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
869 	if (status)
870 		return status;
871 	/* write the data to the data reg */
872 	ql_write32(qdev, XGMAC_DATA, data);
873 	/* trigger the write */
874 	ql_write32(qdev, XGMAC_ADDR, reg);
875 	return status;
876 }
877 
878 /* xgmac registers are located behind the xgmac_addr and xgmac_data
879  * register pair.  Each read/write requires us to wait for the ready
880  * bit before reading/writing the data.
881  */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
883 {
884 	int status = 0;
885 	/* wait for reg to come ready */
886 	status = ql_wait_reg_rdy(qdev,
887 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
888 	if (status)
889 		goto exit;
890 	/* set up for reg read */
891 	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892 	/* wait for reg to come ready */
893 	status = ql_wait_reg_rdy(qdev,
894 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
895 	if (status)
896 		goto exit;
897 	/* get the data */
898 	*data = ql_read32(qdev, XGMAC_DATA);
899 exit:
900 	return status;
901 }
902 
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
905 {
906 	int status = 0;
907 	u32 hi = 0;
908 	u32 lo = 0;
909 
910 	status = ql_read_xgmac_reg(qdev, reg, &lo);
911 	if (status)
912 		goto exit;
913 
914 	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
915 	if (status)
916 		goto exit;
917 
918 	*data = (u64) lo | ((u64) hi << 32);
919 
920 exit:
921 	return status;
922 }
923 
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
925 {
926 	int status;
927 	/*
928 	 * Get MPI firmware version for driver banner
929 	 * and ethtool info.
930 	 */
931 	status = ql_mb_about_fw(qdev);
932 	if (status)
933 		goto exit;
934 	status = ql_mb_get_fw_state(qdev);
935 	if (status)
936 		goto exit;
937 	/* Wake up a worker to get/set the TX/RX frame sizes. */
938 	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
939 exit:
940 	return status;
941 }
942 
943 /* Take the MAC Core out of reset.
944  * Enable statistics counting.
945  * Take the transmitter/receiver out of reset.
946  * This functionality may be done in the MPI firmware at a
947  * later date.
948  */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
950 {
951 	int status = 0;
952 	u32 data;
953 
954 	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955 		/* Another function has the semaphore, so
956 		 * wait for the port init bit to come ready.
957 		 */
958 		netif_info(qdev, link, qdev->ndev,
959 			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960 		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
961 		if (status) {
962 			netif_crit(qdev, link, qdev->ndev,
963 				   "Port initialize timed out.\n");
964 		}
965 		return status;
966 	}
967 
968 	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
969 	/* Set the core reset. */
970 	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
971 	if (status)
972 		goto end;
973 	data |= GLOBAL_CFG_RESET;
974 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
975 	if (status)
976 		goto end;
977 
978 	/* Clear the core reset and turn on jumbo for receiver. */
979 	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
980 	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
981 	data |= GLOBAL_CFG_TX_STAT_EN;
982 	data |= GLOBAL_CFG_RX_STAT_EN;
983 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984 	if (status)
985 		goto end;
986 
987 	/* Enable the transmitter and clear its reset. */
988 	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
989 	if (status)
990 		goto end;
991 	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
992 	data |= TX_CFG_EN;	/* Enable the transmitter. */
993 	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
994 	if (status)
995 		goto end;
996 
997 	/* Enable the receiver and clear its reset. */
998 	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
999 	if (status)
1000 		goto end;
1001 	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
1002 	data |= RX_CFG_EN;	/* Enable the receiver. */
1003 	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1004 	if (status)
1005 		goto end;
1006 
1007 	/* Turn on jumbo. */
1008 	status =
1009 	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1010 	if (status)
1011 		goto end;
1012 	status =
1013 	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1014 	if (status)
1015 		goto end;
1016 
1017 	/* Signal to the world that the port is enabled.        */
1018 	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1019 end:
1020 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
1021 	return status;
1022 }
1023 
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1025 {
1026 	return PAGE_SIZE << qdev->lbq_buf_order;
1027 }
1028 
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1031 {
1032 	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033 	rx_ring->lbq_curr_idx++;
1034 	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035 		rx_ring->lbq_curr_idx = 0;
1036 	rx_ring->lbq_free_cnt++;
1037 	return lbq_desc;
1038 }
1039 
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041 		struct rx_ring *rx_ring)
1042 {
1043 	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1044 
1045 	pci_dma_sync_single_for_cpu(qdev->pdev,
1046 					dma_unmap_addr(lbq_desc, mapaddr),
1047 				    rx_ring->lbq_buf_size,
1048 					PCI_DMA_FROMDEVICE);
1049 
1050 	/* If it's the last chunk of our master page then
1051 	 * we unmap it.
1052 	 */
1053 	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054 					== ql_lbq_block_size(qdev))
1055 		pci_unmap_page(qdev->pdev,
1056 				lbq_desc->p.pg_chunk.map,
1057 				ql_lbq_block_size(qdev),
1058 				PCI_DMA_FROMDEVICE);
1059 	return lbq_desc;
1060 }
1061 
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1064 {
1065 	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066 	rx_ring->sbq_curr_idx++;
1067 	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068 		rx_ring->sbq_curr_idx = 0;
1069 	rx_ring->sbq_free_cnt++;
1070 	return sbq_desc;
1071 }
1072 
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1075 {
1076 	rx_ring->cnsmr_idx++;
1077 	rx_ring->curr_entry++;
1078 	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079 		rx_ring->cnsmr_idx = 0;
1080 		rx_ring->curr_entry = rx_ring->cq_base;
1081 	}
1082 }
1083 
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1085 {
1086 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1087 }
1088 
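/* Carve the next rx buffer chunk out of the current "master" page.  A new
 * page of order lbq_buf_order is allocated and DMA mapped when needed; every
 * chunk except the last takes an extra page reference so the page is only
 * freed once all of its chunks have been consumed.
 */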
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090 						struct bq_desc *lbq_desc)
1091 {
1092 	if (!rx_ring->pg_chunk.page) {
1093 		u64 map;
1094 		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1095 						GFP_ATOMIC,
1096 						qdev->lbq_buf_order);
1097 		if (unlikely(!rx_ring->pg_chunk.page)) {
1098 			netif_err(qdev, drv, qdev->ndev,
1099 				  "page allocation failed.\n");
1100 			return -ENOMEM;
1101 		}
1102 		rx_ring->pg_chunk.offset = 0;
1103 		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 					0, ql_lbq_block_size(qdev),
1105 					PCI_DMA_FROMDEVICE);
1106 		if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 			__free_pages(rx_ring->pg_chunk.page,
1108 					qdev->lbq_buf_order);
1109 			netif_err(qdev, drv, qdev->ndev,
1110 				  "PCI mapping failed.\n");
1111 			return -ENOMEM;
1112 		}
1113 		rx_ring->pg_chunk.map = map;
1114 		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1115 	}
1116 
1117 	/* Copy the current master pg_chunk info
1118 	 * to the current descriptor.
1119 	 */
1120 	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1121 
1122 	/* Adjust the master page chunk for next
1123 	 * buffer get.
1124 	 */
1125 	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126 	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127 		rx_ring->pg_chunk.page = NULL;
1128 		lbq_desc->p.pg_chunk.last_flag = 1;
1129 	} else {
1130 		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131 		get_page(rx_ring->pg_chunk.page);
1132 		lbq_desc->p.pg_chunk.last_flag = 0;
1133 	}
1134 	return 0;
1135 }
1136 /* Process (refill) a large buffer queue. */
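/* Buffers are replenished in batches of 16, and the producer index doorbell
 * is only written once per call, after at least one batch has been posted.
 */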
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1138 {
1139 	u32 clean_idx = rx_ring->lbq_clean_idx;
1140 	u32 start_idx = clean_idx;
1141 	struct bq_desc *lbq_desc;
1142 	u64 map;
1143 	int i;
1144 
1145 	while (rx_ring->lbq_free_cnt > 32) {
1146 		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148 				     "lbq: try cleaning clean_idx = %d.\n",
1149 				     clean_idx);
1150 			lbq_desc = &rx_ring->lbq[clean_idx];
1151 			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152 				rx_ring->lbq_clean_idx = clean_idx;
1153 				netif_err(qdev, ifup, qdev->ndev,
1154 						"Could not get a page chunk, i=%d, clean_idx =%d .\n",
1155 						i, clean_idx);
1156 				return;
1157 			}
1158 
1159 			map = lbq_desc->p.pg_chunk.map +
1160 				lbq_desc->p.pg_chunk.offset;
1161 			dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162 			dma_unmap_len_set(lbq_desc, maplen,
1163 					rx_ring->lbq_buf_size);
1164 			*lbq_desc->addr = cpu_to_le64(map);
1165 
1166 			pci_dma_sync_single_for_device(qdev->pdev, map,
1167 						rx_ring->lbq_buf_size,
1168 						PCI_DMA_FROMDEVICE);
1169 			clean_idx++;
1170 			if (clean_idx == rx_ring->lbq_len)
1171 				clean_idx = 0;
1172 		}
1173 
1174 		rx_ring->lbq_clean_idx = clean_idx;
1175 		rx_ring->lbq_prod_idx += 16;
1176 		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177 			rx_ring->lbq_prod_idx = 0;
1178 		rx_ring->lbq_free_cnt -= 16;
1179 	}
1180 
1181 	if (start_idx != clean_idx) {
1182 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 			     "lbq: updating prod idx = %d.\n",
1184 			     rx_ring->lbq_prod_idx);
1185 		ql_write_db_reg(rx_ring->lbq_prod_idx,
1186 				rx_ring->lbq_prod_idx_db_reg);
1187 	}
1188 }
1189 
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1192 {
1193 	u32 clean_idx = rx_ring->sbq_clean_idx;
1194 	u32 start_idx = clean_idx;
1195 	struct bq_desc *sbq_desc;
1196 	u64 map;
1197 	int i;
1198 
1199 	while (rx_ring->sbq_free_cnt > 16) {
1200 		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201 			sbq_desc = &rx_ring->sbq[clean_idx];
1202 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203 				     "sbq: try cleaning clean_idx = %d.\n",
1204 				     clean_idx);
1205 			if (sbq_desc->p.skb == NULL) {
1206 				netif_printk(qdev, rx_status, KERN_DEBUG,
1207 					     qdev->ndev,
1208 					     "sbq: getting new skb for index %d.\n",
1209 					     sbq_desc->index);
1210 				sbq_desc->p.skb =
1211 				    netdev_alloc_skb(qdev->ndev,
1212 						     SMALL_BUFFER_SIZE);
1213 				if (sbq_desc->p.skb == NULL) {
1214 					netif_err(qdev, probe, qdev->ndev,
1215 						  "Couldn't get an skb.\n");
1216 					rx_ring->sbq_clean_idx = clean_idx;
1217 					return;
1218 				}
1219 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 				map = pci_map_single(qdev->pdev,
1221 						     sbq_desc->p.skb->data,
1222 						     rx_ring->sbq_buf_size,
1223 						     PCI_DMA_FROMDEVICE);
1224 				if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 					netif_err(qdev, ifup, qdev->ndev,
1226 						  "PCI mapping failed.\n");
1227 					rx_ring->sbq_clean_idx = clean_idx;
1228 					dev_kfree_skb_any(sbq_desc->p.skb);
1229 					sbq_desc->p.skb = NULL;
1230 					return;
1231 				}
1232 				dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 				dma_unmap_len_set(sbq_desc, maplen,
1234 						  rx_ring->sbq_buf_size);
1235 				*sbq_desc->addr = cpu_to_le64(map);
1236 			}
1237 
1238 			clean_idx++;
1239 			if (clean_idx == rx_ring->sbq_len)
1240 				clean_idx = 0;
1241 		}
1242 		rx_ring->sbq_clean_idx = clean_idx;
1243 		rx_ring->sbq_prod_idx += 16;
1244 		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 			rx_ring->sbq_prod_idx = 0;
1246 		rx_ring->sbq_free_cnt -= 16;
1247 	}
1248 
1249 	if (start_idx != clean_idx) {
1250 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 			     "sbq: updating prod idx = %d.\n",
1252 			     rx_ring->sbq_prod_idx);
1253 		ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 				rx_ring->sbq_prod_idx_db_reg);
1255 	}
1256 }
1257 
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 				    struct rx_ring *rx_ring)
1260 {
1261 	ql_update_sbq(qdev, rx_ring);
1262 	ql_update_lbq(qdev, rx_ring);
1263 }
1264 
1265 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266  * fails at some stage, or from the interrupt when a tx completes.
1267  */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 			  struct tx_ring_desc *tx_ring_desc, int mapped)
1270 {
1271 	int i;
1272 	for (i = 0; i < mapped; i++) {
1273 		if (i == 0 || (i == 7 && mapped > 7)) {
1274 			/*
1275 			 * Unmap the skb->data area, or the
1276 			 * external sglist (AKA the Outbound
1277 			 * Address List (OAL)).
1278 			 * If it's the zeroth element, then it's
1279 			 * the skb->data area.  If it's the 7th
1280 			 * element and there are more than 6 frags,
1281 			 * then it's an OAL.
1282 			 */
1283 			if (i == 7) {
1284 				netif_printk(qdev, tx_done, KERN_DEBUG,
1285 					     qdev->ndev,
1286 					     "unmapping OAL area.\n");
1287 			}
1288 			pci_unmap_single(qdev->pdev,
1289 					 dma_unmap_addr(&tx_ring_desc->map[i],
1290 							mapaddr),
1291 					 dma_unmap_len(&tx_ring_desc->map[i],
1292 						       maplen),
1293 					 PCI_DMA_TODEVICE);
1294 		} else {
1295 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 				     "unmapping frag %d.\n", i);
1297 			pci_unmap_page(qdev->pdev,
1298 				       dma_unmap_addr(&tx_ring_desc->map[i],
1299 						      mapaddr),
1300 				       dma_unmap_len(&tx_ring_desc->map[i],
1301 						     maplen), PCI_DMA_TODEVICE);
1302 		}
1303 	}
1304 
1305 }
1306 
1307 /* Map the buffers for this transmit.  This will return
1308  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309  */
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 		       struct ob_mac_iocb_req *mac_iocb_ptr,
1312 		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 {
1314 	int len = skb_headlen(skb);
1315 	dma_addr_t map;
1316 	int frag_idx, err, map_idx = 0;
1317 	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 	int frag_cnt = skb_shinfo(skb)->nr_frags;
1319 
1320 	if (frag_cnt) {
1321 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 			     "frag_cnt = %d.\n", frag_cnt);
1323 	}
1324 	/*
1325 	 * Map the skb buffer first.
1326 	 */
1327 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328 
1329 	err = pci_dma_mapping_error(qdev->pdev, map);
1330 	if (err) {
1331 		netif_err(qdev, tx_queued, qdev->ndev,
1332 			  "PCI mapping failed with error: %d\n", err);
1333 
1334 		return NETDEV_TX_BUSY;
1335 	}
1336 
1337 	tbd->len = cpu_to_le32(len);
1338 	tbd->addr = cpu_to_le64(map);
1339 	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341 	map_idx++;
1342 
1343 	/*
1344 	 * This loop fills the remainder of the 8 address descriptors
1345 	 * in the IOCB.  If there are more than 7 fragments, then the
1346 	 * eighth address desc will point to an external list (OAL).
1347 	 * When this happens, the remainder of the frags will be stored
1348 	 * in this list.
1349 	 */
1350 	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352 		tbd++;
1353 		if (frag_idx == 6 && frag_cnt > 7) {
1354 			/* Let's tack on an sglist.
1355 			 * Our control block will now
1356 			 * look like this:
1357 			 * iocb->seg[0] = skb->data
1358 			 * iocb->seg[1] = frag[0]
1359 			 * iocb->seg[2] = frag[1]
1360 			 * iocb->seg[3] = frag[2]
1361 			 * iocb->seg[4] = frag[3]
1362 			 * iocb->seg[5] = frag[4]
1363 			 * iocb->seg[6] = frag[5]
1364 			 * iocb->seg[7] = ptr to OAL (external sglist)
1365 			 * oal->seg[0] = frag[6]
1366 			 * oal->seg[1] = frag[7]
1367 			 * oal->seg[2] = frag[8]
1368 			 * oal->seg[3] = frag[9]
1369 			 * oal->seg[4] = frag[10]
1370 			 *      etc...
1371 			 */
1372 			/* Tack on the OAL in the eighth segment of IOCB. */
1373 			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374 					     sizeof(struct oal),
1375 					     PCI_DMA_TODEVICE);
1376 			err = pci_dma_mapping_error(qdev->pdev, map);
1377 			if (err) {
1378 				netif_err(qdev, tx_queued, qdev->ndev,
1379 					  "PCI mapping outbound address list with error: %d\n",
1380 					  err);
1381 				goto map_error;
1382 			}
1383 
1384 			tbd->addr = cpu_to_le64(map);
1385 			/*
1386 			 * The length is the number of fragments
1387 			 * that remain to be mapped times the length
1388 			 * of our sglist (OAL).
1389 			 */
1390 			tbd->len =
1391 			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1392 					 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394 					   map);
1395 			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 					  sizeof(struct oal));
1397 			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398 			map_idx++;
1399 		}
1400 
1401 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402 				       DMA_TO_DEVICE);
1403 
1404 		err = dma_mapping_error(&qdev->pdev->dev, map);
1405 		if (err) {
1406 			netif_err(qdev, tx_queued, qdev->ndev,
1407 				  "PCI mapping frags failed with error: %d.\n",
1408 				  err);
1409 			goto map_error;
1410 		}
1411 
1412 		tbd->addr = cpu_to_le64(map);
1413 		tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 				  skb_frag_size(frag));
1417 
1418 	}
1419 	/* Save the number of segments we've mapped. */
1420 	tx_ring_desc->map_cnt = map_idx;
1421 	/* Terminate the last segment. */
1422 	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 	return NETDEV_TX_OK;
1424 
1425 map_error:
1426 	/*
1427 	 * If the first frag mapping failed, then i will be zero.
1428 	 * This causes the unmap of the skb->data area.  Otherwise
1429 	 * we pass in the number of frags that mapped successfully
1430 	 * so they can be unmapped.
1431 	 */
1432 	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 	return NETDEV_TX_BUSY;
1434 }
1435 
1436 /* Process an inbound completion from an rx ring. */
1437 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1438 					struct rx_ring *rx_ring,
1439 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1440 					u32 length,
1441 					u16 vlan_id)
1442 {
1443 	struct sk_buff *skb;
1444 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1445 	struct napi_struct *napi = &rx_ring->napi;
1446 
1447 	napi->dev = qdev->ndev;
1448 
1449 	skb = napi_get_frags(napi);
1450 	if (!skb) {
1451 		netif_err(qdev, drv, qdev->ndev,
1452 			  "Couldn't get an skb, exiting.\n");
1453 		rx_ring->rx_dropped++;
1454 		put_page(lbq_desc->p.pg_chunk.page);
1455 		return;
1456 	}
1457 	prefetch(lbq_desc->p.pg_chunk.va);
1458 	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1459 			     lbq_desc->p.pg_chunk.page,
1460 			     lbq_desc->p.pg_chunk.offset,
1461 			     length);
1462 
1463 	skb->len += length;
1464 	skb->data_len += length;
1465 	skb->truesize += length;
1466 	skb_shinfo(skb)->nr_frags++;
1467 
1468 	rx_ring->rx_packets++;
1469 	rx_ring->rx_bytes += length;
1470 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1471 	skb_record_rx_queue(skb, rx_ring->cq_id);
1472 	if (vlan_id != 0xffff)
1473 		__vlan_hwaccel_put_tag(skb, vlan_id);
1474 	napi_gro_frags(napi);
1475 }
1476 
1477 /* Process an inbound completion from an rx ring. */
1478 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1479 					struct rx_ring *rx_ring,
1480 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1481 					u32 length,
1482 					u16 vlan_id)
1483 {
1484 	struct net_device *ndev = qdev->ndev;
1485 	struct sk_buff *skb = NULL;
1486 	void *addr;
1487 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1488 	struct napi_struct *napi = &rx_ring->napi;
1489 
1490 	skb = netdev_alloc_skb(ndev, length);
1491 	if (!skb) {
1492 		netif_err(qdev, drv, qdev->ndev,
1493 			  "Couldn't get an skb, need to unwind!.\n");
1494 		rx_ring->rx_dropped++;
1495 		put_page(lbq_desc->p.pg_chunk.page);
1496 		return;
1497 	}
1498 
1499 	addr = lbq_desc->p.pg_chunk.va;
1500 	prefetch(addr);
1501 
1502 
1503 	/* Frame error, so drop the packet. */
1504 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1505 		netif_info(qdev, drv, qdev->ndev,
1506 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1507 		rx_ring->rx_errors++;
1508 		goto err_out;
1509 	}
1510 
1511 	/* The max framesize filter on this chip is set higher than
1512 	 * MTU since FCoE uses 2k frames.
1513 	 */
1514 	if (skb->len > ndev->mtu + ETH_HLEN) {
1515 		netif_err(qdev, drv, qdev->ndev,
1516 			  "Segment too small, dropping.\n");
1517 		rx_ring->rx_dropped++;
1518 		goto err_out;
1519 	}
1520 	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1521 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1522 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1523 		     length);
1524 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1525 				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1526 				length-ETH_HLEN);
1527 	skb->len += length-ETH_HLEN;
1528 	skb->data_len += length-ETH_HLEN;
1529 	skb->truesize += length-ETH_HLEN;
1530 
1531 	rx_ring->rx_packets++;
1532 	rx_ring->rx_bytes += skb->len;
1533 	skb->protocol = eth_type_trans(skb, ndev);
1534 	skb_checksum_none_assert(skb);
1535 
1536 	if ((ndev->features & NETIF_F_RXCSUM) &&
1537 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1538 		/* TCP frame. */
1539 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1540 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1541 				     "TCP checksum done!\n");
1542 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1543 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1544 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1545 			/* Unfragmented ipv4 UDP frame. */
1546 			struct iphdr *iph =
1547 				(struct iphdr *) ((u8 *)addr + ETH_HLEN);
1548 			if (!(iph->frag_off &
1549 				cpu_to_be16(IP_MF|IP_OFFSET))) {
1550 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1551 				netif_printk(qdev, rx_status, KERN_DEBUG,
1552 					     qdev->ndev,
1553 					     "UDP checksum done!\n");
1554 			}
1555 		}
1556 	}
1557 
1558 	skb_record_rx_queue(skb, rx_ring->cq_id);
1559 	if (vlan_id != 0xffff)
1560 		__vlan_hwaccel_put_tag(skb, vlan_id);
1561 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1562 		napi_gro_receive(napi, skb);
1563 	else
1564 		netif_receive_skb(skb);
1565 	return;
1566 err_out:
1567 	dev_kfree_skb_any(skb);
1568 	put_page(lbq_desc->p.pg_chunk.page);
1569 }
1570 
1571 /* Process an inbound completion from an rx ring. */
1572 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1573 					struct rx_ring *rx_ring,
1574 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1575 					u32 length,
1576 					u16 vlan_id)
1577 {
1578 	struct net_device *ndev = qdev->ndev;
1579 	struct sk_buff *skb = NULL;
1580 	struct sk_buff *new_skb = NULL;
1581 	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1582 
1583 	skb = sbq_desc->p.skb;
1584 	/* Allocate new_skb and copy */
1585 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1586 	if (new_skb == NULL) {
1587 		netif_err(qdev, probe, qdev->ndev,
1588 			  "No skb available, drop the packet.\n");
1589 		rx_ring->rx_dropped++;
1590 		return;
1591 	}
1592 	skb_reserve(new_skb, NET_IP_ALIGN);
1593 	memcpy(skb_put(new_skb, length), skb->data, length);
1594 	skb = new_skb;
1595 
1596 	/* Frame error, so drop the packet. */
1597 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1598 		netif_info(qdev, drv, qdev->ndev,
1599 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1600 		dev_kfree_skb_any(skb);
1601 		rx_ring->rx_errors++;
1602 		return;
1603 	}
1604 
1605 	/* loopback self test for ethtool */
1606 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1607 		ql_check_lb_frame(qdev, skb);
1608 		dev_kfree_skb_any(skb);
1609 		return;
1610 	}
1611 
1612 	/* The max framesize filter on this chip is set higher than
1613 	 * MTU since FCoE uses 2k frames.
1614 	 */
1615 	if (skb->len > ndev->mtu + ETH_HLEN) {
1616 		dev_kfree_skb_any(skb);
1617 		rx_ring->rx_dropped++;
1618 		return;
1619 	}
1620 
1621 	prefetch(skb->data);
1622 	skb->dev = ndev;
1623 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1624 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1625 			     "%s Multicast.\n",
1626 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1627 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1628 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1629 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1630 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1631 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1632 	}
1633 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1634 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1635 			     "Promiscuous Packet.\n");
1636 
1637 	rx_ring->rx_packets++;
1638 	rx_ring->rx_bytes += skb->len;
1639 	skb->protocol = eth_type_trans(skb, ndev);
1640 	skb_checksum_none_assert(skb);
1641 
1642 	/* If rx checksum is on, and there are no
1643 	 * csum or frame errors.
1644 	 */
1645 	if ((ndev->features & NETIF_F_RXCSUM) &&
1646 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1647 		/* TCP frame. */
1648 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1649 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1650 				     "TCP checksum done!\n");
1651 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1652 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1653 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1654 			/* Unfragmented ipv4 UDP frame. */
1655 			struct iphdr *iph = (struct iphdr *) skb->data;
1656 			if (!(iph->frag_off &
1657 				ntohs(IP_MF|IP_OFFSET))) {
1658 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1659 				netif_printk(qdev, rx_status, KERN_DEBUG,
1660 					     qdev->ndev,
1661 					     "UDP checksum done!\n");
1662 			}
1663 		}
1664 	}
1665 
1666 	skb_record_rx_queue(skb, rx_ring->cq_id);
1667 	if (vlan_id != 0xffff)
1668 		__vlan_hwaccel_put_tag(skb, vlan_id);
1669 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1670 		napi_gro_receive(&rx_ring->napi, skb);
1671 	else
1672 		netif_receive_skb(skb);
1673 }
1674 
1675 static void ql_realign_skb(struct sk_buff *skb, int len)
1676 {
1677 	void *temp_addr = skb->data;
1678 
1679 	/* Undo the skb_reserve(skb,32) we did before
1680 	 * giving to hardware, and realign data on
1681 	 * a 2-byte boundary.
1682 	 */
1683 	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1684 	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1685 	skb_copy_to_linear_data(skb, temp_addr,
1686 		(unsigned int)len);
1687 }
1688 
1689 /*
1690  * This function builds an skb for the given inbound
1691  * completion.  It will be rewritten for readability in the near
1692  * future, but for now it works well.
1693  */
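/* Depending on the IOCB flags, the header may arrive in a small buffer and
 * the data in a small buffer or in large-buffer page chunks that get chained
 * onto the skb; each combination is handled separately below.
 */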
1694 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1695 				       struct rx_ring *rx_ring,
1696 				       struct ib_mac_iocb_rsp *ib_mac_rsp)
1697 {
1698 	struct bq_desc *lbq_desc;
1699 	struct bq_desc *sbq_desc;
1700 	struct sk_buff *skb = NULL;
1701 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1702        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1703 
1704 	/*
1705 	 * Handle the header buffer if present.
1706 	 */
1707 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1708 	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1709 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1710 			     "Header of %d bytes in small buffer.\n", hdr_len);
1711 		/*
1712 		 * Headers fit nicely into a small buffer.
1713 		 */
1714 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1715 		pci_unmap_single(qdev->pdev,
1716 				dma_unmap_addr(sbq_desc, mapaddr),
1717 				dma_unmap_len(sbq_desc, maplen),
1718 				PCI_DMA_FROMDEVICE);
1719 		skb = sbq_desc->p.skb;
1720 		ql_realign_skb(skb, hdr_len);
1721 		skb_put(skb, hdr_len);
1722 		sbq_desc->p.skb = NULL;
1723 	}
1724 
1725 	/*
1726 	 * Handle the data buffer(s).
1727 	 */
1728 	if (unlikely(!length)) {	/* Is there data too? */
1729 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1730 			     "No Data buffer in this packet.\n");
1731 		return skb;
1732 	}
1733 
1734 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1735 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1736 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1737 				     "Headers in small, data of %d bytes in small, combine them.\n",
1738 				     length);
1739 			/*
1740 			 * Data is less than small buffer size so it's
1741 			 * stuffed in a small buffer.
1742 			 * For this case we append the data
1743 			 * from the "data" small buffer to the "header" small
1744 			 * buffer.
1745 			 */
1746 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1747 			pci_dma_sync_single_for_cpu(qdev->pdev,
1748 						    dma_unmap_addr
1749 						    (sbq_desc, mapaddr),
1750 						    dma_unmap_len
1751 						    (sbq_desc, maplen),
1752 						    PCI_DMA_FROMDEVICE);
1753 			memcpy(skb_put(skb, length),
1754 			       sbq_desc->p.skb->data, length);
1755 			pci_dma_sync_single_for_device(qdev->pdev,
1756 						       dma_unmap_addr
1757 						       (sbq_desc,
1758 							mapaddr),
1759 						       dma_unmap_len
1760 						       (sbq_desc,
1761 							maplen),
1762 						       PCI_DMA_FROMDEVICE);
1763 		} else {
1764 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1765 				     "%d bytes in a single small buffer.\n",
1766 				     length);
1767 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1768 			skb = sbq_desc->p.skb;
1769 			ql_realign_skb(skb, length);
1770 			skb_put(skb, length);
1771 			pci_unmap_single(qdev->pdev,
1772 					 dma_unmap_addr(sbq_desc,
1773 							mapaddr),
1774 					 dma_unmap_len(sbq_desc,
1775 						       maplen),
1776 					 PCI_DMA_FROMDEVICE);
1777 			sbq_desc->p.skb = NULL;
1778 		}
1779 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1780 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1781 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1782 				     "Header in small, %d bytes in large. Chain large to small!\n",
1783 				     length);
1784 			/*
1785 			 * The data is in a single large buffer.  We
1786 			 * chain it to the header buffer's skb and let
1787 			 * it rip.
1788 			 */
1789 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1790 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1791 				     "Chaining page at offset = %d, for %d bytes  to skb.\n",
1792 				     lbq_desc->p.pg_chunk.offset, length);
1793 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1794 						lbq_desc->p.pg_chunk.offset,
1795 						length);
1796 			skb->len += length;
1797 			skb->data_len += length;
1798 			skb->truesize += length;
1799 		} else {
1800 			/*
1801 			 * The headers and data are in a single large buffer. We
1802 			 * copy it to a new skb and let it go. This can happen with
1803 			 * jumbo mtu on a non-TCP/UDP frame.
1804 			 */
1805 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1806 			skb = netdev_alloc_skb(qdev->ndev, length);
1807 			if (skb == NULL) {
1808 				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1809 					     "No skb available, drop the packet.\n");
1810 				return NULL;
1811 			}
1812 			pci_unmap_page(qdev->pdev,
1813 				       dma_unmap_addr(lbq_desc,
1814 						      mapaddr),
1815 				       dma_unmap_len(lbq_desc, maplen),
1816 				       PCI_DMA_FROMDEVICE);
1817 			skb_reserve(skb, NET_IP_ALIGN);
1818 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1819 				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1820 				     length);
1821 			skb_fill_page_desc(skb, 0,
1822 						lbq_desc->p.pg_chunk.page,
1823 						lbq_desc->p.pg_chunk.offset,
1824 						length);
1825 			skb->len += length;
1826 			skb->data_len += length;
1827 			skb->truesize += length;
1828 			length -= length;
1829 			__pskb_pull_tail(skb,
1830 				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1831 				VLAN_ETH_HLEN : ETH_HLEN);
1832 		}
1833 	} else {
1834 		/*
1835 		 * The data is in a chain of large buffers
1836 		 * pointed to by a small buffer.  We loop
1837 		 * through and chain them to our small header
1838 		 * buffer's skb.
1839 		 * frags:  There are 18 frags max and our small
1840 		 *         buffer will hold 32 of them.  In practice
1841 		 *         we use at most 3 for our 9000 byte jumbo
1842 		 *         frames, but if the MTU goes up we could
1843 		 *         eventually be in trouble.
1844 		 */
1845 		int size, i = 0;
1846 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1847 		pci_unmap_single(qdev->pdev,
1848 				 dma_unmap_addr(sbq_desc, mapaddr),
1849 				 dma_unmap_len(sbq_desc, maplen),
1850 				 PCI_DMA_FROMDEVICE);
1851 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1852 			/*
1853 			 * This is a non-TCP/UDP IP frame, so
1854 			 * the headers aren't split into a small
1855 			 * buffer.  We have to use the small buffer
1856 			 * that contains our sg list as our skb to
1857 			 * send upstairs. Copy the sg list here to
1858 			 * a local buffer and use it to find the
1859 			 * pages to chain.
1860 			 */
1861 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1862 				     "%d bytes of headers & data in chain of large.\n",
1863 				     length);
1864 			skb = sbq_desc->p.skb;
1865 			sbq_desc->p.skb = NULL;
1866 			skb_reserve(skb, NET_IP_ALIGN);
1867 		}
1868 		while (length > 0) {
1869 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1870 			size = (length < rx_ring->lbq_buf_size) ? length :
1871 				rx_ring->lbq_buf_size;
1872 
1873 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1874 				     "Adding page %d to skb for %d bytes.\n",
1875 				     i, size);
1876 			skb_fill_page_desc(skb, i,
1877 						lbq_desc->p.pg_chunk.page,
1878 						lbq_desc->p.pg_chunk.offset,
1879 						size);
1880 			skb->len += size;
1881 			skb->data_len += size;
1882 			skb->truesize += size;
1883 			length -= size;
1884 			i++;
1885 		}
1886 		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1887 				VLAN_ETH_HLEN : ETH_HLEN);
1888 	}
1889 	return skb;
1890 }
1891 
1892 /* Process an inbound completion from an rx ring. */
1893 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1894 				   struct rx_ring *rx_ring,
1895 				   struct ib_mac_iocb_rsp *ib_mac_rsp,
1896 				   u16 vlan_id)
1897 {
1898 	struct net_device *ndev = qdev->ndev;
1899 	struct sk_buff *skb = NULL;
1900 
1901 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1902 
1903 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1904 	if (unlikely(!skb)) {
1905 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906 			     "No skb available, drop packet.\n");
1907 		rx_ring->rx_dropped++;
1908 		return;
1909 	}
1910 
1911 	/* Frame error, so drop the packet. */
1912 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1913 		netif_info(qdev, drv, qdev->ndev,
1914 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1915 		dev_kfree_skb_any(skb);
1916 		rx_ring->rx_errors++;
1917 		return;
1918 	}
1919 
1920 	/* The max framesize filter on this chip is set higher than
1921 	 * MTU since FCoE uses 2k frames.
1922 	 */
1923 	if (skb->len > ndev->mtu + ETH_HLEN) {
1924 		dev_kfree_skb_any(skb);
1925 		rx_ring->rx_dropped++;
1926 		return;
1927 	}
1928 
1929 	/* loopback self test for ethtool */
1930 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1931 		ql_check_lb_frame(qdev, skb);
1932 		dev_kfree_skb_any(skb);
1933 		return;
1934 	}
1935 
1936 	prefetch(skb->data);
1937 	skb->dev = ndev;
1938 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1939 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1940 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1941 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1942 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1943 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1944 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1945 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1946 		rx_ring->rx_multicast++;
1947 	}
1948 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1949 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1950 			     "Promiscuous Packet.\n");
1951 	}
1952 
1953 	skb->protocol = eth_type_trans(skb, ndev);
1954 	skb_checksum_none_assert(skb);
1955 
1956 	/* If rx checksum is on and there are no
1957 	 * csum or frame errors, trust the hardware's verification.
1958 	 */
1959 	if ((ndev->features & NETIF_F_RXCSUM) &&
1960 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1961 		/* TCP frame. */
1962 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1963 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1964 				     "TCP checksum done!\n");
1965 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1966 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1967 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1968 			/* Unfragmented ipv4 UDP frame. */
1969 			struct iphdr *iph = (struct iphdr *) skb->data;
1970 			if (!(iph->frag_off &
1971 				ntohs(IP_MF|IP_OFFSET))) {
1972 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1973 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1974 					     "TCP checksum done!\n");
1975 			}
1976 		}
1977 	}
1978 
1979 	rx_ring->rx_packets++;
1980 	rx_ring->rx_bytes += skb->len;
1981 	skb_record_rx_queue(skb, rx_ring->cq_id);
1982 	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
1983 		__vlan_hwaccel_put_tag(skb, vlan_id);
1984 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1985 		napi_gro_receive(&rx_ring->napi, skb);
1986 	else
1987 		netif_receive_skb(skb);
1988 }
1989 
1990 /* Process an inbound completion from an rx ring. */
1991 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1992 					struct rx_ring *rx_ring,
1993 					struct ib_mac_iocb_rsp *ib_mac_rsp)
1994 {
1995 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1996 	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1997 			((le16_to_cpu(ib_mac_rsp->vlan_id) &
1998 			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1999 
2000 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2001 
2002 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2003 		/* The data and headers are split into
2004 		 * separate buffers.
2005 		 */
2006 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2007 						vlan_id);
2008 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2009 		/* The data fit in a single small buffer.
2010 		 * Allocate a new skb, copy the data and
2011 		 * return the buffer to the free pool.
2012 		 */
2013 		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2014 						length, vlan_id);
2015 	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2016 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2017 		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2018 		/* TCP packet in a page chunk that's been checksummed.
2019 		 * Tack it on to our GRO skb and let it go.
2020 		 */
2021 		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2022 						length, vlan_id);
2023 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2024 		/* Non-TCP packet in a page chunk. Allocate an
2025 		 * skb, attach the page as a frag, and send it up.
2026 		 */
2027 		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2028 						length, vlan_id);
2029 	} else {
2030 		/* Non-TCP/UDP large frames that span multiple buffers
2031 		 * can be processed correctly by the split frame logic.
2032 		 */
2033 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2034 						vlan_id);
2035 	}
2036 
2037 	return (unsigned long)length;
2038 }
2039 
2040 /* Process an outbound completion from an rx ring. */
2041 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2042 				   struct ob_mac_iocb_rsp *mac_rsp)
2043 {
2044 	struct tx_ring *tx_ring;
2045 	struct tx_ring_desc *tx_ring_desc;
2046 
2047 	QL_DUMP_OB_MAC_RSP(mac_rsp);
2048 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2049 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2050 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2051 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2052 	tx_ring->tx_packets++;
2053 	dev_kfree_skb(tx_ring_desc->skb);
2054 	tx_ring_desc->skb = NULL;
2055 
2056 	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2057 					OB_MAC_IOCB_RSP_S |
2058 					OB_MAC_IOCB_RSP_L |
2059 					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2060 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2061 			netif_warn(qdev, tx_done, qdev->ndev,
2062 				   "Total descriptor length did not match transfer length.\n");
2063 		}
2064 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2065 			netif_warn(qdev, tx_done, qdev->ndev,
2066 				   "Frame too short to be valid, not sent.\n");
2067 		}
2068 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2069 			netif_warn(qdev, tx_done, qdev->ndev,
2070 				   "Frame too long, but sent anyway.\n");
2071 		}
2072 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2073 			netif_warn(qdev, tx_done, qdev->ndev,
2074 				   "PCI backplane error. Frame not sent.\n");
2075 		}
2076 	}
2077 	atomic_inc(&tx_ring->tx_count);
2078 }
2079 
2080 /* Fire up a handler to reset the MPI processor. */
2081 void ql_queue_fw_error(struct ql_adapter *qdev)
2082 {
2083 	ql_link_off(qdev);
2084 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2085 }
2086 
2087 void ql_queue_asic_error(struct ql_adapter *qdev)
2088 {
2089 	ql_link_off(qdev);
2090 	ql_disable_interrupts(qdev);
2091 	/* Clear adapter up bit to signal the recovery
2092 	 * process that it shouldn't kill the reset worker
2093 	 * thread
2094 	 */
2095 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2096 	/* Set the asic recovery bit to tell the reset process that we
2097 	 * are in fatal error recovery rather than a normal close.
2098 	 */
2099 	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2100 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2101 }
2102 
2103 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2104 				    struct ib_ae_iocb_rsp *ib_ae_rsp)
2105 {
2106 	switch (ib_ae_rsp->event) {
2107 	case MGMT_ERR_EVENT:
2108 		netif_err(qdev, rx_err, qdev->ndev,
2109 			  "Management Processor Fatal Error.\n");
2110 		ql_queue_fw_error(qdev);
2111 		return;
2112 
2113 	case CAM_LOOKUP_ERR_EVENT:
2114 		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2115 		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2116 		ql_queue_asic_error(qdev);
2117 		return;
2118 
2119 	case SOFT_ECC_ERROR_EVENT:
2120 		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2121 		ql_queue_asic_error(qdev);
2122 		break;
2123 
2124 	case PCI_ERR_ANON_BUF_RD:
2125 		netdev_err(qdev->ndev, "PCI error occurred when reading "
2126 					"anonymous buffers from rx_ring %d.\n",
2127 					ib_ae_rsp->q_id);
2128 		ql_queue_asic_error(qdev);
2129 		break;
2130 
2131 	default:
2132 		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2133 			  ib_ae_rsp->event);
2134 		ql_queue_asic_error(qdev);
2135 		break;
2136 	}
2137 }
2138 
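/* Drain TX completions from an outbound completion ring.  Each IOCB is
 * handed to ql_process_mac_tx_intr(), which unmaps the send buffers,
 * frees the skb and replenishes tx_count; once the ring is at least
 * 25% free a stopped TX subqueue is woken.  Returns the number of
 * completions serviced.
 */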
2139 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2140 {
2141 	struct ql_adapter *qdev = rx_ring->qdev;
2142 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2143 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2144 	int count = 0;
2145 
2146 	struct tx_ring *tx_ring;
2147 	/* While there are entries in the completion queue. */
2148 	while (prod != rx_ring->cnsmr_idx) {
2149 
2150 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2151 			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2152 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2153 
2154 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2155 		rmb();
2156 		switch (net_rsp->opcode) {
2157 
2158 		case OPCODE_OB_MAC_TSO_IOCB:
2159 		case OPCODE_OB_MAC_IOCB:
2160 			ql_process_mac_tx_intr(qdev, net_rsp);
2161 			break;
2162 		default:
2163 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2164 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2165 				     net_rsp->opcode);
2166 		}
2167 		count++;
2168 		ql_update_cq(rx_ring);
2169 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2170 	}
2171 	if (!net_rsp)
2172 		return 0;
2173 	ql_write_cq_idx(rx_ring);
2174 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2175 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2176 		if (atomic_read(&tx_ring->queue_stopped) &&
2177 		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2178 			/*
2179 			 * The queue got stopped because the tx_ring was full.
2180 			 * Wake it up, because it's now at least 25% empty.
2181 			 */
2182 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2183 	}
2184 
2185 	return count;
2186 }
2187 
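/* Drain up to "budget" inbound completions (received frames and async
 * events) from an RSS completion ring, then replenish the buffer
 * queues and write back the new consumer index.  Returns the number
 * of completions serviced.
 */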
2188 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2189 {
2190 	struct ql_adapter *qdev = rx_ring->qdev;
2191 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2192 	struct ql_net_rsp_iocb *net_rsp;
2193 	int count = 0;
2194 
2195 	/* While there are entries in the completion queue. */
2196 	while (prod != rx_ring->cnsmr_idx) {
2197 
2198 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2199 			     "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2200 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2201 
2202 		net_rsp = rx_ring->curr_entry;
2203 		rmb();
2204 		switch (net_rsp->opcode) {
2205 		case OPCODE_IB_MAC_IOCB:
2206 			ql_process_mac_rx_intr(qdev, rx_ring,
2207 					       (struct ib_mac_iocb_rsp *)
2208 					       net_rsp);
2209 			break;
2210 
2211 		case OPCODE_IB_AE_IOCB:
2212 			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2213 						net_rsp);
2214 			break;
2215 		default:
2216 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2217 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2218 				     net_rsp->opcode);
2219 			break;
2220 		}
2221 		count++;
2222 		ql_update_cq(rx_ring);
2223 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224 		if (count == budget)
2225 			break;
2226 	}
2227 	ql_update_buffer_queues(qdev, rx_ring);
2228 	ql_write_cq_idx(rx_ring);
2229 	return count;
2230 }
2231 
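/* NAPI poll routine.  Services the TX completion rings that share this
 * vector first, then the RSS ring itself, and re-enables the
 * completion interrupt once less than a full budget of work was done.
 */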
2232 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2233 {
2234 	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2235 	struct ql_adapter *qdev = rx_ring->qdev;
2236 	struct rx_ring *trx_ring;
2237 	int i, work_done = 0;
2238 	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2239 
2240 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2241 		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2242 
2243 	/* Service the TX rings first.  They start
2244 	 * right after the RSS rings. */
2245 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2246 		trx_ring = &qdev->rx_ring[i];
2247 		/* If this TX completion ring belongs to this vector and
2248 		 * it's not empty then service it.
2249 		 */
2250 		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2251 			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2252 					trx_ring->cnsmr_idx)) {
2253 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2254 				     "%s: Servicing TX completion ring %d.\n",
2255 				     __func__, trx_ring->cq_id);
2256 			ql_clean_outbound_rx_ring(trx_ring);
2257 		}
2258 	}
2259 
2260 	/*
2261 	 * Now service the RSS ring if it's active.
2262 	 */
2263 	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2264 					rx_ring->cnsmr_idx) {
2265 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2266 			     "%s: Servicing RX completion ring %d.\n",
2267 			     __func__, rx_ring->cq_id);
2268 		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2269 	}
2270 
2271 	if (work_done < budget) {
2272 		napi_complete(napi);
2273 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
2274 	}
2275 	return work_done;
2276 }
2277 
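/* Program the NIC receive configuration register to enable or disable
 * hardware VLAN handling according to the requested features.
 */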
2278 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2279 {
2280 	struct ql_adapter *qdev = netdev_priv(ndev);
2281 
2282 	if (features & NETIF_F_HW_VLAN_RX) {
2283 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2284 				 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2285 	} else {
2286 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2287 	}
2288 }
2289 
2290 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2291 	netdev_features_t features)
2292 {
2293 	/*
2294 	 * Since there is no support for separate rx/tx vlan accel
2295 	 * enable/disable, make sure the tx flag is always in the same state as rx.
2296 	 */
2297 	if (features & NETIF_F_HW_VLAN_RX)
2298 		features |= NETIF_F_HW_VLAN_TX;
2299 	else
2300 		features &= ~NETIF_F_HW_VLAN_TX;
2301 
2302 	return features;
2303 }
2304 
2305 static int qlge_set_features(struct net_device *ndev,
2306 	netdev_features_t features)
2307 {
2308 	netdev_features_t changed = ndev->features ^ features;
2309 
2310 	if (changed & NETIF_F_HW_VLAN_RX)
2311 		qlge_vlan_mode(ndev, features);
2312 
2313 	return 0;
2314 }
2315 
2316 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2317 {
2318 	u32 enable_bit = MAC_ADDR_E;
2319 	int err;
2320 
2321 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2322 				  MAC_ADDR_TYPE_VLAN, vid);
2323 	if (err)
2324 		netif_err(qdev, ifup, qdev->ndev,
2325 			  "Failed to init vlan address.\n");
2326 	return err;
2327 }
2328 
2329 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2330 {
2331 	struct ql_adapter *qdev = netdev_priv(ndev);
2332 	int status;
2333 	int err;
2334 
2335 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2336 	if (status)
2337 		return status;
2338 
2339 	err = __qlge_vlan_rx_add_vid(qdev, vid);
2340 	set_bit(vid, qdev->active_vlans);
2341 
2342 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2343 
2344 	return err;
2345 }
2346 
2347 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2348 {
2349 	u32 enable_bit = 0;
2350 	int err;
2351 
2352 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2353 				  MAC_ADDR_TYPE_VLAN, vid);
2354 	if (err)
2355 		netif_err(qdev, ifup, qdev->ndev,
2356 			  "Failed to clear vlan address.\n");
2357 	return err;
2358 }
2359 
2360 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2361 {
2362 	struct ql_adapter *qdev = netdev_priv(ndev);
2363 	int status;
2364 	int err;
2365 
2366 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2367 	if (status)
2368 		return status;
2369 
2370 	err = __qlge_vlan_rx_kill_vid(qdev, vid);
2371 	clear_bit(vid, qdev->active_vlans);
2372 
2373 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2374 
2375 	return err;
2376 }
2377 
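/* Re-program every VLAN id recorded in qdev->active_vlans into the
 * hardware VLAN address registers, e.g. after an adapter reset.
 */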
2378 static void qlge_restore_vlan(struct ql_adapter *qdev)
2379 {
2380 	int status;
2381 	u16 vid;
2382 
2383 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2384 	if (status)
2385 		return;
2386 
2387 	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2388 		__qlge_vlan_rx_add_vid(qdev, vid);
2389 
2390 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2391 }
2392 
2393 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2394 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2395 {
2396 	struct rx_ring *rx_ring = dev_id;
2397 	napi_schedule(&rx_ring->napi);
2398 	return IRQ_HANDLED;
2399 }
2400 
2401 /* This handles a fatal error, MPI activity, and the default
2402  * rx_ring in an MSI-X multiple vector environment.
2403  * In an MSI/Legacy environment it also processes the rest of
2404  * the rx_rings.
2405  */
2406 static irqreturn_t qlge_isr(int irq, void *dev_id)
2407 {
2408 	struct rx_ring *rx_ring = dev_id;
2409 	struct ql_adapter *qdev = rx_ring->qdev;
2410 	struct intr_context *intr_context = &qdev->intr_context[0];
2411 	u32 var;
2412 	int work_done = 0;
2413 
2414 	spin_lock(&qdev->hw_lock);
2415 	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2416 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2417 			     "Shared Interrupt, Not ours!\n");
2418 		spin_unlock(&qdev->hw_lock);
2419 		return IRQ_NONE;
2420 	}
2421 	spin_unlock(&qdev->hw_lock);
2422 
2423 	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2424 
2425 	/*
2426 	 * Check for fatal error.
2427 	 */
2428 	if (var & STS_FE) {
2429 		ql_queue_asic_error(qdev);
2430 		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2431 		var = ql_read32(qdev, ERR_STS);
2432 		netdev_err(qdev->ndev, "Resetting chip. "
2433 					"Error Status Register = 0x%x\n", var);
2434 		return IRQ_HANDLED;
2435 	}
2436 
2437 	/*
2438 	 * Check MPI processor activity.
2439 	 */
2440 	if ((var & STS_PI) &&
2441 		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2442 		/*
2443 		 * We've got an async event or mailbox completion.
2444 		 * Handle it and clear the source of the interrupt.
2445 		 */
2446 		netif_err(qdev, intr, qdev->ndev,
2447 			  "Got MPI processor interrupt.\n");
2448 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2449 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2450 		queue_delayed_work_on(smp_processor_id(),
2451 				qdev->workqueue, &qdev->mpi_work, 0);
2452 		work_done++;
2453 	}
2454 
2455 	/*
2456 	 * Get the bit-mask that shows the active queues for this
2457 	 * pass.  Compare it to the queues that this irq services
2458 	 * and call napi if there's a match.
2459 	 */
2460 	var = ql_read32(qdev, ISR1);
2461 	if (var & intr_context->irq_mask) {
2462 		netif_info(qdev, intr, qdev->ndev,
2463 			   "Waking handler for rx_ring[0].\n");
2464 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2465 		napi_schedule(&rx_ring->napi);
2466 		work_done++;
2467 	}
2468 	ql_enable_completion_interrupt(qdev, intr_context->intr);
2469 	return work_done ? IRQ_HANDLED : IRQ_NONE;
2470 }
2471 
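/* If the skb is GSO, fill in the TSO fields of the transmit IOCB
 * (frame length, header lengths, MSS) and seed the TCP pseudo-header
 * checksum.  Returns 1 when TSO was set up, 0 when it is not needed,
 * or a negative errno if the cloned header could not be expanded.
 */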
2472 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2473 {
2474 
2475 	if (skb_is_gso(skb)) {
2476 		int err;
2477 		if (skb_header_cloned(skb)) {
2478 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2479 			if (err)
2480 				return err;
2481 		}
2482 
2483 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2484 		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2485 		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2486 		mac_iocb_ptr->total_hdrs_len =
2487 		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2488 		mac_iocb_ptr->net_trans_offset =
2489 		    cpu_to_le16(skb_network_offset(skb) |
2490 				skb_transport_offset(skb)
2491 				<< OB_MAC_TRANSPORT_HDR_SHIFT);
2492 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2493 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2494 		if (likely(skb->protocol == htons(ETH_P_IP))) {
2495 			struct iphdr *iph = ip_hdr(skb);
2496 			iph->check = 0;
2497 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2498 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2499 								 iph->daddr, 0,
2500 								 IPPROTO_TCP,
2501 								 0);
2502 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
2503 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2504 			tcp_hdr(skb)->check =
2505 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2506 					     &ipv6_hdr(skb)->daddr,
2507 					     0, IPPROTO_TCP, 0);
2508 		}
2509 		return 1;
2510 	}
2511 	return 0;
2512 }
2513 
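/* Set up IPv4 TCP/UDP checksum offload in the transmit IOCB: record
 * the header offsets, flag TCP or UDP completion and seed the
 * checksum field with the pseudo-header checksum for the hardware
 * to finish.
 */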
2514 static void ql_hw_csum_setup(struct sk_buff *skb,
2515 			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2516 {
2517 	int len;
2518 	struct iphdr *iph = ip_hdr(skb);
2519 	__sum16 *check;
2520 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2521 	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2522 	mac_iocb_ptr->net_trans_offset =
2523 		cpu_to_le16(skb_network_offset(skb) |
2524 		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2525 
2526 	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2527 	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2528 	if (likely(iph->protocol == IPPROTO_TCP)) {
2529 		check = &(tcp_hdr(skb)->check);
2530 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2531 		mac_iocb_ptr->total_hdrs_len =
2532 		    cpu_to_le16(skb_transport_offset(skb) +
2533 				(tcp_hdr(skb)->doff << 2));
2534 	} else {
2535 		check = &(udp_hdr(skb)->check);
2536 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2537 		mac_iocb_ptr->total_hdrs_len =
2538 		    cpu_to_le16(skb_transport_offset(skb) +
2539 				sizeof(struct udphdr));
2540 	}
2541 	*check = ~csum_tcpudp_magic(iph->saddr,
2542 				    iph->daddr, len, iph->protocol, 0);
2543 }
2544 
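/* Transmit entry point.  Builds a MAC IOCB for the skb on its mapped
 * tx ring, applies VLAN tagging, TSO or checksum offload as needed,
 * maps the buffers and rings the producer-index doorbell.  Returns
 * NETDEV_TX_BUSY when the ring is out of descriptors or mapping fails.
 */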
2545 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2546 {
2547 	struct tx_ring_desc *tx_ring_desc;
2548 	struct ob_mac_iocb_req *mac_iocb_ptr;
2549 	struct ql_adapter *qdev = netdev_priv(ndev);
2550 	int tso;
2551 	struct tx_ring *tx_ring;
2552 	u32 tx_ring_idx = (u32) skb->queue_mapping;
2553 
2554 	tx_ring = &qdev->tx_ring[tx_ring_idx];
2555 
2556 	if (skb_padto(skb, ETH_ZLEN))
2557 		return NETDEV_TX_OK;
2558 
2559 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2560 		netif_info(qdev, tx_queued, qdev->ndev,
2561 			   "%s: shutting down tx queue %d du to lack of resources.\n",
2562 			   __func__, tx_ring_idx);
2563 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2564 		atomic_inc(&tx_ring->queue_stopped);
2565 		tx_ring->tx_errors++;
2566 		return NETDEV_TX_BUSY;
2567 	}
2568 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2569 	mac_iocb_ptr = tx_ring_desc->queue_entry;
2570 	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2571 
2572 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2573 	mac_iocb_ptr->tid = tx_ring_desc->index;
2574 	/* We use the upper 32-bits to store the tx queue for this IO.
2575 	 * When we get the completion we can use it to establish the context.
2576 	 */
2577 	mac_iocb_ptr->txq_idx = tx_ring_idx;
2578 	tx_ring_desc->skb = skb;
2579 
2580 	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2581 
2582 	if (vlan_tx_tag_present(skb)) {
2583 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2584 			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2585 		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2586 		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2587 	}
2588 	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2589 	if (tso < 0) {
2590 		dev_kfree_skb_any(skb);
2591 		return NETDEV_TX_OK;
2592 	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2593 		ql_hw_csum_setup(skb,
2594 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2595 	}
2596 	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2597 			NETDEV_TX_OK) {
2598 		netif_err(qdev, tx_queued, qdev->ndev,
2599 			  "Could not map the segments.\n");
2600 		tx_ring->tx_errors++;
2601 		return NETDEV_TX_BUSY;
2602 	}
2603 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2604 	tx_ring->prod_idx++;
2605 	if (tx_ring->prod_idx == tx_ring->wq_len)
2606 		tx_ring->prod_idx = 0;
2607 	wmb();
2608 
2609 	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2610 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2611 		     "tx queued, slot %d, len %d\n",
2612 		     tx_ring->prod_idx, skb->len);
2613 
2614 	atomic_dec(&tx_ring->tx_count);
2615 	return NETDEV_TX_OK;
2616 }
2617 
2618 
2619 static void ql_free_shadow_space(struct ql_adapter *qdev)
2620 {
2621 	if (qdev->rx_ring_shadow_reg_area) {
2622 		pci_free_consistent(qdev->pdev,
2623 				    PAGE_SIZE,
2624 				    qdev->rx_ring_shadow_reg_area,
2625 				    qdev->rx_ring_shadow_reg_dma);
2626 		qdev->rx_ring_shadow_reg_area = NULL;
2627 	}
2628 	if (qdev->tx_ring_shadow_reg_area) {
2629 		pci_free_consistent(qdev->pdev,
2630 				    PAGE_SIZE,
2631 				    qdev->tx_ring_shadow_reg_area,
2632 				    qdev->tx_ring_shadow_reg_dma);
2633 		qdev->tx_ring_shadow_reg_area = NULL;
2634 	}
2635 }
2636 
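/* Allocate one DMA-coherent page each for the rx and tx shadow
 * register areas.  ql_start_rx_ring() carves per-ring space out of
 * the rx page for the producer index and buffer-queue indirection
 * lists; ql_start_tx_ring() uses the tx page for consumer indices.
 */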
2637 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2638 {
2639 	qdev->rx_ring_shadow_reg_area =
2640 	    pci_alloc_consistent(qdev->pdev,
2641 				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2642 	if (qdev->rx_ring_shadow_reg_area == NULL) {
2643 		netif_err(qdev, ifup, qdev->ndev,
2644 			  "Allocation of RX shadow space failed.\n");
2645 		return -ENOMEM;
2646 	}
2647 	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2648 	qdev->tx_ring_shadow_reg_area =
2649 	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2650 				 &qdev->tx_ring_shadow_reg_dma);
2651 	if (qdev->tx_ring_shadow_reg_area == NULL) {
2652 		netif_err(qdev, ifup, qdev->ndev,
2653 			  "Allocation of TX shadow space failed.\n");
2654 		goto err_wqp_sh_area;
2655 	}
2656 	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2657 	return 0;
2658 
2659 err_wqp_sh_area:
2660 	pci_free_consistent(qdev->pdev,
2661 			    PAGE_SIZE,
2662 			    qdev->rx_ring_shadow_reg_area,
2663 			    qdev->rx_ring_shadow_reg_dma);
2664 	return -ENOMEM;
2665 }
2666 
2667 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2668 {
2669 	struct tx_ring_desc *tx_ring_desc;
2670 	int i;
2671 	struct ob_mac_iocb_req *mac_iocb_ptr;
2672 
2673 	mac_iocb_ptr = tx_ring->wq_base;
2674 	tx_ring_desc = tx_ring->q;
2675 	for (i = 0; i < tx_ring->wq_len; i++) {
2676 		tx_ring_desc->index = i;
2677 		tx_ring_desc->skb = NULL;
2678 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2679 		mac_iocb_ptr++;
2680 		tx_ring_desc++;
2681 	}
2682 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2683 	atomic_set(&tx_ring->queue_stopped, 0);
2684 }
2685 
2686 static void ql_free_tx_resources(struct ql_adapter *qdev,
2687 				 struct tx_ring *tx_ring)
2688 {
2689 	if (tx_ring->wq_base) {
2690 		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2691 				    tx_ring->wq_base, tx_ring->wq_base_dma);
2692 		tx_ring->wq_base = NULL;
2693 	}
2694 	kfree(tx_ring->q);
2695 	tx_ring->q = NULL;
2696 }
2697 
2698 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2699 				 struct tx_ring *tx_ring)
2700 {
2701 	tx_ring->wq_base =
2702 	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2703 				 &tx_ring->wq_base_dma);
2704 
2705 	if ((tx_ring->wq_base == NULL) ||
2706 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2707 		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2708 		return -ENOMEM;
2709 	}
2710 	tx_ring->q =
2711 	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2712 	if (tx_ring->q == NULL)
2713 		goto err;
2714 
2715 	return 0;
2716 err:
2717 	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2718 			    tx_ring->wq_base, tx_ring->wq_base_dma);
2719 	return -ENOMEM;
2720 }
2721 
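/* Walk the large buffer queue from the current index up to the clean
 * index, unmapping each page-chunk block when its last chunk is seen
 * and dropping the page references.
 */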
2722 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2723 {
2724 	struct bq_desc *lbq_desc;
2725 
2726 	uint32_t  curr_idx, clean_idx;
2727 
2728 	curr_idx = rx_ring->lbq_curr_idx;
2729 	clean_idx = rx_ring->lbq_clean_idx;
2730 	while (curr_idx != clean_idx) {
2731 		lbq_desc = &rx_ring->lbq[curr_idx];
2732 
2733 		if (lbq_desc->p.pg_chunk.last_flag) {
2734 			pci_unmap_page(qdev->pdev,
2735 				lbq_desc->p.pg_chunk.map,
2736 				ql_lbq_block_size(qdev),
2737 				       PCI_DMA_FROMDEVICE);
2738 			lbq_desc->p.pg_chunk.last_flag = 0;
2739 		}
2740 
2741 		put_page(lbq_desc->p.pg_chunk.page);
2742 		lbq_desc->p.pg_chunk.page = NULL;
2743 
2744 		if (++curr_idx == rx_ring->lbq_len)
2745 			curr_idx = 0;
2746 
2747 	}
2748 }
2749 
2750 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2751 {
2752 	int i;
2753 	struct bq_desc *sbq_desc;
2754 
2755 	for (i = 0; i < rx_ring->sbq_len; i++) {
2756 		sbq_desc = &rx_ring->sbq[i];
2757 		if (sbq_desc == NULL) {
2758 			netif_err(qdev, ifup, qdev->ndev,
2759 				  "sbq_desc %d is NULL.\n", i);
2760 			return;
2761 		}
2762 		if (sbq_desc->p.skb) {
2763 			pci_unmap_single(qdev->pdev,
2764 					 dma_unmap_addr(sbq_desc, mapaddr),
2765 					 dma_unmap_len(sbq_desc, maplen),
2766 					 PCI_DMA_FROMDEVICE);
2767 			dev_kfree_skb(sbq_desc->p.skb);
2768 			sbq_desc->p.skb = NULL;
2769 		}
2770 	}
2771 }
2772 
2773 /* Free all large and small rx buffers associated
2774  * with the completion queues for this device.
2775  */
2776 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2777 {
2778 	int i;
2779 	struct rx_ring *rx_ring;
2780 
2781 	for (i = 0; i < qdev->rx_ring_count; i++) {
2782 		rx_ring = &qdev->rx_ring[i];
2783 		if (rx_ring->lbq)
2784 			ql_free_lbq_buffers(qdev, rx_ring);
2785 		if (rx_ring->sbq)
2786 			ql_free_sbq_buffers(qdev, rx_ring);
2787 	}
2788 }
2789 
2790 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2791 {
2792 	struct rx_ring *rx_ring;
2793 	int i;
2794 
2795 	for (i = 0; i < qdev->rx_ring_count; i++) {
2796 		rx_ring = &qdev->rx_ring[i];
2797 		if (rx_ring->type != TX_Q)
2798 			ql_update_buffer_queues(qdev, rx_ring);
2799 	}
2800 }
2801 
2802 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2803 				struct rx_ring *rx_ring)
2804 {
2805 	int i;
2806 	struct bq_desc *lbq_desc;
2807 	__le64 *bq = rx_ring->lbq_base;
2808 
2809 	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2810 	for (i = 0; i < rx_ring->lbq_len; i++) {
2811 		lbq_desc = &rx_ring->lbq[i];
2812 		memset(lbq_desc, 0, sizeof(*lbq_desc));
2813 		lbq_desc->index = i;
2814 		lbq_desc->addr = bq;
2815 		bq++;
2816 	}
2817 }
2818 
2819 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2820 				struct rx_ring *rx_ring)
2821 {
2822 	int i;
2823 	struct bq_desc *sbq_desc;
2824 	__le64 *bq = rx_ring->sbq_base;
2825 
2826 	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2827 	for (i = 0; i < rx_ring->sbq_len; i++) {
2828 		sbq_desc = &rx_ring->sbq[i];
2829 		memset(sbq_desc, 0, sizeof(*sbq_desc));
2830 		sbq_desc->index = i;
2831 		sbq_desc->addr = bq;
2832 		bq++;
2833 	}
2834 }
2835 
2836 static void ql_free_rx_resources(struct ql_adapter *qdev,
2837 				 struct rx_ring *rx_ring)
2838 {
2839 	/* Free the small buffer queue. */
2840 	if (rx_ring->sbq_base) {
2841 		pci_free_consistent(qdev->pdev,
2842 				    rx_ring->sbq_size,
2843 				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2844 		rx_ring->sbq_base = NULL;
2845 	}
2846 
2847 	/* Free the small buffer queue control blocks. */
2848 	kfree(rx_ring->sbq);
2849 	rx_ring->sbq = NULL;
2850 
2851 	/* Free the large buffer queue. */
2852 	if (rx_ring->lbq_base) {
2853 		pci_free_consistent(qdev->pdev,
2854 				    rx_ring->lbq_size,
2855 				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2856 		rx_ring->lbq_base = NULL;
2857 	}
2858 
2859 	/* Free the large buffer queue control blocks. */
2860 	kfree(rx_ring->lbq);
2861 	rx_ring->lbq = NULL;
2862 
2863 	/* Free the rx queue. */
2864 	if (rx_ring->cq_base) {
2865 		pci_free_consistent(qdev->pdev,
2866 				    rx_ring->cq_size,
2867 				    rx_ring->cq_base, rx_ring->cq_base_dma);
2868 		rx_ring->cq_base = NULL;
2869 	}
2870 }
2871 
2872 /* Allocate queues and buffers for this completion queue based
2873  * on the values in the parameter structure. */
2874 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2875 				 struct rx_ring *rx_ring)
2876 {
2877 
2878 	/*
2879 	 * Allocate the completion queue for this rx_ring.
2880 	 */
2881 	rx_ring->cq_base =
2882 	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2883 				 &rx_ring->cq_base_dma);
2884 
2885 	if (rx_ring->cq_base == NULL) {
2886 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2887 		return -ENOMEM;
2888 	}
2889 
2890 	if (rx_ring->sbq_len) {
2891 		/*
2892 		 * Allocate small buffer queue.
2893 		 */
2894 		rx_ring->sbq_base =
2895 		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2896 					 &rx_ring->sbq_base_dma);
2897 
2898 		if (rx_ring->sbq_base == NULL) {
2899 			netif_err(qdev, ifup, qdev->ndev,
2900 				  "Small buffer queue allocation failed.\n");
2901 			goto err_mem;
2902 		}
2903 
2904 		/*
2905 		 * Allocate small buffer queue control blocks.
2906 		 */
2907 		rx_ring->sbq =
2908 		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2909 			    GFP_KERNEL);
2910 		if (rx_ring->sbq == NULL) {
2911 			netif_err(qdev, ifup, qdev->ndev,
2912 				  "Small buffer queue control block allocation failed.\n");
2913 			goto err_mem;
2914 		}
2915 
2916 		ql_init_sbq_ring(qdev, rx_ring);
2917 	}
2918 
2919 	if (rx_ring->lbq_len) {
2920 		/*
2921 		 * Allocate large buffer queue.
2922 		 */
2923 		rx_ring->lbq_base =
2924 		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2925 					 &rx_ring->lbq_base_dma);
2926 
2927 		if (rx_ring->lbq_base == NULL) {
2928 			netif_err(qdev, ifup, qdev->ndev,
2929 				  "Large buffer queue allocation failed.\n");
2930 			goto err_mem;
2931 		}
2932 		/*
2933 		 * Allocate large buffer queue control blocks.
2934 		 */
2935 		rx_ring->lbq =
2936 		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2937 			    GFP_KERNEL);
2938 		if (rx_ring->lbq == NULL) {
2939 			netif_err(qdev, ifup, qdev->ndev,
2940 				  "Large buffer queue control block allocation failed.\n");
2941 			goto err_mem;
2942 		}
2943 
2944 		ql_init_lbq_ring(qdev, rx_ring);
2945 	}
2946 
2947 	return 0;
2948 
2949 err_mem:
2950 	ql_free_rx_resources(qdev, rx_ring);
2951 	return -ENOMEM;
2952 }
2953 
2954 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2955 {
2956 	struct tx_ring *tx_ring;
2957 	struct tx_ring_desc *tx_ring_desc;
2958 	int i, j;
2959 
2960 	/*
2961 	 * Loop through all queues and free
2962 	 * any resources.
2963 	 */
2964 	for (j = 0; j < qdev->tx_ring_count; j++) {
2965 		tx_ring = &qdev->tx_ring[j];
2966 		for (i = 0; i < tx_ring->wq_len; i++) {
2967 			tx_ring_desc = &tx_ring->q[i];
2968 			if (tx_ring_desc && tx_ring_desc->skb) {
2969 				netif_err(qdev, ifdown, qdev->ndev,
2970 					  "Freeing lost SKB %p, from queue %d, index %d.\n",
2971 					  tx_ring_desc->skb, j,
2972 					  tx_ring_desc->index);
2973 				ql_unmap_send(qdev, tx_ring_desc,
2974 					      tx_ring_desc->map_cnt);
2975 				dev_kfree_skb(tx_ring_desc->skb);
2976 				tx_ring_desc->skb = NULL;
2977 			}
2978 		}
2979 	}
2980 }
2981 
2982 static void ql_free_mem_resources(struct ql_adapter *qdev)
2983 {
2984 	int i;
2985 
2986 	for (i = 0; i < qdev->tx_ring_count; i++)
2987 		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2988 	for (i = 0; i < qdev->rx_ring_count; i++)
2989 		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2990 	ql_free_shadow_space(qdev);
2991 }
2992 
2993 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2994 {
2995 	int i;
2996 
2997 	/* Allocate space for our shadow registers and such. */
2998 	if (ql_alloc_shadow_space(qdev))
2999 		return -ENOMEM;
3000 
3001 	for (i = 0; i < qdev->rx_ring_count; i++) {
3002 		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3003 			netif_err(qdev, ifup, qdev->ndev,
3004 				  "RX resource allocation failed.\n");
3005 			goto err_mem;
3006 		}
3007 	}
3008 	/* Allocate tx queue resources */
3009 	for (i = 0; i < qdev->tx_ring_count; i++) {
3010 		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3011 			netif_err(qdev, ifup, qdev->ndev,
3012 				  "TX resource allocation failed.\n");
3013 			goto err_mem;
3014 		}
3015 	}
3016 	return 0;
3017 
3018 err_mem:
3019 	ql_free_mem_resources(qdev);
3020 	return -ENOMEM;
3021 }
3022 
3023 /* Set up the rx ring control block and pass it to the chip.
3024  * The control block is defined as
3025  * "Completion Queue Initialization Control Block", or cqicb.
3026  */
3027 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3028 {
3029 	struct cqicb *cqicb = &rx_ring->cqicb;
3030 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3031 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3032 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3033 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3034 	void __iomem *doorbell_area =
3035 	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3036 	int err = 0;
3037 	u16 bq_len;
3038 	u64 tmp;
3039 	__le64 *base_indirect_ptr;
3040 	int page_entries;
3041 
3042 	/* Set up the shadow registers for this ring. */
3043 	rx_ring->prod_idx_sh_reg = shadow_reg;
3044 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3045 	*rx_ring->prod_idx_sh_reg = 0;
3046 	shadow_reg += sizeof(u64);
3047 	shadow_reg_dma += sizeof(u64);
3048 	rx_ring->lbq_base_indirect = shadow_reg;
3049 	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3050 	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3051 	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3052 	rx_ring->sbq_base_indirect = shadow_reg;
3053 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3054 
3055 	/* PCI doorbell mem area + 0x00 for consumer index register */
3056 	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3057 	rx_ring->cnsmr_idx = 0;
3058 	rx_ring->curr_entry = rx_ring->cq_base;
3059 
3060 	/* PCI doorbell mem area + 0x04 for valid register */
3061 	rx_ring->valid_db_reg = doorbell_area + 0x04;
3062 
3063 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
3064 	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3065 
3066 	/* PCI doorbell mem area + 0x1c */
3067 	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3068 
3069 	memset((void *)cqicb, 0, sizeof(struct cqicb));
3070 	cqicb->msix_vect = rx_ring->irq;
3071 
3072 	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3073 	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3074 
3075 	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3076 
3077 	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3078 
3079 	/*
3080 	 * Set up the control block load flags.
3081 	 */
3082 	cqicb->flags = FLAGS_LC |	/* Load queue base address */
3083 	    FLAGS_LV |		/* Load MSI-X vector */
3084 	    FLAGS_LI;		/* Load irq delay values */
3085 	if (rx_ring->lbq_len) {
3086 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
3087 		tmp = (u64)rx_ring->lbq_base_dma;
3088 		base_indirect_ptr = rx_ring->lbq_base_indirect;
3089 		page_entries = 0;
3090 		do {
3091 			*base_indirect_ptr = cpu_to_le64(tmp);
3092 			tmp += DB_PAGE_SIZE;
3093 			base_indirect_ptr++;
3094 			page_entries++;
3095 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3096 		cqicb->lbq_addr =
3097 		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3098 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3099 			(u16) rx_ring->lbq_buf_size;
3100 		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3101 		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3102 			(u16) rx_ring->lbq_len;
3103 		cqicb->lbq_len = cpu_to_le16(bq_len);
3104 		rx_ring->lbq_prod_idx = 0;
3105 		rx_ring->lbq_curr_idx = 0;
3106 		rx_ring->lbq_clean_idx = 0;
3107 		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3108 	}
3109 	if (rx_ring->sbq_len) {
3110 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
3111 		tmp = (u64)rx_ring->sbq_base_dma;
3112 		base_indirect_ptr = rx_ring->sbq_base_indirect;
3113 		page_entries = 0;
3114 		do {
3115 			*base_indirect_ptr = cpu_to_le64(tmp);
3116 			tmp += DB_PAGE_SIZE;
3117 			base_indirect_ptr++;
3118 			page_entries++;
3119 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3120 		cqicb->sbq_addr =
3121 		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3122 		cqicb->sbq_buf_size =
3123 		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3124 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3125 			(u16) rx_ring->sbq_len;
3126 		cqicb->sbq_len = cpu_to_le16(bq_len);
3127 		rx_ring->sbq_prod_idx = 0;
3128 		rx_ring->sbq_curr_idx = 0;
3129 		rx_ring->sbq_clean_idx = 0;
3130 		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3131 	}
3132 	switch (rx_ring->type) {
3133 	case TX_Q:
3134 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3135 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3136 		break;
3137 	case RX_Q:
3138 		/* Inbound completion handling rx_rings run in
3139 		 * separate NAPI contexts.
3140 		 */
3141 		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3142 			       64);
3143 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3144 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3145 		break;
3146 	default:
3147 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3148 			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
3149 	}
3150 	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3151 			   CFG_LCQ, rx_ring->cq_id);
3152 	if (err) {
3153 		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3154 		return err;
3155 	}
3156 	return err;
3157 }
3158 
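/* Build the work queue init control block (wqicb) for a tx ring,
 * assign its doorbell and shadow registers, initialize the descriptor
 * array and load the wqicb into the chip via ql_write_cfg().
 */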
3159 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3160 {
3161 	struct wqicb *wqicb = (struct wqicb *)tx_ring;
3162 	void __iomem *doorbell_area =
3163 	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3164 	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3165 	    (tx_ring->wq_id * sizeof(u64));
3166 	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3167 	    (tx_ring->wq_id * sizeof(u64));
3168 	int err = 0;
3169 
3170 	/*
3171 	 * Assign doorbell registers for this tx_ring.
3172 	 */
3173 	/* TX PCI doorbell mem area for tx producer index */
3174 	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3175 	tx_ring->prod_idx = 0;
3176 	/* TX PCI doorbell mem area + 0x04 */
3177 	tx_ring->valid_db_reg = doorbell_area + 0x04;
3178 
3179 	/*
3180 	 * Assign shadow registers for this tx_ring.
3181 	 */
3182 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3183 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3184 
3185 	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3186 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3187 				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3188 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3189 	wqicb->rid = 0;
3190 	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3191 
3192 	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3193 
3194 	ql_init_tx_ring(qdev, tx_ring);
3195 
3196 	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3197 			   (u16) tx_ring->wq_id);
3198 	if (err) {
3199 		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3200 		return err;
3201 	}
3202 	return err;
3203 }
3204 
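/* Tear down whichever interrupt mode was enabled: free the MSI-X
 * vector table or disable MSI, clearing the corresponding flag bit.
 */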
3205 static void ql_disable_msix(struct ql_adapter *qdev)
3206 {
3207 	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3208 		pci_disable_msix(qdev->pdev);
3209 		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3210 		kfree(qdev->msi_x_entry);
3211 		qdev->msi_x_entry = NULL;
3212 	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3213 		pci_disable_msi(qdev->pdev);
3214 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3215 	}
3216 }
3217 
3218 /* We start by trying to get the number of vectors
3219  * stored in qdev->intr_count. If we don't get that
3220  * many then we reduce the count and try again.
3221  */
3222 static void ql_enable_msix(struct ql_adapter *qdev)
3223 {
3224 	int i, err;
3225 
3226 	/* Get the MSIX vectors. */
3227 	if (qlge_irq_type == MSIX_IRQ) {
3228 		/* Try to alloc space for the msix struct,
3229 		 * if it fails then go to MSI/legacy.
3230 		 */
3231 		qdev->msi_x_entry = kcalloc(qdev->intr_count,
3232 					    sizeof(struct msix_entry),
3233 					    GFP_KERNEL);
3234 		if (!qdev->msi_x_entry) {
3235 			qlge_irq_type = MSI_IRQ;
3236 			goto msi;
3237 		}
3238 
3239 		for (i = 0; i < qdev->intr_count; i++)
3240 			qdev->msi_x_entry[i].entry = i;
3241 
3242 		/* Loop to get our vectors.  We start with
3243 		 * what we want and settle for what we get.
3244 		 */
3245 		do {
3246 			err = pci_enable_msix(qdev->pdev,
3247 				qdev->msi_x_entry, qdev->intr_count);
3248 			if (err > 0)
3249 				qdev->intr_count = err;
3250 		} while (err > 0);
3251 
3252 		if (err < 0) {
3253 			kfree(qdev->msi_x_entry);
3254 			qdev->msi_x_entry = NULL;
3255 			netif_warn(qdev, ifup, qdev->ndev,
3256 				   "MSI-X Enable failed, trying MSI.\n");
3257 			qdev->intr_count = 1;
3258 			qlge_irq_type = MSI_IRQ;
3259 		} else if (err == 0) {
3260 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
3261 			netif_info(qdev, ifup, qdev->ndev,
3262 				   "MSI-X Enabled, got %d vectors.\n",
3263 				   qdev->intr_count);
3264 			return;
3265 		}
3266 	}
3267 msi:
3268 	qdev->intr_count = 1;
3269 	if (qlge_irq_type == MSI_IRQ) {
3270 		if (!pci_enable_msi(qdev->pdev)) {
3271 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3272 			netif_info(qdev, ifup, qdev->ndev,
3273 				   "Running with MSI interrupts.\n");
3274 			return;
3275 		}
3276 	}
3277 	qlge_irq_type = LEG_IRQ;
3278 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3279 		     "Running with legacy interrupts.\n");
3280 }
3281 
3282 /* Each vector services 1 RSS ring and 1 or more
3283  * TX completion rings.  This function loops through
3284  * the TX completion rings and assigns the vector that
3285  * will service it.  An example would be if there are
3286  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3287  * This would mean that vector 0 would service RSS ring 0
3288  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3289  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3290  */
3291 static void ql_set_tx_vect(struct ql_adapter *qdev)
3292 {
3293 	int i, j, vect;
3294 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3295 
3296 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3297 		/* Assign irq vectors to TX rx_rings.*/
3298 		for (vect = 0, j = 0, i = qdev->rss_ring_count;
3299 					 i < qdev->rx_ring_count; i++) {
3300 			if (j == tx_rings_per_vector) {
3301 				vect++;
3302 				j = 0;
3303 			}
3304 			qdev->rx_ring[i].irq = vect;
3305 			j++;
3306 		}
3307 	} else {
3308 		/* For single vector all rings have an irq
3309 		 * of zero.
3310 		 */
3311 		for (i = 0; i < qdev->rx_ring_count; i++)
3312 			qdev->rx_ring[i].irq = 0;
3313 	}
3314 }
3315 
3316 /* Set the interrupt mask for this vector.  Each vector
3317  * will service 1 RSS ring and 1 or more TX completion
3318  * rings.  This function sets up a bit mask per vector
3319  * that indicates which rings it services.
3320  */
3321 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3322 {
3323 	int j, vect = ctx->intr;
3324 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3325 
3326 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3327 		/* Add the RSS ring serviced by this vector
3328 		 * to the mask.
3329 		 */
3330 		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3331 		/* Add the TX ring(s) serviced by this vector
3332 		 * to the mask. */
3333 		for (j = 0; j < tx_rings_per_vector; j++) {
3334 			ctx->irq_mask |=
3335 			(1 << qdev->rx_ring[qdev->rss_ring_count +
3336 			(vect * tx_rings_per_vector) + j].cq_id);
3337 		}
3338 	} else {
3339 		/* For single vector we just shift each queue's
3340 		 * ID into the mask.
3341 		 */
3342 		for (j = 0; j < qdev->rx_ring_count; j++)
3343 			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3344 	}
3345 }
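
/* Illustrative example, not part of the original driver: continuing the
 * 2-vector/8-TX-ring case, vector 1 services RSS ring cq_id 1 plus TX
 * completion cq_ids 6..9, so ql_set_irq_mask() leaves it with 0x3c2.
 * The helper below rebuilds that mask, assuming (as ql_configure_rings()
 * below guarantees) that cq_id equals the rx_ring[] index.
 */
static u32 __maybe_unused qlge_example_vector_mask(int vect,
						   int rss_ring_count,
						   int tx_rings_per_vector)
{
	u32 mask = 1 << vect;	/* RSS ring 'vect' has cq_id == vect. */
	int j;

	for (j = 0; j < tx_rings_per_vector; j++)
		mask |= 1 << (rss_ring_count +
			      vect * tx_rings_per_vector + j);
	return mask;
}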
3346 
3347 /*
3348  * Here we build the intr_context structures based on
3349  * our rx_ring count and intr vector count.
3350  * The intr_context structure is used to hook each vector
3351  * to possibly different handlers.
3352  */
3353 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3354 {
3355 	int i = 0;
3356 	struct intr_context *intr_context = &qdev->intr_context[0];
3357 
3358 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3359 		/* Each rx_ring has its
3360 		 * own intr_context since we have separate
3361 		 * vectors for each queue.
3362 		 */
3363 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3364 			qdev->rx_ring[i].irq = i;
3365 			intr_context->intr = i;
3366 			intr_context->qdev = qdev;
3367 			/* Set up this vector's bit-mask that indicates
3368 			 * which queues it services.
3369 			 */
3370 			ql_set_irq_mask(qdev, intr_context);
3371 			/*
3372 			 * We set up each vector's enable/disable/read bits so
3373 			 * there's no bit/mask calculations in the critical path.
3374 			 */
3375 			intr_context->intr_en_mask =
3376 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3377 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3378 			    | i;
3379 			intr_context->intr_dis_mask =
3380 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3381 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3382 			    INTR_EN_IHD | i;
3383 			intr_context->intr_read_mask =
3384 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3385 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3386 			    i;
3387 			if (i == 0) {
3388 				/* The first vector/queue handles
3389 				 * broadcast/multicast, fatal errors,
3390 				 * and firmware events.  This is in addition
3391 				 * to normal inbound NAPI processing.
3392 				 */
3393 				intr_context->handler = qlge_isr;
3394 				sprintf(intr_context->name, "%s-rx-%d",
3395 					qdev->ndev->name, i);
3396 			} else {
3397 				/*
3398 				 * Inbound queues handle unicast frames only.
3399 				 */
3400 				intr_context->handler = qlge_msix_rx_isr;
3401 				sprintf(intr_context->name, "%s-rx-%d",
3402 					qdev->ndev->name, i);
3403 			}
3404 		}
3405 	} else {
3406 		/*
3407 		 * All rx_rings use the same intr_context since
3408 		 * there is only one vector.
3409 		 */
3410 		intr_context->intr = 0;
3411 		intr_context->qdev = qdev;
3412 		/*
3413 		 * We set up each vector's enable/disable/read bits so
3414 		 * there's no bit/mask calculations in the critical path.
3415 		 */
3416 		intr_context->intr_en_mask =
3417 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3418 		intr_context->intr_dis_mask =
3419 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3420 		    INTR_EN_TYPE_DISABLE;
3421 		intr_context->intr_read_mask =
3422 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3423 		/*
3424 		 * Single interrupt means one handler for all rings.
3425 		 */
3426 		intr_context->handler = qlge_isr;
3427 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3428 		/* Set up this vector's bit-mask that indicates
3429 		 * which queues it services. In this case there is
3430 		 * a single vector so it will service all RSS and
3431 		 * TX completion rings.
3432 		 */
3433 		ql_set_irq_mask(qdev, intr_context);
3434 	}
3435 	/* Tell the TX completion rings which MSIx vector
3436 	 * they will be using.
3437 	 */
3438 	ql_set_tx_vect(qdev);
3439 }
3440 
3441 static void ql_free_irq(struct ql_adapter *qdev)
3442 {
3443 	int i;
3444 	struct intr_context *intr_context = &qdev->intr_context[0];
3445 
3446 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3447 		if (intr_context->hooked) {
3448 			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3449 				free_irq(qdev->msi_x_entry[i].vector,
3450 					 &qdev->rx_ring[i]);
3451 			} else {
3452 				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3453 			}
3454 		}
3455 	}
3456 	ql_disable_msix(qdev);
3457 }
3458 
3459 static int ql_request_irq(struct ql_adapter *qdev)
3460 {
3461 	int i;
3462 	int status = 0;
3463 	struct pci_dev *pdev = qdev->pdev;
3464 	struct intr_context *intr_context = &qdev->intr_context[0];
3465 
3466 	ql_resolve_queues_to_irqs(qdev);
3467 
3468 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3469 		atomic_set(&intr_context->irq_cnt, 0);
3470 		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3471 			status = request_irq(qdev->msi_x_entry[i].vector,
3472 					     intr_context->handler,
3473 					     0,
3474 					     intr_context->name,
3475 					     &qdev->rx_ring[i]);
3476 			if (status) {
3477 				netif_err(qdev, ifup, qdev->ndev,
3478 					  "Failed request for MSIX interrupt %d.\n",
3479 					  i);
3480 				goto err_irq;
3481 			}
3482 		} else {
3483 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3484 				     "trying msi or legacy interrupts.\n");
3485 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3486 				     "%s: irq = %d.\n", __func__, pdev->irq);
3487 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3488 				     "%s: context->name = %s.\n", __func__,
3489 				     intr_context->name);
3490 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3491 				     "%s: dev_id = 0x%p.\n", __func__,
3492 				     &qdev->rx_ring[0]);
3493 			status = request_irq(pdev->irq, qlge_isr,
3494 					     test_bit(QL_MSI_ENABLED,
3495 						      &qdev->flags) ?
3496 					     0 : IRQF_SHARED,
3497 					     intr_context->name,
3498 					     &qdev->rx_ring[0]);
3499 			if (status)
3500 				goto err_irq;
3501 
3502 			netif_err(qdev, ifup, qdev->ndev,
3503 				  "Hooked intr %d, queue type %s, with name %s.\n",
3504 				  i,
3505 				  qdev->rx_ring[0].type == DEFAULT_Q ?
3506 				  "DEFAULT_Q" :
3507 				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3508 				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3509 				  intr_context->name);
3510 		}
3511 		intr_context->hooked = 1;
3512 	}
3513 	return status;
3514 err_irq:
3515 	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!\n");
3516 	ql_free_irq(qdev);
3517 	return status;
3518 }
3519 
3520 static int ql_start_rss(struct ql_adapter *qdev)
3521 {
3522 	static const u8 init_hash_seed[] = {
3523 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3524 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3525 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3526 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3527 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3528 	};
3529 	struct ricb *ricb = &qdev->ricb;
3530 	int status = 0;
3531 	int i;
3532 	u8 *hash_id = (u8 *) ricb->hash_cq_id;
3533 
3534 	memset((void *)ricb, 0, sizeof(*ricb));
3535 
3536 	ricb->base_cq = RSS_L4K;
3537 	ricb->flags =
3538 		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3539 	ricb->mask = cpu_to_le16((u16)(0x3ff));
3540 
3541 	/*
3542 	 * Fill out the Indirection Table.
3543 	 */
3544 	for (i = 0; i < 1024; i++)
3545 		hash_id[i] = (i & (qdev->rss_ring_count - 1));
3546 
3547 	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3548 	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3549 
3550 	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3551 	if (status) {
3552 		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3553 		return status;
3554 	}
3555 	return status;
3556 }
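
/* Illustrative sketch, not part of the original driver: the 1024-entry
 * indirection table filled in ql_start_rss() is built as
 * i & (rss_ring_count - 1), which spreads flows across the RSS rings
 * (evenly when the ring count is a power of two).  Because ricb->mask is
 * programmed to 0x3ff, the hardware uses only the low ten bits of the
 * receive hash to pick the completion queue, i.e.:
 */
static u8 __maybe_unused qlge_example_rss_cq(const u8 *hash_cq_id,
					     u32 rss_hash)
{
	return hash_cq_id[rss_hash & 0x3ff];
}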
3557 
3558 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3559 {
3560 	int i, status = 0;
3561 
3562 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3563 	if (status)
3564 		return status;
3565 	/* Clear all the entries in the routing table. */
3566 	for (i = 0; i < 16; i++) {
3567 		status = ql_set_routing_reg(qdev, i, 0, 0);
3568 		if (status) {
3569 			netif_err(qdev, ifup, qdev->ndev,
3570 				  "Failed to init routing register for CAM packets.\n");
3571 			break;
3572 		}
3573 	}
3574 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3575 	return status;
3576 }
3577 
3578 /* Initialize the frame-to-queue routing. */
3579 static int ql_route_initialize(struct ql_adapter *qdev)
3580 {
3581 	int status = 0;
3582 
3583 	/* Clear all the entries in the routing table. */
3584 	status = ql_clear_routing_entries(qdev);
3585 	if (status)
3586 		return status;
3587 
3588 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3589 	if (status)
3590 		return status;
3591 
3592 	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3593 						RT_IDX_IP_CSUM_ERR, 1);
3594 	if (status) {
3595 		netif_err(qdev, ifup, qdev->ndev,
3596 			"Failed to init routing register "
3597 			"for IP CSUM error packets.\n");
3598 		goto exit;
3599 	}
3600 	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3601 						RT_IDX_TU_CSUM_ERR, 1);
3602 	if (status) {
3603 		netif_err(qdev, ifup, qdev->ndev,
3604 			"Failed to init routing register "
3605 			"for TCP/UDP CSUM error packets.\n");
3606 		goto exit;
3607 	}
3608 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3609 	if (status) {
3610 		netif_err(qdev, ifup, qdev->ndev,
3611 			  "Failed to init routing register for broadcast packets.\n");
3612 		goto exit;
3613 	}
3614 	/* If we have more than one inbound queue, then turn on RSS in the
3615 	 * routing block.
3616 	 */
3617 	if (qdev->rss_ring_count > 1) {
3618 		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3619 					RT_IDX_RSS_MATCH, 1);
3620 		if (status) {
3621 			netif_err(qdev, ifup, qdev->ndev,
3622 				  "Failed to init routing register for MATCH RSS packets.\n");
3623 			goto exit;
3624 		}
3625 	}
3626 
3627 	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3628 				    RT_IDX_CAM_HIT, 1);
3629 	if (status)
3630 		netif_err(qdev, ifup, qdev->ndev,
3631 			  "Failed to init routing register for CAM packets.\n");
3632 exit:
3633 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3634 	return status;
3635 }
3636 
3637 int ql_cam_route_initialize(struct ql_adapter *qdev)
3638 {
3639 	int status, set;
3640 
3641 	/* Check if the link is up and use that to
3642 	 * determine whether we are setting or clearing
3643 	 * the MAC address in the CAM.
3644 	 */
3645 	set = ql_read32(qdev, STS);
3646 	set &= qdev->port_link_up;
3647 	status = ql_set_mac_addr(qdev, set);
3648 	if (status) {
3649 		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3650 		return status;
3651 	}
3652 
3653 	status = ql_route_initialize(qdev);
3654 	if (status)
3655 		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3656 
3657 	return status;
3658 }
3659 
3660 static int ql_adapter_initialize(struct ql_adapter *qdev)
3661 {
3662 	u32 value, mask;
3663 	int i;
3664 	int status = 0;
3665 
3666 	/*
3667 	 * Set up the System register to halt on errors.
3668 	 */
3669 	value = SYS_EFE | SYS_FAE;
3670 	mask = value << 16;
3671 	ql_write32(qdev, SYS, mask | value);
3672 
3673 	/* Set the default queue, and VLAN behavior. */
3674 	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3675 	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3676 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3677 
3678 	/* Set the MPI interrupt to enabled. */
3679 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3680 
3681 	/* Enable the function, set pagesize, enable error checking. */
3682 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3683 	    FSC_EC | FSC_VM_PAGE_4K;
3684 	value |= SPLT_SETTING;
3685 
3686 	/* Set/clear header splitting. */
3687 	mask = FSC_VM_PAGESIZE_MASK |
3688 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3689 	ql_write32(qdev, FSC, mask | value);
3690 
3691 	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3692 
3693 	/* Set RX packet routing to use the port/pci function on which the
3694 	 * packet arrived, in addition to the usual frame routing.
3695 	 * This is helpful on bonding where both interfaces can have
3696 	 * the same MAC address.
3697 	 */
3698 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3699 	/* Reroute all packets to our Interface.
3700 	 * They may have been routed to MPI firmware
3701 	 * due to WOL.
3702 	 */
3703 	value = ql_read32(qdev, MGMT_RCV_CFG);
3704 	value &= ~MGMT_RCV_CFG_RM;
3705 	mask = 0xffff0000;
3706 
3707 	/* Sticky reg needs clearing due to WOL. */
3708 	ql_write32(qdev, MGMT_RCV_CFG, mask);
3709 	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3710 
3711 	/* Default WOL is enabled on Mezz cards */
3712 	if (qdev->pdev->subsystem_device == 0x0068 ||
3713 			qdev->pdev->subsystem_device == 0x0180)
3714 		qdev->wol = WAKE_MAGIC;
3715 
3716 	/* Start up the rx queues. */
3717 	for (i = 0; i < qdev->rx_ring_count; i++) {
3718 		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3719 		if (status) {
3720 			netif_err(qdev, ifup, qdev->ndev,
3721 				  "Failed to start rx ring[%d].\n", i);
3722 			return status;
3723 		}
3724 	}
3725 
3726 	/* If there is more than one inbound completion queue
3727 	 * then download a RICB to configure RSS.
3728 	 */
3729 	if (qdev->rss_ring_count > 1) {
3730 		status = ql_start_rss(qdev);
3731 		if (status) {
3732 			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3733 			return status;
3734 		}
3735 	}
3736 
3737 	/* Start up the tx queues. */
3738 	for (i = 0; i < qdev->tx_ring_count; i++) {
3739 		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3740 		if (status) {
3741 			netif_err(qdev, ifup, qdev->ndev,
3742 				  "Failed to start tx ring[%d].\n", i);
3743 			return status;
3744 		}
3745 	}
3746 
3747 	/* Initialize the port and set the max framesize. */
3748 	status = qdev->nic_ops->port_initialize(qdev);
3749 	if (status)
3750 		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3751 
3752 	/* Set up the MAC address and frame routing filter. */
3753 	status = ql_cam_route_initialize(qdev);
3754 	if (status) {
3755 		netif_err(qdev, ifup, qdev->ndev,
3756 			  "Failed to init CAM/Routing tables.\n");
3757 		return status;
3758 	}
3759 
3760 	/* Start NAPI for the RSS queues. */
3761 	for (i = 0; i < qdev->rss_ring_count; i++)
3762 		napi_enable(&qdev->rx_ring[i].napi);
3763 
3764 	return status;
3765 }
3766 
3767 /* Issue soft reset to chip. */
3768 static int ql_adapter_reset(struct ql_adapter *qdev)
3769 {
3770 	u32 value;
3771 	int status = 0;
3772 	unsigned long end_jiffies;
3773 
3774 	/* Clear all the entries in the routing table. */
3775 	status = ql_clear_routing_entries(qdev);
3776 	if (status) {
3777 		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3778 		return status;
3779 	}
3780 
3781 	end_jiffies = jiffies +
3782 		max((unsigned long)1, usecs_to_jiffies(30));
3783 
3784 	/* If the recovery bit is set, skip the mailbox command and
3785 	 * clear the bit; otherwise we are in the normal reset process.
3786 	 */
3787 	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3788 		/* Stop management traffic. */
3789 		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3790 
3791 		/* Wait for the NIC and MGMNT FIFOs to empty. */
3792 		ql_wait_fifo_empty(qdev);
3793 	} else
3794 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3795 
3796 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3797 
3798 	do {
3799 		value = ql_read32(qdev, RST_FO);
3800 		if ((value & RST_FO_FR) == 0)
3801 			break;
3802 		cpu_relax();
3803 	} while (time_before(jiffies, end_jiffies));
3804 
3805 	if (value & RST_FO_FR) {
3806 		netif_err(qdev, ifdown, qdev->ndev,
3807 			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3808 		status = -ETIMEDOUT;
3809 	}
3810 
3811 	/* Resume management traffic. */
3812 	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3813 	return status;
3814 }
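
/* Illustrative helper, not part of the original driver: the RST_FO wait
 * above is an instance of the generic "poll a register until a bit clears
 * or a deadline passes" pattern.  A minimal sketch of that pattern,
 * reusing the driver's ql_read32() accessor:
 */
static int __maybe_unused qlge_example_wait_reg_clear(struct ql_adapter *qdev,
						      u32 reg, u32 bit,
						      unsigned long timeout)
{
	unsigned long end_jiffies = jiffies + timeout;

	do {
		if (!(ql_read32(qdev, reg) & bit))
			return 0;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	return -ETIMEDOUT;
}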
3815 
3816 static void ql_display_dev_info(struct net_device *ndev)
3817 {
3818 	struct ql_adapter *qdev = netdev_priv(ndev);
3819 
3820 	netif_info(qdev, probe, qdev->ndev,
3821 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3822 		   "XG Roll = %d, XG Rev = %d.\n",
3823 		   qdev->func,
3824 		   qdev->port,
3825 		   qdev->chip_rev_id & 0x0000000f,
3826 		   qdev->chip_rev_id >> 4 & 0x0000000f,
3827 		   qdev->chip_rev_id >> 8 & 0x0000000f,
3828 		   qdev->chip_rev_id >> 12 & 0x0000000f);
3829 	netif_info(qdev, probe, qdev->ndev,
3830 		   "MAC address %pM\n", ndev->dev_addr);
3831 }
3832 
3833 static int ql_wol(struct ql_adapter *qdev)
3834 {
3835 	int status = 0;
3836 	u32 wol = MB_WOL_DISABLE;
3837 
3838 	/* The CAM is still intact after a reset, but if we
3839 	 * are doing WOL, then we may need to program the
3840 	 * routing regs. We would also need to issue the mailbox
3841 	 * commands to instruct the MPI what to do per the ethtool
3842 	 * settings.
3843 	 */
3844 
3845 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3846 			WAKE_MCAST | WAKE_BCAST)) {
3847 		netif_err(qdev, ifdown, qdev->ndev,
3848 			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3849 			  qdev->wol);
3850 		return -EINVAL;
3851 	}
3852 
3853 	if (qdev->wol & WAKE_MAGIC) {
3854 		status = ql_mb_wol_set_magic(qdev, 1);
3855 		if (status) {
3856 			netif_err(qdev, ifdown, qdev->ndev,
3857 				  "Failed to set magic packet on %s.\n",
3858 				  qdev->ndev->name);
3859 			return status;
3860 		} else
3861 			netif_info(qdev, drv, qdev->ndev,
3862 				   "Enabled magic packet successfully on %s.\n",
3863 				   qdev->ndev->name);
3864 
3865 		wol |= MB_WOL_MAGIC_PKT;
3866 	}
3867 
3868 	if (qdev->wol) {
3869 		wol |= MB_WOL_MODE_ON;
3870 		status = ql_mb_wol_mode(qdev, wol);
3871 		netif_err(qdev, drv, qdev->ndev,
3872 			  "WOL %s (wol code 0x%x) on %s\n",
3873 			  (status == 0) ? "Successfully set" : "Failed",
3874 			  wol, qdev->ndev->name);
3875 	}
3876 
3877 	return status;
3878 }
3879 
3880 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3881 {
3882 
3883 	/* Don't kill the reset worker thread if we
3884 	 * are in the process of recovery.
3885 	 */
3886 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3887 		cancel_delayed_work_sync(&qdev->asic_reset_work);
3888 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3889 	cancel_delayed_work_sync(&qdev->mpi_work);
3890 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3891 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3892 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3893 }
3894 
3895 static int ql_adapter_down(struct ql_adapter *qdev)
3896 {
3897 	int i, status = 0;
3898 
3899 	ql_link_off(qdev);
3900 
3901 	ql_cancel_all_work_sync(qdev);
3902 
3903 	for (i = 0; i < qdev->rss_ring_count; i++)
3904 		napi_disable(&qdev->rx_ring[i].napi);
3905 
3906 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3907 
3908 	ql_disable_interrupts(qdev);
3909 
3910 	ql_tx_ring_clean(qdev);
3911 
3912 	/* Call netif_napi_del() from a common point.
3913 	 */
3914 	for (i = 0; i < qdev->rss_ring_count; i++)
3915 		netif_napi_del(&qdev->rx_ring[i].napi);
3916 
3917 	status = ql_adapter_reset(qdev);
3918 	if (status)
3919 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3920 			  qdev->func);
3921 	ql_free_rx_buffers(qdev);
3922 
3923 	return status;
3924 }
3925 
3926 static int ql_adapter_up(struct ql_adapter *qdev)
3927 {
3928 	int err = 0;
3929 
3930 	err = ql_adapter_initialize(qdev);
3931 	if (err) {
3932 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3933 		goto err_init;
3934 	}
3935 	set_bit(QL_ADAPTER_UP, &qdev->flags);
3936 	ql_alloc_rx_buffers(qdev);
3937 	/* If the port is initialized and the
3938 	 * link is up then turn on the carrier.
3939 	 */
3940 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
3941 			(ql_read32(qdev, STS) & qdev->port_link_up))
3942 		ql_link_on(qdev);
3943 	/* Restore rx mode. */
3944 	clear_bit(QL_ALLMULTI, &qdev->flags);
3945 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
3946 	qlge_set_multicast_list(qdev->ndev);
3947 
3948 	/* Restore vlan setting. */
3949 	qlge_restore_vlan(qdev);
3950 
3951 	ql_enable_interrupts(qdev);
3952 	ql_enable_all_completion_interrupts(qdev);
3953 	netif_tx_start_all_queues(qdev->ndev);
3954 
3955 	return 0;
3956 err_init:
3957 	ql_adapter_reset(qdev);
3958 	return err;
3959 }
3960 
3961 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3962 {
3963 	ql_free_mem_resources(qdev);
3964 	ql_free_irq(qdev);
3965 }
3966 
3967 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3968 {
3969 	int status = 0;
3970 
3971 	if (ql_alloc_mem_resources(qdev)) {
3972 		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3973 		return -ENOMEM;
3974 	}
3975 	status = ql_request_irq(qdev);
3976 	return status;
3977 }
3978 
3979 static int qlge_close(struct net_device *ndev)
3980 {
3981 	struct ql_adapter *qdev = netdev_priv(ndev);
3982 
3983 	/* If we hit the pci_channel_io_perm_failure
3984 	 * condition, then we already
3985 	 * brought the adapter down.
3986 	 */
3987 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3988 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3989 		clear_bit(QL_EEH_FATAL, &qdev->flags);
3990 		return 0;
3991 	}
3992 
3993 	/*
3994 	 * Wait for device to recover from a reset.
3995 	 * (Rarely happens, but possible.)
3996 	 */
3997 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3998 		msleep(1);
3999 	ql_adapter_down(qdev);
4000 	ql_release_adapter_resources(qdev);
4001 	return 0;
4002 }
4003 
4004 static int ql_configure_rings(struct ql_adapter *qdev)
4005 {
4006 	int i;
4007 	struct rx_ring *rx_ring;
4008 	struct tx_ring *tx_ring;
4009 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4010 	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4011 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4012 
4013 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4014 
4015 	/* In a perfect world we have one RSS ring for each CPU
4016 	 * and each has its own vector.  To do that we ask for
4017 	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
4018 	 * vector count to what we actually get.  We then
4019 	 * allocate an RSS ring for each.
4020 	 * Essentially, we are doing min(cpu_count, msix_vector_count).
4021 	 */
4022 	qdev->intr_count = cpu_cnt;
4023 	ql_enable_msix(qdev);
4024 	/* Adjust the RSS ring count to the actual vector count. */
4025 	qdev->rss_ring_count = qdev->intr_count;
4026 	qdev->tx_ring_count = cpu_cnt;
4027 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4028 
4029 	for (i = 0; i < qdev->tx_ring_count; i++) {
4030 		tx_ring = &qdev->tx_ring[i];
4031 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
4032 		tx_ring->qdev = qdev;
4033 		tx_ring->wq_id = i;
4034 		tx_ring->wq_len = qdev->tx_ring_size;
4035 		tx_ring->wq_size =
4036 		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4037 
4038 		/*
4039 		 * The completion queue IDs for the tx rings start
4040 		 * immediately after the rss rings.
4041 		 */
4042 		tx_ring->cq_id = qdev->rss_ring_count + i;
4043 	}
4044 
4045 	for (i = 0; i < qdev->rx_ring_count; i++) {
4046 		rx_ring = &qdev->rx_ring[i];
4047 		memset((void *)rx_ring, 0, sizeof(*rx_ring));
4048 		rx_ring->qdev = qdev;
4049 		rx_ring->cq_id = i;
4050 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
4051 		if (i < qdev->rss_ring_count) {
4052 			/*
4053 			 * Inbound (RSS) queues.
4054 			 */
4055 			rx_ring->cq_len = qdev->rx_ring_size;
4056 			rx_ring->cq_size =
4057 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4058 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4059 			rx_ring->lbq_size =
4060 			    rx_ring->lbq_len * sizeof(__le64);
4061 			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4062 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4063 			rx_ring->sbq_size =
4064 			    rx_ring->sbq_len * sizeof(__le64);
4065 			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4066 			rx_ring->type = RX_Q;
4067 		} else {
4068 			/*
4069 			 * Outbound queue handles outbound completions only.
4070 			 */
4071 			/* outbound cq is same size as tx_ring it services. */
4072 			rx_ring->cq_len = qdev->tx_ring_size;
4073 			rx_ring->cq_size =
4074 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4075 			rx_ring->lbq_len = 0;
4076 			rx_ring->lbq_size = 0;
4077 			rx_ring->lbq_buf_size = 0;
4078 			rx_ring->sbq_len = 0;
4079 			rx_ring->sbq_size = 0;
4080 			rx_ring->sbq_buf_size = 0;
4081 			rx_ring->type = TX_Q;
4082 		}
4083 	}
4084 	return 0;
4085 }
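
/* Worked example, not part of the original driver: with 8 online CPUs but
 * only 4 MSI-X vectors granted by ql_enable_msix(), the arithmetic in
 * ql_configure_rings() yields rss_ring_count = 4, tx_ring_count = 8 and
 * rx_ring_count = 12, with TX completion queues occupying cq_ids 4..11.
 */
static void __maybe_unused qlge_example_ring_counts(int cpu_cnt,
						    int vectors_granted)
{
	int rss = vectors_granted;	/* one RSS ring per vector */
	int tx = cpu_cnt;		/* one TX ring per CPU */
	int rx = tx + rss;		/* rx_ring[] holds both types */

	pr_info("rss=%d tx=%d rx=%d, TX completion cq_ids %d..%d\n",
		rss, tx, rx, rss, rx - 1);
}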
4086 
4087 static int qlge_open(struct net_device *ndev)
4088 {
4089 	int err = 0;
4090 	struct ql_adapter *qdev = netdev_priv(ndev);
4091 
4092 	err = ql_adapter_reset(qdev);
4093 	if (err)
4094 		return err;
4095 
4096 	err = ql_configure_rings(qdev);
4097 	if (err)
4098 		return err;
4099 
4100 	err = ql_get_adapter_resources(qdev);
4101 	if (err)
4102 		goto error_up;
4103 
4104 	err = ql_adapter_up(qdev);
4105 	if (err)
4106 		goto error_up;
4107 
4108 	return err;
4109 
4110 error_up:
4111 	ql_release_adapter_resources(qdev);
4112 	return err;
4113 }
4114 
4115 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4116 {
4117 	struct rx_ring *rx_ring;
4118 	int i, status;
4119 	u32 lbq_buf_len;
4120 
4121 	/* Wait for an outstanding reset to complete. */
4122 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4123 		int i = 3;
4124 		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4125 			netif_err(qdev, ifup, qdev->ndev,
4126 				  "Waiting for adapter UP...\n");
4127 			ssleep(1);
4128 		}
4129 
4130 		if (!i) {
4131 			netif_err(qdev, ifup, qdev->ndev,
4132 				  "Timed out waiting for adapter UP\n");
4133 			return -ETIMEDOUT;
4134 		}
4135 	}
4136 
4137 	status = ql_adapter_down(qdev);
4138 	if (status)
4139 		goto error;
4140 
4141 	/* Get the new rx buffer size. */
4142 	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4143 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4144 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4145 
4146 	for (i = 0; i < qdev->rss_ring_count; i++) {
4147 		rx_ring = &qdev->rx_ring[i];
4148 		/* Set the new size. */
4149 		rx_ring->lbq_buf_size = lbq_buf_len;
4150 	}
4151 
4152 	status = ql_adapter_up(qdev);
4153 	if (status)
4154 		goto error;
4155 
4156 	return status;
4157 error:
4158 	netif_alert(qdev, ifup, qdev->ndev,
4159 		    "Driver up/down cycle failed, closing device.\n");
4160 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4161 	dev_close(qdev->ndev);
4162 	return status;
4163 }
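
/* Illustrative note, not part of the original driver: lbq_buf_order feeds
 * the page allocator for large receive buffers, and get_order() converts
 * a byte size into a page-allocation order.  The sketch below assumes
 * 4 KiB pages and, purely for illustration, 2 KiB standard and 8 KiB
 * jumbo buffer sizes; the driver's real sizes come from
 * LARGE_BUFFER_MIN_SIZE/LARGE_BUFFER_MAX_SIZE.
 */
static unsigned int __maybe_unused qlge_example_lbq_order(unsigned int mtu)
{
	unsigned int lbq_buf_len = (mtu > 1500) ? 8192 : 2048;

	/* get_order(2048) == 0, get_order(8192) == 1 with 4 KiB pages. */
	return get_order(lbq_buf_len);
}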
4164 
4165 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4166 {
4167 	struct ql_adapter *qdev = netdev_priv(ndev);
4168 	int status;
4169 
4170 	if (ndev->mtu == 1500 && new_mtu == 9000) {
4171 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4172 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
4173 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4174 	} else
4175 		return -EINVAL;
4176 
4177 	queue_delayed_work(qdev->workqueue,
4178 			&qdev->mpi_port_cfg_work, 3*HZ);
4179 
4180 	ndev->mtu = new_mtu;
4181 
4182 	if (!netif_running(qdev->ndev)) {
4183 		return 0;
4184 	}
4185 
4186 	status = ql_change_rx_buffers(qdev);
4187 	if (status) {
4188 		netif_err(qdev, ifup, qdev->ndev,
4189 			  "Changing MTU failed.\n");
4190 	}
4191 
4192 	return status;
4193 }
4194 
4195 static struct net_device_stats *qlge_get_stats(struct net_device
4196 					       *ndev)
4197 {
4198 	struct ql_adapter *qdev = netdev_priv(ndev);
4199 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4200 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4201 	unsigned long pkts, mcast, dropped, errors, bytes;
4202 	int i;
4203 
4204 	/* Get RX stats. */
4205 	pkts = mcast = dropped = errors = bytes = 0;
4206 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4207 		pkts += rx_ring->rx_packets;
4208 		bytes += rx_ring->rx_bytes;
4209 		dropped += rx_ring->rx_dropped;
4210 		errors += rx_ring->rx_errors;
4211 		mcast += rx_ring->rx_multicast;
4212 	}
4213 	ndev->stats.rx_packets = pkts;
4214 	ndev->stats.rx_bytes = bytes;
4215 	ndev->stats.rx_dropped = dropped;
4216 	ndev->stats.rx_errors = errors;
4217 	ndev->stats.multicast = mcast;
4218 
4219 	/* Get TX stats. */
4220 	pkts = errors = bytes = 0;
4221 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4222 		pkts += tx_ring->tx_packets;
4223 		bytes += tx_ring->tx_bytes;
4224 		errors += tx_ring->tx_errors;
4225 	}
4226 	ndev->stats.tx_packets = pkts;
4227 	ndev->stats.tx_bytes = bytes;
4228 	ndev->stats.tx_errors = errors;
4229 	return &ndev->stats;
4230 }
4231 
4232 static void qlge_set_multicast_list(struct net_device *ndev)
4233 {
4234 	struct ql_adapter *qdev = netdev_priv(ndev);
4235 	struct netdev_hw_addr *ha;
4236 	int i, status;
4237 
4238 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4239 	if (status)
4240 		return;
4241 	/*
4242 	 * Set or clear promiscuous mode if a
4243 	 * transition is taking place.
4244 	 */
4245 	if (ndev->flags & IFF_PROMISC) {
4246 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4247 			if (ql_set_routing_reg
4248 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4249 				netif_err(qdev, hw, qdev->ndev,
4250 					  "Failed to set promiscuous mode.\n");
4251 			} else {
4252 				set_bit(QL_PROMISCUOUS, &qdev->flags);
4253 			}
4254 		}
4255 	} else {
4256 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4257 			if (ql_set_routing_reg
4258 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4259 				netif_err(qdev, hw, qdev->ndev,
4260 					  "Failed to clear promiscuous mode.\n");
4261 			} else {
4262 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4263 			}
4264 		}
4265 	}
4266 
4267 	/*
4268 	 * Set or clear all multicast mode if a
4269 	 * transition is taking place.
4270 	 */
4271 	if ((ndev->flags & IFF_ALLMULTI) ||
4272 	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4273 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4274 			if (ql_set_routing_reg
4275 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4276 				netif_err(qdev, hw, qdev->ndev,
4277 					  "Failed to set all-multi mode.\n");
4278 			} else {
4279 				set_bit(QL_ALLMULTI, &qdev->flags);
4280 			}
4281 		}
4282 	} else {
4283 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4284 			if (ql_set_routing_reg
4285 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4286 				netif_err(qdev, hw, qdev->ndev,
4287 					  "Failed to clear all-multi mode.\n");
4288 			} else {
4289 				clear_bit(QL_ALLMULTI, &qdev->flags);
4290 			}
4291 		}
4292 	}
4293 
4294 	if (!netdev_mc_empty(ndev)) {
4295 		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4296 		if (status)
4297 			goto exit;
4298 		i = 0;
4299 		netdev_for_each_mc_addr(ha, ndev) {
4300 			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4301 						MAC_ADDR_TYPE_MULTI_MAC, i)) {
4302 				netif_err(qdev, hw, qdev->ndev,
4303 					  "Failed to load multicast address.\n");
4304 				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4305 				goto exit;
4306 			}
4307 			i++;
4308 		}
4309 		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4310 		if (ql_set_routing_reg
4311 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4312 			netif_err(qdev, hw, qdev->ndev,
4313 				  "Failed to set multicast match mode.\n");
4314 		} else {
4315 			set_bit(QL_ALLMULTI, &qdev->flags);
4316 		}
4317 	}
4318 exit:
4319 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4320 }
4321 
4322 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4323 {
4324 	struct ql_adapter *qdev = netdev_priv(ndev);
4325 	struct sockaddr *addr = p;
4326 	int status;
4327 
4328 	if (!is_valid_ether_addr(addr->sa_data))
4329 		return -EADDRNOTAVAIL;
4330 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4331 	/* Update local copy of current mac address. */
4332 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4333 
4334 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4335 	if (status)
4336 		return status;
4337 	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4338 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4339 	if (status)
4340 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4341 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4342 	return status;
4343 }
4344 
4345 static void qlge_tx_timeout(struct net_device *ndev)
4346 {
4347 	struct ql_adapter *qdev = netdev_priv(ndev);
4348 	ql_queue_asic_error(qdev);
4349 }
4350 
4351 static void ql_asic_reset_work(struct work_struct *work)
4352 {
4353 	struct ql_adapter *qdev =
4354 	    container_of(work, struct ql_adapter, asic_reset_work.work);
4355 	int status;
4356 	rtnl_lock();
4357 	status = ql_adapter_down(qdev);
4358 	if (status)
4359 		goto error;
4360 
4361 	status = ql_adapter_up(qdev);
4362 	if (status)
4363 		goto error;
4364 
4365 	/* Restore rx mode. */
4366 	clear_bit(QL_ALLMULTI, &qdev->flags);
4367 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4368 	qlge_set_multicast_list(qdev->ndev);
4369 
4370 	rtnl_unlock();
4371 	return;
4372 error:
4373 	netif_alert(qdev, ifup, qdev->ndev,
4374 		    "Driver up/down cycle failed, closing device\n");
4375 
4376 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4377 	dev_close(qdev->ndev);
4378 	rtnl_unlock();
4379 }
4380 
4381 static const struct nic_operations qla8012_nic_ops = {
4382 	.get_flash		= ql_get_8012_flash_params,
4383 	.port_initialize	= ql_8012_port_initialize,
4384 };
4385 
4386 static const struct nic_operations qla8000_nic_ops = {
4387 	.get_flash		= ql_get_8000_flash_params,
4388 	.port_initialize	= ql_8000_port_initialize,
4389 };
4390 
4391 /* Find the pcie function number for the other NIC
4392  * on this chip.  Since both NIC functions share a
4393  * common firmware we have the lowest enabled function
4394  * do any common work.  Examples would be resetting
4395  * after a fatal firmware error, or doing a firmware
4396  * coredump.
4397  */
4398 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4399 {
4400 	int status = 0;
4401 	u32 temp;
4402 	u32 nic_func1, nic_func2;
4403 
4404 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4405 			&temp);
4406 	if (status)
4407 		return status;
4408 
4409 	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4410 			MPI_TEST_NIC_FUNC_MASK);
4411 	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4412 			MPI_TEST_NIC_FUNC_MASK);
4413 
4414 	if (qdev->func == nic_func1)
4415 		qdev->alt_func = nic_func2;
4416 	else if (qdev->func == nic_func2)
4417 		qdev->alt_func = nic_func1;
4418 	else
4419 		status = -EIO;
4420 
4421 	return status;
4422 }
4423 
4424 static int ql_get_board_info(struct ql_adapter *qdev)
4425 {
4426 	int status;
4427 	qdev->func =
4428 	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4429 	if (qdev->func > 3)
4430 		return -EIO;
4431 
4432 	status = ql_get_alt_pcie_func(qdev);
4433 	if (status)
4434 		return status;
4435 
4436 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4437 	if (qdev->port) {
4438 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4439 		qdev->port_link_up = STS_PL1;
4440 		qdev->port_init = STS_PI1;
4441 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4442 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4443 	} else {
4444 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4445 		qdev->port_link_up = STS_PL0;
4446 		qdev->port_init = STS_PI0;
4447 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4448 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4449 	}
4450 	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4451 	qdev->device_id = qdev->pdev->device;
4452 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4453 		qdev->nic_ops = &qla8012_nic_ops;
4454 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4455 		qdev->nic_ops = &qla8000_nic_ops;
4456 	return status;
4457 }
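
/* Illustrative example, not part of the original driver: once the two NIC
 * function numbers have been extracted from MPI_TEST_FUNC_PORT_CFG, the
 * port assignment in ql_get_board_info() reduces to "the lower-numbered
 * enabled function is port 0".  A hedged sketch with hypothetical values:
 */
static void __maybe_unused qlge_example_port_select(u32 my_func,
						    u32 nic_func1,
						    u32 nic_func2)
{
	/* E.g. my_func = 2, nic_func1 = 0, nic_func2 = 2 -> alt_func = 0,
	 * so this instance is port 1.
	 */
	u32 alt_func = (my_func == nic_func1) ? nic_func2 : nic_func1;
	u32 port = (my_func < alt_func) ? 0 : 1;

	pr_info("func %u, alt func %u -> port %u\n", my_func, alt_func, port);
}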
4458 
4459 static void ql_release_all(struct pci_dev *pdev)
4460 {
4461 	struct net_device *ndev = pci_get_drvdata(pdev);
4462 	struct ql_adapter *qdev = netdev_priv(ndev);
4463 
4464 	if (qdev->workqueue) {
4465 		destroy_workqueue(qdev->workqueue);
4466 		qdev->workqueue = NULL;
4467 	}
4468 
4469 	if (qdev->reg_base)
4470 		iounmap(qdev->reg_base);
4471 	if (qdev->doorbell_area)
4472 		iounmap(qdev->doorbell_area);
4473 	vfree(qdev->mpi_coredump);
4474 	pci_release_regions(pdev);
4475 	pci_set_drvdata(pdev, NULL);
4476 }
4477 
4478 static int __devinit ql_init_device(struct pci_dev *pdev,
4479 				    struct net_device *ndev, int cards_found)
4480 {
4481 	struct ql_adapter *qdev = netdev_priv(ndev);
4482 	int err = 0;
4483 
4484 	memset((void *)qdev, 0, sizeof(*qdev));
4485 	err = pci_enable_device(pdev);
4486 	if (err) {
4487 		dev_err(&pdev->dev, "PCI device enable failed.\n");
4488 		return err;
4489 	}
4490 
4491 	qdev->ndev = ndev;
4492 	qdev->pdev = pdev;
4493 	pci_set_drvdata(pdev, ndev);
4494 
4495 	/* Set PCIe read request size */
4496 	err = pcie_set_readrq(pdev, 4096);
4497 	if (err) {
4498 		dev_err(&pdev->dev, "Set readrq failed.\n");
4499 		goto err_out1;
4500 	}
4501 
4502 	err = pci_request_regions(pdev, DRV_NAME);
4503 	if (err) {
4504 		dev_err(&pdev->dev, "PCI region request failed.\n");
4505 		goto err_out1;
4506 	}
4507 
4508 	pci_set_master(pdev);
4509 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4510 		set_bit(QL_DMA64, &qdev->flags);
4511 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4512 	} else {
4513 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4514 		if (!err)
4515 		       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4516 	}
4517 
4518 	if (err) {
4519 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4520 		goto err_out2;
4521 	}
4522 
4523 	/* Set PCIe reset type for EEH to fundamental. */
4524 	pdev->needs_freset = 1;
4525 	pci_save_state(pdev);
4526 	qdev->reg_base =
4527 	    ioremap_nocache(pci_resource_start(pdev, 1),
4528 			    pci_resource_len(pdev, 1));
4529 	if (!qdev->reg_base) {
4530 		dev_err(&pdev->dev, "Register mapping failed.\n");
4531 		err = -ENOMEM;
4532 		goto err_out2;
4533 	}
4534 
4535 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4536 	qdev->doorbell_area =
4537 	    ioremap_nocache(pci_resource_start(pdev, 3),
4538 			    pci_resource_len(pdev, 3));
4539 	if (!qdev->doorbell_area) {
4540 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4541 		err = -ENOMEM;
4542 		goto err_out2;
4543 	}
4544 
4545 	err = ql_get_board_info(qdev);
4546 	if (err) {
4547 		dev_err(&pdev->dev, "Register access failed.\n");
4548 		err = -EIO;
4549 		goto err_out2;
4550 	}
4551 	qdev->msg_enable = netif_msg_init(debug, default_msg);
4552 	spin_lock_init(&qdev->hw_lock);
4553 	spin_lock_init(&qdev->stats_lock);
4554 
4555 	if (qlge_mpi_coredump) {
4556 		qdev->mpi_coredump =
4557 			vmalloc(sizeof(struct ql_mpi_coredump));
4558 		if (qdev->mpi_coredump == NULL) {
4559 			dev_err(&pdev->dev, "Coredump alloc failed.\n");
4560 			err = -ENOMEM;
4561 			goto err_out2;
4562 		}
4563 		if (qlge_force_coredump)
4564 			set_bit(QL_FRC_COREDUMP, &qdev->flags);
4565 	}
4566 	/* make sure the EEPROM is good */
4567 	err = qdev->nic_ops->get_flash(qdev);
4568 	if (err) {
4569 		dev_err(&pdev->dev, "Invalid FLASH.\n");
4570 		goto err_out2;
4571 	}
4572 
4573 	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4574 	/* Keep local copy of current mac address. */
4575 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4576 
4577 	/* Set up the default ring sizes. */
4578 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4579 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4580 
4581 	/* Set up the coalescing parameters. */
4582 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4583 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4584 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4585 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4586 
4587 	/*
4588 	 * Set up the operating parameters.
4589 	 */
4590 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
4591 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4592 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4593 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4594 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4595 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4596 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4597 	init_completion(&qdev->ide_completion);
4598 	mutex_init(&qdev->mpi_mutex);
4599 
4600 	if (!cards_found) {
4601 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4602 		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4603 			 DRV_NAME, DRV_VERSION);
4604 	}
4605 	return 0;
4606 err_out2:
4607 	ql_release_all(pdev);
4608 err_out1:
4609 	pci_disable_device(pdev);
4610 	return err;
4611 }
4612 
4613 static const struct net_device_ops qlge_netdev_ops = {
4614 	.ndo_open		= qlge_open,
4615 	.ndo_stop		= qlge_close,
4616 	.ndo_start_xmit		= qlge_send,
4617 	.ndo_change_mtu		= qlge_change_mtu,
4618 	.ndo_get_stats		= qlge_get_stats,
4619 	.ndo_set_rx_mode	= qlge_set_multicast_list,
4620 	.ndo_set_mac_address	= qlge_set_mac_address,
4621 	.ndo_validate_addr	= eth_validate_addr,
4622 	.ndo_tx_timeout		= qlge_tx_timeout,
4623 	.ndo_fix_features	= qlge_fix_features,
4624 	.ndo_set_features	= qlge_set_features,
4625 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4626 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
4627 };
4628 
4629 static void ql_timer(unsigned long data)
4630 {
4631 	struct ql_adapter *qdev = (struct ql_adapter *)data;
4632 	u32 var = 0;
4633 
4634 	var = ql_read32(qdev, STS);
4635 	if (pci_channel_offline(qdev->pdev)) {
4636 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4637 		return;
4638 	}
4639 
4640 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4641 }
4642 
4643 static int __devinit qlge_probe(struct pci_dev *pdev,
4644 				const struct pci_device_id *pci_entry)
4645 {
4646 	struct net_device *ndev = NULL;
4647 	struct ql_adapter *qdev = NULL;
4648 	static int cards_found = 0;
4649 	int err = 0;
4650 
4651 	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4652 			min(MAX_CPUS, (int)num_online_cpus()));
4653 	if (!ndev)
4654 		return -ENOMEM;
4655 
4656 	err = ql_init_device(pdev, ndev, cards_found);
4657 	if (err < 0) {
4658 		free_netdev(ndev);
4659 		return err;
4660 	}
4661 
4662 	qdev = netdev_priv(ndev);
4663 	SET_NETDEV_DEV(ndev, &pdev->dev);
4664 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4665 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4666 		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4667 	ndev->features = ndev->hw_features |
4668 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4669 
4670 	if (test_bit(QL_DMA64, &qdev->flags))
4671 		ndev->features |= NETIF_F_HIGHDMA;
4672 
4673 	/*
4674 	 * Set up net_device structure.
4675 	 */
4676 	ndev->tx_queue_len = qdev->tx_ring_size;
4677 	ndev->irq = pdev->irq;
4678 
4679 	ndev->netdev_ops = &qlge_netdev_ops;
4680 	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4681 	ndev->watchdog_timeo = 10 * HZ;
4682 
4683 	err = register_netdev(ndev);
4684 	if (err) {
4685 		dev_err(&pdev->dev, "net device registration failed.\n");
4686 		ql_release_all(pdev);
4687 		pci_disable_device(pdev);
4688 		return err;
4689 	}
4690 	/* Start up the timer to trigger EEH if
4691 	 * the bus goes dead
4692 	 */
4693 	init_timer_deferrable(&qdev->timer);
4694 	qdev->timer.data = (unsigned long)qdev;
4695 	qdev->timer.function = ql_timer;
4696 	qdev->timer.expires = jiffies + (5*HZ);
4697 	add_timer(&qdev->timer);
4698 	ql_link_off(qdev);
4699 	ql_display_dev_info(ndev);
4700 	atomic_set(&qdev->lb_count, 0);
4701 	cards_found++;
4702 	return 0;
4703 }
4704 
4705 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4706 {
4707 	return qlge_send(skb, ndev);
4708 }
4709 
4710 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4711 {
4712 	return ql_clean_inbound_rx_ring(rx_ring, budget);
4713 }
4714 
4715 static void __devexit qlge_remove(struct pci_dev *pdev)
4716 {
4717 	struct net_device *ndev = pci_get_drvdata(pdev);
4718 	struct ql_adapter *qdev = netdev_priv(ndev);
4719 	del_timer_sync(&qdev->timer);
4720 	ql_cancel_all_work_sync(qdev);
4721 	unregister_netdev(ndev);
4722 	ql_release_all(pdev);
4723 	pci_disable_device(pdev);
4724 	free_netdev(ndev);
4725 }
4726 
4727 /* Clean up resources without touching hardware. */
4728 static void ql_eeh_close(struct net_device *ndev)
4729 {
4730 	int i;
4731 	struct ql_adapter *qdev = netdev_priv(ndev);
4732 
4733 	if (netif_carrier_ok(ndev)) {
4734 		netif_carrier_off(ndev);
4735 		netif_stop_queue(ndev);
4736 	}
4737 
4738 	/* Disabling the timer */
4739 	del_timer_sync(&qdev->timer);
4740 	ql_cancel_all_work_sync(qdev);
4741 
4742 	for (i = 0; i < qdev->rss_ring_count; i++)
4743 		netif_napi_del(&qdev->rx_ring[i].napi);
4744 
4745 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4746 	ql_tx_ring_clean(qdev);
4747 	ql_free_rx_buffers(qdev);
4748 	ql_release_adapter_resources(qdev);
4749 }
4750 
4751 /*
4752  * This callback is called by the PCI subsystem whenever
4753  * a PCI bus error is detected.
4754  */
4755 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4756 					       enum pci_channel_state state)
4757 {
4758 	struct net_device *ndev = pci_get_drvdata(pdev);
4759 	struct ql_adapter *qdev = netdev_priv(ndev);
4760 
4761 	switch (state) {
4762 	case pci_channel_io_normal:
4763 		return PCI_ERS_RESULT_CAN_RECOVER;
4764 	case pci_channel_io_frozen:
4765 		netif_device_detach(ndev);
4766 		if (netif_running(ndev))
4767 			ql_eeh_close(ndev);
4768 		pci_disable_device(pdev);
4769 		return PCI_ERS_RESULT_NEED_RESET;
4770 	case pci_channel_io_perm_failure:
4771 		dev_err(&pdev->dev,
4772 			"%s: pci_channel_io_perm_failure.\n", __func__);
4773 		ql_eeh_close(ndev);
4774 		set_bit(QL_EEH_FATAL, &qdev->flags);
4775 		return PCI_ERS_RESULT_DISCONNECT;
4776 	}
4777 
4778 	/* Request a slot reset. */
4779 	return PCI_ERS_RESULT_NEED_RESET;
4780 }
4781 
4782 /*
4783  * This callback is called after the PCI bus has been reset.
4784  * Basically, this tries to restart the card from scratch.
4785  * This is a shortened version of the device probe/discovery code,
4786  * it resembles the first-half of the () routine.
4787  */
4788 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4789 {
4790 	struct net_device *ndev = pci_get_drvdata(pdev);
4791 	struct ql_adapter *qdev = netdev_priv(ndev);
4792 
4793 	pdev->error_state = pci_channel_io_normal;
4794 
4795 	pci_restore_state(pdev);
4796 	if (pci_enable_device(pdev)) {
4797 		netif_err(qdev, ifup, qdev->ndev,
4798 			  "Cannot re-enable PCI device after reset.\n");
4799 		return PCI_ERS_RESULT_DISCONNECT;
4800 	}
4801 	pci_set_master(pdev);
4802 
4803 	if (ql_adapter_reset(qdev)) {
4804 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4805 		set_bit(QL_EEH_FATAL, &qdev->flags);
4806 		return PCI_ERS_RESULT_DISCONNECT;
4807 	}
4808 
4809 	return PCI_ERS_RESULT_RECOVERED;
4810 }
4811 
4812 static void qlge_io_resume(struct pci_dev *pdev)
4813 {
4814 	struct net_device *ndev = pci_get_drvdata(pdev);
4815 	struct ql_adapter *qdev = netdev_priv(ndev);
4816 	int err = 0;
4817 
4818 	if (netif_running(ndev)) {
4819 		err = qlge_open(ndev);
4820 		if (err) {
4821 			netif_err(qdev, ifup, qdev->ndev,
4822 				  "Device initialization failed after reset.\n");
4823 			return;
4824 		}
4825 	} else {
4826 		netif_err(qdev, ifup, qdev->ndev,
4827 			  "Device was not running prior to EEH.\n");
4828 	}
4829 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4830 	netif_device_attach(ndev);
4831 }
4832 
4833 static struct pci_error_handlers qlge_err_handler = {
4834 	.error_detected = qlge_io_error_detected,
4835 	.slot_reset = qlge_io_slot_reset,
4836 	.resume = qlge_io_resume,
4837 };
4838 
4839 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4840 {
4841 	struct net_device *ndev = pci_get_drvdata(pdev);
4842 	struct ql_adapter *qdev = netdev_priv(ndev);
4843 	int err;
4844 
4845 	netif_device_detach(ndev);
4846 	del_timer_sync(&qdev->timer);
4847 
4848 	if (netif_running(ndev)) {
4849 		err = ql_adapter_down(qdev);
4850 		if (err)
4851 			return err;
4852 	}
4853 
4854 	ql_wol(qdev);
4855 	err = pci_save_state(pdev);
4856 	if (err)
4857 		return err;
4858 
4859 	pci_disable_device(pdev);
4860 
4861 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
4862 
4863 	return 0;
4864 }
4865 
4866 #ifdef CONFIG_PM
4867 static int qlge_resume(struct pci_dev *pdev)
4868 {
4869 	struct net_device *ndev = pci_get_drvdata(pdev);
4870 	struct ql_adapter *qdev = netdev_priv(ndev);
4871 	int err;
4872 
4873 	pci_set_power_state(pdev, PCI_D0);
4874 	pci_restore_state(pdev);
4875 	err = pci_enable_device(pdev);
4876 	if (err) {
4877 		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4878 		return err;
4879 	}
4880 	pci_set_master(pdev);
4881 
4882 	pci_enable_wake(pdev, PCI_D3hot, 0);
4883 	pci_enable_wake(pdev, PCI_D3cold, 0);
4884 
4885 	if (netif_running(ndev)) {
4886 		err = ql_adapter_up(qdev);
4887 		if (err)
4888 			return err;
4889 	}
4890 
4891 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4892 	netif_device_attach(ndev);
4893 
4894 	return 0;
4895 }
4896 #endif /* CONFIG_PM */
4897 
4898 static void qlge_shutdown(struct pci_dev *pdev)
4899 {
4900 	qlge_suspend(pdev, PMSG_SUSPEND);
4901 }
4902 
4903 static struct pci_driver qlge_driver = {
4904 	.name = DRV_NAME,
4905 	.id_table = qlge_pci_tbl,
4906 	.probe = qlge_probe,
4907 	.remove = __devexit_p(qlge_remove),
4908 #ifdef CONFIG_PM
4909 	.suspend = qlge_suspend,
4910 	.resume = qlge_resume,
4911 #endif
4912 	.shutdown = qlge_shutdown,
4913 	.err_handler = &qlge_err_handler
4914 };
4915 
4916 static int __init qlge_init_module(void)
4917 {
4918 	return pci_register_driver(&qlge_driver);
4919 }
4920 
4921 static void __exit qlge_exit(void)
4922 {
4923 	pci_unregister_driver(&qlge_driver);
4924 }
4925 
4926 module_init(qlge_init_module);
4927 module_exit(qlge_exit);
4928