1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44 
45 #include "qlge.h"
46 
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49 
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54 
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |	*/
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 
67 static int debug = -1;	/* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70 
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77 
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 		"Option to enable MPI firmware dump. "
82 		"Default is OFF - Do Not allocate memory. ");
83 
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 		"Option to allow force of firmware core dump. "
88 		"Default is OFF - Do not allow.");
89 
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 	/* required last entry */
94 	{0,}
95 };
96 
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98 
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101 
102 /* This hardware semaphore provides exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108 	u32 sem_bits = 0;
109 
110 	switch (sem_mask) {
111 	case SEM_XGMAC0_MASK:
112 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113 		break;
114 	case SEM_XGMAC1_MASK:
115 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 		break;
117 	case SEM_ICB_MASK:
118 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
119 		break;
120 	case SEM_MAC_ADDR_MASK:
121 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 		break;
123 	case SEM_FLASH_MASK:
124 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 		break;
126 	case SEM_PROBE_MASK:
127 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128 		break;
129 	case SEM_RT_IDX_MASK:
130 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131 		break;
132 	case SEM_PROC_REG_MASK:
133 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 		break;
135 	default:
136 		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137 		return -EINVAL;
138 	}
139 
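	/* Try to set the requested semaphore bits; if the read-back still
	 * shows them set we own the semaphore and return 0, otherwise a
	 * non-zero value is returned.
	 */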
140 	ql_write32(qdev, SEM, sem_bits | sem_mask);
141 	return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143 
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146 	unsigned int wait_count = 30;
147 	do {
148 		if (!ql_sem_trylock(qdev, sem_mask))
149 			return 0;
150 		udelay(100);
151 	} while (--wait_count);
152 	return -ETIMEDOUT;
153 }
154 
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157 	ql_write32(qdev, SEM, sem_mask);
158 	ql_read32(qdev, SEM);	/* flush */
159 }
160 
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialization
163  * process, but is also used by kernel thread APIs such as
164  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168 	u32 temp;
169 	int count = UDELAY_COUNT;
170 
171 	while (count) {
172 		temp = ql_read32(qdev, reg);
173 
174 		/* check for errors */
175 		if (temp & err_bit) {
176 			netif_alert(qdev, probe, qdev->ndev,
177 				    "register 0x%.08x access error, value = 0x%.08x!.\n",
178 				    reg, temp);
179 			return -EIO;
180 		} else if (temp & bit)
181 			return 0;
182 		udelay(UDELAY_DELAY);
183 		count--;
184 	}
185 	netif_alert(qdev, probe, qdev->ndev,
186 		    "Timed out waiting for reg %x to come ready.\n", reg);
187 	return -ETIMEDOUT;
188 }
189 
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195 	int count = UDELAY_COUNT;
196 	u32 temp;
197 
198 	while (count) {
199 		temp = ql_read32(qdev, CFG);
200 		if (temp & CFG_LE)
201 			return -EIO;
202 		if (!(temp & bit))
203 			return 0;
204 		udelay(UDELAY_DELAY);
205 		count--;
206 	}
207 	return -ETIMEDOUT;
208 }
209 
210 
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215 		 u16 q_id)
216 {
217 	u64 map;
218 	int status = 0;
219 	int direction;
220 	u32 mask;
221 	u32 value;
222 
223 	direction =
224 	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 	    PCI_DMA_FROMDEVICE;
226 
227 	map = pci_map_single(qdev->pdev, ptr, size, direction);
228 	if (pci_dma_mapping_error(qdev->pdev, map)) {
229 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230 		return -ENOMEM;
231 	}
232 
233 	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234 	if (status)
235 		return status;
236 
237 	status = ql_wait_cfg(qdev, bit);
238 	if (status) {
239 		netif_err(qdev, ifup, qdev->ndev,
240 			  "Timed out waiting for CFG to come ready.\n");
241 		goto exit;
242 	}
243 
244 	ql_write32(qdev, ICB_L, (u32) map);
245 	ql_write32(qdev, ICB_H, (u32) (map >> 32));
246 
247 	mask = CFG_Q_MASK | (bit << 16);
248 	value = bit | (q_id << CFG_Q_SHIFT);
249 	ql_write32(qdev, CFG, (mask | value));
250 
251 	/*
252 	 * Wait for the bit to clear after signaling hw.
253 	 */
254 	status = ql_wait_cfg(qdev, bit);
255 exit:
256 	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
257 	pci_unmap_single(qdev->pdev, map, size, direction);
258 	return status;
259 }
260 
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263 			u32 *value)
264 {
265 	u32 offset = 0;
266 	int status;
267 
268 	switch (type) {
269 	case MAC_ADDR_TYPE_MULTI_MAC:
270 	case MAC_ADDR_TYPE_CAM_MAC:
271 		{
272 			status =
273 			    ql_wait_reg_rdy(qdev,
274 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275 			if (status)
276 				goto exit;
277 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280 			status =
281 			    ql_wait_reg_rdy(qdev,
282 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283 			if (status)
284 				goto exit;
285 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 			status =
287 			    ql_wait_reg_rdy(qdev,
288 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 			if (status)
290 				goto exit;
291 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294 			status =
295 			    ql_wait_reg_rdy(qdev,
296 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297 			if (status)
298 				goto exit;
299 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 			if (type == MAC_ADDR_TYPE_CAM_MAC) {
301 				status =
302 				    ql_wait_reg_rdy(qdev,
303 					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304 				if (status)
305 					goto exit;
306 				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309 				status =
310 				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311 						    MAC_ADDR_MR, 0);
312 				if (status)
313 					goto exit;
314 				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
315 			}
316 			break;
317 		}
318 	case MAC_ADDR_TYPE_VLAN:
319 	case MAC_ADDR_TYPE_MULTI_FLTR:
320 	default:
321 		netif_crit(qdev, ifup, qdev->ndev,
322 			   "Address type %d not yet supported.\n", type);
323 		status = -EPERM;
324 	}
325 exit:
326 	return status;
327 }
328 
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333 			       u16 index)
334 {
335 	u32 offset = 0;
336 	int status = 0;
337 
338 	switch (type) {
339 	case MAC_ADDR_TYPE_MULTI_MAC:
340 		{
341 			u32 upper = (addr[0] << 8) | addr[1];
342 			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 					(addr[4] << 8) | (addr[5]);
344 
345 			status =
346 				ql_wait_reg_rdy(qdev,
347 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 			if (status)
349 				goto exit;
350 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 				(index << MAC_ADDR_IDX_SHIFT) |
352 				type | MAC_ADDR_E);
353 			ql_write32(qdev, MAC_ADDR_DATA, lower);
354 			status =
355 				ql_wait_reg_rdy(qdev,
356 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 			if (status)
358 				goto exit;
359 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 				(index << MAC_ADDR_IDX_SHIFT) |
361 				type | MAC_ADDR_E);
362 
363 			ql_write32(qdev, MAC_ADDR_DATA, upper);
364 			status =
365 				ql_wait_reg_rdy(qdev,
366 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367 			if (status)
368 				goto exit;
369 			break;
370 		}
371 	case MAC_ADDR_TYPE_CAM_MAC:
372 		{
373 			u32 cam_output;
374 			u32 upper = (addr[0] << 8) | addr[1];
375 			u32 lower =
376 			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 			    (addr[5]);
378 			status =
379 			    ql_wait_reg_rdy(qdev,
380 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381 			if (status)
382 				goto exit;
383 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
385 				   type);	/* type */
386 			ql_write32(qdev, MAC_ADDR_DATA, lower);
387 			status =
388 			    ql_wait_reg_rdy(qdev,
389 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390 			if (status)
391 				goto exit;
392 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
394 				   type);	/* type */
395 			ql_write32(qdev, MAC_ADDR_DATA, upper);
396 			status =
397 			    ql_wait_reg_rdy(qdev,
398 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399 			if (status)
400 				goto exit;
401 			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
402 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
403 				   type);	/* type */
404 			/* This field should also include the queue id
405 			   and possibly the function id.  Right now we hardcode
406 			   the route field to NIC core.
407 			 */
408 			cam_output = (CAM_OUT_ROUTE_NIC |
409 				      (qdev->
410 				       func << CAM_OUT_FUNC_SHIFT) |
411 					(0 << CAM_OUT_CQ_ID_SHIFT));
412 			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
413 				cam_output |= CAM_OUT_RV;
414 			/* route to NIC core */
415 			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
416 			break;
417 		}
418 	case MAC_ADDR_TYPE_VLAN:
419 		{
420 			u32 enable_bit = *((u32 *) &addr[0]);
421 			/* For VLAN, the addr actually holds a bit that
422 			 * either enables or disables the vlan id we are
423 			 * addressing. It's either MAC_ADDR_E on or off.
424 			 * That's bit-27 we're talking about.
425 			 */
426 			status =
427 			    ql_wait_reg_rdy(qdev,
428 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
429 			if (status)
430 				goto exit;
431 			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
432 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
433 				   type |	/* type */
434 				   enable_bit);	/* enable/disable */
435 			break;
436 		}
437 	case MAC_ADDR_TYPE_MULTI_FLTR:
438 	default:
439 		netif_crit(qdev, ifup, qdev->ndev,
440 			   "Address type %d not yet supported.\n", type);
441 		status = -EPERM;
442 	}
443 exit:
444 	return status;
445 }
446 
447 /* Set or clear MAC address in hardware. We sometimes
448  * have to clear it to prevent wrong frame routing
449  * especially in a bonding environment.
450  */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 {
453 	int status;
454 	char zero_mac_addr[ETH_ALEN];
455 	char *addr;
456 
457 	if (set) {
458 		addr = &qdev->current_mac_addr[0];
459 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460 			     "Set Mac addr %pM\n", addr);
461 	} else {
462 		memset(zero_mac_addr, 0, ETH_ALEN);
463 		addr = &zero_mac_addr[0];
464 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465 			     "Clearing MAC address\n");
466 	}
467 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
468 	if (status)
469 		return status;
470 	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
473 	if (status)
474 		netif_err(qdev, ifup, qdev->ndev,
475 			  "Failed to init mac address.\n");
476 	return status;
477 }
478 
479 void ql_link_on(struct ql_adapter *qdev)
480 {
481 	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482 	netif_carrier_on(qdev->ndev);
483 	ql_set_mac_addr(qdev, 1);
484 }
485 
486 void ql_link_off(struct ql_adapter *qdev)
487 {
488 	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489 	netif_carrier_off(qdev->ndev);
490 	ql_set_mac_addr(qdev, 0);
491 }
492 
493 /* Get a specific frame routing value from the CAM.
494  * Used for debug and reg dump.
495  */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498 	int status = 0;
499 
500 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501 	if (status)
502 		goto exit;
503 
504 	ql_write32(qdev, RT_IDX,
505 		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507 	if (status)
508 		goto exit;
509 	*value = ql_read32(qdev, RT_DATA);
510 exit:
511 	return status;
512 }
513 
514 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
515  * to route different frame types to various inbound queues.  We send broadcast/
516  * multicast/error frames to the default queue for slow handling,
517  * and CAM hit/RSS frames to the fast handling queues.
518  */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520 			      int enable)
521 {
522 	int status = -EINVAL; /* Return error if no mask match. */
523 	u32 value = 0;
524 
525 	switch (mask) {
526 	case RT_IDX_CAM_HIT:
527 		{
528 			value = RT_IDX_DST_CAM_Q |	/* dest */
529 			    RT_IDX_TYPE_NICQ |	/* type */
530 			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531 			break;
532 		}
533 	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
534 		{
535 			value = RT_IDX_DST_DFLT_Q |	/* dest */
536 			    RT_IDX_TYPE_NICQ |	/* type */
537 			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538 			break;
539 		}
540 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
541 		{
542 			value = RT_IDX_DST_DFLT_Q |	/* dest */
543 			    RT_IDX_TYPE_NICQ |	/* type */
544 			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545 			break;
546 		}
547 	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548 		{
549 			value = RT_IDX_DST_DFLT_Q | /* dest */
550 				RT_IDX_TYPE_NICQ | /* type */
551 				(RT_IDX_IP_CSUM_ERR_SLOT <<
552 				RT_IDX_IDX_SHIFT); /* index */
553 			break;
554 		}
555 	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556 		{
557 			value = RT_IDX_DST_DFLT_Q | /* dest */
558 				RT_IDX_TYPE_NICQ | /* type */
559 				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560 				RT_IDX_IDX_SHIFT); /* index */
561 			break;
562 		}
563 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
564 		{
565 			value = RT_IDX_DST_DFLT_Q |	/* dest */
566 			    RT_IDX_TYPE_NICQ |	/* type */
567 			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568 			break;
569 		}
570 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
571 		{
572 			value = RT_IDX_DST_DFLT_Q |	/* dest */
573 			    RT_IDX_TYPE_NICQ |	/* type */
574 			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575 			break;
576 		}
577 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
578 		{
579 			value = RT_IDX_DST_DFLT_Q |	/* dest */
580 			    RT_IDX_TYPE_NICQ |	/* type */
581 			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582 			break;
583 		}
584 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
585 		{
586 			value = RT_IDX_DST_RSS |	/* dest */
587 			    RT_IDX_TYPE_NICQ |	/* type */
588 			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589 			break;
590 		}
591 	case 0:		/* Clear the E-bit on an entry. */
592 		{
593 			value = RT_IDX_DST_DFLT_Q |	/* dest */
594 			    RT_IDX_TYPE_NICQ |	/* type */
595 			    (index << RT_IDX_IDX_SHIFT);/* index */
596 			break;
597 		}
598 	default:
599 		netif_err(qdev, ifup, qdev->ndev,
600 			  "Mask type %d not yet supported.\n", mask);
601 		status = -EPERM;
602 		goto exit;
603 	}
604 
605 	if (value) {
606 		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607 		if (status)
608 			goto exit;
609 		value |= (enable ? RT_IDX_E : 0);
610 		ql_write32(qdev, RT_IDX, value);
611 		ql_write32(qdev, RT_DATA, enable ? mask : 0);
612 	}
613 exit:
614 	return status;
615 }
616 
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
618 {
619 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
620 }
621 
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
623 {
624 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
625 }
626 
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628  * Otherwise, we may have multiple outstanding workers and don't want to
629  * enable until the last one finishes. In this case, the irq_cnt gets
630  * incremented every time we queue a worker and decremented every time
631  * a worker finishes.  Once it hits zero we enable the interrupt.
632  */
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635 	u32 var = 0;
636 	unsigned long hw_flags = 0;
637 	struct intr_context *ctx = qdev->intr_context + intr;
638 
639 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640 		/* Always enable if we're MSIX multi interrupts and
641 		 * it's not the default (zeroeth) interrupt.
642 		 */
643 		ql_write32(qdev, INTR_EN,
644 			   ctx->intr_en_mask);
645 		var = ql_read32(qdev, STS);
646 		return var;
647 	}
648 
649 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650 	if (atomic_dec_and_test(&ctx->irq_cnt)) {
651 		ql_write32(qdev, INTR_EN,
652 			   ctx->intr_en_mask);
653 		var = ql_read32(qdev, STS);
654 	}
655 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
656 	return var;
657 }
658 
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
660 {
661 	u32 var = 0;
662 	struct intr_context *ctx;
663 
664 	/* HW disables for us if we're MSIX multi interrupts and
665 	 * it's not the default (zeroeth) interrupt.
666 	 */
667 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
668 		return 0;
669 
670 	ctx = qdev->intr_context + intr;
671 	spin_lock(&qdev->hw_lock);
672 	if (!atomic_read(&ctx->irq_cnt)) {
673 		ql_write32(qdev, INTR_EN,
674 		ctx->intr_dis_mask);
675 		var = ql_read32(qdev, STS);
676 	}
677 	atomic_inc(&ctx->irq_cnt);
678 	spin_unlock(&qdev->hw_lock);
679 	return var;
680 }
681 
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
683 {
684 	int i;
685 	for (i = 0; i < qdev->intr_count; i++) {
686 		/* The enable call does an atomic_dec_and_test
687 		 * and enables only if the result is zero.
688 		 * So we precharge it here.
689 		 */
690 		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
691 			i == 0))
692 			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693 		ql_enable_completion_interrupt(qdev, i);
694 	}
695 
696 }
697 
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
699 {
700 	int status, i;
701 	u16 csum = 0;
702 	__le16 *flash = (__le16 *)&qdev->flash;
703 
704 	status = strncmp((char *)&qdev->flash, str, 4);
705 	if (status) {
706 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
707 		return	status;
708 	}
709 
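	/* Sum the image as 16-bit words; anything other than zero is
	 * treated as a corrupt image.
	 */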
710 	for (i = 0; i < size; i++)
711 		csum += le16_to_cpu(*flash++);
712 
713 	if (csum)
714 		netif_err(qdev, ifup, qdev->ndev,
715 			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
716 
717 	return csum;
718 }
719 
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
721 {
722 	int status = 0;
723 	/* wait for reg to come ready */
724 	status = ql_wait_reg_rdy(qdev,
725 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
726 	if (status)
727 		goto exit;
728 	/* set up for reg read */
729 	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730 	/* wait for reg to come ready */
731 	status = ql_wait_reg_rdy(qdev,
732 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
733 	if (status)
734 		goto exit;
735 	 /* This data is stored on flash as an array of
736 	 * __le32.  Since ql_read32() returns cpu endian
737 	 * we need to swap it back.
738 	 */
739 	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
740 exit:
741 	return status;
742 }
743 
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
745 {
746 	u32 i, size;
747 	int status;
748 	__le32 *p = (__le32 *)&qdev->flash;
749 	u32 offset;
750 	u8 mac_addr[6];
751 
752 	/* Get flash offset for function and adjust
753 	 * for dword access.
754 	 */
755 	if (!qdev->port)
756 		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
757 	else
758 		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
759 
760 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
761 		return -ETIMEDOUT;
762 
763 	size = sizeof(struct flash_params_8000) / sizeof(u32);
764 	for (i = 0; i < size; i++, p++) {
765 		status = ql_read_flash_word(qdev, i+offset, p);
766 		if (status) {
767 			netif_err(qdev, ifup, qdev->ndev,
768 				  "Error reading flash.\n");
769 			goto exit;
770 		}
771 	}
772 
773 	status = ql_validate_flash(qdev,
774 			sizeof(struct flash_params_8000) / sizeof(u16),
775 			"8000");
776 	if (status) {
777 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
778 		status = -EINVAL;
779 		goto exit;
780 	}
781 
782 	/* Extract either manufacturer or BOFM modified
783 	 * MAC address.
784 	 */
785 	if (qdev->flash.flash_params_8000.data_type1 == 2)
786 		memcpy(mac_addr,
787 			qdev->flash.flash_params_8000.mac_addr1,
788 			qdev->ndev->addr_len);
789 	else
790 		memcpy(mac_addr,
791 			qdev->flash.flash_params_8000.mac_addr,
792 			qdev->ndev->addr_len);
793 
794 	if (!is_valid_ether_addr(mac_addr)) {
795 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
796 		status = -EINVAL;
797 		goto exit;
798 	}
799 
800 	memcpy(qdev->ndev->dev_addr,
801 		mac_addr,
802 		qdev->ndev->addr_len);
803 
804 exit:
805 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
806 	return status;
807 }
808 
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
810 {
811 	int i;
812 	int status;
813 	__le32 *p = (__le32 *)&qdev->flash;
814 	u32 offset = 0;
815 	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
816 
817 	/* Second function's parameters follow the first
818 	 * function's.
819 	 */
820 	if (qdev->port)
821 		offset = size;
822 
823 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
824 		return -ETIMEDOUT;
825 
826 	for (i = 0; i < size; i++, p++) {
827 		status = ql_read_flash_word(qdev, i+offset, p);
828 		if (status) {
829 			netif_err(qdev, ifup, qdev->ndev,
830 				  "Error reading flash.\n");
831 			goto exit;
832 		}
833 
834 	}
835 
836 	status = ql_validate_flash(qdev,
837 			sizeof(struct flash_params_8012) / sizeof(u16),
838 			"8012");
839 	if (status) {
840 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
841 		status = -EINVAL;
842 		goto exit;
843 	}
844 
845 	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
846 		status = -EINVAL;
847 		goto exit;
848 	}
849 
850 	memcpy(qdev->ndev->dev_addr,
851 		qdev->flash.flash_params_8012.mac_addr,
852 		qdev->ndev->addr_len);
853 
854 exit:
855 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
856 	return status;
857 }
858 
859 /* xgmac registers are located behind the xgmac_addr and xgmac_data
860  * register pair.  Each read/write requires us to wait for the ready
861  * bit before reading/writing the data.
862  */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
864 {
865 	int status;
866 	/* wait for reg to come ready */
867 	status = ql_wait_reg_rdy(qdev,
868 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
869 	if (status)
870 		return status;
871 	/* write the data to the data reg */
872 	ql_write32(qdev, XGMAC_DATA, data);
873 	/* trigger the write */
874 	ql_write32(qdev, XGMAC_ADDR, reg);
875 	return status;
876 }
877 
878 /* xgmac registers are located behind the xgmac_addr and xgmac_data
879  * register pair.  Each read/write requires us to wait for the ready
880  * bit before reading/writing the data.
881  */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
883 {
884 	int status = 0;
885 	/* wait for reg to come ready */
886 	status = ql_wait_reg_rdy(qdev,
887 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
888 	if (status)
889 		goto exit;
890 	/* set up for reg read */
891 	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892 	/* wait for reg to come ready */
893 	status = ql_wait_reg_rdy(qdev,
894 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
895 	if (status)
896 		goto exit;
897 	/* get the data */
898 	*data = ql_read32(qdev, XGMAC_DATA);
899 exit:
900 	return status;
901 }
902 
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
905 {
906 	int status = 0;
907 	u32 hi = 0;
908 	u32 lo = 0;
909 
910 	status = ql_read_xgmac_reg(qdev, reg, &lo);
911 	if (status)
912 		goto exit;
913 
914 	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
915 	if (status)
916 		goto exit;
917 
918 	*data = (u64) lo | ((u64) hi << 32);
919 
920 exit:
921 	return status;
922 }
923 
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
925 {
926 	int status;
927 	/*
928 	 * Get MPI firmware version for driver banner
929 	 * and ethtool info.
930 	 */
931 	status = ql_mb_about_fw(qdev);
932 	if (status)
933 		goto exit;
934 	status = ql_mb_get_fw_state(qdev);
935 	if (status)
936 		goto exit;
937 	/* Wake up a worker to get/set the TX/RX frame sizes. */
938 	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
939 exit:
940 	return status;
941 }
942 
943 /* Take the MAC Core out of reset.
944  * Enable statistics counting.
945  * Take the transmitter/receiver out of reset.
946  * This functionality may be done in the MPI firmware at a
947  * later date.
948  */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
950 {
951 	int status = 0;
952 	u32 data;
953 
954 	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955 		/* Another function has the semaphore, so
956 		 * wait for the port init bit to come ready.
957 		 */
958 		netif_info(qdev, link, qdev->ndev,
959 			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960 		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
961 		if (status) {
962 			netif_crit(qdev, link, qdev->ndev,
963 				   "Port initialize timed out.\n");
964 		}
965 		return status;
966 	}
967 
968 	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
969 	/* Set the core reset. */
970 	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
971 	if (status)
972 		goto end;
973 	data |= GLOBAL_CFG_RESET;
974 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
975 	if (status)
976 		goto end;
977 
978 	/* Clear the core reset and turn on jumbo for receiver. */
979 	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
980 	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
981 	data |= GLOBAL_CFG_TX_STAT_EN;
982 	data |= GLOBAL_CFG_RX_STAT_EN;
983 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984 	if (status)
985 		goto end;
986 
987 	/* Enable the transmitter and clear its reset. */
988 	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
989 	if (status)
990 		goto end;
991 	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
992 	data |= TX_CFG_EN;	/* Enable the transmitter. */
993 	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
994 	if (status)
995 		goto end;
996 
997 	/* Enable the receiver and clear its reset. */
998 	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
999 	if (status)
1000 		goto end;
1001 	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
1002 	data |= RX_CFG_EN;	/* Enable the receiver. */
1003 	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1004 	if (status)
1005 		goto end;
1006 
1007 	/* Turn on jumbo. */
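	/* 0x2580 (9600 decimal) is the jumbo frame size programmed into
	 * both the TX and RX parameter registers below.
	 */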
1008 	status =
1009 	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1010 	if (status)
1011 		goto end;
1012 	status =
1013 	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1014 	if (status)
1015 		goto end;
1016 
1017 	/* Signal to the world that the port is enabled.        */
1018 	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1019 end:
1020 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
1021 	return status;
1022 }
1023 
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1025 {
1026 	return PAGE_SIZE << qdev->lbq_buf_order;
1027 }
1028 
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1031 {
1032 	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033 	rx_ring->lbq_curr_idx++;
1034 	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035 		rx_ring->lbq_curr_idx = 0;
1036 	rx_ring->lbq_free_cnt++;
1037 	return lbq_desc;
1038 }
1039 
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041 		struct rx_ring *rx_ring)
1042 {
1043 	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1044 
1045 	pci_dma_sync_single_for_cpu(qdev->pdev,
1046 					dma_unmap_addr(lbq_desc, mapaddr),
1047 				    rx_ring->lbq_buf_size,
1048 					PCI_DMA_FROMDEVICE);
1049 
1050 	/* If it's the last chunk of our master page then
1051 	 * we unmap it.
1052 	 */
1053 	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054 					== ql_lbq_block_size(qdev))
1055 		pci_unmap_page(qdev->pdev,
1056 				lbq_desc->p.pg_chunk.map,
1057 				ql_lbq_block_size(qdev),
1058 				PCI_DMA_FROMDEVICE);
1059 	return lbq_desc;
1060 }
1061 
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1064 {
1065 	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066 	rx_ring->sbq_curr_idx++;
1067 	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068 		rx_ring->sbq_curr_idx = 0;
1069 	rx_ring->sbq_free_cnt++;
1070 	return sbq_desc;
1071 }
1072 
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1075 {
1076 	rx_ring->cnsmr_idx++;
1077 	rx_ring->curr_entry++;
1078 	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079 		rx_ring->cnsmr_idx = 0;
1080 		rx_ring->curr_entry = rx_ring->cq_base;
1081 	}
1082 }
1083 
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1085 {
1086 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1087 }
1088 
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090 						struct bq_desc *lbq_desc)
1091 {
1092 	if (!rx_ring->pg_chunk.page) {
1093 		u64 map;
1094 		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1095 						GFP_ATOMIC,
1096 						qdev->lbq_buf_order);
1097 		if (unlikely(!rx_ring->pg_chunk.page)) {
1098 			netif_err(qdev, drv, qdev->ndev,
1099 				  "page allocation failed.\n");
1100 			return -ENOMEM;
1101 		}
1102 		rx_ring->pg_chunk.offset = 0;
1103 		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104 					0, ql_lbq_block_size(qdev),
1105 					PCI_DMA_FROMDEVICE);
1106 		if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 			__free_pages(rx_ring->pg_chunk.page,
1108 					qdev->lbq_buf_order);
1109 			rx_ring->pg_chunk.page = NULL;
1110 			netif_err(qdev, drv, qdev->ndev,
1111 				  "PCI mapping failed.\n");
1112 			return -ENOMEM;
1113 		}
1114 		rx_ring->pg_chunk.map = map;
1115 		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1116 	}
1117 
1118 	/* Copy the current master pg_chunk info
1119 	 * to the current descriptor.
1120 	 */
1121 	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1122 
1123 	/* Adjust the master page chunk for next
1124 	 * buffer get.
1125 	 */
1126 	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1127 	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1128 		rx_ring->pg_chunk.page = NULL;
1129 		lbq_desc->p.pg_chunk.last_flag = 1;
1130 	} else {
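		/* The master page is shared by more than one chunk, so
		 * take an extra reference for the next descriptor.
		 */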
1131 		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1132 		get_page(rx_ring->pg_chunk.page);
1133 		lbq_desc->p.pg_chunk.last_flag = 0;
1134 	}
1135 	return 0;
1136 }
1137 /* Process (refill) a large buffer queue. */
1138 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1139 {
1140 	u32 clean_idx = rx_ring->lbq_clean_idx;
1141 	u32 start_idx = clean_idx;
1142 	struct bq_desc *lbq_desc;
1143 	u64 map;
1144 	int i;
1145 
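	/* Refill in batches of 16 descriptors, and only while more than
	 * 32 buffers have been consumed.
	 */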
1146 	while (rx_ring->lbq_free_cnt > 32) {
1147 		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1148 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1149 				     "lbq: try cleaning clean_idx = %d.\n",
1150 				     clean_idx);
1151 			lbq_desc = &rx_ring->lbq[clean_idx];
1152 			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1153 				rx_ring->lbq_clean_idx = clean_idx;
1154 				netif_err(qdev, ifup, qdev->ndev,
1155 						"Could not get a page chunk, i=%d, clean_idx =%d .\n",
1156 						i, clean_idx);
1157 				return;
1158 			}
1159 
1160 			map = lbq_desc->p.pg_chunk.map +
1161 				lbq_desc->p.pg_chunk.offset;
1162 			dma_unmap_addr_set(lbq_desc, mapaddr, map);
1163 			dma_unmap_len_set(lbq_desc, maplen,
1164 					rx_ring->lbq_buf_size);
1165 			*lbq_desc->addr = cpu_to_le64(map);
1166 
1167 			pci_dma_sync_single_for_device(qdev->pdev, map,
1168 						rx_ring->lbq_buf_size,
1169 						PCI_DMA_FROMDEVICE);
1170 			clean_idx++;
1171 			if (clean_idx == rx_ring->lbq_len)
1172 				clean_idx = 0;
1173 		}
1174 
1175 		rx_ring->lbq_clean_idx = clean_idx;
1176 		rx_ring->lbq_prod_idx += 16;
1177 		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1178 			rx_ring->lbq_prod_idx = 0;
1179 		rx_ring->lbq_free_cnt -= 16;
1180 	}
1181 
1182 	if (start_idx != clean_idx) {
1183 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1184 			     "lbq: updating prod idx = %d.\n",
1185 			     rx_ring->lbq_prod_idx);
1186 		ql_write_db_reg(rx_ring->lbq_prod_idx,
1187 				rx_ring->lbq_prod_idx_db_reg);
1188 	}
1189 }
1190 
1191 /* Process (refill) a small buffer queue. */
1192 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1193 {
1194 	u32 clean_idx = rx_ring->sbq_clean_idx;
1195 	u32 start_idx = clean_idx;
1196 	struct bq_desc *sbq_desc;
1197 	u64 map;
1198 	int i;
1199 
1200 	while (rx_ring->sbq_free_cnt > 16) {
1201 		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1202 			sbq_desc = &rx_ring->sbq[clean_idx];
1203 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1204 				     "sbq: try cleaning clean_idx = %d.\n",
1205 				     clean_idx);
1206 			if (sbq_desc->p.skb == NULL) {
1207 				netif_printk(qdev, rx_status, KERN_DEBUG,
1208 					     qdev->ndev,
1209 					     "sbq: getting new skb for index %d.\n",
1210 					     sbq_desc->index);
1211 				sbq_desc->p.skb =
1212 				    netdev_alloc_skb(qdev->ndev,
1213 						     SMALL_BUFFER_SIZE);
1214 				if (sbq_desc->p.skb == NULL) {
1215 					rx_ring->sbq_clean_idx = clean_idx;
1216 					return;
1217 				}
1218 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1219 				map = pci_map_single(qdev->pdev,
1220 						     sbq_desc->p.skb->data,
1221 						     rx_ring->sbq_buf_size,
1222 						     PCI_DMA_FROMDEVICE);
1223 				if (pci_dma_mapping_error(qdev->pdev, map)) {
1224 					netif_err(qdev, ifup, qdev->ndev,
1225 						  "PCI mapping failed.\n");
1226 					rx_ring->sbq_clean_idx = clean_idx;
1227 					dev_kfree_skb_any(sbq_desc->p.skb);
1228 					sbq_desc->p.skb = NULL;
1229 					return;
1230 				}
1231 				dma_unmap_addr_set(sbq_desc, mapaddr, map);
1232 				dma_unmap_len_set(sbq_desc, maplen,
1233 						  rx_ring->sbq_buf_size);
1234 				*sbq_desc->addr = cpu_to_le64(map);
1235 			}
1236 
1237 			clean_idx++;
1238 			if (clean_idx == rx_ring->sbq_len)
1239 				clean_idx = 0;
1240 		}
1241 		rx_ring->sbq_clean_idx = clean_idx;
1242 		rx_ring->sbq_prod_idx += 16;
1243 		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1244 			rx_ring->sbq_prod_idx = 0;
1245 		rx_ring->sbq_free_cnt -= 16;
1246 	}
1247 
1248 	if (start_idx != clean_idx) {
1249 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1250 			     "sbq: updating prod idx = %d.\n",
1251 			     rx_ring->sbq_prod_idx);
1252 		ql_write_db_reg(rx_ring->sbq_prod_idx,
1253 				rx_ring->sbq_prod_idx_db_reg);
1254 	}
1255 }
1256 
1257 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1258 				    struct rx_ring *rx_ring)
1259 {
1260 	ql_update_sbq(qdev, rx_ring);
1261 	ql_update_lbq(qdev, rx_ring);
1262 }
1263 
1264 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1265  * fails at some stage, or from the interrupt when a tx completes.
1266  */
1267 static void ql_unmap_send(struct ql_adapter *qdev,
1268 			  struct tx_ring_desc *tx_ring_desc, int mapped)
1269 {
1270 	int i;
1271 	for (i = 0; i < mapped; i++) {
1272 		if (i == 0 || (i == 7 && mapped > 7)) {
1273 			/*
1274 			 * Unmap the skb->data area, or the
1275 			 * external sglist (AKA the Outbound
1276 			 * Address List (OAL)).
1277 			 * If it's the zeroeth element, then it's
1278 			 * the skb->data area.  If it's the 7th
1279 			 * element and there are more than 6 frags,
1280 			 * then it's an OAL.
1281 			 */
1282 			if (i == 7) {
1283 				netif_printk(qdev, tx_done, KERN_DEBUG,
1284 					     qdev->ndev,
1285 					     "unmapping OAL area.\n");
1286 			}
1287 			pci_unmap_single(qdev->pdev,
1288 					 dma_unmap_addr(&tx_ring_desc->map[i],
1289 							mapaddr),
1290 					 dma_unmap_len(&tx_ring_desc->map[i],
1291 						       maplen),
1292 					 PCI_DMA_TODEVICE);
1293 		} else {
1294 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1295 				     "unmapping frag %d.\n", i);
1296 			pci_unmap_page(qdev->pdev,
1297 				       dma_unmap_addr(&tx_ring_desc->map[i],
1298 						      mapaddr),
1299 				       dma_unmap_len(&tx_ring_desc->map[i],
1300 						     maplen), PCI_DMA_TODEVICE);
1301 		}
1302 	}
1303 
1304 }
1305 
1306 /* Map the buffers for this transmit.  This will return
1307  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1308  */
1309 static int ql_map_send(struct ql_adapter *qdev,
1310 		       struct ob_mac_iocb_req *mac_iocb_ptr,
1311 		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1312 {
1313 	int len = skb_headlen(skb);
1314 	dma_addr_t map;
1315 	int frag_idx, err, map_idx = 0;
1316 	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1317 	int frag_cnt = skb_shinfo(skb)->nr_frags;
1318 
1319 	if (frag_cnt) {
1320 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1321 			     "frag_cnt = %d.\n", frag_cnt);
1322 	}
1323 	/*
1324 	 * Map the skb buffer first.
1325 	 */
1326 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1327 
1328 	err = pci_dma_mapping_error(qdev->pdev, map);
1329 	if (err) {
1330 		netif_err(qdev, tx_queued, qdev->ndev,
1331 			  "PCI mapping failed with error: %d\n", err);
1332 
1333 		return NETDEV_TX_BUSY;
1334 	}
1335 
1336 	tbd->len = cpu_to_le32(len);
1337 	tbd->addr = cpu_to_le64(map);
1338 	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339 	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1340 	map_idx++;
1341 
1342 	/*
1343 	 * This loop fills the remainder of the 8 address descriptors
1344 	 * in the IOCB.  If there are more than 7 fragments, then the
1345 	 * eighth address desc will point to an external list (OAL).
1346 	 * When this happens, the remainder of the frags will be stored
1347 	 * in this list.
1348 	 */
1349 	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1350 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1351 		tbd++;
1352 		if (frag_idx == 6 && frag_cnt > 7) {
1353 			/* Let's tack on an sglist.
1354 			 * Our control block will now
1355 			 * look like this:
1356 			 * iocb->seg[0] = skb->data
1357 			 * iocb->seg[1] = frag[0]
1358 			 * iocb->seg[2] = frag[1]
1359 			 * iocb->seg[3] = frag[2]
1360 			 * iocb->seg[4] = frag[3]
1361 			 * iocb->seg[5] = frag[4]
1362 			 * iocb->seg[6] = frag[5]
1363 			 * iocb->seg[7] = ptr to OAL (external sglist)
1364 			 * oal->seg[0] = frag[6]
1365 			 * oal->seg[1] = frag[7]
1366 			 * oal->seg[2] = frag[8]
1367 			 * oal->seg[3] = frag[9]
1368 			 * oal->seg[4] = frag[10]
1369 			 *      etc...
1370 			 */
1371 			/* Tack on the OAL in the eighth segment of IOCB. */
1372 			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1373 					     sizeof(struct oal),
1374 					     PCI_DMA_TODEVICE);
1375 			err = pci_dma_mapping_error(qdev->pdev, map);
1376 			if (err) {
1377 				netif_err(qdev, tx_queued, qdev->ndev,
1378 					  "PCI mapping outbound address list with error: %d\n",
1379 					  err);
1380 				goto map_error;
1381 			}
1382 
1383 			tbd->addr = cpu_to_le64(map);
1384 			/*
1385 			 * The length is the number of fragments
1386 			 * that remain to be mapped times the length
1387 			 * of our sglist (OAL).
1388 			 */
1389 			tbd->len =
1390 			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1391 					 (frag_cnt - frag_idx)) | TX_DESC_C);
1392 			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1393 					   map);
1394 			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1395 					  sizeof(struct oal));
1396 			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1397 			map_idx++;
1398 		}
1399 
1400 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1401 				       DMA_TO_DEVICE);
1402 
1403 		err = dma_mapping_error(&qdev->pdev->dev, map);
1404 		if (err) {
1405 			netif_err(qdev, tx_queued, qdev->ndev,
1406 				  "PCI mapping frags failed with error: %d.\n",
1407 				  err);
1408 			goto map_error;
1409 		}
1410 
1411 		tbd->addr = cpu_to_le64(map);
1412 		tbd->len = cpu_to_le32(skb_frag_size(frag));
1413 		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1414 		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1415 				  skb_frag_size(frag));
1416 
1417 	}
1418 	/* Save the number of segments we've mapped. */
1419 	tx_ring_desc->map_cnt = map_idx;
1420 	/* Terminate the last segment. */
1421 	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1422 	return NETDEV_TX_OK;
1423 
1424 map_error:
1425 	/*
1426 	 * If the first frag mapping failed, then i will be zero.
1427 	 * This causes the unmap of the skb->data area.  Otherwise
1428 	 * we pass in the number of frags that mapped successfully
1429 	 * so they can be unmapped.
1430 	 */
1431 	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1432 	return NETDEV_TX_BUSY;
1433 }
1434 
1435 /* Categorizing receive firmware frame errors */
1436 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1437 				 struct rx_ring *rx_ring)
1438 {
1439 	struct nic_stats *stats = &qdev->nic_stats;
1440 
1441 	stats->rx_err_count++;
1442 	rx_ring->rx_errors++;
1443 
1444 	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1445 	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1446 		stats->rx_code_err++;
1447 		break;
1448 	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1449 		stats->rx_oversize_err++;
1450 		break;
1451 	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1452 		stats->rx_undersize_err++;
1453 		break;
1454 	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1455 		stats->rx_preamble_err++;
1456 		break;
1457 	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1458 		stats->rx_frame_len_err++;
1459 		break;
1460 	case IB_MAC_IOCB_RSP_ERR_CRC:
1461 		stats->rx_crc_err++;
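		/* fall through */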
1462 	default:
1463 		break;
1464 	}
1465 }
1466 
1467 /* Process an inbound completion from an rx ring. */
1468 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1469 					struct rx_ring *rx_ring,
1470 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1471 					u32 length,
1472 					u16 vlan_id)
1473 {
1474 	struct sk_buff *skb;
1475 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1476 	struct napi_struct *napi = &rx_ring->napi;
1477 
1478 	/* Frame error, so drop the packet. */
1479 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1480 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1481 		put_page(lbq_desc->p.pg_chunk.page);
1482 		return;
1483 	}
1484 	napi->dev = qdev->ndev;
1485 
1486 	skb = napi_get_frags(napi);
1487 	if (!skb) {
1488 		netif_err(qdev, drv, qdev->ndev,
1489 			  "Couldn't get an skb, exiting.\n");
1490 		rx_ring->rx_dropped++;
1491 		put_page(lbq_desc->p.pg_chunk.page);
1492 		return;
1493 	}
1494 	prefetch(lbq_desc->p.pg_chunk.va);
1495 	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1496 			     lbq_desc->p.pg_chunk.page,
1497 			     lbq_desc->p.pg_chunk.offset,
1498 			     length);
1499 
1500 	skb->len += length;
1501 	skb->data_len += length;
1502 	skb->truesize += length;
1503 	skb_shinfo(skb)->nr_frags++;
1504 
1505 	rx_ring->rx_packets++;
1506 	rx_ring->rx_bytes += length;
1507 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1508 	skb_record_rx_queue(skb, rx_ring->cq_id);
1509 	if (vlan_id != 0xffff)
1510 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1511 	napi_gro_frags(napi);
1512 }
1513 
1514 /* Process an inbound completion from an rx ring. */
1515 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1516 					struct rx_ring *rx_ring,
1517 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1518 					u32 length,
1519 					u16 vlan_id)
1520 {
1521 	struct net_device *ndev = qdev->ndev;
1522 	struct sk_buff *skb = NULL;
1523 	void *addr;
1524 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1525 	struct napi_struct *napi = &rx_ring->napi;
1526 
1527 	skb = netdev_alloc_skb(ndev, length);
1528 	if (!skb) {
1529 		rx_ring->rx_dropped++;
1530 		put_page(lbq_desc->p.pg_chunk.page);
1531 		return;
1532 	}
1533 
1534 	addr = lbq_desc->p.pg_chunk.va;
1535 	prefetch(addr);
1536 
1537 	/* Frame error, so drop the packet. */
1538 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1539 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1540 		goto err_out;
1541 	}
1542 
1543 	/* The max framesize filter on this chip is set higher than
1544 	 * MTU since FCoE uses 2k frames.
1545 	 */
1546 	if (skb->len > ndev->mtu + ETH_HLEN) {
1547 		netif_err(qdev, drv, qdev->ndev,
1548 			  "Segment too small, dropping.\n");
1549 		rx_ring->rx_dropped++;
1550 		goto err_out;
1551 	}
1552 	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1553 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1554 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1555 		     length);
1556 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1557 				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1558 				length-ETH_HLEN);
1559 	skb->len += length-ETH_HLEN;
1560 	skb->data_len += length-ETH_HLEN;
1561 	skb->truesize += length-ETH_HLEN;
1562 
1563 	rx_ring->rx_packets++;
1564 	rx_ring->rx_bytes += skb->len;
1565 	skb->protocol = eth_type_trans(skb, ndev);
1566 	skb_checksum_none_assert(skb);
1567 
1568 	if ((ndev->features & NETIF_F_RXCSUM) &&
1569 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1570 		/* TCP frame. */
1571 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1572 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1573 				     "TCP checksum done!\n");
1574 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1575 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1576 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1577 			/* Unfragmented ipv4 UDP frame. */
1578 			struct iphdr *iph =
1579 				(struct iphdr *) ((u8 *)addr + ETH_HLEN);
1580 			if (!(iph->frag_off &
1581 				htons(IP_MF|IP_OFFSET))) {
1582 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 				netif_printk(qdev, rx_status, KERN_DEBUG,
1584 					     qdev->ndev,
1585 					     "UDP checksum done!\n");
1586 			}
1587 		}
1588 	}
1589 
1590 	skb_record_rx_queue(skb, rx_ring->cq_id);
1591 	if (vlan_id != 0xffff)
1592 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1593 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594 		napi_gro_receive(napi, skb);
1595 	else
1596 		netif_receive_skb(skb);
1597 	return;
1598 err_out:
1599 	dev_kfree_skb_any(skb);
1600 	put_page(lbq_desc->p.pg_chunk.page);
1601 }
1602 
1603 /* Process an inbound completion from an rx ring. */
1604 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605 					struct rx_ring *rx_ring,
1606 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1607 					u32 length,
1608 					u16 vlan_id)
1609 {
1610 	struct net_device *ndev = qdev->ndev;
1611 	struct sk_buff *skb = NULL;
1612 	struct sk_buff *new_skb = NULL;
1613 	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614 
1615 	skb = sbq_desc->p.skb;
1616 	/* Allocate new_skb and copy */
1617 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618 	if (new_skb == NULL) {
1619 		rx_ring->rx_dropped++;
1620 		return;
1621 	}
1622 	skb_reserve(new_skb, NET_IP_ALIGN);
1623 	memcpy(skb_put(new_skb, length), skb->data, length);
1624 	skb = new_skb;
1625 
1626 	/* Frame error, so drop the packet. */
1627 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1628 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1629 		dev_kfree_skb_any(skb);
1630 		return;
1631 	}
1632 
1633 	/* loopback self test for ethtool */
1634 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1635 		ql_check_lb_frame(qdev, skb);
1636 		dev_kfree_skb_any(skb);
1637 		return;
1638 	}
1639 
1640 	/* The max framesize filter on this chip is set higher than
1641 	 * MTU since FCoE uses 2k frames.
1642 	 */
1643 	if (skb->len > ndev->mtu + ETH_HLEN) {
1644 		dev_kfree_skb_any(skb);
1645 		rx_ring->rx_dropped++;
1646 		return;
1647 	}
1648 
1649 	prefetch(skb->data);
1650 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1651 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1652 			     "%s Multicast.\n",
1653 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1654 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1655 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1656 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1657 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1658 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1659 	}
1660 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1661 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1662 			     "Promiscuous Packet.\n");
1663 
1664 	rx_ring->rx_packets++;
1665 	rx_ring->rx_bytes += skb->len;
1666 	skb->protocol = eth_type_trans(skb, ndev);
1667 	skb_checksum_none_assert(skb);
1668 
1669 	/* If rx checksum is on, and there are no
1670 	 * csum or frame errors.
1671 	 */
1672 	if ((ndev->features & NETIF_F_RXCSUM) &&
1673 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1674 		/* TCP frame. */
1675 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1676 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1677 				     "TCP checksum done!\n");
1678 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1679 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1680 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1681 			/* Unfragmented ipv4 UDP frame. */
1682 			struct iphdr *iph = (struct iphdr *) skb->data;
1683 			if (!(iph->frag_off &
1684 				htons(IP_MF|IP_OFFSET))) {
1685 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1686 				netif_printk(qdev, rx_status, KERN_DEBUG,
1687 					     qdev->ndev,
1688 					     "UDP checksum done!\n");
1689 			}
1690 		}
1691 	}
1692 
1693 	skb_record_rx_queue(skb, rx_ring->cq_id);
1694 	if (vlan_id != 0xffff)
1695 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1696 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1697 		napi_gro_receive(&rx_ring->napi, skb);
1698 	else
1699 		netif_receive_skb(skb);
1700 }
1701 
1702 static void ql_realign_skb(struct sk_buff *skb, int len)
1703 {
1704 	void *temp_addr = skb->data;
1705 
1706 	/* Undo the skb_reserve(skb,32) we did before
1707 	 * giving to hardware, and realign data on
1708 	 * a 2-byte boundary.
1709 	 */
1710 	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1711 	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1712 	skb_copy_to_linear_data(skb, temp_addr,
1713 		(unsigned int)len);
1714 }
1715 
1716 /*
1717  * This function builds an skb for the given inbound
1718  * completion.  It will be rewritten for readability in the near
1719  * future, but for now it works well.
1720  */
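/* Buffer layouts handled below: the header may arrive in a small buffer,
 * and the data may sit in that same small buffer, in a second small
 * buffer, in a single large-buffer page chunk, or spread across a chain
 * of large buffers described by a small-buffer sg list.
 */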
1721 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1722 				       struct rx_ring *rx_ring,
1723 				       struct ib_mac_iocb_rsp *ib_mac_rsp)
1724 {
1725 	struct bq_desc *lbq_desc;
1726 	struct bq_desc *sbq_desc;
1727 	struct sk_buff *skb = NULL;
1728 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1729 	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1730 
1731 	/*
1732 	 * Handle the header buffer if present.
1733 	 */
1734 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1735 	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1736 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1737 			     "Header of %d bytes in small buffer.\n", hdr_len);
1738 		/*
1739 		 * Headers fit nicely into a small buffer.
1740 		 */
1741 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1742 		pci_unmap_single(qdev->pdev,
1743 				dma_unmap_addr(sbq_desc, mapaddr),
1744 				dma_unmap_len(sbq_desc, maplen),
1745 				PCI_DMA_FROMDEVICE);
1746 		skb = sbq_desc->p.skb;
1747 		ql_realign_skb(skb, hdr_len);
1748 		skb_put(skb, hdr_len);
1749 		sbq_desc->p.skb = NULL;
1750 	}
1751 
1752 	/*
1753 	 * Handle the data buffer(s).
1754 	 */
1755 	if (unlikely(!length)) {	/* Is there data too? */
1756 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1757 			     "No Data buffer in this packet.\n");
1758 		return skb;
1759 	}
1760 
1761 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1762 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1763 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1764 				     "Headers in small, data of %d bytes in small, combine them.\n",
1765 				     length);
1766 			/*
1767 			 * Data is less than small buffer size so it's
1768 			 * stuffed in a small buffer.
1769 			 * For this case we append the data
1770 			 * from the "data" small buffer to the "header" small
1771 			 * buffer.
1772 			 */
1773 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1774 			pci_dma_sync_single_for_cpu(qdev->pdev,
1775 						    dma_unmap_addr
1776 						    (sbq_desc, mapaddr),
1777 						    dma_unmap_len
1778 						    (sbq_desc, maplen),
1779 						    PCI_DMA_FROMDEVICE);
1780 			memcpy(skb_put(skb, length),
1781 			       sbq_desc->p.skb->data, length);
1782 			pci_dma_sync_single_for_device(qdev->pdev,
1783 						       dma_unmap_addr
1784 						       (sbq_desc,
1785 							mapaddr),
1786 						       dma_unmap_len
1787 						       (sbq_desc,
1788 							maplen),
1789 						       PCI_DMA_FROMDEVICE);
1790 		} else {
1791 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1792 				     "%d bytes in a single small buffer.\n",
1793 				     length);
1794 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1795 			skb = sbq_desc->p.skb;
1796 			ql_realign_skb(skb, length);
1797 			skb_put(skb, length);
1798 			pci_unmap_single(qdev->pdev,
1799 					 dma_unmap_addr(sbq_desc,
1800 							mapaddr),
1801 					 dma_unmap_len(sbq_desc,
1802 						       maplen),
1803 					 PCI_DMA_FROMDEVICE);
1804 			sbq_desc->p.skb = NULL;
1805 		}
1806 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1807 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1808 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1809 				     "Header in small, %d bytes in large. Chain large to small!\n",
1810 				     length);
1811 			/*
1812 			 * The data is in a single large buffer.  We
1813 			 * chain it to the header buffer's skb and let
1814 			 * it rip.
1815 			 */
1816 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1817 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1818 				     "Chaining page at offset = %d, for %d bytes  to skb.\n",
1819 				     lbq_desc->p.pg_chunk.offset, length);
1820 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1821 						lbq_desc->p.pg_chunk.offset,
1822 						length);
1823 			skb->len += length;
1824 			skb->data_len += length;
1825 			skb->truesize += length;
1826 		} else {
1827 			/*
1828 			 * The headers and data are in a single large buffer. We
1829 			 * copy it to a new skb and let it go. This can happen with
1830 			 * jumbo mtu on a non-TCP/UDP frame.
1831 			 */
1832 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1833 			skb = netdev_alloc_skb(qdev->ndev, length);
1834 			if (skb == NULL) {
1835 				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1836 					     "No skb available, drop the packet.\n");
1837 				return NULL;
1838 			}
1839 			pci_unmap_page(qdev->pdev,
1840 				       dma_unmap_addr(lbq_desc,
1841 						      mapaddr),
1842 				       dma_unmap_len(lbq_desc, maplen),
1843 				       PCI_DMA_FROMDEVICE);
1844 			skb_reserve(skb, NET_IP_ALIGN);
1845 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1846 				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1847 				     length);
1848 			skb_fill_page_desc(skb, 0,
1849 						lbq_desc->p.pg_chunk.page,
1850 						lbq_desc->p.pg_chunk.offset,
1851 						length);
1852 			skb->len += length;
1853 			skb->data_len += length;
1854 			skb->truesize += length;
1855 			length -= length;
1856 			__pskb_pull_tail(skb,
1857 				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1858 				VLAN_ETH_HLEN : ETH_HLEN);
1859 		}
1860 	} else {
1861 		/*
1862 		 * The data is in a chain of large buffers
1863 		 * pointed to by a small buffer.  We loop
1864 		 * through and chain them to our small header
1865 		 * buffer's skb.
1866 		 * frags:  There are 18 max frags and our small
1867 		 *         buffer will hold 32 of them. The thing is,
1868 		 *         we'll use 3 max for our 9000 byte jumbo
1869 		 *         frames.  If the MTU goes up we could
1870 		 *         eventually be in trouble.
1871 		 */
1872 		int size, i = 0;
1873 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1874 		pci_unmap_single(qdev->pdev,
1875 				 dma_unmap_addr(sbq_desc, mapaddr),
1876 				 dma_unmap_len(sbq_desc, maplen),
1877 				 PCI_DMA_FROMDEVICE);
1878 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1879 			/*
1880 			 * This is a non-TCP/UDP IP frame, so
1881 			 * the headers aren't split into a small
1882 			 * buffer.  We have to use the small buffer
1883 			 * that contains our sg list as our skb to
1884 			 * send upstairs. Copy the sg list here to
1885 			 * a local buffer and use it to find the
1886 			 * pages to chain.
1887 			 */
1888 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1889 				     "%d bytes of headers & data in chain of large.\n",
1890 				     length);
1891 			skb = sbq_desc->p.skb;
1892 			sbq_desc->p.skb = NULL;
1893 			skb_reserve(skb, NET_IP_ALIGN);
1894 		}
1895 		while (length > 0) {
1896 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1897 			size = (length < rx_ring->lbq_buf_size) ? length :
1898 				rx_ring->lbq_buf_size;
1899 
1900 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1901 				     "Adding page %d to skb for %d bytes.\n",
1902 				     i, size);
1903 			skb_fill_page_desc(skb, i,
1904 						lbq_desc->p.pg_chunk.page,
1905 						lbq_desc->p.pg_chunk.offset,
1906 						size);
1907 			skb->len += size;
1908 			skb->data_len += size;
1909 			skb->truesize += size;
1910 			length -= size;
1911 			i++;
1912 		}
1913 		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1914 				VLAN_ETH_HLEN : ETH_HLEN);
1915 	}
1916 	return skb;
1917 }
1918 
1919 /* Process an inbound completion from an rx ring. */
1920 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1921 				   struct rx_ring *rx_ring,
1922 				   struct ib_mac_iocb_rsp *ib_mac_rsp,
1923 				   u16 vlan_id)
1924 {
1925 	struct net_device *ndev = qdev->ndev;
1926 	struct sk_buff *skb = NULL;
1927 
1928 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1929 
1930 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1931 	if (unlikely(!skb)) {
1932 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1933 			     "No skb available, drop packet.\n");
1934 		rx_ring->rx_dropped++;
1935 		return;
1936 	}
1937 
1938 	/* Frame error, so drop the packet. */
1939 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1940 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1941 		dev_kfree_skb_any(skb);
1942 		return;
1943 	}
1944 
1945 	/* The max framesize filter on this chip is set higher than
1946 	 * MTU since FCoE uses 2k frames.
1947 	 */
1948 	if (skb->len > ndev->mtu + ETH_HLEN) {
1949 		dev_kfree_skb_any(skb);
1950 		rx_ring->rx_dropped++;
1951 		return;
1952 	}
1953 
1954 	/* loopback self test for ethtool */
1955 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1956 		ql_check_lb_frame(qdev, skb);
1957 		dev_kfree_skb_any(skb);
1958 		return;
1959 	}
1960 
1961 	prefetch(skb->data);
1962 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1963 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1964 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1965 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1966 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1967 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1968 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1969 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1970 		rx_ring->rx_multicast++;
1971 	}
1972 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1973 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1974 			     "Promiscuous Packet.\n");
1975 	}
1976 
1977 	skb->protocol = eth_type_trans(skb, ndev);
1978 	skb_checksum_none_assert(skb);
1979 
1980 	/* If rx checksum is on, and there are no
1981 	 * csum or frame errors.
1982 	 */
1983 	if ((ndev->features & NETIF_F_RXCSUM) &&
1984 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1985 		/* TCP frame. */
1986 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1987 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1988 				     "TCP checksum done!\n");
1989 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1990 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1991 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1992 			/* Unfragmented ipv4 UDP frame. */
1993 			struct iphdr *iph = (struct iphdr *) skb->data;
1994 			if (!(iph->frag_off &
1995 				htons(IP_MF|IP_OFFSET))) {
1996 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1997 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1998 					     "UDP checksum done!\n");
1999 			}
2000 		}
2001 	}
2002 
2003 	rx_ring->rx_packets++;
2004 	rx_ring->rx_bytes += skb->len;
2005 	skb_record_rx_queue(skb, rx_ring->cq_id);
2006 	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2007 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2008 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2009 		napi_gro_receive(&rx_ring->napi, skb);
2010 	else
2011 		netif_receive_skb(skb);
2012 }
2013 
2014 /* Process an inbound completion from an rx ring. */
2015 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2016 					struct rx_ring *rx_ring,
2017 					struct ib_mac_iocb_rsp *ib_mac_rsp)
2018 {
2019 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2020 	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2021 			((le16_to_cpu(ib_mac_rsp->vlan_id) &
2022 			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2023 
2024 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2025 
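	/* Dispatch on how the chip placed the frame: header/data split,
	 * data in a single small buffer, checksummed TCP in a page chunk
	 * (GRO), other data in a page chunk, or the split-frame fallback.
	 */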
2026 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2027 		/* The data and headers are split into
2028 		 * separate buffers.
2029 		 */
2030 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2031 						vlan_id);
2032 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2033 		/* The data fit in a single small buffer.
2034 		 * Allocate a new skb, copy the data and
2035 		 * return the buffer to the free pool.
2036 		 */
2037 		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2038 						length, vlan_id);
2039 	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2040 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2041 		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2042 		/* TCP packet in a page chunk that's been checksummed.
2043 		 * Tack it on to our GRO skb and let it go.
2044 		 */
2045 		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2046 						length, vlan_id);
2047 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2048 		/* Non-TCP packet in a page chunk. Allocate an
2049 		 * skb, tack it on frags, and send it up.
2050 		 */
2051 		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2052 						length, vlan_id);
2053 	} else {
2054 		/* Non-TCP/UDP large frames that span multiple buffers
2055 		 * can be processed correctly by the split frame logic.
2056 		 */
2057 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2058 						vlan_id);
2059 	}
2060 
2061 	return (unsigned long)length;
2062 }
2063 
2064 /* Process an outbound completion from an rx ring. */
2065 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2066 				   struct ob_mac_iocb_rsp *mac_rsp)
2067 {
2068 	struct tx_ring *tx_ring;
2069 	struct tx_ring_desc *tx_ring_desc;
2070 
2071 	QL_DUMP_OB_MAC_RSP(mac_rsp);
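	/* txq_idx and tid were stashed in the IOCB on the send path; use
	 * them to locate the tx_ring and descriptor for this completion.
	 */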
2072 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2073 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2074 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2075 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2076 	tx_ring->tx_packets++;
2077 	dev_kfree_skb(tx_ring_desc->skb);
2078 	tx_ring_desc->skb = NULL;
2079 
2080 	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2081 					OB_MAC_IOCB_RSP_S |
2082 					OB_MAC_IOCB_RSP_L |
2083 					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2084 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2085 			netif_warn(qdev, tx_done, qdev->ndev,
2086 				   "Total descriptor length did not match transfer length.\n");
2087 		}
2088 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2089 			netif_warn(qdev, tx_done, qdev->ndev,
2090 				   "Frame too short to be valid, not sent.\n");
2091 		}
2092 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2093 			netif_warn(qdev, tx_done, qdev->ndev,
2094 				   "Frame too long, but sent anyway.\n");
2095 		}
2096 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2097 			netif_warn(qdev, tx_done, qdev->ndev,
2098 				   "PCI backplane error. Frame not sent.\n");
2099 		}
2100 	}
2101 	atomic_inc(&tx_ring->tx_count);
2102 }
2103 
2104 /* Fire up a handler to reset the MPI processor. */
2105 void ql_queue_fw_error(struct ql_adapter *qdev)
2106 {
2107 	ql_link_off(qdev);
2108 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2109 }
2110 
2111 void ql_queue_asic_error(struct ql_adapter *qdev)
2112 {
2113 	ql_link_off(qdev);
2114 	ql_disable_interrupts(qdev);
2115 	/* Clear adapter up bit to signal the recovery
2116 	 * process that it shouldn't kill the reset worker
2117 	 * thread
2118 	 */
2119 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2120 	/* Set asic recovery bit to indicate to the reset process that we
2121 	 * are in fatal error recovery rather than a normal close
2122 	 */
2123 	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2124 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2125 }
2126 
2127 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2128 				    struct ib_ae_iocb_rsp *ib_ae_rsp)
2129 {
2130 	switch (ib_ae_rsp->event) {
2131 	case MGMT_ERR_EVENT:
2132 		netif_err(qdev, rx_err, qdev->ndev,
2133 			  "Management Processor Fatal Error.\n");
2134 		ql_queue_fw_error(qdev);
2135 		return;
2136 
2137 	case CAM_LOOKUP_ERR_EVENT:
2138 		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2139 		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2140 		ql_queue_asic_error(qdev);
2141 		return;
2142 
2143 	case SOFT_ECC_ERROR_EVENT:
2144 		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2145 		ql_queue_asic_error(qdev);
2146 		break;
2147 
2148 	case PCI_ERR_ANON_BUF_RD:
2149 		netdev_err(qdev->ndev, "PCI error occurred when reading "
2150 					"anonymous buffers from rx_ring %d.\n",
2151 					ib_ae_rsp->q_id);
2152 		ql_queue_asic_error(qdev);
2153 		break;
2154 
2155 	default:
2156 		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2157 			  ib_ae_rsp->event);
2158 		ql_queue_asic_error(qdev);
2159 		break;
2160 	}
2161 }
2162 
2163 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2164 {
2165 	struct ql_adapter *qdev = rx_ring->qdev;
2166 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2167 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2168 	int count = 0;
2169 
2170 	struct tx_ring *tx_ring;
2171 	/* While there are entries in the completion queue. */
2172 	while (prod != rx_ring->cnsmr_idx) {
2173 
2174 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2175 			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2176 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2177 
2178 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2179 		rmb();
2180 		switch (net_rsp->opcode) {
2181 
2182 		case OPCODE_OB_MAC_TSO_IOCB:
2183 		case OPCODE_OB_MAC_IOCB:
2184 			ql_process_mac_tx_intr(qdev, net_rsp);
2185 			break;
2186 		default:
2187 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2188 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2189 				     net_rsp->opcode);
2190 		}
2191 		count++;
2192 		ql_update_cq(rx_ring);
2193 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2194 	}
2195 	if (!net_rsp)
2196 		return 0;
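	/* net_rsp points at the last completion processed above; its
	 * txq_idx identifies the tx_ring that may need waking below.
	 */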
2197 	ql_write_cq_idx(rx_ring);
2198 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2199 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2200 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2201 			/*
2202 			 * The queue got stopped because the tx_ring was full.
2203 			 * Wake it up, because it's now at least 25% empty.
2204 			 */
2205 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2206 	}
2207 
2208 	return count;
2209 }
2210 
2211 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2212 {
2213 	struct ql_adapter *qdev = rx_ring->qdev;
2214 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2215 	struct ql_net_rsp_iocb *net_rsp;
2216 	int count = 0;
2217 
2218 	/* While there are entries in the completion queue. */
2219 	while (prod != rx_ring->cnsmr_idx) {
2220 
2221 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2222 			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2223 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2224 
2225 		net_rsp = rx_ring->curr_entry;
2226 		rmb();
2227 		switch (net_rsp->opcode) {
2228 		case OPCODE_IB_MAC_IOCB:
2229 			ql_process_mac_rx_intr(qdev, rx_ring,
2230 					       (struct ib_mac_iocb_rsp *)
2231 					       net_rsp);
2232 			break;
2233 
2234 		case OPCODE_IB_AE_IOCB:
2235 			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2236 						net_rsp);
2237 			break;
2238 		default:
2239 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2240 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2241 				     net_rsp->opcode);
2242 			break;
2243 		}
2244 		count++;
2245 		ql_update_cq(rx_ring);
2246 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2247 		if (count == budget)
2248 			break;
2249 	}
2250 	ql_update_buffer_queues(qdev, rx_ring);
2251 	ql_write_cq_idx(rx_ring);
2252 	return count;
2253 }
2254 
2255 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2256 {
2257 	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2258 	struct ql_adapter *qdev = rx_ring->qdev;
2259 	struct rx_ring *trx_ring;
2260 	int i, work_done = 0;
2261 	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2262 
2263 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2264 		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2265 
2266 	/* Service the TX rings first.  They start
2267 	 * right after the RSS rings. */
2268 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2269 		trx_ring = &qdev->rx_ring[i];
2270 		/* If this TX completion ring belongs to this vector and
2271 		 * it's not empty then service it.
2272 		 */
2273 		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2274 			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2275 					trx_ring->cnsmr_idx)) {
2276 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2277 				     "%s: Servicing TX completion ring %d.\n",
2278 				     __func__, trx_ring->cq_id);
2279 			ql_clean_outbound_rx_ring(trx_ring);
2280 		}
2281 	}
2282 
2283 	/*
2284 	 * Now service the RSS ring if it's active.
2285 	 */
2286 	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2287 					rx_ring->cnsmr_idx) {
2288 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2289 			     "%s: Servicing RX completion ring %d.\n",
2290 			     __func__, rx_ring->cq_id);
2291 		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2292 	}
2293 
2294 	if (work_done < budget) {
2295 		napi_complete(napi);
2296 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
2297 	}
2298 	return work_done;
2299 }
2300 
2301 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2302 {
2303 	struct ql_adapter *qdev = netdev_priv(ndev);
2304 
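	/* Program the receive VLAN configuration to track the requested
	 * CTAG RX offload state.
	 */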
2305 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2306 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2307 				 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2308 	} else {
2309 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2310 	}
2311 }
2312 
2313 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2314 	netdev_features_t features)
2315 {
2316 	/*
2317 	 * Since there is no support for separate rx/tx vlan accel
2318 	 * enable/disable, make sure the tx flag is always in same state as rx.
2319 	 */
2320 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2321 		features |= NETIF_F_HW_VLAN_CTAG_TX;
2322 	else
2323 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2324 
2325 	return features;
2326 }
2327 
2328 static int qlge_set_features(struct net_device *ndev,
2329 	netdev_features_t features)
2330 {
2331 	netdev_features_t changed = ndev->features ^ features;
2332 
2333 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2334 		qlge_vlan_mode(ndev, features);
2335 
2336 	return 0;
2337 }
2338 
2339 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2340 {
2341 	u32 enable_bit = MAC_ADDR_E;
2342 	int err;
2343 
2344 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2345 				  MAC_ADDR_TYPE_VLAN, vid);
2346 	if (err)
2347 		netif_err(qdev, ifup, qdev->ndev,
2348 			  "Failed to init vlan address.\n");
2349 	return err;
2350 }
2351 
2352 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2353 {
2354 	struct ql_adapter *qdev = netdev_priv(ndev);
2355 	int status;
2356 	int err;
2357 
2358 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2359 	if (status)
2360 		return status;
2361 
2362 	err = __qlge_vlan_rx_add_vid(qdev, vid);
2363 	set_bit(vid, qdev->active_vlans);
2364 
2365 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2366 
2367 	return err;
2368 }
2369 
2370 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2371 {
2372 	u32 enable_bit = 0;
2373 	int err;
2374 
2375 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2376 				  MAC_ADDR_TYPE_VLAN, vid);
2377 	if (err)
2378 		netif_err(qdev, ifup, qdev->ndev,
2379 			  "Failed to clear vlan address.\n");
2380 	return err;
2381 }
2382 
2383 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2384 {
2385 	struct ql_adapter *qdev = netdev_priv(ndev);
2386 	int status;
2387 	int err;
2388 
2389 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2390 	if (status)
2391 		return status;
2392 
2393 	err = __qlge_vlan_rx_kill_vid(qdev, vid);
2394 	clear_bit(vid, qdev->active_vlans);
2395 
2396 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2397 
2398 	return err;
2399 }
2400 
2401 static void qlge_restore_vlan(struct ql_adapter *qdev)
2402 {
2403 	int status;
2404 	u16 vid;
2405 
2406 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2407 	if (status)
2408 		return;
2409 
2410 	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2411 		__qlge_vlan_rx_add_vid(qdev, vid);
2412 
2413 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2414 }
2415 
2416 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2417 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2418 {
2419 	struct rx_ring *rx_ring = dev_id;
2420 	napi_schedule(&rx_ring->napi);
2421 	return IRQ_HANDLED;
2422 }
2423 
2424 /* This handles a fatal error, MPI activity, and the default
2425  * rx_ring in an MSI-X multiple vector environment.
2426  * In MSI/Legacy environment it also processes the rest of
2427  * the rx_rings.
2428  */
2429 static irqreturn_t qlge_isr(int irq, void *dev_id)
2430 {
2431 	struct rx_ring *rx_ring = dev_id;
2432 	struct ql_adapter *qdev = rx_ring->qdev;
2433 	struct intr_context *intr_context = &qdev->intr_context[0];
2434 	u32 var;
2435 	int work_done = 0;
2436 
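	/* A non-zero irq_cnt means this vector's completion interrupt is
	 * currently disabled, so an interrupt arriving now is presumed to
	 * belong to another device sharing the line.
	 */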
2437 	spin_lock(&qdev->hw_lock);
2438 	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2439 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2440 			     "Shared Interrupt, Not ours!\n");
2441 		spin_unlock(&qdev->hw_lock);
2442 		return IRQ_NONE;
2443 	}
2444 	spin_unlock(&qdev->hw_lock);
2445 
2446 	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2447 
2448 	/*
2449 	 * Check for fatal error.
2450 	 */
2451 	if (var & STS_FE) {
2452 		ql_queue_asic_error(qdev);
2453 		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2454 		var = ql_read32(qdev, ERR_STS);
2455 		netdev_err(qdev->ndev, "Resetting chip. "
2456 					"Error Status Register = 0x%x\n", var);
2457 		return IRQ_HANDLED;
2458 	}
2459 
2460 	/*
2461 	 * Check MPI processor activity.
2462 	 */
2463 	if ((var & STS_PI) &&
2464 		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2465 		/*
2466 		 * We've got an async event or mailbox completion.
2467 		 * Handle it and clear the source of the interrupt.
2468 		 */
2469 		netif_err(qdev, intr, qdev->ndev,
2470 			  "Got MPI processor interrupt.\n");
2471 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2472 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2473 		queue_delayed_work_on(smp_processor_id(),
2474 				qdev->workqueue, &qdev->mpi_work, 0);
2475 		work_done++;
2476 	}
2477 
2478 	/*
2479 	 * Get the bit-mask that shows the active queues for this
2480 	 * pass.  Compare it to the queues that this irq services
2481 	 * and call napi if there's a match.
2482 	 */
2483 	var = ql_read32(qdev, ISR1);
2484 	if (var & intr_context->irq_mask) {
2485 		netif_info(qdev, intr, qdev->ndev,
2486 			   "Waking handler for rx_ring[0].\n");
2487 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2488 		napi_schedule(&rx_ring->napi);
2489 		work_done++;
2490 	}
2491 	ql_enable_completion_interrupt(qdev, intr_context->intr);
2492 	return work_done ? IRQ_HANDLED : IRQ_NONE;
2493 }
2494 
2495 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2496 {
2497 
2498 	if (skb_is_gso(skb)) {
2499 		int err;
2500 		if (skb_header_cloned(skb)) {
2501 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2502 			if (err)
2503 				return err;
2504 		}
2505 
2506 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2507 		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2508 		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2509 		mac_iocb_ptr->total_hdrs_len =
2510 		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2511 		mac_iocb_ptr->net_trans_offset =
2512 		    cpu_to_le16(skb_network_offset(skb) |
2513 				skb_transport_offset(skb)
2514 				<< OB_MAC_TRANSPORT_HDR_SHIFT);
2515 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2516 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
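		/* Replace the stack's checksum with the pseudo-header
		 * checksum (length 0) so the hardware can finish the
		 * checksum for each segment it produces.
		 */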
2517 		if (likely(skb->protocol == htons(ETH_P_IP))) {
2518 			struct iphdr *iph = ip_hdr(skb);
2519 			iph->check = 0;
2520 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2521 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2522 								 iph->daddr, 0,
2523 								 IPPROTO_TCP,
2524 								 0);
2525 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
2526 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2527 			tcp_hdr(skb)->check =
2528 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2529 					     &ipv6_hdr(skb)->daddr,
2530 					     0, IPPROTO_TCP, 0);
2531 		}
2532 		return 1;
2533 	}
2534 	return 0;
2535 }
2536 
2537 static void ql_hw_csum_setup(struct sk_buff *skb,
2538 			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2539 {
2540 	int len;
2541 	struct iphdr *iph = ip_hdr(skb);
2542 	__sum16 *check;
2543 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2544 	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2545 	mac_iocb_ptr->net_trans_offset =
2546 		cpu_to_le16(skb_network_offset(skb) |
2547 		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2548 
2549 	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2550 	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2551 	if (likely(iph->protocol == IPPROTO_TCP)) {
2552 		check = &(tcp_hdr(skb)->check);
2553 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2554 		mac_iocb_ptr->total_hdrs_len =
2555 		    cpu_to_le16(skb_transport_offset(skb) +
2556 				(tcp_hdr(skb)->doff << 2));
2557 	} else {
2558 		check = &(udp_hdr(skb)->check);
2559 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2560 		mac_iocb_ptr->total_hdrs_len =
2561 		    cpu_to_le16(skb_transport_offset(skb) +
2562 				sizeof(struct udphdr));
2563 	}
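	/* Seed the checksum field with the pseudo-header checksum; the
	 * hardware completes it over the TCP/UDP header and payload.
	 */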
2564 	*check = ~csum_tcpudp_magic(iph->saddr,
2565 				    iph->daddr, len, iph->protocol, 0);
2566 }
2567 
2568 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2569 {
2570 	struct tx_ring_desc *tx_ring_desc;
2571 	struct ob_mac_iocb_req *mac_iocb_ptr;
2572 	struct ql_adapter *qdev = netdev_priv(ndev);
2573 	int tso;
2574 	struct tx_ring *tx_ring;
2575 	u32 tx_ring_idx = (u32) skb->queue_mapping;
2576 
2577 	tx_ring = &qdev->tx_ring[tx_ring_idx];
2578 
2579 	if (skb_padto(skb, ETH_ZLEN))
2580 		return NETDEV_TX_OK;
2581 
2582 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2583 		netif_info(qdev, tx_queued, qdev->ndev,
2584 			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2585 			   __func__, tx_ring_idx);
2586 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2587 		tx_ring->tx_errors++;
2588 		return NETDEV_TX_BUSY;
2589 	}
2590 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2591 	mac_iocb_ptr = tx_ring_desc->queue_entry;
2592 	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2593 
2594 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2595 	mac_iocb_ptr->tid = tx_ring_desc->index;
2596 	/* We use the upper 32-bits to store the tx queue for this IO.
2597 	 * When we get the completion we can use it to establish the context.
2598 	 */
2599 	mac_iocb_ptr->txq_idx = tx_ring_idx;
2600 	tx_ring_desc->skb = skb;
2601 
2602 	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2603 
2604 	if (vlan_tx_tag_present(skb)) {
2605 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2606 			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2607 		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2608 		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2609 	}
2610 	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2611 	if (tso < 0) {
2612 		dev_kfree_skb_any(skb);
2613 		return NETDEV_TX_OK;
2614 	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2615 		ql_hw_csum_setup(skb,
2616 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2617 	}
2618 	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2619 			NETDEV_TX_OK) {
2620 		netif_err(qdev, tx_queued, qdev->ndev,
2621 			  "Could not map the segments.\n");
2622 		tx_ring->tx_errors++;
2623 		return NETDEV_TX_BUSY;
2624 	}
2625 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2626 	tx_ring->prod_idx++;
2627 	if (tx_ring->prod_idx == tx_ring->wq_len)
2628 		tx_ring->prod_idx = 0;
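	/* Make sure the IOCB is fully written to memory before the
	 * doorbell write below hands it to the hardware.
	 */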
2629 	wmb();
2630 
2631 	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2632 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2633 		     "tx queued, slot %d, len %d\n",
2634 		     tx_ring->prod_idx, skb->len);
2635 
2636 	atomic_dec(&tx_ring->tx_count);
2637 
2638 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2639 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2640 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2641 			/*
2642 			 * The queue got stopped because the tx_ring was full.
2643 			 * Wake it up, because it's now at least 25% empty.
2644 			 */
2645 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2646 	}
2647 	return NETDEV_TX_OK;
2648 }
2649 
2650 
2651 static void ql_free_shadow_space(struct ql_adapter *qdev)
2652 {
2653 	if (qdev->rx_ring_shadow_reg_area) {
2654 		pci_free_consistent(qdev->pdev,
2655 				    PAGE_SIZE,
2656 				    qdev->rx_ring_shadow_reg_area,
2657 				    qdev->rx_ring_shadow_reg_dma);
2658 		qdev->rx_ring_shadow_reg_area = NULL;
2659 	}
2660 	if (qdev->tx_ring_shadow_reg_area) {
2661 		pci_free_consistent(qdev->pdev,
2662 				    PAGE_SIZE,
2663 				    qdev->tx_ring_shadow_reg_area,
2664 				    qdev->tx_ring_shadow_reg_dma);
2665 		qdev->tx_ring_shadow_reg_area = NULL;
2666 	}
2667 }
2668 
2669 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2670 {
2671 	qdev->rx_ring_shadow_reg_area =
2672 	    pci_alloc_consistent(qdev->pdev,
2673 				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2674 	if (qdev->rx_ring_shadow_reg_area == NULL) {
2675 		netif_err(qdev, ifup, qdev->ndev,
2676 			  "Allocation of RX shadow space failed.\n");
2677 		return -ENOMEM;
2678 	}
2679 	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2680 	qdev->tx_ring_shadow_reg_area =
2681 	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2682 				 &qdev->tx_ring_shadow_reg_dma);
2683 	if (qdev->tx_ring_shadow_reg_area == NULL) {
2684 		netif_err(qdev, ifup, qdev->ndev,
2685 			  "Allocation of TX shadow space failed.\n");
2686 		goto err_wqp_sh_area;
2687 	}
2688 	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2689 	return 0;
2690 
2691 err_wqp_sh_area:
2692 	pci_free_consistent(qdev->pdev,
2693 			    PAGE_SIZE,
2694 			    qdev->rx_ring_shadow_reg_area,
2695 			    qdev->rx_ring_shadow_reg_dma);
2696 	return -ENOMEM;
2697 }
2698 
2699 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2700 {
2701 	struct tx_ring_desc *tx_ring_desc;
2702 	int i;
2703 	struct ob_mac_iocb_req *mac_iocb_ptr;
2704 
2705 	mac_iocb_ptr = tx_ring->wq_base;
2706 	tx_ring_desc = tx_ring->q;
2707 	for (i = 0; i < tx_ring->wq_len; i++) {
2708 		tx_ring_desc->index = i;
2709 		tx_ring_desc->skb = NULL;
2710 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2711 		mac_iocb_ptr++;
2712 		tx_ring_desc++;
2713 	}
2714 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2715 }
2716 
2717 static void ql_free_tx_resources(struct ql_adapter *qdev,
2718 				 struct tx_ring *tx_ring)
2719 {
2720 	if (tx_ring->wq_base) {
2721 		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2722 				    tx_ring->wq_base, tx_ring->wq_base_dma);
2723 		tx_ring->wq_base = NULL;
2724 	}
2725 	kfree(tx_ring->q);
2726 	tx_ring->q = NULL;
2727 }
2728 
2729 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2730 				 struct tx_ring *tx_ring)
2731 {
2732 	tx_ring->wq_base =
2733 	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2734 				 &tx_ring->wq_base_dma);
2735 
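	/* Fail if the allocation did not succeed or the work queue base
	 * is not aligned as the hardware requires (low address bits set).
	 */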
2736 	if ((tx_ring->wq_base == NULL) ||
2737 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2738 		goto pci_alloc_err;
2739 
2740 	tx_ring->q =
2741 	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2742 	if (tx_ring->q == NULL)
2743 		goto err;
2744 
2745 	return 0;
2746 err:
2747 	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2748 			    tx_ring->wq_base, tx_ring->wq_base_dma);
2749 	tx_ring->wq_base = NULL;
2750 pci_alloc_err:
2751 	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2752 	return -ENOMEM;
2753 }
2754 
2755 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2756 {
2757 	struct bq_desc *lbq_desc;
2758 
2759 	uint32_t  curr_idx, clean_idx;
2760 
2761 	curr_idx = rx_ring->lbq_curr_idx;
2762 	clean_idx = rx_ring->lbq_clean_idx;
2763 	while (curr_idx != clean_idx) {
2764 		lbq_desc = &rx_ring->lbq[curr_idx];
2765 
2766 		if (lbq_desc->p.pg_chunk.last_flag) {
2767 			pci_unmap_page(qdev->pdev,
2768 				lbq_desc->p.pg_chunk.map,
2769 				ql_lbq_block_size(qdev),
2770 				       PCI_DMA_FROMDEVICE);
2771 			lbq_desc->p.pg_chunk.last_flag = 0;
2772 		}
2773 
2774 		put_page(lbq_desc->p.pg_chunk.page);
2775 		lbq_desc->p.pg_chunk.page = NULL;
2776 
2777 		if (++curr_idx == rx_ring->lbq_len)
2778 			curr_idx = 0;
2779 
2780 	}
2781 	if (rx_ring->pg_chunk.page) {
2782 		pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2783 			ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2784 		put_page(rx_ring->pg_chunk.page);
2785 		rx_ring->pg_chunk.page = NULL;
2786 	}
2787 }
2788 
2789 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2790 {
2791 	int i;
2792 	struct bq_desc *sbq_desc;
2793 
2794 	for (i = 0; i < rx_ring->sbq_len; i++) {
2795 		sbq_desc = &rx_ring->sbq[i];
2796 		if (sbq_desc == NULL) {
2797 			netif_err(qdev, ifup, qdev->ndev,
2798 				  "sbq_desc %d is NULL.\n", i);
2799 			return;
2800 		}
2801 		if (sbq_desc->p.skb) {
2802 			pci_unmap_single(qdev->pdev,
2803 					 dma_unmap_addr(sbq_desc, mapaddr),
2804 					 dma_unmap_len(sbq_desc, maplen),
2805 					 PCI_DMA_FROMDEVICE);
2806 			dev_kfree_skb(sbq_desc->p.skb);
2807 			sbq_desc->p.skb = NULL;
2808 		}
2809 	}
2810 }
2811 
2812 /* Free all large and small rx buffers associated
2813  * with the completion queues for this device.
2814  */
2815 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2816 {
2817 	int i;
2818 	struct rx_ring *rx_ring;
2819 
2820 	for (i = 0; i < qdev->rx_ring_count; i++) {
2821 		rx_ring = &qdev->rx_ring[i];
2822 		if (rx_ring->lbq)
2823 			ql_free_lbq_buffers(qdev, rx_ring);
2824 		if (rx_ring->sbq)
2825 			ql_free_sbq_buffers(qdev, rx_ring);
2826 	}
2827 }
2828 
2829 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2830 {
2831 	struct rx_ring *rx_ring;
2832 	int i;
2833 
2834 	for (i = 0; i < qdev->rx_ring_count; i++) {
2835 		rx_ring = &qdev->rx_ring[i];
2836 		if (rx_ring->type != TX_Q)
2837 			ql_update_buffer_queues(qdev, rx_ring);
2838 	}
2839 }
2840 
2841 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2842 				struct rx_ring *rx_ring)
2843 {
2844 	int i;
2845 	struct bq_desc *lbq_desc;
2846 	__le64 *bq = rx_ring->lbq_base;
2847 
2848 	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2849 	for (i = 0; i < rx_ring->lbq_len; i++) {
2850 		lbq_desc = &rx_ring->lbq[i];
2851 		memset(lbq_desc, 0, sizeof(*lbq_desc));
2852 		lbq_desc->index = i;
2853 		lbq_desc->addr = bq;
2854 		bq++;
2855 	}
2856 }
2857 
2858 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2859 				struct rx_ring *rx_ring)
2860 {
2861 	int i;
2862 	struct bq_desc *sbq_desc;
2863 	__le64 *bq = rx_ring->sbq_base;
2864 
2865 	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2866 	for (i = 0; i < rx_ring->sbq_len; i++) {
2867 		sbq_desc = &rx_ring->sbq[i];
2868 		memset(sbq_desc, 0, sizeof(*sbq_desc));
2869 		sbq_desc->index = i;
2870 		sbq_desc->addr = bq;
2871 		bq++;
2872 	}
2873 }
2874 
2875 static void ql_free_rx_resources(struct ql_adapter *qdev,
2876 				 struct rx_ring *rx_ring)
2877 {
2878 	/* Free the small buffer queue. */
2879 	if (rx_ring->sbq_base) {
2880 		pci_free_consistent(qdev->pdev,
2881 				    rx_ring->sbq_size,
2882 				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2883 		rx_ring->sbq_base = NULL;
2884 	}
2885 
2886 	/* Free the small buffer queue control blocks. */
2887 	kfree(rx_ring->sbq);
2888 	rx_ring->sbq = NULL;
2889 
2890 	/* Free the large buffer queue. */
2891 	if (rx_ring->lbq_base) {
2892 		pci_free_consistent(qdev->pdev,
2893 				    rx_ring->lbq_size,
2894 				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2895 		rx_ring->lbq_base = NULL;
2896 	}
2897 
2898 	/* Free the large buffer queue control blocks. */
2899 	kfree(rx_ring->lbq);
2900 	rx_ring->lbq = NULL;
2901 
2902 	/* Free the rx queue. */
2903 	if (rx_ring->cq_base) {
2904 		pci_free_consistent(qdev->pdev,
2905 				    rx_ring->cq_size,
2906 				    rx_ring->cq_base, rx_ring->cq_base_dma);
2907 		rx_ring->cq_base = NULL;
2908 	}
2909 }
2910 
2911 /* Allocate queues and buffers for this completion queue based
2912  * on the values in the parameter structure. */
2913 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2914 				 struct rx_ring *rx_ring)
2915 {
2916 
2917 	/*
2918 	 * Allocate the completion queue for this rx_ring.
2919 	 */
2920 	rx_ring->cq_base =
2921 	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2922 				 &rx_ring->cq_base_dma);
2923 
2924 	if (rx_ring->cq_base == NULL) {
2925 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2926 		return -ENOMEM;
2927 	}
2928 
2929 	if (rx_ring->sbq_len) {
2930 		/*
2931 		 * Allocate small buffer queue.
2932 		 */
2933 		rx_ring->sbq_base =
2934 		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2935 					 &rx_ring->sbq_base_dma);
2936 
2937 		if (rx_ring->sbq_base == NULL) {
2938 			netif_err(qdev, ifup, qdev->ndev,
2939 				  "Small buffer queue allocation failed.\n");
2940 			goto err_mem;
2941 		}
2942 
2943 		/*
2944 		 * Allocate small buffer queue control blocks.
2945 		 */
2946 		rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
2947 					     sizeof(struct bq_desc),
2948 					     GFP_KERNEL);
2949 		if (rx_ring->sbq == NULL)
2950 			goto err_mem;
2951 
2952 		ql_init_sbq_ring(qdev, rx_ring);
2953 	}
2954 
2955 	if (rx_ring->lbq_len) {
2956 		/*
2957 		 * Allocate large buffer queue.
2958 		 */
2959 		rx_ring->lbq_base =
2960 		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2961 					 &rx_ring->lbq_base_dma);
2962 
2963 		if (rx_ring->lbq_base == NULL) {
2964 			netif_err(qdev, ifup, qdev->ndev,
2965 				  "Large buffer queue allocation failed.\n");
2966 			goto err_mem;
2967 		}
2968 		/*
2969 		 * Allocate large buffer queue control blocks.
2970 		 */
2971 		rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
2972 					     sizeof(struct bq_desc),
2973 					     GFP_KERNEL);
2974 		if (rx_ring->lbq == NULL)
2975 			goto err_mem;
2976 
2977 		ql_init_lbq_ring(qdev, rx_ring);
2978 	}
2979 
2980 	return 0;
2981 
2982 err_mem:
2983 	ql_free_rx_resources(qdev, rx_ring);
2984 	return -ENOMEM;
2985 }
2986 
2987 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2988 {
2989 	struct tx_ring *tx_ring;
2990 	struct tx_ring_desc *tx_ring_desc;
2991 	int i, j;
2992 
2993 	/*
2994 	 * Loop through all queues and free
2995 	 * any resources.
2996 	 */
2997 	for (j = 0; j < qdev->tx_ring_count; j++) {
2998 		tx_ring = &qdev->tx_ring[j];
2999 		for (i = 0; i < tx_ring->wq_len; i++) {
3000 			tx_ring_desc = &tx_ring->q[i];
3001 			if (tx_ring_desc && tx_ring_desc->skb) {
3002 				netif_err(qdev, ifdown, qdev->ndev,
3003 					  "Freeing lost SKB %p, from queue %d, index %d.\n",
3004 					  tx_ring_desc->skb, j,
3005 					  tx_ring_desc->index);
3006 				ql_unmap_send(qdev, tx_ring_desc,
3007 					      tx_ring_desc->map_cnt);
3008 				dev_kfree_skb(tx_ring_desc->skb);
3009 				tx_ring_desc->skb = NULL;
3010 			}
3011 		}
3012 	}
3013 }
3014 
3015 static void ql_free_mem_resources(struct ql_adapter *qdev)
3016 {
3017 	int i;
3018 
3019 	for (i = 0; i < qdev->tx_ring_count; i++)
3020 		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3021 	for (i = 0; i < qdev->rx_ring_count; i++)
3022 		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3023 	ql_free_shadow_space(qdev);
3024 }
3025 
3026 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3027 {
3028 	int i;
3029 
3030 	/* Allocate space for our shadow registers and such. */
3031 	if (ql_alloc_shadow_space(qdev))
3032 		return -ENOMEM;
3033 
3034 	for (i = 0; i < qdev->rx_ring_count; i++) {
3035 		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3036 			netif_err(qdev, ifup, qdev->ndev,
3037 				  "RX resource allocation failed.\n");
3038 			goto err_mem;
3039 		}
3040 	}
3041 	/* Allocate tx queue resources */
3042 	for (i = 0; i < qdev->tx_ring_count; i++) {
3043 		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3044 			netif_err(qdev, ifup, qdev->ndev,
3045 				  "TX resource allocation failed.\n");
3046 			goto err_mem;
3047 		}
3048 	}
3049 	return 0;
3050 
3051 err_mem:
3052 	ql_free_mem_resources(qdev);
3053 	return -ENOMEM;
3054 }
3055 
3056 /* Set up the rx ring control block and pass it to the chip.
3057  * The control block is defined as
3058  * "Completion Queue Initialization Control Block", or cqicb.
3059  */
3060 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3061 {
3062 	struct cqicb *cqicb = &rx_ring->cqicb;
3063 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3064 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3065 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3066 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3067 	void __iomem *doorbell_area =
3068 	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3069 	int err = 0;
3070 	u16 bq_len;
3071 	u64 tmp;
3072 	__le64 *base_indirect_ptr;
3073 	int page_entries;
3074 
3075 	/* Set up the shadow registers for this ring. */
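	/* The per-ring shadow area holds, in order: an 8-byte producer
	 * index, the lbq indirect page list, then the sbq indirect page
	 * list.
	 */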
3076 	rx_ring->prod_idx_sh_reg = shadow_reg;
3077 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3078 	*rx_ring->prod_idx_sh_reg = 0;
3079 	shadow_reg += sizeof(u64);
3080 	shadow_reg_dma += sizeof(u64);
3081 	rx_ring->lbq_base_indirect = shadow_reg;
3082 	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3083 	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3084 	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3085 	rx_ring->sbq_base_indirect = shadow_reg;
3086 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3087 
3088 	/* PCI doorbell mem area + 0x00 for consumer index register */
3089 	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3090 	rx_ring->cnsmr_idx = 0;
3091 	rx_ring->curr_entry = rx_ring->cq_base;
3092 
3093 	/* PCI doorbell mem area + 0x04 for valid register */
3094 	rx_ring->valid_db_reg = doorbell_area + 0x04;
3095 
3096 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
3097 	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3098 
3099 	/* PCI doorbell mem area + 0x1c */
3100 	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3101 
3102 	memset((void *)cqicb, 0, sizeof(struct cqicb));
3103 	cqicb->msix_vect = rx_ring->irq;
3104 
3105 	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3106 	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3107 
3108 	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3109 
3110 	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3111 
3112 	/*
3113 	 * Set up the control block load flags.
3114 	 */
3115 	cqicb->flags = FLAGS_LC |	/* Load queue base address */
3116 	    FLAGS_LV |		/* Load MSI-X vector */
3117 	    FLAGS_LI;		/* Load irq delay values */
3118 	if (rx_ring->lbq_len) {
3119 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
3120 		tmp = (u64)rx_ring->lbq_base_dma;
3121 		base_indirect_ptr = rx_ring->lbq_base_indirect;
3122 		page_entries = 0;
3123 		do {
3124 			*base_indirect_ptr = cpu_to_le64(tmp);
3125 			tmp += DB_PAGE_SIZE;
3126 			base_indirect_ptr++;
3127 			page_entries++;
3128 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3129 		cqicb->lbq_addr =
3130 		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3131 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3132 			(u16) rx_ring->lbq_buf_size;
3133 		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3134 		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3135 			(u16) rx_ring->lbq_len;
3136 		cqicb->lbq_len = cpu_to_le16(bq_len);
3137 		rx_ring->lbq_prod_idx = 0;
3138 		rx_ring->lbq_curr_idx = 0;
3139 		rx_ring->lbq_clean_idx = 0;
3140 		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3141 	}
3142 	if (rx_ring->sbq_len) {
3143 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
3144 		tmp = (u64)rx_ring->sbq_base_dma;
3145 		base_indirect_ptr = rx_ring->sbq_base_indirect;
3146 		page_entries = 0;
3147 		do {
3148 			*base_indirect_ptr = cpu_to_le64(tmp);
3149 			tmp += DB_PAGE_SIZE;
3150 			base_indirect_ptr++;
3151 			page_entries++;
3152 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3153 		cqicb->sbq_addr =
3154 		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3155 		cqicb->sbq_buf_size =
3156 		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3157 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3158 			(u16) rx_ring->sbq_len;
3159 		cqicb->sbq_len = cpu_to_le16(bq_len);
3160 		rx_ring->sbq_prod_idx = 0;
3161 		rx_ring->sbq_curr_idx = 0;
3162 		rx_ring->sbq_clean_idx = 0;
3163 		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3164 	}
3165 	switch (rx_ring->type) {
3166 	case TX_Q:
3167 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3168 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3169 		break;
3170 	case RX_Q:
3171 		/* Inbound completion handling rx_rings run in
3172 		 * separate NAPI contexts.
3173 		 */
3174 		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3175 			       64);
3176 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3177 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3178 		break;
3179 	default:
3180 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3181 			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
3182 	}
3183 	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3184 			   CFG_LCQ, rx_ring->cq_id);
3185 	if (err) {
3186 		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3187 		return err;
3188 	}
3189 	return err;
3190 }
3191 
3192 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3193 {
3194 	struct wqicb *wqicb = (struct wqicb *)tx_ring;
3195 	void __iomem *doorbell_area =
3196 	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3197 	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3198 	    (tx_ring->wq_id * sizeof(u64));
3199 	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3200 	    (tx_ring->wq_id * sizeof(u64));
3201 	int err = 0;
3202 
3203 	/*
3204 	 * Assign doorbell registers for this tx_ring.
3205 	 */
3206 	/* TX PCI doorbell mem area for tx producer index */
3207 	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3208 	tx_ring->prod_idx = 0;
3209 	/* TX PCI doorbell mem area + 0x04 */
3210 	tx_ring->valid_db_reg = doorbell_area + 0x04;
3211 
3212 	/*
3213 	 * Assign shadow registers for this tx_ring.
3214 	 */
3215 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3216 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3217 
3218 	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3219 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3220 				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3221 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3222 	wqicb->rid = 0;
3223 	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3224 
3225 	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3226 
3227 	ql_init_tx_ring(qdev, tx_ring);
3228 
3229 	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3230 			   (u16) tx_ring->wq_id);
3231 	if (err) {
3232 		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3233 		return err;
3234 	}
3235 	return err;
3236 }
3237 
3238 static void ql_disable_msix(struct ql_adapter *qdev)
3239 {
3240 	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3241 		pci_disable_msix(qdev->pdev);
3242 		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3243 		kfree(qdev->msi_x_entry);
3244 		qdev->msi_x_entry = NULL;
3245 	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3246 		pci_disable_msi(qdev->pdev);
3247 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3248 	}
3249 }
3250 
3251 /* We start by trying to get the number of vectors
3252  * stored in qdev->intr_count. If we don't get that
3253  * many then we reduce the count and try again.
3254  */
3255 static void ql_enable_msix(struct ql_adapter *qdev)
3256 {
3257 	int i, err;
3258 
3259 	/* Get the MSIX vectors. */
3260 	if (qlge_irq_type == MSIX_IRQ) {
3261 		/* Try to alloc space for the msix struct,
3262 		 * if it fails then go to MSI/legacy.
3263 		 */
3264 		qdev->msi_x_entry = kcalloc(qdev->intr_count,
3265 					    sizeof(struct msix_entry),
3266 					    GFP_KERNEL);
3267 		if (!qdev->msi_x_entry) {
3268 			qlge_irq_type = MSI_IRQ;
3269 			goto msi;
3270 		}
3271 
3272 		for (i = 0; i < qdev->intr_count; i++)
3273 			qdev->msi_x_entry[i].entry = i;
3274 
3275 		/* Loop to get our vectors.  We start with
3276 		 * what we want and settle for what we get.
3277 		 */
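		/* A positive return from pci_enable_msix() reports how many
		 * vectors are actually available; retry with that smaller
		 * count until it succeeds (0) or fails (< 0).
		 */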
3278 		do {
3279 			err = pci_enable_msix(qdev->pdev,
3280 				qdev->msi_x_entry, qdev->intr_count);
3281 			if (err > 0)
3282 				qdev->intr_count = err;
3283 		} while (err > 0);
3284 
3285 		if (err < 0) {
3286 			kfree(qdev->msi_x_entry);
3287 			qdev->msi_x_entry = NULL;
3288 			netif_warn(qdev, ifup, qdev->ndev,
3289 				   "MSI-X Enable failed, trying MSI.\n");
3290 			qdev->intr_count = 1;
3291 			qlge_irq_type = MSI_IRQ;
3292 		} else if (err == 0) {
3293 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
3294 			netif_info(qdev, ifup, qdev->ndev,
3295 				   "MSI-X Enabled, got %d vectors.\n",
3296 				   qdev->intr_count);
3297 			return;
3298 		}
3299 	}
3300 msi:
3301 	qdev->intr_count = 1;
3302 	if (qlge_irq_type == MSI_IRQ) {
3303 		if (!pci_enable_msi(qdev->pdev)) {
3304 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3305 			netif_info(qdev, ifup, qdev->ndev,
3306 				   "Running with MSI interrupts.\n");
3307 			return;
3308 		}
3309 	}
3310 	qlge_irq_type = LEG_IRQ;
3311 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3312 		     "Running with legacy interrupts.\n");
3313 }
3314 
3315 /* Each vector services 1 RSS ring and 1 or more
3316  * TX completion rings.  This function loops through
3317  * the TX completion rings and assigns the vector that
3318  * will service it.  An example would be if there are
3319  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3320  * This would mean that vector 0 would service RSS ring 0
3321  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3322  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3323  */
3324 static void ql_set_tx_vect(struct ql_adapter *qdev)
3325 {
3326 	int i, j, vect;
3327 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3328 
3329 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3330 		/* Assign irq vectors to the TX completion rx_rings. */
3331 		for (vect = 0, j = 0, i = qdev->rss_ring_count;
3332 					 i < qdev->rx_ring_count; i++) {
3333 			if (j == tx_rings_per_vector) {
3334 				vect++;
3335 				j = 0;
3336 			}
3337 			qdev->rx_ring[i].irq = vect;
3338 			j++;
3339 		}
3340 	} else {
3341 		/* For single vector all rings have an irq
3342 		 * of zero.
3343 		 */
3344 		for (i = 0; i < qdev->rx_ring_count; i++)
3345 			qdev->rx_ring[i].irq = 0;
3346 	}
3347 }
3348 
3349 /* Set the interrupt mask for this vector.  Each vector
3350  * will service 1 RSS ring and 1 or more TX completion
3351  * rings.  This function sets up a bit mask per vector
3352  * that indicates which rings it services.
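 * For example, with 2 vectors and 8 TX completion rings (as above),
 * vector 0's mask covers cq_ids 0 and 2-5, while vector 1's mask
 * covers cq_ids 1 and 6-9.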
3353  */
3354 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3355 {
3356 	int j, vect = ctx->intr;
3357 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3358 
3359 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3360 		/* Add the RSS ring serviced by this vector
3361 		 * to the mask.
3362 		 */
3363 		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3364 		/* Add the TX ring(s) serviced by this vector
3365 		 * to the mask. */
3366 		for (j = 0; j < tx_rings_per_vector; j++) {
3367 			ctx->irq_mask |=
3368 			(1 << qdev->rx_ring[qdev->rss_ring_count +
3369 			(vect * tx_rings_per_vector) + j].cq_id);
3370 		}
3371 	} else {
3372 		/* For single vector we just shift each queue's
3373 		 * ID into the mask.
3374 		 */
3375 		for (j = 0; j < qdev->rx_ring_count; j++)
3376 			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3377 	}
3378 }
3379 
3380 /*
3381  * Here we build the intr_context structures based on
3382  * our rx_ring count and intr vector count.
3383  * The intr_context structure is used to hook each vector
3384  * to possibly different handlers.
3385  */
3386 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3387 {
3388 	int i = 0;
3389 	struct intr_context *intr_context = &qdev->intr_context[0];
3390 
3391 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3392 		/* Each rx_ring has its
3393 		 * own intr_context since we have separate
3394 		 * vectors for each queue.
3395 		 */
3396 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3397 			qdev->rx_ring[i].irq = i;
3398 			intr_context->intr = i;
3399 			intr_context->qdev = qdev;
3400 			/* Set up this vector's bit-mask that indicates
3401 			 * which queues it services.
3402 			 */
3403 			ql_set_irq_mask(qdev, intr_context);
3404 			/*
3405 			 * We set up each vector's enable/disable/read bits so
3406 			 * there's no bit/mask calculations in the critical path.
3407 			 */
3408 			intr_context->intr_en_mask =
3409 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3410 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3411 			    | i;
3412 			intr_context->intr_dis_mask =
3413 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3414 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3415 			    INTR_EN_IHD | i;
3416 			intr_context->intr_read_mask =
3417 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3418 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3419 			    i;
3420 			if (i == 0) {
3421 				/* The first vector/queue handles
3422 				 * broadcast/multicast, fatal errors,
3423 				 * and firmware events.  This is in addition
3424 				 * to normal inbound NAPI processing.
3425 				 */
3426 				intr_context->handler = qlge_isr;
3427 				sprintf(intr_context->name, "%s-rx-%d",
3428 					qdev->ndev->name, i);
3429 			} else {
3430 				/*
3431 				 * Inbound queues handle unicast frames only.
3432 				 */
3433 				intr_context->handler = qlge_msix_rx_isr;
3434 				sprintf(intr_context->name, "%s-rx-%d",
3435 					qdev->ndev->name, i);
3436 			}
3437 		}
3438 	} else {
3439 		/*
3440 		 * All rx_rings use the same intr_context since
3441 		 * there is only one vector.
3442 		 */
3443 		intr_context->intr = 0;
3444 		intr_context->qdev = qdev;
3445 		/*
3446 		 * We set up each vector's enable/disable/read bits so
3447 		 * there's no bit/mask calculations in the critical path.
3448 		 */
3449 		intr_context->intr_en_mask =
3450 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3451 		intr_context->intr_dis_mask =
3452 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3453 		    INTR_EN_TYPE_DISABLE;
3454 		intr_context->intr_read_mask =
3455 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3456 		/*
3457 		 * Single interrupt means one handler for all rings.
3458 		 */
3459 		intr_context->handler = qlge_isr;
3460 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3461 		/* Set up this vector's bit-mask that indicates
3462 		 * which queues it services. In this case there is
3463 		 * a single vector so it will service all RSS and
3464 		 * TX completion rings.
3465 		 */
3466 		ql_set_irq_mask(qdev, intr_context);
3467 	}
3468 	/* Tell the TX completion rings which MSIx vector
3469 	 * they will be using.
3470 	 */
3471 	ql_set_tx_vect(qdev);
3472 }
3473 
3474 static void ql_free_irq(struct ql_adapter *qdev)
3475 {
3476 	int i;
3477 	struct intr_context *intr_context = &qdev->intr_context[0];
3478 
3479 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3480 		if (intr_context->hooked) {
3481 			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3482 				free_irq(qdev->msi_x_entry[i].vector,
3483 					 &qdev->rx_ring[i]);
3484 			} else {
3485 				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3486 			}
3487 		}
3488 	}
3489 	ql_disable_msix(qdev);
3490 }
3491 
3492 static int ql_request_irq(struct ql_adapter *qdev)
3493 {
3494 	int i;
3495 	int status = 0;
3496 	struct pci_dev *pdev = qdev->pdev;
3497 	struct intr_context *intr_context = &qdev->intr_context[0];
3498 
3499 	ql_resolve_queues_to_irqs(qdev);
3500 
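	/* With MSI-X each vector gets its own handler and uses its rx_ring
	 * as the dev_id; with MSI or legacy interrupts a single handler
	 * services everything through rx_ring[0].
	 */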
3501 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3502 		atomic_set(&intr_context->irq_cnt, 0);
3503 		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3504 			status = request_irq(qdev->msi_x_entry[i].vector,
3505 					     intr_context->handler,
3506 					     0,
3507 					     intr_context->name,
3508 					     &qdev->rx_ring[i]);
3509 			if (status) {
3510 				netif_err(qdev, ifup, qdev->ndev,
3511 					  "Failed request for MSIX interrupt %d.\n",
3512 					  i);
3513 				goto err_irq;
3514 			}
3515 		} else {
3516 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3517 				     "trying msi or legacy interrupts.\n");
3518 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3519 				     "%s: irq = %d.\n", __func__, pdev->irq);
3520 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3521 				     "%s: context->name = %s.\n", __func__,
3522 				     intr_context->name);
3523 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3524 				     "%s: dev_id = 0x%p.\n", __func__,
3525 				     &qdev->rx_ring[0]);
3526 			status =
3527 			    request_irq(pdev->irq, qlge_isr,
3528 					test_bit(QL_MSI_ENABLED,
3529 						 &qdev->
3530 						 flags) ? 0 : IRQF_SHARED,
3531 					intr_context->name, &qdev->rx_ring[0]);
3532 			if (status)
3533 				goto err_irq;
3534 
3535 			netif_err(qdev, ifup, qdev->ndev,
3536 				  "Hooked intr %d, queue type %s, with name %s.\n",
3537 				  i,
3538 				  qdev->rx_ring[0].type == DEFAULT_Q ?
3539 				  "DEFAULT_Q" :
3540 				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3541 				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3542 				  intr_context->name);
3543 		}
3544 		intr_context->hooked = 1;
3545 	}
3546 	return status;
3547 err_irq:
3548 	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3549 	ql_free_irq(qdev);
3550 	return status;
3551 }
3552 
3553 static int ql_start_rss(struct ql_adapter *qdev)
3554 {
3555 	static const u8 init_hash_seed[] = {
3556 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3557 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3558 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3559 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3560 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3561 	};
3562 	struct ricb *ricb = &qdev->ricb;
3563 	int status = 0;
3564 	int i;
3565 	u8 *hash_id = (u8 *) ricb->hash_cq_id;
3566 
3567 	memset((void *)ricb, 0, sizeof(*ricb));
3568 
3569 	ricb->base_cq = RSS_L4K;
3570 	ricb->flags =
3571 		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3572 	ricb->mask = cpu_to_le16((u16)(0x3ff));
3573 
3574 	/*
3575 	 * Fill out the Indirection Table.
3576 	 */
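	/* Each of the 1024 entries maps an RSS hash bucket to one of the
	 * inbound completion queues; the bitmask wrap spreads the buckets
	 * evenly only when rss_ring_count is a power of two.
	 */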
3577 	for (i = 0; i < 1024; i++)
3578 		hash_id[i] = (i & (qdev->rss_ring_count - 1));
3579 
3580 	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3581 	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3582 
3583 	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3584 	if (status) {
3585 		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3586 		return status;
3587 	}
3588 	return status;
3589 }
3590 
3591 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3592 {
3593 	int i, status = 0;
3594 
3595 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3596 	if (status)
3597 		return status;
3598 	/* Clear all the entries in the routing table. */
3599 	for (i = 0; i < 16; i++) {
3600 		status = ql_set_routing_reg(qdev, i, 0, 0);
3601 		if (status) {
3602 			netif_err(qdev, ifup, qdev->ndev,
3603 				  "Failed to init routing register for CAM packets.\n");
3604 			break;
3605 		}
3606 	}
3607 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3608 	return status;
3609 }
3610 
3611 /* Initialize the frame-to-queue routing. */
3612 static int ql_route_initialize(struct ql_adapter *qdev)
3613 {
3614 	int status = 0;
3615 
3616 	/* Clear all the entries in the routing table. */
3617 	status = ql_clear_routing_entries(qdev);
3618 	if (status)
3619 		return status;
3620 
3621 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3622 	if (status)
3623 		return status;
3624 
3625 	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3626 						RT_IDX_IP_CSUM_ERR, 1);
3627 	if (status) {
3628 		netif_err(qdev, ifup, qdev->ndev,
3629 			"Failed to init routing register "
3630 			"for IP CSUM error packets.\n");
3631 		goto exit;
3632 	}
3633 	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3634 						RT_IDX_TU_CSUM_ERR, 1);
3635 	if (status) {
3636 		netif_err(qdev, ifup, qdev->ndev,
3637 			"Failed to init routing register "
3638 			"for TCP/UDP CSUM error packets.\n");
3639 		goto exit;
3640 	}
3641 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3642 	if (status) {
3643 		netif_err(qdev, ifup, qdev->ndev,
3644 			  "Failed to init routing register for broadcast packets.\n");
3645 		goto exit;
3646 	}
3647 	/* If we have more than one inbound queue, then turn on RSS in the
3648 	 * routing block.
3649 	 */
3650 	if (qdev->rss_ring_count > 1) {
3651 		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3652 					RT_IDX_RSS_MATCH, 1);
3653 		if (status) {
3654 			netif_err(qdev, ifup, qdev->ndev,
3655 				  "Failed to init routing register for MATCH RSS packets.\n");
3656 			goto exit;
3657 		}
3658 	}
3659 
3660 	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3661 				    RT_IDX_CAM_HIT, 1);
3662 	if (status)
3663 		netif_err(qdev, ifup, qdev->ndev,
3664 			  "Failed to init routing register for CAM packets.\n");
3665 exit:
3666 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3667 	return status;
3668 }
3669 
3670 int ql_cam_route_initialize(struct ql_adapter *qdev)
3671 {
3672 	int status, set;
3673 
3674 	/* Check if the link is up and use that to
3675 	 * determine whether we are setting or clearing
3676 	 * the MAC address in the CAM.
3677 	 */
3678 	set = ql_read32(qdev, STS);
3679 	set &= qdev->port_link_up;
3680 	status = ql_set_mac_addr(qdev, set);
3681 	if (status) {
3682 		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3683 		return status;
3684 	}
3685 
3686 	status = ql_route_initialize(qdev);
3687 	if (status)
3688 		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3689 
3690 	return status;
3691 }
3692 
3693 static int ql_adapter_initialize(struct ql_adapter *qdev)
3694 {
3695 	u32 value, mask;
3696 	int i;
3697 	int status = 0;
3698 
3699 	/*
3700 	 * Set up the System register to halt on errors.
3701 	 */
3702 	value = SYS_EFE | SYS_FAE;
3703 	mask = value << 16;
3704 	ql_write32(qdev, SYS, mask | value);
3705 
3706 	/* Set the default queue, and VLAN behavior. */
3707 	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3708 	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3709 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3710 
3711 	/* Set the MPI interrupt to enabled. */
3712 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3713 
3714 	/* Enable the function, set pagesize, enable error checking. */
3715 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3716 	    FSC_EC | FSC_VM_PAGE_4K;
3717 	value |= SPLT_SETTING;
3718 
3719 	/* Set/clear header splitting. */
3720 	mask = FSC_VM_PAGESIZE_MASK |
3721 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3722 	ql_write32(qdev, FSC, mask | value);
3723 
3724 	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3725 
3726 	/* Set RX packet routing to use the port/PCI function on which
3727 	 * the packet arrived, in addition to the usual frame routing.
3728 	 * This is helpful with bonding, where both interfaces can have
3729 	 * the same MAC address.
3730 	 */
3731 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3732 	/* Reroute all packets to our Interface.
3733 	 * They may have been routed to MPI firmware
3734 	 * due to WOL.
3735 	 */
3736 	value = ql_read32(qdev, MGMT_RCV_CFG);
3737 	value &= ~MGMT_RCV_CFG_RM;
3738 	mask = 0xffff0000;
3739 
3740 	/* Sticky reg needs clearing due to WOL. */
3741 	ql_write32(qdev, MGMT_RCV_CFG, mask);
3742 	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3743 
3744 	/* Default WOL is enabled on Mezz cards. */
3745 	if (qdev->pdev->subsystem_device == 0x0068 ||
3746 			qdev->pdev->subsystem_device == 0x0180)
3747 		qdev->wol = WAKE_MAGIC;
3748 
3749 	/* Start up the rx queues. */
3750 	for (i = 0; i < qdev->rx_ring_count; i++) {
3751 		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3752 		if (status) {
3753 			netif_err(qdev, ifup, qdev->ndev,
3754 				  "Failed to start rx ring[%d].\n", i);
3755 			return status;
3756 		}
3757 	}
3758 
3759 	/* If there is more than one inbound completion queue
3760 	 * then download a RICB to configure RSS.
3761 	 */
3762 	if (qdev->rss_ring_count > 1) {
3763 		status = ql_start_rss(qdev);
3764 		if (status) {
3765 			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3766 			return status;
3767 		}
3768 	}
3769 
3770 	/* Start up the tx queues. */
3771 	for (i = 0; i < qdev->tx_ring_count; i++) {
3772 		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3773 		if (status) {
3774 			netif_err(qdev, ifup, qdev->ndev,
3775 				  "Failed to start tx ring[%d].\n", i);
3776 			return status;
3777 		}
3778 	}
3779 
3780 	/* Initialize the port and set the max framesize. */
3781 	status = qdev->nic_ops->port_initialize(qdev);
3782 	if (status)
3783 		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3784 
3785 	/* Set up the MAC address and frame routing filter. */
3786 	status = ql_cam_route_initialize(qdev);
3787 	if (status) {
3788 		netif_err(qdev, ifup, qdev->ndev,
3789 			  "Failed to init CAM/Routing tables.\n");
3790 		return status;
3791 	}
3792 
3793 	/* Start NAPI for the RSS queues. */
3794 	for (i = 0; i < qdev->rss_ring_count; i++)
3795 		napi_enable(&qdev->rx_ring[i].napi);
3796 
3797 	return status;
3798 }
3799 
3800 /* Issue soft reset to chip. */
3801 static int ql_adapter_reset(struct ql_adapter *qdev)
3802 {
3803 	u32 value;
3804 	int status = 0;
3805 	unsigned long end_jiffies;
3806 
3807 	/* Clear all the entries in the routing table. */
3808 	status = ql_clear_routing_entries(qdev);
3809 	if (status) {
3810 		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3811 		return status;
3812 	}
3813 
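	/* Allow at least one jiffy (roughly 30 usec) for the function
	 * reset bit to clear before declaring a timeout.
	 */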
3814 	end_jiffies = jiffies +
3815 		max((unsigned long)1, usecs_to_jiffies(30));
3816 
3817 	/* If the recovery bit is set, skip the mailbox command and
3818 	 * clear the bit; otherwise we are in the normal reset process.
3819 	 */
3820 	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3821 		/* Stop management traffic. */
3822 		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3823 
3824 		/* Wait for the NIC and MGMNT FIFOs to empty. */
3825 		ql_wait_fifo_empty(qdev);
3826 	} else
3827 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3828 
3829 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3830 
3831 	do {
3832 		value = ql_read32(qdev, RST_FO);
3833 		if ((value & RST_FO_FR) == 0)
3834 			break;
3835 		cpu_relax();
3836 	} while (time_before(jiffies, end_jiffies));
3837 
3838 	if (value & RST_FO_FR) {
3839 		netif_err(qdev, ifdown, qdev->ndev,
3840 			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3841 		status = -ETIMEDOUT;
3842 	}
3843 
3844 	/* Resume management traffic. */
3845 	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3846 	return status;
3847 }
3848 
3849 static void ql_display_dev_info(struct net_device *ndev)
3850 {
3851 	struct ql_adapter *qdev = netdev_priv(ndev);
3852 
3853 	netif_info(qdev, probe, qdev->ndev,
3854 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3855 		   "XG Roll = %d, XG Rev = %d.\n",
3856 		   qdev->func,
3857 		   qdev->port,
3858 		   qdev->chip_rev_id & 0x0000000f,
3859 		   qdev->chip_rev_id >> 4 & 0x0000000f,
3860 		   qdev->chip_rev_id >> 8 & 0x0000000f,
3861 		   qdev->chip_rev_id >> 12 & 0x0000000f);
3862 	netif_info(qdev, probe, qdev->ndev,
3863 		   "MAC address %pM\n", ndev->dev_addr);
3864 }
3865 
3866 static int ql_wol(struct ql_adapter *qdev)
3867 {
3868 	int status = 0;
3869 	u32 wol = MB_WOL_DISABLE;
3870 
3871 	/* The CAM is still intact after a reset, but if we
3872 	 * are doing WOL, then we may need to program the
3873 	 * routing regs. We would also need to issue the mailbox
3874 	 * commands to instruct the MPI what to do per the ethtool
3875 	 * settings.
3876 	 */
3877 
3878 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3879 			WAKE_MCAST | WAKE_BCAST)) {
3880 		netif_err(qdev, ifdown, qdev->ndev,
3881 			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3882 			  qdev->wol);
3883 		return -EINVAL;
3884 	}
3885 
3886 	if (qdev->wol & WAKE_MAGIC) {
3887 		status = ql_mb_wol_set_magic(qdev, 1);
3888 		if (status) {
3889 			netif_err(qdev, ifdown, qdev->ndev,
3890 				  "Failed to set magic packet on %s.\n",
3891 				  qdev->ndev->name);
3892 			return status;
3893 		} else
3894 			netif_info(qdev, drv, qdev->ndev,
3895 				   "Enabled magic packet successfully on %s.\n",
3896 				   qdev->ndev->name);
3897 
3898 		wol |= MB_WOL_MAGIC_PKT;
3899 	}
3900 
3901 	if (qdev->wol) {
3902 		wol |= MB_WOL_MODE_ON;
3903 		status = ql_mb_wol_mode(qdev, wol);
3904 		netif_err(qdev, drv, qdev->ndev,
3905 			  "WOL %s (wol code 0x%x) on %s\n",
3906 			  (status == 0) ? "Successfully set" : "Failed",
3907 			  wol, qdev->ndev->name);
3908 	}
3909 
3910 	return status;
3911 }
3912 
3913 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3914 {
3915 
3916 	/* Don't kill the reset worker thread if we
3917 	 * are in the process of recovery.
3918 	 */
3919 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3920 		cancel_delayed_work_sync(&qdev->asic_reset_work);
3921 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3922 	cancel_delayed_work_sync(&qdev->mpi_work);
3923 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3924 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3925 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3926 }
3927 
3928 static int ql_adapter_down(struct ql_adapter *qdev)
3929 {
3930 	int i, status = 0;
3931 
3932 	ql_link_off(qdev);
3933 
3934 	ql_cancel_all_work_sync(qdev);
3935 
3936 	for (i = 0; i < qdev->rss_ring_count; i++)
3937 		napi_disable(&qdev->rx_ring[i].napi);
3938 
3939 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3940 
3941 	ql_disable_interrupts(qdev);
3942 
3943 	ql_tx_ring_clean(qdev);
3944 
3945 	/* Call netif_napi_del() from common point.
3946 	 */
3947 	for (i = 0; i < qdev->rss_ring_count; i++)
3948 		netif_napi_del(&qdev->rx_ring[i].napi);
3949 
3950 	status = ql_adapter_reset(qdev);
3951 	if (status)
3952 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3953 			  qdev->func);
3954 	ql_free_rx_buffers(qdev);
3955 
3956 	return status;
3957 }
3958 
3959 static int ql_adapter_up(struct ql_adapter *qdev)
3960 {
3961 	int err = 0;
3962 
3963 	err = ql_adapter_initialize(qdev);
3964 	if (err) {
3965 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3966 		goto err_init;
3967 	}
3968 	set_bit(QL_ADAPTER_UP, &qdev->flags);
3969 	ql_alloc_rx_buffers(qdev);
3970 	/* If the port is initialized and the
3971 	 * link is up then turn on the carrier.
3972 	 */
3973 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
3974 			(ql_read32(qdev, STS) & qdev->port_link_up))
3975 		ql_link_on(qdev);
3976 	/* Restore rx mode. */
3977 	clear_bit(QL_ALLMULTI, &qdev->flags);
3978 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
3979 	qlge_set_multicast_list(qdev->ndev);
3980 
3981 	/* Restore vlan setting. */
3982 	qlge_restore_vlan(qdev);
3983 
3984 	ql_enable_interrupts(qdev);
3985 	ql_enable_all_completion_interrupts(qdev);
3986 	netif_tx_start_all_queues(qdev->ndev);
3987 
3988 	return 0;
3989 err_init:
3990 	ql_adapter_reset(qdev);
3991 	return err;
3992 }
3993 
3994 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3995 {
3996 	ql_free_mem_resources(qdev);
3997 	ql_free_irq(qdev);
3998 }
3999 
4000 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4001 {
4002 	int status = 0;
4003 
4004 	if (ql_alloc_mem_resources(qdev)) {
4005 		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4006 		return -ENOMEM;
4007 	}
4008 	status = ql_request_irq(qdev);
4009 	return status;
4010 }
4011 
4012 static int qlge_close(struct net_device *ndev)
4013 {
4014 	struct ql_adapter *qdev = netdev_priv(ndev);
4015 
4016 	/* If we hit pci_channel_io_perm_failure
4017 	 * failure condition, then we already
4018 	 * brought the adapter down.
4019 	 */
4020 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4021 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4022 		clear_bit(QL_EEH_FATAL, &qdev->flags);
4023 		return 0;
4024 	}
4025 
4026 	/*
4027 	 * Wait for device to recover from a reset.
4028 	 * (Rarely happens, but possible.)
4029 	 */
4030 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4031 		msleep(1);
4032 	ql_adapter_down(qdev);
4033 	ql_release_adapter_resources(qdev);
4034 	return 0;
4035 }
4036 
4037 static int ql_configure_rings(struct ql_adapter *qdev)
4038 {
4039 	int i;
4040 	struct rx_ring *rx_ring;
4041 	struct tx_ring *tx_ring;
4042 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4043 	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4044 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4045 
4046 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4047 
4048 	/* In a perfect world we have one RSS ring for each CPU
4049 	 * and each has its own vector.  To do that we ask for
4050 	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
4051 	 * vector count to what we actually get.  We then
4052 	 * allocate an RSS ring for each.
4053 	 * Essentially, we are doing min(cpu_count, msix_vector_count).
4054 	 */
4055 	qdev->intr_count = cpu_cnt;
4056 	ql_enable_msix(qdev);
4057 	/* Adjust the RSS ring count to the actual vector count. */
4058 	qdev->rss_ring_count = qdev->intr_count;
4059 	qdev->tx_ring_count = cpu_cnt;
4060 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
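	/* rx_ring[] holds the RSS (inbound) rings first, followed by one
	 * outbound completion ring per TX ring.
	 */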
4061 
4062 	for (i = 0; i < qdev->tx_ring_count; i++) {
4063 		tx_ring = &qdev->tx_ring[i];
4064 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
4065 		tx_ring->qdev = qdev;
4066 		tx_ring->wq_id = i;
4067 		tx_ring->wq_len = qdev->tx_ring_size;
4068 		tx_ring->wq_size =
4069 		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4070 
4071 		/*
4072 		 * The completion queue IDs for the tx rings start
4073 		 * immediately after the rss rings.
4074 		 */
4075 		tx_ring->cq_id = qdev->rss_ring_count + i;
4076 	}
4077 
4078 	for (i = 0; i < qdev->rx_ring_count; i++) {
4079 		rx_ring = &qdev->rx_ring[i];
4080 		memset((void *)rx_ring, 0, sizeof(*rx_ring));
4081 		rx_ring->qdev = qdev;
4082 		rx_ring->cq_id = i;
4083 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
4084 		if (i < qdev->rss_ring_count) {
4085 			/*
4086 			 * Inbound (RSS) queues.
4087 			 */
4088 			rx_ring->cq_len = qdev->rx_ring_size;
4089 			rx_ring->cq_size =
4090 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4091 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4092 			rx_ring->lbq_size =
4093 			    rx_ring->lbq_len * sizeof(__le64);
4094 			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4095 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4096 			rx_ring->sbq_size =
4097 			    rx_ring->sbq_len * sizeof(__le64);
4098 			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4099 			rx_ring->type = RX_Q;
4100 		} else {
4101 			/*
4102 			 * Outbound queue handles outbound completions only.
4103 			 */
4104 			/* outbound cq is same size as tx_ring it services. */
4105 			rx_ring->cq_len = qdev->tx_ring_size;
4106 			rx_ring->cq_size =
4107 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4108 			rx_ring->lbq_len = 0;
4109 			rx_ring->lbq_size = 0;
4110 			rx_ring->lbq_buf_size = 0;
4111 			rx_ring->sbq_len = 0;
4112 			rx_ring->sbq_size = 0;
4113 			rx_ring->sbq_buf_size = 0;
4114 			rx_ring->type = TX_Q;
4115 		}
4116 	}
4117 	return 0;
4118 }
4119 
4120 static int qlge_open(struct net_device *ndev)
4121 {
4122 	int err = 0;
4123 	struct ql_adapter *qdev = netdev_priv(ndev);
4124 
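	/* Bring the interface up: reset the chip, size the rings for the
	 * available CPUs/vectors, allocate memory and IRQs, then start
	 * the adapter.
	 */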
4125 	err = ql_adapter_reset(qdev);
4126 	if (err)
4127 		return err;
4128 
4129 	err = ql_configure_rings(qdev);
4130 	if (err)
4131 		return err;
4132 
4133 	err = ql_get_adapter_resources(qdev);
4134 	if (err)
4135 		goto error_up;
4136 
4137 	err = ql_adapter_up(qdev);
4138 	if (err)
4139 		goto error_up;
4140 
4141 	return err;
4142 
4143 error_up:
4144 	ql_release_adapter_resources(qdev);
4145 	return err;
4146 }
4147 
4148 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4149 {
4150 	struct rx_ring *rx_ring;
4151 	int i, status;
4152 	u32 lbq_buf_len;
4153 
4154 	/* Wait for an outstanding reset to complete. */
4155 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4156 		int i = 3;
4157 		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4158 			netif_err(qdev, ifup, qdev->ndev,
4159 				  "Waiting for adapter UP...\n");
4160 			ssleep(1);
4161 		}
4162 
4163 		if (!i) {
4164 			netif_err(qdev, ifup, qdev->ndev,
4165 				  "Timed out waiting for adapter UP\n");
4166 			return -ETIMEDOUT;
4167 		}
4168 	}
4169 
4170 	status = ql_adapter_down(qdev);
4171 	if (status)
4172 		goto error;
4173 
4174 	/* Get the new rx buffer size. */
4175 	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4176 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4177 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4178 
4179 	for (i = 0; i < qdev->rss_ring_count; i++) {
4180 		rx_ring = &qdev->rx_ring[i];
4181 		/* Set the new size. */
4182 		rx_ring->lbq_buf_size = lbq_buf_len;
4183 	}
4184 
4185 	status = ql_adapter_up(qdev);
4186 	if (status)
4187 		goto error;
4188 
4189 	return status;
4190 error:
4191 	netif_alert(qdev, ifup, qdev->ndev,
4192 		    "Driver up/down cycle failed, closing device.\n");
4193 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4194 	dev_close(qdev->ndev);
4195 	return status;
4196 }
4197 
4198 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4199 {
4200 	struct ql_adapter *qdev = netdev_priv(ndev);
4201 	int status;
4202 
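	/* Only transitions between the standard (1500) and jumbo (9000)
	 * MTUs are supported.
	 */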
4203 	if (ndev->mtu == 1500 && new_mtu == 9000) {
4204 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4205 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
4206 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4207 	} else
4208 		return -EINVAL;
4209 
4210 	queue_delayed_work(qdev->workqueue,
4211 			&qdev->mpi_port_cfg_work, 3*HZ);
4212 
4213 	ndev->mtu = new_mtu;
4214 
4215 	if (!netif_running(qdev->ndev)) {
4216 		return 0;
4217 	}
4218 
4219 	status = ql_change_rx_buffers(qdev);
4220 	if (status) {
4221 		netif_err(qdev, ifup, qdev->ndev,
4222 			  "Changing MTU failed.\n");
4223 	}
4224 
4225 	return status;
4226 }
4227 
4228 static struct net_device_stats *qlge_get_stats(struct net_device
4229 					       *ndev)
4230 {
4231 	struct ql_adapter *qdev = netdev_priv(ndev);
4232 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4233 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4234 	unsigned long pkts, mcast, dropped, errors, bytes;
4235 	int i;
4236 
4237 	/* Get RX stats. */
4238 	pkts = mcast = dropped = errors = bytes = 0;
4239 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4240 			pkts += rx_ring->rx_packets;
4241 			bytes += rx_ring->rx_bytes;
4242 			dropped += rx_ring->rx_dropped;
4243 			errors += rx_ring->rx_errors;
4244 			mcast += rx_ring->rx_multicast;
4245 	}
4246 	ndev->stats.rx_packets = pkts;
4247 	ndev->stats.rx_bytes = bytes;
4248 	ndev->stats.rx_dropped = dropped;
4249 	ndev->stats.rx_errors = errors;
4250 	ndev->stats.multicast = mcast;
4251 
4252 	/* Get TX stats. */
4253 	pkts = errors = bytes = 0;
4254 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4255 			pkts += tx_ring->tx_packets;
4256 			bytes += tx_ring->tx_bytes;
4257 			errors += tx_ring->tx_errors;
4258 	}
4259 	ndev->stats.tx_packets = pkts;
4260 	ndev->stats.tx_bytes = bytes;
4261 	ndev->stats.tx_errors = errors;
4262 	return &ndev->stats;
4263 }
4264 
4265 static void qlge_set_multicast_list(struct net_device *ndev)
4266 {
4267 	struct ql_adapter *qdev = netdev_priv(ndev);
4268 	struct netdev_hw_addr *ha;
4269 	int i, status;
4270 
4271 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4272 	if (status)
4273 		return;
4274 	/*
4275 	 * Set or clear promiscuous mode if a
4276 	 * transition is taking place.
4277 	 */
4278 	if (ndev->flags & IFF_PROMISC) {
4279 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4280 			if (ql_set_routing_reg
4281 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4282 				netif_err(qdev, hw, qdev->ndev,
4283 					  "Failed to set promiscuous mode.\n");
4284 			} else {
4285 				set_bit(QL_PROMISCUOUS, &qdev->flags);
4286 			}
4287 		}
4288 	} else {
4289 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4290 			if (ql_set_routing_reg
4291 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4292 				netif_err(qdev, hw, qdev->ndev,
4293 					  "Failed to clear promiscuous mode.\n");
4294 			} else {
4295 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4296 			}
4297 		}
4298 	}
4299 
4300 	/*
4301 	 * Set or clear all multicast mode if a
4302 	 * transition is taking place.
4303 	 */
4304 	if ((ndev->flags & IFF_ALLMULTI) ||
4305 	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4306 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4307 			if (ql_set_routing_reg
4308 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4309 				netif_err(qdev, hw, qdev->ndev,
4310 					  "Failed to set all-multi mode.\n");
4311 			} else {
4312 				set_bit(QL_ALLMULTI, &qdev->flags);
4313 			}
4314 		}
4315 	} else {
4316 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4317 			if (ql_set_routing_reg
4318 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4319 				netif_err(qdev, hw, qdev->ndev,
4320 					  "Failed to clear all-multi mode.\n");
4321 			} else {
4322 				clear_bit(QL_ALLMULTI, &qdev->flags);
4323 			}
4324 		}
4325 	}
4326 
4327 	if (!netdev_mc_empty(ndev)) {
4328 		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4329 		if (status)
4330 			goto exit;
4331 		i = 0;
4332 		netdev_for_each_mc_addr(ha, ndev) {
4333 			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4334 						MAC_ADDR_TYPE_MULTI_MAC, i)) {
4335 				netif_err(qdev, hw, qdev->ndev,
4336 					  "Failed to load multicast address.\n");
4337 				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4338 				goto exit;
4339 			}
4340 			i++;
4341 		}
4342 		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4343 		if (ql_set_routing_reg
4344 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4345 			netif_err(qdev, hw, qdev->ndev,
4346 				  "Failed to set multicast match mode.\n");
4347 		} else {
4348 			set_bit(QL_ALLMULTI, &qdev->flags);
4349 		}
4350 	}
4351 exit:
4352 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4353 }
4354 
4355 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4356 {
4357 	struct ql_adapter *qdev = netdev_priv(ndev);
4358 	struct sockaddr *addr = p;
4359 	int status;
4360 
4361 	if (!is_valid_ether_addr(addr->sa_data))
4362 		return -EADDRNOTAVAIL;
4363 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4364 	/* Update local copy of current mac address. */
4365 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4366 
4367 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4368 	if (status)
4369 		return status;
4370 	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4371 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4372 	if (status)
4373 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4374 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4375 	return status;
4376 }
4377 
4378 static void qlge_tx_timeout(struct net_device *ndev)
4379 {
4380 	struct ql_adapter *qdev = netdev_priv(ndev);
4381 	ql_queue_asic_error(qdev);
4382 }
4383 
4384 static void ql_asic_reset_work(struct work_struct *work)
4385 {
4386 	struct ql_adapter *qdev =
4387 	    container_of(work, struct ql_adapter, asic_reset_work.work);
4388 	int status;
4389 	rtnl_lock();
4390 	status = ql_adapter_down(qdev);
4391 	if (status)
4392 		goto error;
4393 
4394 	status = ql_adapter_up(qdev);
4395 	if (status)
4396 		goto error;
4397 
4398 	/* Restore rx mode. */
4399 	clear_bit(QL_ALLMULTI, &qdev->flags);
4400 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4401 	qlge_set_multicast_list(qdev->ndev);
4402 
4403 	rtnl_unlock();
4404 	return;
4405 error:
4406 	netif_alert(qdev, ifup, qdev->ndev,
4407 		    "Driver up/down cycle failed, closing device\n");
4408 
4409 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4410 	dev_close(qdev->ndev);
4411 	rtnl_unlock();
4412 }
4413 
4414 static const struct nic_operations qla8012_nic_ops = {
4415 	.get_flash		= ql_get_8012_flash_params,
4416 	.port_initialize	= ql_8012_port_initialize,
4417 };
4418 
4419 static const struct nic_operations qla8000_nic_ops = {
4420 	.get_flash		= ql_get_8000_flash_params,
4421 	.port_initialize	= ql_8000_port_initialize,
4422 };
4423 
4424 /* Find the pcie function number for the other NIC
4425  * on this chip.  Since both NIC functions share a
4426  * common firmware we have the lowest enabled function
4427  * do any common work.  Examples would be resetting
4428  * after a fatal firmware error, or doing a firmware
4429  * coredump.
4430  */
4431 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4432 {
4433 	int status = 0;
4434 	u32 temp;
4435 	u32 nic_func1, nic_func2;
4436 
4437 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4438 			&temp);
4439 	if (status)
4440 		return status;
4441 
4442 	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4443 			MPI_TEST_NIC_FUNC_MASK);
4444 	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4445 			MPI_TEST_NIC_FUNC_MASK);
4446 
4447 	if (qdev->func == nic_func1)
4448 		qdev->alt_func = nic_func2;
4449 	else if (qdev->func == nic_func2)
4450 		qdev->alt_func = nic_func1;
4451 	else
4452 		status = -EIO;
4453 
4454 	return status;
4455 }
4456 
4457 static int ql_get_board_info(struct ql_adapter *qdev)
4458 {
4459 	int status;
4460 	qdev->func =
4461 	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4462 	if (qdev->func > 3)
4463 		return -EIO;
4464 
4465 	status = ql_get_alt_pcie_func(qdev);
4466 	if (status)
4467 		return status;
4468 
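	/* The lower-numbered of the two NIC functions owns port 0; the
	 * other owns port 1.
	 */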
4469 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4470 	if (qdev->port) {
4471 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4472 		qdev->port_link_up = STS_PL1;
4473 		qdev->port_init = STS_PI1;
4474 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4475 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4476 	} else {
4477 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4478 		qdev->port_link_up = STS_PL0;
4479 		qdev->port_init = STS_PI0;
4480 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4481 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4482 	}
4483 	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4484 	qdev->device_id = qdev->pdev->device;
4485 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4486 		qdev->nic_ops = &qla8012_nic_ops;
4487 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4488 		qdev->nic_ops = &qla8000_nic_ops;
4489 	return status;
4490 }
4491 
4492 static void ql_release_all(struct pci_dev *pdev)
4493 {
4494 	struct net_device *ndev = pci_get_drvdata(pdev);
4495 	struct ql_adapter *qdev = netdev_priv(ndev);
4496 
4497 	if (qdev->workqueue) {
4498 		destroy_workqueue(qdev->workqueue);
4499 		qdev->workqueue = NULL;
4500 	}
4501 
4502 	if (qdev->reg_base)
4503 		iounmap(qdev->reg_base);
4504 	if (qdev->doorbell_area)
4505 		iounmap(qdev->doorbell_area);
4506 	vfree(qdev->mpi_coredump);
4507 	pci_release_regions(pdev);
4508 	pci_set_drvdata(pdev, NULL);
4509 }
4510 
4511 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4512 			  int cards_found)
4513 {
4514 	struct ql_adapter *qdev = netdev_priv(ndev);
4515 	int err = 0;
4516 
4517 	memset((void *)qdev, 0, sizeof(*qdev));
4518 	err = pci_enable_device(pdev);
4519 	if (err) {
4520 		dev_err(&pdev->dev, "PCI device enable failed.\n");
4521 		return err;
4522 	}
4523 
4524 	qdev->ndev = ndev;
4525 	qdev->pdev = pdev;
4526 	pci_set_drvdata(pdev, ndev);
4527 
4528 	/* Set PCIe read request size */
4529 	err = pcie_set_readrq(pdev, 4096);
4530 	if (err) {
4531 		dev_err(&pdev->dev, "Set readrq failed.\n");
4532 		goto err_out1;
4533 	}
4534 
4535 	err = pci_request_regions(pdev, DRV_NAME);
4536 	if (err) {
4537 		dev_err(&pdev->dev, "PCI region request failed.\n");
4538 		goto err_out1;
4539 	}
4540 
4541 	pci_set_master(pdev);
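	/* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails. */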
4542 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4543 		set_bit(QL_DMA64, &qdev->flags);
4544 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4545 	} else {
4546 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4547 		if (!err)
4548 		       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4549 	}
4550 
4551 	if (err) {
4552 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4553 		goto err_out2;
4554 	}
4555 
4556 	/* Set PCIe reset type for EEH to fundamental. */
4557 	pdev->needs_freset = 1;
4558 	pci_save_state(pdev);
4559 	qdev->reg_base =
4560 	    ioremap_nocache(pci_resource_start(pdev, 1),
4561 			    pci_resource_len(pdev, 1));
4562 	if (!qdev->reg_base) {
4563 		dev_err(&pdev->dev, "Register mapping failed.\n");
4564 		err = -ENOMEM;
4565 		goto err_out2;
4566 	}
4567 
4568 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4569 	qdev->doorbell_area =
4570 	    ioremap_nocache(pci_resource_start(pdev, 3),
4571 			    pci_resource_len(pdev, 3));
4572 	if (!qdev->doorbell_area) {
4573 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4574 		err = -ENOMEM;
4575 		goto err_out2;
4576 	}
4577 
4578 	err = ql_get_board_info(qdev);
4579 	if (err) {
4580 		dev_err(&pdev->dev, "Register access failed.\n");
4581 		err = -EIO;
4582 		goto err_out2;
4583 	}
4584 	qdev->msg_enable = netif_msg_init(debug, default_msg);
4585 	spin_lock_init(&qdev->hw_lock);
4586 	spin_lock_init(&qdev->stats_lock);
4587 
4588 	if (qlge_mpi_coredump) {
4589 		qdev->mpi_coredump =
4590 			vmalloc(sizeof(struct ql_mpi_coredump));
4591 		if (qdev->mpi_coredump == NULL) {
4592 			err = -ENOMEM;
4593 			goto err_out2;
4594 		}
4595 		if (qlge_force_coredump)
4596 			set_bit(QL_FRC_COREDUMP, &qdev->flags);
4597 	}
4598 	/* make sure the EEPROM is good */
4599 	err = qdev->nic_ops->get_flash(qdev);
4600 	if (err) {
4601 		dev_err(&pdev->dev, "Invalid FLASH.\n");
4602 		goto err_out2;
4603 	}
4604 
4605 	/* Keep local copy of current mac address. */
4606 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4607 
4608 	/* Set up the default ring sizes. */
4609 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4610 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4611 
4612 	/* Set up the coalescing parameters. */
4613 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4614 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4615 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4616 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4617 
4618 	/*
4619 	 * Set up the operating parameters.
4620 	 */
4621 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
4622 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4623 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4624 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4625 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4626 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4627 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4628 	init_completion(&qdev->ide_completion);
4629 	mutex_init(&qdev->mpi_mutex);
4630 
4631 	if (!cards_found) {
4632 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4633 		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4634 			 DRV_NAME, DRV_VERSION);
4635 	}
4636 	return 0;
4637 err_out2:
4638 	ql_release_all(pdev);
4639 err_out1:
4640 	pci_disable_device(pdev);
4641 	return err;
4642 }
4643 
4644 static const struct net_device_ops qlge_netdev_ops = {
4645 	.ndo_open		= qlge_open,
4646 	.ndo_stop		= qlge_close,
4647 	.ndo_start_xmit		= qlge_send,
4648 	.ndo_change_mtu		= qlge_change_mtu,
4649 	.ndo_get_stats		= qlge_get_stats,
4650 	.ndo_set_rx_mode	= qlge_set_multicast_list,
4651 	.ndo_set_mac_address	= qlge_set_mac_address,
4652 	.ndo_validate_addr	= eth_validate_addr,
4653 	.ndo_tx_timeout		= qlge_tx_timeout,
4654 	.ndo_fix_features	= qlge_fix_features,
4655 	.ndo_set_features	= qlge_set_features,
4656 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4657 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
4658 };
4659 
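/* Periodic bus-health timer.  Reading STS forces a PCI access so the
 * EEH machinery can notice a dead bus; if the channel is offline we
 * stop rearming, otherwise the timer fires again in 5 seconds.
 */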
4660 static void ql_timer(unsigned long data)
4661 {
4662 	struct ql_adapter *qdev = (struct ql_adapter *)data;
4663 	u32 var = 0;
4664 
4665 	var = ql_read32(qdev, STS);
4666 	if (pci_channel_offline(qdev->pdev)) {
4667 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4668 		return;
4669 	}
4670 
4671 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4672 }
4673 
4674 static int qlge_probe(struct pci_dev *pdev,
4675 		      const struct pci_device_id *pci_entry)
4676 {
4677 	struct net_device *ndev = NULL;
4678 	struct ql_adapter *qdev = NULL;
4679 	static int cards_found = 0;
4680 	int err = 0;
4681 
4682 	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4683 			min(MAX_CPUS, netif_get_num_default_rss_queues()));
4684 	if (!ndev)
4685 		return -ENOMEM;
4686 
4687 	err = ql_init_device(pdev, ndev, cards_found);
4688 	if (err < 0) {
4689 		free_netdev(ndev);
4690 		return err;
4691 	}
4692 
4693 	qdev = netdev_priv(ndev);
4694 	SET_NETDEV_DEV(ndev, &pdev->dev);
4695 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4696 		NETIF_F_TSO | NETIF_F_TSO_ECN |
4697 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
4698 	ndev->features = ndev->hw_features |
4699 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4700 	ndev->vlan_features = ndev->hw_features;
4701 
4702 	if (test_bit(QL_DMA64, &qdev->flags))
4703 		ndev->features |= NETIF_F_HIGHDMA;
4704 
4705 	/*
4706 	 * Set up net_device structure.
4707 	 */
4708 	ndev->tx_queue_len = qdev->tx_ring_size;
4709 	ndev->irq = pdev->irq;
4710 
4711 	ndev->netdev_ops = &qlge_netdev_ops;
4712 	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4713 	ndev->watchdog_timeo = 10 * HZ;
4714 
4715 	err = register_netdev(ndev);
4716 	if (err) {
4717 		dev_err(&pdev->dev, "net device registration failed.\n");
4718 		ql_release_all(pdev);
4719 		pci_disable_device(pdev);
4720 		free_netdev(ndev);
4721 		return err;
4722 	}
4723 	/* Start up the timer to trigger EEH if
4724 	 * the bus goes dead
4725 	 */
4726 	init_timer_deferrable(&qdev->timer);
4727 	qdev->timer.data = (unsigned long)qdev;
4728 	qdev->timer.function = ql_timer;
4729 	qdev->timer.expires = jiffies + (5*HZ);
4730 	add_timer(&qdev->timer);
4731 	ql_link_off(qdev);
4732 	ql_display_dev_info(ndev);
4733 	atomic_set(&qdev->lb_count, 0);
4734 	cards_found++;
4735 	return 0;
4736 }
4737 
4738 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4739 {
4740 	return qlge_send(skb, ndev);
4741 }
4742 
4743 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4744 {
4745 	return ql_clean_inbound_rx_ring(rx_ring, budget);
4746 }
4747 
4748 static void qlge_remove(struct pci_dev *pdev)
4749 {
4750 	struct net_device *ndev = pci_get_drvdata(pdev);
4751 	struct ql_adapter *qdev = netdev_priv(ndev);
4752 	del_timer_sync(&qdev->timer);
4753 	ql_cancel_all_work_sync(qdev);
4754 	unregister_netdev(ndev);
4755 	ql_release_all(pdev);
4756 	pci_disable_device(pdev);
4757 	free_netdev(ndev);
4758 }
4759 
4760 /* Clean up resources without touching hardware. */
4761 static void ql_eeh_close(struct net_device *ndev)
4762 {
4763 	int i;
4764 	struct ql_adapter *qdev = netdev_priv(ndev);
4765 
4766 	if (netif_carrier_ok(ndev)) {
4767 		netif_carrier_off(ndev);
4768 		netif_stop_queue(ndev);
4769 	}
4770 
4771 	/* Disabling the timer */
4772 	del_timer_sync(&qdev->timer);
4773 	ql_cancel_all_work_sync(qdev);
4774 
4775 	for (i = 0; i < qdev->rss_ring_count; i++)
4776 		netif_napi_del(&qdev->rx_ring[i].napi);
4777 
4778 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4779 	ql_tx_ring_clean(qdev);
4780 	ql_free_rx_buffers(qdev);
4781 	ql_release_adapter_resources(qdev);
4782 }
4783 
4784 /*
4785  * This callback is called by the PCI subsystem whenever
4786  * a PCI bus error is detected.
4787  */
4788 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4789 					       enum pci_channel_state state)
4790 {
4791 	struct net_device *ndev = pci_get_drvdata(pdev);
4792 	struct ql_adapter *qdev = netdev_priv(ndev);
4793 
4794 	switch (state) {
4795 	case pci_channel_io_normal:
4796 		return PCI_ERS_RESULT_CAN_RECOVER;
4797 	case pci_channel_io_frozen:
4798 		netif_device_detach(ndev);
4799 		if (netif_running(ndev))
4800 			ql_eeh_close(ndev);
4801 		pci_disable_device(pdev);
4802 		return PCI_ERS_RESULT_NEED_RESET;
4803 	case pci_channel_io_perm_failure:
4804 		dev_err(&pdev->dev,
4805 			"%s: pci_channel_io_perm_failure.\n", __func__);
4806 		ql_eeh_close(ndev);
4807 		set_bit(QL_EEH_FATAL, &qdev->flags);
4808 		return PCI_ERS_RESULT_DISCONNECT;
4809 	}
4810 
4811 	/* Request a slot reset. */
4812 	return PCI_ERS_RESULT_NEED_RESET;
4813 }
4814 
4815 /*
4816  * This callback is called after the PCI bus has been reset.
4817  * Basically, this tries to restart the card from scratch.
4818  * This is a shortened version of the device probe/discovery code,
4819  * it resembles the first-half of the () routine.
4820  */
4821 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4822 {
4823 	struct net_device *ndev = pci_get_drvdata(pdev);
4824 	struct ql_adapter *qdev = netdev_priv(ndev);
4825 
4826 	pdev->error_state = pci_channel_io_normal;
4827 
4828 	pci_restore_state(pdev);
4829 	if (pci_enable_device(pdev)) {
4830 		netif_err(qdev, ifup, qdev->ndev,
4831 			  "Cannot re-enable PCI device after reset.\n");
4832 		return PCI_ERS_RESULT_DISCONNECT;
4833 	}
4834 	pci_set_master(pdev);
4835 
4836 	if (ql_adapter_reset(qdev)) {
4837 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4838 		set_bit(QL_EEH_FATAL, &qdev->flags);
4839 		return PCI_ERS_RESULT_DISCONNECT;
4840 	}
4841 
4842 	return PCI_ERS_RESULT_RECOVERED;
4843 }
4844 
4845 static void qlge_io_resume(struct pci_dev *pdev)
4846 {
4847 	struct net_device *ndev = pci_get_drvdata(pdev);
4848 	struct ql_adapter *qdev = netdev_priv(ndev);
4849 	int err = 0;
4850 
4851 	if (netif_running(ndev)) {
4852 		err = qlge_open(ndev);
4853 		if (err) {
4854 			netif_err(qdev, ifup, qdev->ndev,
4855 				  "Device initialization failed after reset.\n");
4856 			return;
4857 		}
4858 	} else {
4859 		netif_err(qdev, ifup, qdev->ndev,
4860 			  "Device was not running prior to EEH.\n");
4861 	}
4862 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4863 	netif_device_attach(ndev);
4864 }
4865 
4866 static const struct pci_error_handlers qlge_err_handler = {
4867 	.error_detected = qlge_io_error_detected,
4868 	.slot_reset = qlge_io_slot_reset,
4869 	.resume = qlge_io_resume,
4870 };
4871 
4872 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4873 {
4874 	struct net_device *ndev = pci_get_drvdata(pdev);
4875 	struct ql_adapter *qdev = netdev_priv(ndev);
4876 	int err;
4877 
4878 	netif_device_detach(ndev);
4879 	del_timer_sync(&qdev->timer);
4880 
4881 	if (netif_running(ndev)) {
4882 		err = ql_adapter_down(qdev);
4883 		if (!err)
4884 			return err;
4885 	}
4886 
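	/* Program the WOL configuration into the MPI firmware before
	 * powering the device down.
	 */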
4887 	ql_wol(qdev);
4888 	err = pci_save_state(pdev);
4889 	if (err)
4890 		return err;
4891 
4892 	pci_disable_device(pdev);
4893 
4894 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
4895 
4896 	return 0;
4897 }
4898 
4899 #ifdef CONFIG_PM
4900 static int qlge_resume(struct pci_dev *pdev)
4901 {
4902 	struct net_device *ndev = pci_get_drvdata(pdev);
4903 	struct ql_adapter *qdev = netdev_priv(ndev);
4904 	int err;
4905 
4906 	pci_set_power_state(pdev, PCI_D0);
4907 	pci_restore_state(pdev);
4908 	err = pci_enable_device(pdev);
4909 	if (err) {
4910 		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4911 		return err;
4912 	}
4913 	pci_set_master(pdev);
4914 
4915 	pci_enable_wake(pdev, PCI_D3hot, 0);
4916 	pci_enable_wake(pdev, PCI_D3cold, 0);
4917 
4918 	if (netif_running(ndev)) {
4919 		err = ql_adapter_up(qdev);
4920 		if (err)
4921 			return err;
4922 	}
4923 
4924 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4925 	netif_device_attach(ndev);
4926 
4927 	return 0;
4928 }
4929 #endif /* CONFIG_PM */
4930 
4931 static void qlge_shutdown(struct pci_dev *pdev)
4932 {
4933 	qlge_suspend(pdev, PMSG_SUSPEND);
4934 }
4935 
4936 static struct pci_driver qlge_driver = {
4937 	.name = DRV_NAME,
4938 	.id_table = qlge_pci_tbl,
4939 	.probe = qlge_probe,
4940 	.remove = qlge_remove,
4941 #ifdef CONFIG_PM
4942 	.suspend = qlge_suspend,
4943 	.resume = qlge_resume,
4944 #endif
4945 	.shutdown = qlge_shutdown,
4946 	.err_handler = &qlge_err_handler
4947 };
4948 
4949 static int __init qlge_init_module(void)
4950 {
4951 	return pci_register_driver(&qlge_driver);
4952 }
4953 
4954 static void __exit qlge_exit(void)
4955 {
4956 	pci_unregister_driver(&qlge_driver);
4957 }
4958 
4959 module_init(qlge_init_module);
4960 module_exit(qlge_exit);
4961