1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic qlge NIC HBA Driver
4  * Copyright (c)  2003-2008 QLogic Corporation
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43 
44 #include "qlge.h"
45 
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
48 
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
53 
54 static const u32 default_msg =
55 	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 	NETIF_MSG_IFDOWN |
57 	NETIF_MSG_IFUP |
58 	NETIF_MSG_RX_ERR |
59 	NETIF_MSG_TX_ERR |
60 	NETIF_MSG_HW | NETIF_MSG_WOL | 0;
61 
62 static int debug = -1;	/* defaults above */
63 module_param(debug, int, 0664);
64 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
65 
66 #define MSIX_IRQ 0
67 #define MSI_IRQ 1
68 #define LEG_IRQ 2
69 static int qlge_irq_type = MSIX_IRQ;
70 module_param(qlge_irq_type, int, 0664);
71 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
72 
73 static int qlge_mpi_coredump;
74 module_param(qlge_mpi_coredump, int, 0);
75 MODULE_PARM_DESC(qlge_mpi_coredump,
76 		 "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
77 
78 static int qlge_force_coredump;
79 module_param(qlge_force_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_force_coredump,
81 		 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
82 
83 static const struct pci_device_id qlge_pci_tbl[] = {
84 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
85 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
86 	/* required last entry */
87 	{0,}
88 };
89 
90 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
91 
92 static int ql_wol(struct ql_adapter *);
93 static void qlge_set_multicast_list(struct net_device *);
94 static int ql_adapter_down(struct ql_adapter *);
95 static int ql_adapter_up(struct ql_adapter *);
96 
97 /* This hardware semaphore provides exclusive access to
98  * resources shared between the NIC driver, MPI firmware,
99  * FCOE firmware and the FC driver.
100  */
101 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
102 {
103 	u32 sem_bits = 0;
104 
105 	switch (sem_mask) {
106 	case SEM_XGMAC0_MASK:
107 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
108 		break;
109 	case SEM_XGMAC1_MASK:
110 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
111 		break;
112 	case SEM_ICB_MASK:
113 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
114 		break;
115 	case SEM_MAC_ADDR_MASK:
116 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
117 		break;
118 	case SEM_FLASH_MASK:
119 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
120 		break;
121 	case SEM_PROBE_MASK:
122 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
123 		break;
124 	case SEM_RT_IDX_MASK:
125 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
126 		break;
127 	case SEM_PROC_REG_MASK:
128 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
129 		break;
130 	default:
131 		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
132 		return -EINVAL;
133 	}
134 
135 	ql_write32(qdev, SEM, sem_bits | sem_mask);
136 	return !(ql_read32(qdev, SEM) & sem_bits);
137 }
138 
139 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
140 {
141 	unsigned int wait_count = 30;
142 
143 	do {
144 		if (!ql_sem_trylock(qdev, sem_mask))
145 			return 0;
146 		udelay(100);
147 	} while (--wait_count);
148 	return -ETIMEDOUT;
149 }
150 
151 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
152 {
153 	ql_write32(qdev, SEM, sem_mask);
154 	ql_read32(qdev, SEM);	/* flush */
155 }
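
/* Typical acquire/release pattern around these helpers, as used by
 * ql_set_mac_addr() and the flash readers below (illustrative summary only):
 *
 *	status = ql_sem_spinlock(qdev, SEM_FLASH_MASK);
 *	if (status)
 *		return status;
 *	...touch the shared resource...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */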
156 
157 /* This function waits for a specific bit to come ready
158  * in a given register.  It is used mostly by the initialization
159  * process, but is also used by callbacks such as
160  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
161  */
162 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
163 {
164 	u32 temp;
165 	int count;
166 
167 	for (count = 0; count < UDELAY_COUNT; count++) {
168 		temp = ql_read32(qdev, reg);
169 
170 		/* check for errors */
171 		if (temp & err_bit) {
172 			netif_alert(qdev, probe, qdev->ndev,
173 				    "register 0x%.08x access error, value = 0x%.08x!.\n",
174 				    reg, temp);
175 			return -EIO;
176 		} else if (temp & bit) {
177 			return 0;
178 		}
179 		udelay(UDELAY_DELAY);
180 	}
181 	netif_alert(qdev, probe, qdev->ndev,
182 		    "Timed out waiting for reg %x to come ready.\n", reg);
183 	return -ETIMEDOUT;
184 }
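
/* Note: the poll above is bounded by UDELAY_COUNT iterations of
 * udelay(UDELAY_DELAY) microseconds; both constants are defined in the
 * qlge headers.
 */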
185 
186 /* The CFG register is used to download TX and RX control blocks
187  * to the chip. This function waits for an operation to complete.
188  */
189 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
190 {
191 	int count;
192 	u32 temp;
193 
194 	for (count = 0; count < UDELAY_COUNT; count++) {
195 		temp = ql_read32(qdev, CFG);
196 		if (temp & CFG_LE)
197 			return -EIO;
198 		if (!(temp & bit))
199 			return 0;
200 		udelay(UDELAY_DELAY);
201 	}
202 	return -ETIMEDOUT;
203 }
204 
205 /* Used to issue init control blocks to hw. Maps control block,
206  * sets address, triggers download, waits for completion.
207  */
208 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
209 		 u16 q_id)
210 {
211 	u64 map;
212 	int status = 0;
213 	int direction;
214 	u32 mask;
215 	u32 value;
216 
217 	if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
218 		direction = DMA_TO_DEVICE;
219 	else
220 		direction = DMA_FROM_DEVICE;
221 
222 	map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
223 	if (dma_mapping_error(&qdev->pdev->dev, map)) {
224 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
225 		return -ENOMEM;
226 	}
227 
228 	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
229 	if (status)
230 		goto lock_failed;
231 
232 	status = ql_wait_cfg(qdev, bit);
233 	if (status) {
234 		netif_err(qdev, ifup, qdev->ndev,
235 			  "Timed out waiting for CFG to come ready.\n");
236 		goto exit;
237 	}
238 
239 	ql_write32(qdev, ICB_L, (u32)map);
240 	ql_write32(qdev, ICB_H, (u32)(map >> 32));
241 
242 	mask = CFG_Q_MASK | (bit << 16);
243 	value = bit | (q_id << CFG_Q_SHIFT);
244 	ql_write32(qdev, CFG, (mask | value));
245 
246 	/*
247 	 * Wait for the bit to clear after signaling hw.
248 	 */
249 	status = ql_wait_cfg(qdev, bit);
250 exit:
251 	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
252 lock_failed:
253 	dma_unmap_single(&qdev->pdev->dev, map, size, direction);
254 	return status;
255 }
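
/* Illustrative call (a sketch of how callers elsewhere in the driver use
 * this helper; the control block member names here are assumptions):
 *
 *	err = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), CFG_LCQ,
 *			   rx_ring->cq_id);
 */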
256 
257 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
258 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
259 			u32 *value)
260 {
261 	u32 offset = 0;
262 	int status;
263 
264 	switch (type) {
265 	case MAC_ADDR_TYPE_MULTI_MAC:
266 	case MAC_ADDR_TYPE_CAM_MAC: {
267 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
268 		if (status)
269 			break;
270 		ql_write32(qdev, MAC_ADDR_IDX,
271 			   (offset++) | /* offset */
272 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
273 				   MAC_ADDR_ADR | MAC_ADDR_RS |
274 				   type); /* type */
275 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
276 		if (status)
277 			break;
278 		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
279 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
280 		if (status)
281 			break;
282 		ql_write32(qdev, MAC_ADDR_IDX,
283 			   (offset++) | /* offset */
284 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
285 				   MAC_ADDR_ADR | MAC_ADDR_RS |
286 				   type); /* type */
287 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
288 		if (status)
289 			break;
290 		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
291 		if (type == MAC_ADDR_TYPE_CAM_MAC) {
292 			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
293 						 MAC_ADDR_MW, 0);
294 			if (status)
295 				break;
296 			ql_write32(qdev, MAC_ADDR_IDX,
297 				   (offset++) | /* offset */
298 					   (index
299 					    << MAC_ADDR_IDX_SHIFT) | /* index */
300 					   MAC_ADDR_ADR |
301 					   MAC_ADDR_RS | type); /* type */
302 			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
303 						 MAC_ADDR_MR, 0);
304 			if (status)
305 				break;
306 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
307 		}
308 		break;
309 	}
310 	case MAC_ADDR_TYPE_VLAN:
311 	case MAC_ADDR_TYPE_MULTI_FLTR:
312 	default:
313 		netif_crit(qdev, ifup, qdev->ndev,
314 			   "Address type %d not yet supported.\n", type);
315 		status = -EPERM;
316 	}
317 	return status;
318 }
319 
320 /* Set up a MAC, multicast or VLAN address for the
321  * inbound frame matching.
322  */
323 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
324 			       u16 index)
325 {
326 	u32 offset = 0;
327 	int status = 0;
328 
329 	switch (type) {
330 	case MAC_ADDR_TYPE_MULTI_MAC: {
331 		u32 upper = (addr[0] << 8) | addr[1];
332 		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
333 			    (addr[5]);
334 
335 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
336 		if (status)
337 			break;
338 		ql_write32(qdev, MAC_ADDR_IDX,
339 			   (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
340 				   MAC_ADDR_E);
341 		ql_write32(qdev, MAC_ADDR_DATA, lower);
342 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
343 		if (status)
344 			break;
345 		ql_write32(qdev, MAC_ADDR_IDX,
346 			   (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
347 				   MAC_ADDR_E);
348 
349 		ql_write32(qdev, MAC_ADDR_DATA, upper);
350 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
351 		break;
352 	}
353 	case MAC_ADDR_TYPE_CAM_MAC: {
354 		u32 cam_output;
355 		u32 upper = (addr[0] << 8) | addr[1];
356 		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
357 			    (addr[5]);
358 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
359 		if (status)
360 			break;
361 		ql_write32(qdev, MAC_ADDR_IDX,
362 			   (offset++) | /* offset */
363 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
364 				   type); /* type */
365 		ql_write32(qdev, MAC_ADDR_DATA, lower);
366 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367 		if (status)
368 			break;
369 		ql_write32(qdev, MAC_ADDR_IDX,
370 			   (offset++) | /* offset */
371 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
372 				   type); /* type */
373 		ql_write32(qdev, MAC_ADDR_DATA, upper);
374 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
375 		if (status)
376 			break;
377 		ql_write32(qdev, MAC_ADDR_IDX,
378 			   (offset) | /* offset */
379 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
380 				   type); /* type */
381 		/* This field should also include the queue id
382 		 * and possibly the function id.  Right now we hardcode
383 		 * the route field to NIC core.
384 		 */
385 		cam_output = (CAM_OUT_ROUTE_NIC |
386 			      (qdev->func << CAM_OUT_FUNC_SHIFT) |
387 			      (0 << CAM_OUT_CQ_ID_SHIFT));
388 		if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
389 			cam_output |= CAM_OUT_RV;
390 		/* route to NIC core */
391 		ql_write32(qdev, MAC_ADDR_DATA, cam_output);
392 		break;
393 	}
394 	case MAC_ADDR_TYPE_VLAN: {
395 		u32 enable_bit = *((u32 *)&addr[0]);
396 		/* For VLAN, the addr actually holds a bit that
397 		 * either enables or disables the vlan id we are
398 		 * addressing. It's either MAC_ADDR_E on or off.
399 		 * That's bit-27 we're talking about.
400 		 */
401 		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
402 		if (status)
403 			break;
404 		ql_write32(qdev, MAC_ADDR_IDX,
405 			   offset | /* offset */
406 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
407 				   type | /* type */
408 				   enable_bit); /* enable/disable */
409 		break;
410 	}
411 	case MAC_ADDR_TYPE_MULTI_FLTR:
412 	default:
413 		netif_crit(qdev, ifup, qdev->ndev,
414 			   "Address type %d not yet supported.\n", type);
415 		status = -EPERM;
416 	}
417 	return status;
418 }
419 
420 /* Set or clear MAC address in hardware. We sometimes
421  * have to clear it to prevent wrong frame routing
422  * especially in a bonding environment.
423  */
424 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
425 {
426 	int status;
427 	char zero_mac_addr[ETH_ALEN];
428 	char *addr;
429 
430 	if (set) {
431 		addr = &qdev->current_mac_addr[0];
432 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
433 			     "Set Mac addr %pM\n", addr);
434 	} else {
435 		eth_zero_addr(zero_mac_addr);
436 		addr = &zero_mac_addr[0];
437 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
438 			     "Clearing MAC address\n");
439 	}
440 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
441 	if (status)
442 		return status;
443 	status = ql_set_mac_addr_reg(qdev, (u8 *)addr,
444 				     MAC_ADDR_TYPE_CAM_MAC,
445 				     qdev->func * MAX_CQ);
446 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
447 	if (status)
448 		netif_err(qdev, ifup, qdev->ndev,
449 			  "Failed to init mac address.\n");
450 	return status;
451 }
452 
453 void ql_link_on(struct ql_adapter *qdev)
454 {
455 	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
456 	netif_carrier_on(qdev->ndev);
457 	ql_set_mac_addr(qdev, 1);
458 }
459 
460 void ql_link_off(struct ql_adapter *qdev)
461 {
462 	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
463 	netif_carrier_off(qdev->ndev);
464 	ql_set_mac_addr(qdev, 0);
465 }
466 
467 /* Get a specific frame routing value from the CAM.
468  * Used for debug and reg dump.
469  */
470 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
471 {
472 	int status = 0;
473 
474 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
475 	if (status)
476 		goto exit;
477 
478 	ql_write32(qdev, RT_IDX,
479 		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
480 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
481 	if (status)
482 		goto exit;
483 	*value = ql_read32(qdev, RT_DATA);
484 exit:
485 	return status;
486 }
487 
488 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
489  * to route different frame types to various inbound queues.  We send broadcast/
490  * multicast/error frames to the default queue for slow handling,
491  * and CAM hit/RSS frames to the fast handling queues.
492  */
493 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
494 			      int enable)
495 {
496 	int status = -EINVAL; /* Return error if no mask match. */
497 	u32 value = 0;
498 
499 	switch (mask) {
500 	case RT_IDX_CAM_HIT:
501 		{
502 			value = RT_IDX_DST_CAM_Q |	/* dest */
503 			    RT_IDX_TYPE_NICQ |	/* type */
504 			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
505 			break;
506 		}
507 	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
508 		{
509 			value = RT_IDX_DST_DFLT_Q |	/* dest */
510 			    RT_IDX_TYPE_NICQ |	/* type */
511 			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
512 			break;
513 		}
514 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
515 		{
516 			value = RT_IDX_DST_DFLT_Q |	/* dest */
517 			    RT_IDX_TYPE_NICQ |	/* type */
518 			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
519 			break;
520 		}
521 	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
522 		{
523 			value = RT_IDX_DST_DFLT_Q | /* dest */
524 				RT_IDX_TYPE_NICQ | /* type */
525 				(RT_IDX_IP_CSUM_ERR_SLOT <<
526 				RT_IDX_IDX_SHIFT); /* index */
527 			break;
528 		}
529 	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
530 		{
531 			value = RT_IDX_DST_DFLT_Q | /* dest */
532 				RT_IDX_TYPE_NICQ | /* type */
533 				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
534 				RT_IDX_IDX_SHIFT); /* index */
535 			break;
536 		}
537 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
538 		{
539 			value = RT_IDX_DST_DFLT_Q |	/* dest */
540 			    RT_IDX_TYPE_NICQ |	/* type */
541 			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
542 			break;
543 		}
544 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
545 		{
546 			value = RT_IDX_DST_DFLT_Q |	/* dest */
547 			    RT_IDX_TYPE_NICQ |	/* type */
548 			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
549 			break;
550 		}
551 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
552 		{
553 			value = RT_IDX_DST_DFLT_Q |	/* dest */
554 			    RT_IDX_TYPE_NICQ |	/* type */
555 			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
556 			break;
557 		}
558 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
559 		{
560 			value = RT_IDX_DST_RSS |	/* dest */
561 			    RT_IDX_TYPE_NICQ |	/* type */
562 			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
563 			break;
564 		}
565 	case 0:		/* Clear the E-bit on an entry. */
566 		{
567 			value = RT_IDX_DST_DFLT_Q |	/* dest */
568 			    RT_IDX_TYPE_NICQ |	/* type */
569 			    (index << RT_IDX_IDX_SHIFT);/* index */
570 			break;
571 		}
572 	default:
573 		netif_err(qdev, ifup, qdev->ndev,
574 			  "Mask type %d not yet supported.\n", mask);
575 		status = -EPERM;
576 		goto exit;
577 	}
578 
579 	if (value) {
580 		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
581 		if (status)
582 			goto exit;
583 		value |= (enable ? RT_IDX_E : 0);
584 		ql_write32(qdev, RT_IDX, value);
585 		ql_write32(qdev, RT_DATA, enable ? mask : 0);
586 	}
587 exit:
588 	return status;
589 }
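
/* Illustrative call: enabling the broadcast slot so broadcast frames land
 * on the default queue (a sketch, not an additional call site):
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 */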
590 
591 static void ql_enable_interrupts(struct ql_adapter *qdev)
592 {
593 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
594 }
595 
596 static void ql_disable_interrupts(struct ql_adapter *qdev)
597 {
598 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
599 }
600 
601 static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
602 {
603 	struct intr_context *ctx = &qdev->intr_context[intr];
604 
605 	ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
606 }
607 
608 static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
609 {
610 	struct intr_context *ctx = &qdev->intr_context[intr];
611 
612 	ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
613 }
614 
615 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
616 {
617 	int i;
618 
619 	for (i = 0; i < qdev->intr_count; i++)
620 		ql_enable_completion_interrupt(qdev, i);
621 }
622 
623 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
624 {
625 	int status, i;
626 	u16 csum = 0;
627 	__le16 *flash = (__le16 *)&qdev->flash;
628 
629 	status = strncmp((char *)&qdev->flash, str, 4);
630 	if (status) {
631 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
632 		return	status;
633 	}
634 
635 	for (i = 0; i < size; i++)
636 		csum += le16_to_cpu(*flash++);
637 
638 	if (csum)
639 		netif_err(qdev, ifup, qdev->ndev,
640 			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
641 
642 	return csum;
643 }
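
/* A valid flash image sums to zero over its 16-bit little-endian words, so
 * any nonzero csum doubles as both the failure indication and the returned
 * error value.
 */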
644 
645 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
646 {
647 	int status = 0;
648 	/* wait for reg to come ready */
649 	status = ql_wait_reg_rdy(qdev,
650 				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
651 	if (status)
652 		goto exit;
653 	/* set up for reg read */
654 	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
655 	/* wait for reg to come ready */
656 	status = ql_wait_reg_rdy(qdev,
657 				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
658 	if (status)
659 		goto exit;
660 	/* This data is stored on flash as an array of
661 	 * __le32.  Since ql_read32() returns cpu endian
662 	 * we need to swap it back.
663 	 */
664 	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
665 exit:
666 	return status;
667 }
668 
669 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
670 {
671 	u32 i, size;
672 	int status;
673 	__le32 *p = (__le32 *)&qdev->flash;
674 	u32 offset;
675 	u8 mac_addr[6];
676 
677 	/* Get flash offset for function and adjust
678 	 * for dword access.
679 	 */
680 	if (!qdev->port)
681 		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
682 	else
683 		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
684 
685 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
686 		return -ETIMEDOUT;
687 
688 	size = sizeof(struct flash_params_8000) / sizeof(u32);
689 	for (i = 0; i < size; i++, p++) {
690 		status = ql_read_flash_word(qdev, i + offset, p);
691 		if (status) {
692 			netif_err(qdev, ifup, qdev->ndev,
693 				  "Error reading flash.\n");
694 			goto exit;
695 		}
696 	}
697 
698 	status = ql_validate_flash(qdev,
699 				   sizeof(struct flash_params_8000) /
700 				   sizeof(u16),
701 				   "8000");
702 	if (status) {
703 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
704 		status = -EINVAL;
705 		goto exit;
706 	}
707 
708 	/* Extract either manufacturer or BOFM modified
709 	 * MAC address.
710 	 */
711 	if (qdev->flash.flash_params_8000.data_type1 == 2)
712 		memcpy(mac_addr,
713 		       qdev->flash.flash_params_8000.mac_addr1,
714 		       qdev->ndev->addr_len);
715 	else
716 		memcpy(mac_addr,
717 		       qdev->flash.flash_params_8000.mac_addr,
718 		       qdev->ndev->addr_len);
719 
720 	if (!is_valid_ether_addr(mac_addr)) {
721 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
722 		status = -EINVAL;
723 		goto exit;
724 	}
725 
726 	memcpy(qdev->ndev->dev_addr,
727 	       mac_addr,
728 	       qdev->ndev->addr_len);
729 
730 exit:
731 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
732 	return status;
733 }
734 
735 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
736 {
737 	int i;
738 	int status;
739 	__le32 *p = (__le32 *)&qdev->flash;
740 	u32 offset = 0;
741 	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
742 
743 	/* Second function's parameters follow the first
744 	 * function's.
745 	 */
746 	if (qdev->port)
747 		offset = size;
748 
749 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
750 		return -ETIMEDOUT;
751 
752 	for (i = 0; i < size; i++, p++) {
753 		status = ql_read_flash_word(qdev, i + offset, p);
754 		if (status) {
755 			netif_err(qdev, ifup, qdev->ndev,
756 				  "Error reading flash.\n");
757 			goto exit;
758 		}
759 
760 	}
761 
762 	status = ql_validate_flash(qdev,
763 				   sizeof(struct flash_params_8012) /
764 				   sizeof(u16),
765 				   "8012");
766 	if (status) {
767 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
768 		status = -EINVAL;
769 		goto exit;
770 	}
771 
772 	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
773 		status = -EINVAL;
774 		goto exit;
775 	}
776 
777 	memcpy(qdev->ndev->dev_addr,
778 	       qdev->flash.flash_params_8012.mac_addr,
779 	       qdev->ndev->addr_len);
780 
781 exit:
782 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
783 	return status;
784 }
785 
786 /* xgmac registers are located behind the xgmac_addr and xgmac_data
787  * register pair.  Each read/write requires us to wait for the ready
788  * bit before reading/writing the data.
789  */
790 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
791 {
792 	int status;
793 	/* wait for reg to come ready */
794 	status = ql_wait_reg_rdy(qdev,
795 				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
796 	if (status)
797 		return status;
798 	/* write the data to the data reg */
799 	ql_write32(qdev, XGMAC_DATA, data);
800 	/* trigger the write */
801 	ql_write32(qdev, XGMAC_ADDR, reg);
802 	return status;
803 }
804 
805 /* xgmac registers are located behind the xgmac_addr and xgmac_data
806  * register pair.  Each read/write requires us to wait for the ready
807  * bit before reading/writing the data.
808  */
809 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
810 {
811 	int status = 0;
812 	/* wait for reg to come ready */
813 	status = ql_wait_reg_rdy(qdev,
814 				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
815 	if (status)
816 		goto exit;
817 	/* set up for reg read */
818 	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
819 	/* wait for reg to come ready */
820 	status = ql_wait_reg_rdy(qdev,
821 				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
822 	if (status)
823 		goto exit;
824 	/* get the data */
825 	*data = ql_read32(qdev, XGMAC_DATA);
826 exit:
827 	return status;
828 }
829 
830 /* This is used for reading the 64-bit statistics regs. */
831 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
832 {
833 	int status = 0;
834 	u32 hi = 0;
835 	u32 lo = 0;
836 
837 	status = ql_read_xgmac_reg(qdev, reg, &lo);
838 	if (status)
839 		goto exit;
840 
841 	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
842 	if (status)
843 		goto exit;
844 
845 	*data = (u64)lo | ((u64)hi << 32);
846 
847 exit:
848 	return status;
849 }
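
/* The 64-bit statistics counters are exposed as two consecutive 32-bit
 * xgmac registers: the low word at 'reg' and the high word at 'reg + 4'.
 */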
850 
851 static int ql_8000_port_initialize(struct ql_adapter *qdev)
852 {
853 	int status;
854 	/*
855 	 * Get MPI firmware version for driver banner
856 	 * and ethtool info.
857 	 */
858 	status = ql_mb_about_fw(qdev);
859 	if (status)
860 		goto exit;
861 	status = ql_mb_get_fw_state(qdev);
862 	if (status)
863 		goto exit;
864 	/* Wake up a worker to get/set the TX/RX frame sizes. */
865 	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
866 exit:
867 	return status;
868 }
869 
870 /* Take the MAC Core out of reset.
871  * Enable statistics counting.
872  * Take the transmitter/receiver out of reset.
873  * This functionality may be done in the MPI firmware at a
874  * later date.
875  */
876 static int ql_8012_port_initialize(struct ql_adapter *qdev)
877 {
878 	int status = 0;
879 	u32 data;
880 
881 	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
882 		/* Another function has the semaphore, so
883 		 * wait for the port init bit to come ready.
884 		 */
885 		netif_info(qdev, link, qdev->ndev,
886 			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
887 		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
888 		if (status) {
889 			netif_crit(qdev, link, qdev->ndev,
890 				   "Port initialize timed out.\n");
891 		}
892 		return status;
893 	}
894 
895 	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
896 	/* Set the core reset. */
897 	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
898 	if (status)
899 		goto end;
900 	data |= GLOBAL_CFG_RESET;
901 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
902 	if (status)
903 		goto end;
904 
905 	/* Clear the core reset and turn on jumbo for receiver. */
906 	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
907 	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
908 	data |= GLOBAL_CFG_TX_STAT_EN;
909 	data |= GLOBAL_CFG_RX_STAT_EN;
910 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
911 	if (status)
912 		goto end;
913 
914 	/* Enable the transmitter and clear its reset. */
915 	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
916 	if (status)
917 		goto end;
918 	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
919 	data |= TX_CFG_EN;	/* Enable the transmitter. */
920 	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
921 	if (status)
922 		goto end;
923 
924 	/* Enable the receiver and clear its reset. */
925 	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
926 	if (status)
927 		goto end;
928 	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
929 	data |= RX_CFG_EN;	/* Enable the receiver. */
930 	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
931 	if (status)
932 		goto end;
933 
934 	/* Turn on jumbo. */
935 	status =
936 	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
937 	if (status)
938 		goto end;
939 	status =
940 	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
941 	if (status)
942 		goto end;
943 
944 	/* Signal to the world that the port is enabled.        */
945 	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
946 end:
947 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
948 	return status;
949 }
950 
951 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
952 {
953 	return PAGE_SIZE << qdev->lbq_buf_order;
954 }
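
/* For example, with 4 KiB pages and lbq_buf_order == 1 this yields an 8 KiB
 * master chunk, which qlge_refill_lb() later hands out in lbq_buf_size
 * pieces (the order value itself is configured elsewhere in the driver).
 */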
955 
956 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
957 {
958 	struct qlge_bq_desc *bq_desc;
959 
960 	bq_desc = &bq->queue[bq->next_to_clean];
961 	bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
962 
963 	return bq_desc;
964 }
965 
966 static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
967 					       struct rx_ring *rx_ring)
968 {
969 	struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
970 
971 	dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
972 				qdev->lbq_buf_size, DMA_FROM_DEVICE);
973 
974 	if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
975 	    ql_lbq_block_size(qdev)) {
976 		/* last chunk of the master page */
977 		dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
978 			       ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
979 	}
980 
981 	return lbq_desc;
982 }
983 
984 /* Update an rx ring index. */
985 static void ql_update_cq(struct rx_ring *rx_ring)
986 {
987 	rx_ring->cnsmr_idx++;
988 	rx_ring->curr_entry++;
989 	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
990 		rx_ring->cnsmr_idx = 0;
991 		rx_ring->curr_entry = rx_ring->cq_base;
992 	}
993 }
994 
995 static void ql_write_cq_idx(struct rx_ring *rx_ring)
996 {
997 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
998 }
999 
1000 static const char * const bq_type_name[] = {
1001 	[QLGE_SB] = "sbq",
1002 	[QLGE_LB] = "lbq",
1003 };
1004 
1005 /* return 0 or negative error */
1006 static int qlge_refill_sb(struct rx_ring *rx_ring,
1007 			  struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1008 {
1009 	struct ql_adapter *qdev = rx_ring->qdev;
1010 	struct sk_buff *skb;
1011 
1012 	if (sbq_desc->p.skb)
1013 		return 0;
1014 
1015 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1016 		     "ring %u sbq: getting new skb for index %d.\n",
1017 		     rx_ring->cq_id, sbq_desc->index);
1018 
1019 	skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1020 	if (!skb)
1021 		return -ENOMEM;
1022 	skb_reserve(skb, QLGE_SB_PAD);
1023 
1024 	sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
1025 					    SMALL_BUF_MAP_SIZE,
1026 					    DMA_FROM_DEVICE);
1027 	if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
1028 		netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1029 		dev_kfree_skb_any(skb);
1030 		return -EIO;
1031 	}
1032 	*sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1033 
1034 	sbq_desc->p.skb = skb;
1035 	return 0;
1036 }
1037 
1038 /* return 0 or negative error */
1039 static int qlge_refill_lb(struct rx_ring *rx_ring,
1040 			  struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1041 {
1042 	struct ql_adapter *qdev = rx_ring->qdev;
1043 	struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1044 
1045 	if (!master_chunk->page) {
1046 		struct page *page;
1047 		dma_addr_t dma_addr;
1048 
1049 		page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1050 		if (unlikely(!page))
1051 			return -ENOMEM;
1052 		dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
1053 					ql_lbq_block_size(qdev),
1054 					DMA_FROM_DEVICE);
1055 		if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
1056 			__free_pages(page, qdev->lbq_buf_order);
1057 			netif_err(qdev, drv, qdev->ndev,
1058 				  "PCI mapping failed.\n");
1059 			return -EIO;
1060 		}
1061 		master_chunk->page = page;
1062 		master_chunk->va = page_address(page);
1063 		master_chunk->offset = 0;
1064 		rx_ring->chunk_dma_addr = dma_addr;
1065 	}
1066 
1067 	lbq_desc->p.pg_chunk = *master_chunk;
1068 	lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1069 	*lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1070 					 lbq_desc->p.pg_chunk.offset);
1071 
1072 	/* Adjust the master page chunk for next
1073 	 * buffer get.
1074 	 */
1075 	master_chunk->offset += qdev->lbq_buf_size;
1076 	if (master_chunk->offset == ql_lbq_block_size(qdev)) {
1077 		master_chunk->page = NULL;
1078 	} else {
1079 		master_chunk->va += qdev->lbq_buf_size;
1080 		get_page(master_chunk->page);
1081 	}
1082 
1083 	return 0;
1084 }
1085 
1086 /* return 0 or negative error */
1087 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1088 {
1089 	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1090 	struct ql_adapter *qdev = rx_ring->qdev;
1091 	struct qlge_bq_desc *bq_desc;
1092 	int refill_count;
1093 	int retval;
1094 	int i;
1095 
1096 	refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1097 				    bq->next_to_use);
1098 	if (!refill_count)
1099 		return 0;
1100 
1101 	i = bq->next_to_use;
1102 	bq_desc = &bq->queue[i];
1103 	i -= QLGE_BQ_LEN;
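	/* 'i' is biased down by QLGE_BQ_LEN so that reaching zero in the loop
	 * below means the ring wrapped; it is biased back up after the loop.
	 */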
1104 	do {
1105 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1106 			     "ring %u %s: try cleaning idx %d\n",
1107 			     rx_ring->cq_id, bq_type_name[bq->type], i);
1108 
1109 		if (bq->type == QLGE_SB)
1110 			retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1111 		else
1112 			retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1113 		if (retval < 0) {
1114 			netif_err(qdev, ifup, qdev->ndev,
1115 				  "ring %u %s: Could not get a page chunk, idx %d\n",
1116 				  rx_ring->cq_id, bq_type_name[bq->type], i);
1117 			break;
1118 		}
1119 
1120 		bq_desc++;
1121 		i++;
1122 		if (unlikely(!i)) {
1123 			bq_desc = &bq->queue[0];
1124 			i -= QLGE_BQ_LEN;
1125 		}
1126 		refill_count--;
1127 	} while (refill_count);
1128 	i += QLGE_BQ_LEN;
1129 
1130 	if (bq->next_to_use != i) {
1131 		if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1132 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1133 				     "ring %u %s: updating prod idx = %d.\n",
1134 				     rx_ring->cq_id, bq_type_name[bq->type],
1135 				     i);
1136 			ql_write_db_reg(i, bq->prod_idx_db_reg);
1137 		}
1138 		bq->next_to_use = i;
1139 	}
1140 
1141 	return retval;
1142 }
1143 
1144 static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1145 				    unsigned long delay)
1146 {
1147 	bool sbq_fail, lbq_fail;
1148 
1149 	sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1150 	lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1151 
1152 	/* Minimum number of buffers needed to be able to receive at least one
1153 	 * frame of any format:
1154 	 * sbq: 1 for header + 1 for data
1155 	 * lbq: mtu 9000 / lb size
1156 	 * Below this, the queue might stall.
1157 	 */
1158 	if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1159 	    (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1160 	     DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1161 		/* Allocations can take a long time in certain cases (ex.
1162 		 * reclaim). Therefore, use a workqueue for long-running
1163 		 * work items.
1164 		 */
1165 		queue_delayed_work_on(smp_processor_id(), system_long_wq,
1166 				      &rx_ring->refill_work, delay);
1167 }
1168 
1169 static void qlge_slow_refill(struct work_struct *work)
1170 {
1171 	struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1172 					       refill_work.work);
1173 	struct napi_struct *napi = &rx_ring->napi;
1174 
1175 	napi_disable(napi);
1176 	ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1177 	napi_enable(napi);
1178 
1179 	local_bh_disable();
1180 	/* napi_disable() might have prevented incomplete napi work from being
1181 	 * rescheduled.
1182 	 */
1183 	napi_schedule(napi);
1184 	/* trigger softirq processing */
1185 	local_bh_enable();
1186 }
1187 
1188 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1189  * fails at some stage, or from the interrupt when a tx completes.
1190  */
1191 static void ql_unmap_send(struct ql_adapter *qdev,
1192 			  struct tx_ring_desc *tx_ring_desc, int mapped)
1193 {
1194 	int i;
1195 
1196 	for (i = 0; i < mapped; i++) {
1197 		if (i == 0 || (i == 7 && mapped > 7)) {
1198 			/*
1199 			 * Unmap the skb->data area, or the
1200 			 * external sglist (AKA the Outbound
1201 			 * Address List (OAL)).
1202 			 * If it's the zeroth element, then it's
1203 			 * the skb->data area.  If it's the 7th
1204 			 * element and there are more than 6 frags,
1205 			 * then it's an OAL.
1206 			 */
1207 			if (i == 7) {
1208 				netif_printk(qdev, tx_done, KERN_DEBUG,
1209 					     qdev->ndev,
1210 					     "unmapping OAL area.\n");
1211 			}
1212 			dma_unmap_single(&qdev->pdev->dev,
1213 					 dma_unmap_addr(&tx_ring_desc->map[i],
1214 							mapaddr),
1215 					 dma_unmap_len(&tx_ring_desc->map[i],
1216 						       maplen),
1217 					 DMA_TO_DEVICE);
1218 		} else {
1219 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1220 				     "unmapping frag %d.\n", i);
1221 			dma_unmap_page(&qdev->pdev->dev,
1222 				       dma_unmap_addr(&tx_ring_desc->map[i],
1223 						      mapaddr),
1224 				       dma_unmap_len(&tx_ring_desc->map[i],
1225 						     maplen), DMA_TO_DEVICE);
1226 		}
1227 	}
1228 
1229 }
1230 
1231 /* Map the buffers for this transmit.  This will return
1232  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1233  */
1234 static int ql_map_send(struct ql_adapter *qdev,
1235 		       struct ob_mac_iocb_req *mac_iocb_ptr,
1236 		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1237 {
1238 	int len = skb_headlen(skb);
1239 	dma_addr_t map;
1240 	int frag_idx, err, map_idx = 0;
1241 	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1242 	int frag_cnt = skb_shinfo(skb)->nr_frags;
1243 
1244 	if (frag_cnt) {
1245 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1246 			     "frag_cnt = %d.\n", frag_cnt);
1247 	}
1248 	/*
1249 	 * Map the skb buffer first.
1250 	 */
1251 	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
1252 
1253 	err = dma_mapping_error(&qdev->pdev->dev, map);
1254 	if (err) {
1255 		netif_err(qdev, tx_queued, qdev->ndev,
1256 			  "PCI mapping failed with error: %d\n", err);
1257 
1258 		return NETDEV_TX_BUSY;
1259 	}
1260 
1261 	tbd->len = cpu_to_le32(len);
1262 	tbd->addr = cpu_to_le64(map);
1263 	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1264 	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1265 	map_idx++;
1266 
1267 	/*
1268 	 * This loop fills the remainder of the 8 address descriptors
1269 	 * in the IOCB.  If there are more than 7 fragments, then the
1270 	 * eighth address desc will point to an external list (OAL).
1271 	 * When this happens, the remainder of the frags will be stored
1272 	 * in this list.
1273 	 */
1274 	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1275 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1276 
1277 		tbd++;
1278 		if (frag_idx == 6 && frag_cnt > 7) {
1279 			/* Let's tack on an sglist.
1280 			 * Our control block will now
1281 			 * look like this:
1282 			 * iocb->seg[0] = skb->data
1283 			 * iocb->seg[1] = frag[0]
1284 			 * iocb->seg[2] = frag[1]
1285 			 * iocb->seg[3] = frag[2]
1286 			 * iocb->seg[4] = frag[3]
1287 			 * iocb->seg[5] = frag[4]
1288 			 * iocb->seg[6] = frag[5]
1289 			 * iocb->seg[7] = ptr to OAL (external sglist)
1290 			 * oal->seg[0] = frag[6]
1291 			 * oal->seg[1] = frag[7]
1292 			 * oal->seg[2] = frag[8]
1293 			 * oal->seg[3] = frag[9]
1294 			 * oal->seg[4] = frag[10]
1295 			 *      etc...
1296 			 */
1297 			/* Tack on the OAL in the eighth segment of IOCB. */
1298 			map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
1299 					     sizeof(struct oal),
1300 					     DMA_TO_DEVICE);
1301 			err = dma_mapping_error(&qdev->pdev->dev, map);
1302 			if (err) {
1303 				netif_err(qdev, tx_queued, qdev->ndev,
1304 					  "PCI mapping outbound address list with error: %d\n",
1305 					  err);
1306 				goto map_error;
1307 			}
1308 
1309 			tbd->addr = cpu_to_le64(map);
1310 			/*
1311 			 * The length is the number of fragments
1312 			 * that remain to be mapped times the length
1313 			 * of our sglist (OAL).
1314 			 */
1315 			tbd->len =
1316 			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1317 					 (frag_cnt - frag_idx)) | TX_DESC_C);
1318 			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1319 					   map);
1320 			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1321 					  sizeof(struct oal));
1322 			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1323 			map_idx++;
1324 		}
1325 
1326 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1327 				       DMA_TO_DEVICE);
1328 
1329 		err = dma_mapping_error(&qdev->pdev->dev, map);
1330 		if (err) {
1331 			netif_err(qdev, tx_queued, qdev->ndev,
1332 				  "PCI mapping frags failed with error: %d.\n",
1333 				  err);
1334 			goto map_error;
1335 		}
1336 
1337 		tbd->addr = cpu_to_le64(map);
1338 		tbd->len = cpu_to_le32(skb_frag_size(frag));
1339 		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1341 				  skb_frag_size(frag));
1342 
1343 	}
1344 	/* Save the number of segments we've mapped. */
1345 	tx_ring_desc->map_cnt = map_idx;
1346 	/* Terminate the last segment. */
1347 	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1348 	return NETDEV_TX_OK;
1349 
1350 map_error:
1351 	/*
1352 	 * If the first frag mapping failed, then i will be zero.
1353 	 * This causes the unmap of the skb->data area.  Otherwise
1354 	 * we pass in the number of frags that mapped successfully
1355 	 * so they can be unmapped.
1356 	 */
1357 	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1358 	return NETDEV_TX_BUSY;
1359 }
1360 
1361 /* Categorizing receive firmware frame errors */
1362 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1363 				 struct rx_ring *rx_ring)
1364 {
1365 	struct nic_stats *stats = &qdev->nic_stats;
1366 
1367 	stats->rx_err_count++;
1368 	rx_ring->rx_errors++;
1369 
1370 	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1371 	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1372 		stats->rx_code_err++;
1373 		break;
1374 	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1375 		stats->rx_oversize_err++;
1376 		break;
1377 	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1378 		stats->rx_undersize_err++;
1379 		break;
1380 	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1381 		stats->rx_preamble_err++;
1382 		break;
1383 	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1384 		stats->rx_frame_len_err++;
1385 		break;
1386 	case IB_MAC_IOCB_RSP_ERR_CRC:
1387 		stats->rx_crc_err++;
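		/* fall through - no handling beyond the counter */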
1388 	default:
1389 		break;
1390 	}
1391 }
1392 
1393 /**
1394  * ql_update_mac_hdr_len - helper routine to update the mac header length
1395  * based on vlan tags if present
1396  */
1397 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1398 				  struct ib_mac_iocb_rsp *ib_mac_rsp,
1399 				  void *page, size_t *len)
1400 {
1401 	u16 *tags;
1402 
1403 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1404 		return;
1405 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1406 		tags = (u16 *)page;
1407 		/* Look for stacked vlan tags in ethertype field */
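		/* tags[6] and tags[8] are the 16-bit words at byte offsets 12
		 * and 16, i.e. the outer and inner TPID fields of a stacked
		 * (QinQ) VLAN header.
		 */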
1408 		if (tags[6] == ETH_P_8021Q &&
1409 		    tags[8] == ETH_P_8021Q)
1410 			*len += 2 * VLAN_HLEN;
1411 		else
1412 			*len += VLAN_HLEN;
1413 	}
1414 }
1415 
1416 /* Process an inbound completion from an rx ring. */
1417 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1418 				       struct rx_ring *rx_ring,
1419 				       struct ib_mac_iocb_rsp *ib_mac_rsp,
1420 				       u32 length, u16 vlan_id)
1421 {
1422 	struct sk_buff *skb;
1423 	struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1424 	struct napi_struct *napi = &rx_ring->napi;
1425 
1426 	/* Frame error, so drop the packet. */
1427 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1428 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1429 		put_page(lbq_desc->p.pg_chunk.page);
1430 		return;
1431 	}
1432 	napi->dev = qdev->ndev;
1433 
1434 	skb = napi_get_frags(napi);
1435 	if (!skb) {
1436 		netif_err(qdev, drv, qdev->ndev,
1437 			  "Couldn't get an skb, exiting.\n");
1438 		rx_ring->rx_dropped++;
1439 		put_page(lbq_desc->p.pg_chunk.page);
1440 		return;
1441 	}
1442 	prefetch(lbq_desc->p.pg_chunk.va);
1443 	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1444 			     lbq_desc->p.pg_chunk.page,
1445 			     lbq_desc->p.pg_chunk.offset,
1446 			     length);
1447 
1448 	skb->len += length;
1449 	skb->data_len += length;
1450 	skb->truesize += length;
1451 	skb_shinfo(skb)->nr_frags++;
1452 
1453 	rx_ring->rx_packets++;
1454 	rx_ring->rx_bytes += length;
1455 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1456 	skb_record_rx_queue(skb, rx_ring->cq_id);
1457 	if (vlan_id != 0xffff)
1458 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1459 	napi_gro_frags(napi);
1460 }
1461 
1462 /* Process an inbound completion from an rx ring. */
1463 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1464 				   struct rx_ring *rx_ring,
1465 				   struct ib_mac_iocb_rsp *ib_mac_rsp,
1466 				   u32 length, u16 vlan_id)
1467 {
1468 	struct net_device *ndev = qdev->ndev;
1469 	struct sk_buff *skb = NULL;
1470 	void *addr;
1471 	struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1472 	struct napi_struct *napi = &rx_ring->napi;
1473 	size_t hlen = ETH_HLEN;
1474 
1475 	skb = netdev_alloc_skb(ndev, length);
1476 	if (!skb) {
1477 		rx_ring->rx_dropped++;
1478 		put_page(lbq_desc->p.pg_chunk.page);
1479 		return;
1480 	}
1481 
1482 	addr = lbq_desc->p.pg_chunk.va;
1483 	prefetch(addr);
1484 
1485 	/* Frame error, so drop the packet. */
1486 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1487 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1488 		goto err_out;
1489 	}
1490 
1491 	/* Update the MAC header length*/
1492 	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1493 
1494 	/* The max framesize filter on this chip is set higher than
1495 	 * MTU since FCoE uses 2k frames.
1496 	 */
1497 	if (skb->len > ndev->mtu + hlen) {
1498 		netif_err(qdev, drv, qdev->ndev,
1499 			  "Segment too small, dropping.\n");
1500 		rx_ring->rx_dropped++;
1501 		goto err_out;
1502 	}
1503 	skb_put_data(skb, addr, hlen);
1504 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1505 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1506 		     length);
1507 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1508 			   lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1509 	skb->len += length - hlen;
1510 	skb->data_len += length - hlen;
1511 	skb->truesize += length - hlen;
1512 
1513 	rx_ring->rx_packets++;
1514 	rx_ring->rx_bytes += skb->len;
1515 	skb->protocol = eth_type_trans(skb, ndev);
1516 	skb_checksum_none_assert(skb);
1517 
1518 	if ((ndev->features & NETIF_F_RXCSUM) &&
1519 	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1520 		/* TCP frame. */
1521 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1522 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1523 				     "TCP checksum done!\n");
1524 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1525 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1526 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1527 			/* Unfragmented ipv4 UDP frame. */
1528 			struct iphdr *iph =
1529 				(struct iphdr *)((u8 *)addr + hlen);
1530 			if (!(iph->frag_off &
1531 				htons(IP_MF | IP_OFFSET))) {
1532 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1533 				netif_printk(qdev, rx_status, KERN_DEBUG,
1534 					     qdev->ndev,
1535 					     "UDP checksum done!\n");
1536 			}
1537 		}
1538 	}
1539 
1540 	skb_record_rx_queue(skb, rx_ring->cq_id);
1541 	if (vlan_id != 0xffff)
1542 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1543 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1544 		napi_gro_receive(napi, skb);
1545 	else
1546 		netif_receive_skb(skb);
1547 	return;
1548 err_out:
1549 	dev_kfree_skb_any(skb);
1550 	put_page(lbq_desc->p.pg_chunk.page);
1551 }
1552 
1553 /* Process an inbound completion from an rx ring. */
1554 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1555 				  struct rx_ring *rx_ring,
1556 				  struct ib_mac_iocb_rsp *ib_mac_rsp,
1557 				  u32 length, u16 vlan_id)
1558 {
1559 	struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1560 	struct net_device *ndev = qdev->ndev;
1561 	struct sk_buff *skb, *new_skb;
1562 
1563 	skb = sbq_desc->p.skb;
1564 	/* Allocate new_skb and copy */
1565 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1566 	if (!new_skb) {
1567 		rx_ring->rx_dropped++;
1568 		return;
1569 	}
1570 	skb_reserve(new_skb, NET_IP_ALIGN);
1571 
1572 	dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
1573 				SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1574 
1575 	skb_put_data(new_skb, skb->data, length);
1576 
1577 	skb = new_skb;
1578 
1579 	/* Frame error, so drop the packet. */
1580 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1581 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1582 		dev_kfree_skb_any(skb);
1583 		return;
1584 	}
1585 
1586 	/* loopback self test for ethtool */
1587 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1588 		ql_check_lb_frame(qdev, skb);
1589 		dev_kfree_skb_any(skb);
1590 		return;
1591 	}
1592 
1593 	/* The max framesize filter on this chip is set higher than
1594 	 * MTU since FCoE uses 2k frames.
1595 	 */
1596 	if (skb->len > ndev->mtu + ETH_HLEN) {
1597 		dev_kfree_skb_any(skb);
1598 		rx_ring->rx_dropped++;
1599 		return;
1600 	}
1601 
1602 	prefetch(skb->data);
1603 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1604 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1605 			     "%s Multicast.\n",
1606 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1607 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1608 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1609 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1610 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1611 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1612 	}
1613 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1614 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1615 			     "Promiscuous Packet.\n");
1616 
1617 	rx_ring->rx_packets++;
1618 	rx_ring->rx_bytes += skb->len;
1619 	skb->protocol = eth_type_trans(skb, ndev);
1620 	skb_checksum_none_assert(skb);
1621 
1622 	/* If rx checksum is on, and there are no
1623 	 * csum or frame errors.
1624 	 */
1625 	if ((ndev->features & NETIF_F_RXCSUM) &&
1626 	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1627 		/* TCP frame. */
1628 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1629 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1630 				     "TCP checksum done!\n");
1631 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1632 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1633 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1634 			/* Unfragmented ipv4 UDP frame. */
1635 			struct iphdr *iph = (struct iphdr *)skb->data;
1636 
1637 			if (!(iph->frag_off &
1638 				htons(IP_MF | IP_OFFSET))) {
1639 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1640 				netif_printk(qdev, rx_status, KERN_DEBUG,
1641 					     qdev->ndev,
1642 					     "UDP checksum done!\n");
1643 			}
1644 		}
1645 	}
1646 
1647 	skb_record_rx_queue(skb, rx_ring->cq_id);
1648 	if (vlan_id != 0xffff)
1649 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1650 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1651 		napi_gro_receive(&rx_ring->napi, skb);
1652 	else
1653 		netif_receive_skb(skb);
1654 }
1655 
1656 static void ql_realign_skb(struct sk_buff *skb, int len)
1657 {
1658 	void *temp_addr = skb->data;
1659 
1660 	/* Undo the skb_reserve(skb,32) we did before
1661 	 * giving to hardware, and realign data on
1662 	 * a 2-byte boundary.
1663 	 */
1664 	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1665 	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1666 	memmove(skb->data, temp_addr, len);
1667 }
1668 
1669 /*
1670  * This function builds an skb for the given inbound
1671  * completion.  It will be rewritten for readability in the near
1672  * future, but for now it works well.
1673  */
1674 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1675 				       struct rx_ring *rx_ring,
1676 				       struct ib_mac_iocb_rsp *ib_mac_rsp)
1677 {
1678 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1679 	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1680 	struct qlge_bq_desc *lbq_desc, *sbq_desc;
1681 	struct sk_buff *skb = NULL;
1682 	size_t hlen = ETH_HLEN;
1683 
1684 	/*
1685 	 * Handle the header buffer if present.
1686 	 */
1687 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1688 	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1689 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690 			     "Header of %d bytes in small buffer.\n", hdr_len);
1691 		/*
1692 		 * Headers fit nicely into a small buffer.
1693 		 */
1694 		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1695 		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1696 				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1697 		skb = sbq_desc->p.skb;
1698 		ql_realign_skb(skb, hdr_len);
1699 		skb_put(skb, hdr_len);
1700 		sbq_desc->p.skb = NULL;
1701 	}
1702 
1703 	/*
1704 	 * Handle the data buffer(s).
1705 	 */
1706 	if (unlikely(!length)) {	/* Is there data too? */
1707 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1708 			     "No Data buffer in this packet.\n");
1709 		return skb;
1710 	}
1711 
1712 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1713 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1714 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1715 				     "Headers in small, data of %d bytes in small, combine them.\n",
1716 				     length);
1717 			/*
1718 			 * Data is less than small buffer size so it's
1719 			 * stuffed in a small buffer.
1720 			 * For this case we append the data
1721 			 * from the "data" small buffer to the "header" small
1722 			 * buffer.
1723 			 */
1724 			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1725 			dma_sync_single_for_cpu(&qdev->pdev->dev,
1726 						sbq_desc->dma_addr,
1727 						SMALL_BUF_MAP_SIZE,
1728 						DMA_FROM_DEVICE);
1729 			skb_put_data(skb, sbq_desc->p.skb->data, length);
1730 		} else {
1731 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1732 				     "%d bytes in a single small buffer.\n",
1733 				     length);
1734 			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1735 			skb = sbq_desc->p.skb;
1736 			ql_realign_skb(skb, length);
1737 			skb_put(skb, length);
1738 			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1739 					 SMALL_BUF_MAP_SIZE,
1740 					 DMA_FROM_DEVICE);
1741 			sbq_desc->p.skb = NULL;
1742 		}
1743 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1744 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1745 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1746 				     "Header in small, %d bytes in large. Chain large to small!\n",
1747 				     length);
1748 			/*
1749 			 * The data is in a single large buffer.  We
1750 			 * chain it to the header buffer's skb and let
1751 			 * it rip.
1752 			 */
1753 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1754 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1755 				     "Chaining page at offset = %d, for %d bytes to skb.\n",
1756 				     lbq_desc->p.pg_chunk.offset, length);
1757 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1758 					   lbq_desc->p.pg_chunk.offset, length);
1759 			skb->len += length;
1760 			skb->data_len += length;
1761 			skb->truesize += length;
1762 		} else {
1763 			/*
1764 			 * The headers and data are in a single large buffer. We
1765 			 * copy it to a new skb and let it go. This can happen with
1766 			 * jumbo mtu on a non-TCP/UDP frame.
1767 			 */
1768 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1769 			skb = netdev_alloc_skb(qdev->ndev, length);
1770 			if (!skb) {
1771 				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1772 					     "No skb available, drop the packet.\n");
1773 				return NULL;
1774 			}
1775 			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
1776 				       qdev->lbq_buf_size,
1777 				       DMA_FROM_DEVICE);
1778 			skb_reserve(skb, NET_IP_ALIGN);
1779 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1780 				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1781 				     length);
1782 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1783 					   lbq_desc->p.pg_chunk.offset,
1784 					   length);
1785 			skb->len += length;
1786 			skb->data_len += length;
1787 			skb->truesize += length;
1788 			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1789 					      lbq_desc->p.pg_chunk.va,
1790 					      &hlen);
1791 			__pskb_pull_tail(skb, hlen);
1792 		}
1793 	} else {
1794 		/*
1795 		 * The data is in a chain of large buffers
1796 		 * pointed to by a small buffer.  We loop
1797 		 * through and chain them to our small header
1798 		 * buffer's skb.
1799 		 * frags:  There are 18 max frags and our small
1800 		 *         buffer will hold 32 of them. In practice
1801 		 *         we use at most 3 for our 9000 byte jumbo
1802 		 *         frames, but if the MTU goes up we could
1803 		 *         eventually be in trouble.
1804 		 */
1805 		int size, i = 0;
1806 
1807 		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1808 		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1809 				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1810 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1811 			/*
1812 			 * This is a non-TCP/UDP IP frame, so
1813 			 * the headers aren't split into a small
1814 			 * buffer.  We have to use the small buffer
1815 			 * that contains our sg list as our skb to
1816 			 * send upstairs. Copy the sg list here to
1817 			 * a local buffer and use it to find the
1818 			 * pages to chain.
1819 			 */
1820 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1821 				     "%d bytes of headers & data in chain of large.\n",
1822 				     length);
1823 			skb = sbq_desc->p.skb;
1824 			sbq_desc->p.skb = NULL;
1825 			skb_reserve(skb, NET_IP_ALIGN);
1826 		}
1827 		do {
1828 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1829 			size = min(length, qdev->lbq_buf_size);
1830 
1831 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1832 				     "Adding page %d to skb for %d bytes.\n",
1833 				     i, size);
1834 			skb_fill_page_desc(skb, i,
1835 					   lbq_desc->p.pg_chunk.page,
1836 					   lbq_desc->p.pg_chunk.offset, size);
1837 			skb->len += size;
1838 			skb->data_len += size;
1839 			skb->truesize += size;
1840 			length -= size;
1841 			i++;
1842 		} while (length > 0);
1843 		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1844 				      &hlen);
1845 		__pskb_pull_tail(skb, hlen);
1846 	}
1847 	return skb;
1848 }
1849 
1850 /* Process an inbound completion from an rx ring. */
1851 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1852 					 struct rx_ring *rx_ring,
1853 					 struct ib_mac_iocb_rsp *ib_mac_rsp,
1854 					 u16 vlan_id)
1855 {
1856 	struct net_device *ndev = qdev->ndev;
1857 	struct sk_buff *skb = NULL;
1858 
1859 	QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
1860 
1861 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1862 	if (unlikely(!skb)) {
1863 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1864 			     "No skb available, drop packet.\n");
1865 		rx_ring->rx_dropped++;
1866 		return;
1867 	}
1868 
1869 	/* Frame error, so drop the packet. */
1870 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1871 		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1872 		dev_kfree_skb_any(skb);
1873 		return;
1874 	}
1875 
1876 	/* The max framesize filter on this chip is set higher than
1877 	 * MTU since FCoE uses 2k frames.
1878 	 */
1879 	if (skb->len > ndev->mtu + ETH_HLEN) {
1880 		dev_kfree_skb_any(skb);
1881 		rx_ring->rx_dropped++;
1882 		return;
1883 	}
1884 
1885 	/* loopback self test for ethtool */
1886 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1887 		ql_check_lb_frame(qdev, skb);
1888 		dev_kfree_skb_any(skb);
1889 		return;
1890 	}
1891 
1892 	prefetch(skb->data);
1893 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1894 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1895 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1896 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1897 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1898 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1899 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1900 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1901 		rx_ring->rx_multicast++;
1902 	}
1903 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1904 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1905 			     "Promiscuous Packet.\n");
1906 	}
1907 
1908 	skb->protocol = eth_type_trans(skb, ndev);
1909 	skb_checksum_none_assert(skb);
1910 
1911 	/* If rx checksum is on, and there are no
1912 	 * csum or frame errors.
1913 	 */
1914 	if ((ndev->features & NETIF_F_RXCSUM) &&
1915 	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1916 		/* TCP frame. */
1917 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1918 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919 				     "TCP checksum done!\n");
1920 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1921 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1922 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1923 			/* Unfragmented ipv4 UDP frame. */
1924 			struct iphdr *iph = (struct iphdr *)skb->data;
1925 
1926 			if (!(iph->frag_off &
1927 				htons(IP_MF | IP_OFFSET))) {
1928 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1929 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1930 					     "UDP checksum done!\n");
1931 			}
1932 		}
1933 	}
1934 
1935 	rx_ring->rx_packets++;
1936 	rx_ring->rx_bytes += skb->len;
1937 	skb_record_rx_queue(skb, rx_ring->cq_id);
1938 	if (vlan_id != 0xffff)
1939 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1940 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1941 		napi_gro_receive(&rx_ring->napi, skb);
1942 	else
1943 		netif_receive_skb(skb);
1944 }
1945 
1946 /* Process an inbound completion from an rx ring. */
1947 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
1948 					    struct rx_ring *rx_ring,
1949 					    struct ib_mac_iocb_rsp *ib_mac_rsp)
1950 {
1951 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1952 	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1953 			(qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1954 			((le16_to_cpu(ib_mac_rsp->vlan_id) &
1955 			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1956 
1957 	QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp);
1958 
1959 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1960 		/* The data and headers are split into
1961 		 * separate buffers.
1962 		 */
1963 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1964 					     vlan_id);
1965 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1966 		/* The data fit in a single small buffer.
1967 		 * Allocate a new skb, copy the data and
1968 		 * return the buffer to the free pool.
1969 		 */
1970 		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1971 				      vlan_id);
1972 	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1973 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1974 		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
1975 		/* TCP packet in a page chunk that's been checksummed.
1976 		 * Tack it on to our GRO skb and let it go.
1977 		 */
1978 		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1979 					   vlan_id);
1980 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1981 		/* Non-TCP packet in a page chunk. Allocate an
1982 		 * skb, tack it on frags, and send it up.
1983 		 */
1984 		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
1985 				       vlan_id);
1986 	} else {
1987 		/* Non-TCP/UDP large frames that span multiple buffers
1988 		 * can be processed correctly by the split frame logic.
1989 		 */
1990 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1991 					     vlan_id);
1992 	}
1993 
1994 	return (unsigned long)length;
1995 }
1996 
1997 /* Process an outbound completion from an rx ring. */
1998 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1999 				   struct ob_mac_iocb_rsp *mac_rsp)
2000 {
2001 	struct tx_ring *tx_ring;
2002 	struct tx_ring_desc *tx_ring_desc;
2003 
2004 	QL_DUMP_OB_MAC_RSP(qdev, mac_rsp);
2005 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2006 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2007 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2008 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2009 	tx_ring->tx_packets++;
2010 	dev_kfree_skb(tx_ring_desc->skb);
2011 	tx_ring_desc->skb = NULL;
2012 
2013 	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2014 					OB_MAC_IOCB_RSP_S |
2015 					OB_MAC_IOCB_RSP_L |
2016 					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2017 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2018 			netif_warn(qdev, tx_done, qdev->ndev,
2019 				   "Total descriptor length did not match transfer length.\n");
2020 		}
2021 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2022 			netif_warn(qdev, tx_done, qdev->ndev,
2023 				   "Frame too short to be valid, not sent.\n");
2024 		}
2025 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2026 			netif_warn(qdev, tx_done, qdev->ndev,
2027 				   "Frame too long, but sent anyway.\n");
2028 		}
2029 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2030 			netif_warn(qdev, tx_done, qdev->ndev,
2031 				   "PCI backplane error. Frame not sent.\n");
2032 		}
2033 	}
2034 	atomic_inc(&tx_ring->tx_count);
2035 }
2036 
2037 /* Fire up a handler to reset the MPI processor. */
2038 void ql_queue_fw_error(struct ql_adapter *qdev)
2039 {
2040 	ql_link_off(qdev);
2041 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2042 }
2043 
2044 void ql_queue_asic_error(struct ql_adapter *qdev)
2045 {
2046 	ql_link_off(qdev);
2047 	ql_disable_interrupts(qdev);
2048 	/* Clear adapter up bit to signal the recovery
2049 	 * process that it shouldn't kill the reset worker
2050 	 * thread
2051 	 */
2052 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2053 	/* Set the asic recovery bit to tell the reset process that we
2054 	 * are in fatal error recovery rather than a normal close.
2055 	 */
2056 	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2057 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2058 }
2059 
2060 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2061 				    struct ib_ae_iocb_rsp *ib_ae_rsp)
2062 {
2063 	switch (ib_ae_rsp->event) {
2064 	case MGMT_ERR_EVENT:
2065 		netif_err(qdev, rx_err, qdev->ndev,
2066 			  "Management Processor Fatal Error.\n");
2067 		ql_queue_fw_error(qdev);
2068 		return;
2069 
2070 	case CAM_LOOKUP_ERR_EVENT:
2071 		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2072 		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2073 		ql_queue_asic_error(qdev);
2074 		return;
2075 
2076 	case SOFT_ECC_ERROR_EVENT:
2077 		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2078 		ql_queue_asic_error(qdev);
2079 		break;
2080 
2081 	case PCI_ERR_ANON_BUF_RD:
2082 		netdev_err(qdev->ndev,
2083 			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2084 			   ib_ae_rsp->q_id);
2085 		ql_queue_asic_error(qdev);
2086 		break;
2087 
2088 	default:
2089 		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2090 			  ib_ae_rsp->event);
2091 		ql_queue_asic_error(qdev);
2092 		break;
2093 	}
2094 }
2095 
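/* Service TX completions from an outbound completion queue.
 * Returns the number of entries processed.
 */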
2096 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2097 {
2098 	struct ql_adapter *qdev = rx_ring->qdev;
2099 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2100 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2101 	int count = 0;
2102 
2103 	struct tx_ring *tx_ring;
2104 	/* While there are entries in the completion queue. */
2105 	while (prod != rx_ring->cnsmr_idx) {
2106 
2107 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2108 			     "cq_id = %d, prod = %d, cnsmr = %d\n",
2109 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2110 
2111 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
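		/* Make sure the completion entry contents are read only after
		 * the updated producer index has been observed.
		 */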
2112 		rmb();
2113 		switch (net_rsp->opcode) {
2114 
2115 		case OPCODE_OB_MAC_TSO_IOCB:
2116 		case OPCODE_OB_MAC_IOCB:
2117 			ql_process_mac_tx_intr(qdev, net_rsp);
2118 			break;
2119 		default:
2120 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2121 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2122 				     net_rsp->opcode);
2123 		}
2124 		count++;
2125 		ql_update_cq(rx_ring);
2126 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2127 	}
2128 	if (!net_rsp)
2129 		return 0;
2130 	ql_write_cq_idx(rx_ring);
2131 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2132 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2133 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2134 			/*
2135 			 * The queue got stopped because the tx_ring was full.
2136 			 * Wake it up, because it's now at least 25% empty.
2137 			 */
2138 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2139 	}
2140 
2141 	return count;
2142 }
2143 
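/* Service up to 'budget' inbound completions from an rx ring.
 * Returns the number of entries processed.
 */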
2144 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2145 {
2146 	struct ql_adapter *qdev = rx_ring->qdev;
2147 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2148 	struct ql_net_rsp_iocb *net_rsp;
2149 	int count = 0;
2150 
2151 	/* While there are entries in the completion queue. */
2152 	while (prod != rx_ring->cnsmr_idx) {
2153 
2154 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2155 			     "cq_id = %d, prod = %d, cnsmr = %d\n",
2156 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2157 
2158 		net_rsp = rx_ring->curr_entry;
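		/* Make sure the completion entry contents are read only after
		 * the updated producer index has been observed.
		 */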
2159 		rmb();
2160 		switch (net_rsp->opcode) {
2161 		case OPCODE_IB_MAC_IOCB:
2162 			ql_process_mac_rx_intr(qdev, rx_ring,
2163 					       (struct ib_mac_iocb_rsp *)
2164 					       net_rsp);
2165 			break;
2166 
2167 		case OPCODE_IB_AE_IOCB:
2168 			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2169 						net_rsp);
2170 			break;
2171 		default:
2172 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2173 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2174 				     net_rsp->opcode);
2175 			break;
2176 		}
2177 		count++;
2178 		ql_update_cq(rx_ring);
2179 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2180 		if (count == budget)
2181 			break;
2182 	}
2183 	ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2184 	ql_write_cq_idx(rx_ring);
2185 	return count;
2186 }
2187 
2188 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2189 {
2190 	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2191 	struct ql_adapter *qdev = rx_ring->qdev;
2192 	struct rx_ring *trx_ring;
2193 	int i, work_done = 0;
2194 	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2195 
2196 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2197 		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2198 
2199 	/* Service the TX rings first.  They start
2200 	 * right after the RSS rings.
2201 	 */
2202 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2203 		trx_ring = &qdev->rx_ring[i];
2204 		/* If this TX completion ring belongs to this vector and
2205 		 * it's not empty then service it.
2206 		 */
2207 		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2208 		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2209 		     trx_ring->cnsmr_idx)) {
2210 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2211 				     "%s: Servicing TX completion ring %d.\n",
2212 				     __func__, trx_ring->cq_id);
2213 			ql_clean_outbound_rx_ring(trx_ring);
2214 		}
2215 	}
2216 
2217 	/*
2218 	 * Now service the RSS ring if it's active.
2219 	 */
2220 	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2221 					rx_ring->cnsmr_idx) {
2222 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2223 			     "%s: Servicing RX completion ring %d.\n",
2224 			     __func__, rx_ring->cq_id);
2225 		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2226 	}
2227 
2228 	if (work_done < budget) {
2229 		napi_complete_done(napi, work_done);
2230 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
2231 	}
2232 	return work_done;
2233 }
2234 
2235 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2236 {
2237 	struct ql_adapter *qdev = netdev_priv(ndev);
2238 
2239 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2240 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2241 				 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2242 	} else {
2243 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2244 	}
2245 }
2246 
2247 /**
2248  * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2249  * based on the features to enable/disable hardware vlan accel
2250  */
2251 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2252 					netdev_features_t features)
2253 {
2254 	struct ql_adapter *qdev = netdev_priv(ndev);
2255 	int status = 0;
2256 	bool need_restart = netif_running(ndev);
2257 
2258 	if (need_restart) {
2259 		status = ql_adapter_down(qdev);
2260 		if (status) {
2261 			netif_err(qdev, link, qdev->ndev,
2262 				  "Failed to bring down the adapter\n");
2263 			return status;
2264 		}
2265 	}
2266 
2267 	/* Update the features with the requested change. */
2268 	ndev->features = features;
2269 
2270 	if (need_restart) {
2271 		status = ql_adapter_up(qdev);
2272 		if (status) {
2273 			netif_err(qdev, link, qdev->ndev,
2274 				  "Failed to bring up the adapter\n");
2275 			return status;
2276 		}
2277 	}
2278 
2279 	return status;
2280 }
2281 
2282 static int qlge_set_features(struct net_device *ndev,
2283 			     netdev_features_t features)
2284 {
2285 	netdev_features_t changed = ndev->features ^ features;
2286 	int err;
2287 
2288 	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2289 		/* Update the behavior of vlan accel in the adapter */
2290 		err = qlge_update_hw_vlan_features(ndev, features);
2291 		if (err)
2292 			return err;
2293 
2294 		qlge_vlan_mode(ndev, features);
2295 	}
2296 
2297 	return 0;
2298 }
2299 
2300 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2301 {
2302 	u32 enable_bit = MAC_ADDR_E;
2303 	int err;
2304 
2305 	err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2306 				  MAC_ADDR_TYPE_VLAN, vid);
2307 	if (err)
2308 		netif_err(qdev, ifup, qdev->ndev,
2309 			  "Failed to init vlan address.\n");
2310 	return err;
2311 }
2312 
2313 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2314 {
2315 	struct ql_adapter *qdev = netdev_priv(ndev);
2316 	int status;
2317 	int err;
2318 
2319 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2320 	if (status)
2321 		return status;
2322 
2323 	err = __qlge_vlan_rx_add_vid(qdev, vid);
2324 	set_bit(vid, qdev->active_vlans);
2325 
2326 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2327 
2328 	return err;
2329 }
2330 
2331 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2332 {
2333 	u32 enable_bit = 0;
2334 	int err;
2335 
2336 	err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2337 				  MAC_ADDR_TYPE_VLAN, vid);
2338 	if (err)
2339 		netif_err(qdev, ifup, qdev->ndev,
2340 			  "Failed to clear vlan address.\n");
2341 	return err;
2342 }
2343 
2344 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2345 {
2346 	struct ql_adapter *qdev = netdev_priv(ndev);
2347 	int status;
2348 	int err;
2349 
2350 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2351 	if (status)
2352 		return status;
2353 
2354 	err = __qlge_vlan_rx_kill_vid(qdev, vid);
2355 	clear_bit(vid, qdev->active_vlans);
2356 
2357 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2358 
2359 	return err;
2360 }
2361 
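/* Re-program the hardware filter for every VLAN that is still marked
 * active, e.g. after the adapter has been brought back up.
 */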
2362 static void qlge_restore_vlan(struct ql_adapter *qdev)
2363 {
2364 	int status;
2365 	u16 vid;
2366 
2367 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2368 	if (status)
2369 		return;
2370 
2371 	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2372 		__qlge_vlan_rx_add_vid(qdev, vid);
2373 
2374 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2375 }
2376 
2377 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2378 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2379 {
2380 	struct rx_ring *rx_ring = dev_id;
2381 
2382 	napi_schedule(&rx_ring->napi);
2383 	return IRQ_HANDLED;
2384 }
2385 
2386 /* This handles a fatal error, MPI activity, and the default
2387  * rx_ring in an MSI-X multiple vector environment.
2388  * In an MSI/Legacy environment it also processes the rest of
2389  * the rx_rings.
2390  */
2391 static irqreturn_t qlge_isr(int irq, void *dev_id)
2392 {
2393 	struct rx_ring *rx_ring = dev_id;
2394 	struct ql_adapter *qdev = rx_ring->qdev;
2395 	struct intr_context *intr_context = &qdev->intr_context[0];
2396 	u32 var;
2397 	int work_done = 0;
2398 
2399 	/* Experience shows that when using INTx interrupts, interrupts must
2400 	 * be masked manually.
2401 	 * When using MSI mode, INTR_EN_EN must be explicitly disabled
2402 	 * (even though it is auto-masked), otherwise a later command to
2403 	 * enable it is not effective.
2404 	 */
2405 	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2406 		ql_disable_completion_interrupt(qdev, 0);
2407 
2408 	var = ql_read32(qdev, STS);
2409 
2410 	/*
2411 	 * Check for fatal error.
2412 	 */
2413 	if (var & STS_FE) {
2414 		ql_disable_completion_interrupt(qdev, 0);
2415 		ql_queue_asic_error(qdev);
2416 		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2417 		var = ql_read32(qdev, ERR_STS);
2418 		netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2419 		return IRQ_HANDLED;
2420 	}
2421 
2422 	/*
2423 	 * Check MPI processor activity.
2424 	 */
2425 	if ((var & STS_PI) &&
2426 	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2427 		/*
2428 		 * We've got an async event or mailbox completion.
2429 		 * Handle it and clear the source of the interrupt.
2430 		 */
2431 		netif_err(qdev, intr, qdev->ndev,
2432 			  "Got MPI processor interrupt.\n");
2433 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2434 		queue_delayed_work_on(smp_processor_id(),
2435 				      qdev->workqueue, &qdev->mpi_work, 0);
2436 		work_done++;
2437 	}
2438 
2439 	/*
2440 	 * Get the bit-mask that shows the active queues for this
2441 	 * pass.  Compare it to the queues that this irq services
2442 	 * and call napi if there's a match.
2443 	 */
2444 	var = ql_read32(qdev, ISR1);
2445 	if (var & intr_context->irq_mask) {
2446 		netif_info(qdev, intr, qdev->ndev,
2447 			   "Waking handler for rx_ring[0].\n");
2448 		napi_schedule(&rx_ring->napi);
2449 		work_done++;
2450 	} else {
2451 		/* Experience shows that the device sometimes signals an
2452 		 * interrupt but no work is scheduled from this function.
2453 		 * Nevertheless, the interrupt is auto-masked. Therefore, we
2454 		 * systematically re-enable the interrupt if we didn't
2455 		 * schedule napi.
2456 		 */
2457 		ql_enable_completion_interrupt(qdev, 0);
2458 	}
2459 
2460 	return work_done ? IRQ_HANDLED : IRQ_NONE;
2461 }
2462 
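/* Set up the TSO fields of the IOCB.  Returns 1 if TSO was set up,
 * 0 if the frame is not GSO, or a negative errno on failure.
 */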
2463 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2464 {
2465 
2466 	if (skb_is_gso(skb)) {
2467 		int err;
2468 		__be16 l3_proto = vlan_get_protocol(skb);
2469 
2470 		err = skb_cow_head(skb, 0);
2471 		if (err < 0)
2472 			return err;
2473 
2474 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2475 		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2476 		mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2477 		mac_iocb_ptr->total_hdrs_len =
2478 		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2479 		mac_iocb_ptr->net_trans_offset =
2480 		    cpu_to_le16(skb_network_offset(skb) |
2481 				skb_transport_offset(skb)
2482 				<< OB_MAC_TRANSPORT_HDR_SHIFT);
2483 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2484 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2485 		if (likely(l3_proto == htons(ETH_P_IP))) {
2486 			struct iphdr *iph = ip_hdr(skb);
2487 
2488 			iph->check = 0;
2489 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
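			/* Seed the TCP checksum with the pseudo-header so the
			 * hardware can finish the checksum for each segment.
			 */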
2490 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2491 								 iph->daddr, 0,
2492 								 IPPROTO_TCP,
2493 								 0);
2494 		} else if (l3_proto == htons(ETH_P_IPV6)) {
2495 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2496 			tcp_hdr(skb)->check =
2497 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2498 					     &ipv6_hdr(skb)->daddr,
2499 					     0, IPPROTO_TCP, 0);
2500 		}
2501 		return 1;
2502 	}
2503 	return 0;
2504 }
2505 
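/* Set up the IOCB for hardware TCP/UDP checksum offload on a
 * non-TSO IPv4 frame.
 */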
2506 static void ql_hw_csum_setup(struct sk_buff *skb,
2507 			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2508 {
2509 	int len;
2510 	struct iphdr *iph = ip_hdr(skb);
2511 	__sum16 *check;
2512 
2513 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2514 	mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2515 	mac_iocb_ptr->net_trans_offset =
2516 		cpu_to_le16(skb_network_offset(skb) |
2517 		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2518 
2519 	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
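	/* L4 length for the pseudo-header: total IP length minus the
	 * IP header length (ihl is in 32-bit words).
	 */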
2520 	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2521 	if (likely(iph->protocol == IPPROTO_TCP)) {
2522 		check = &(tcp_hdr(skb)->check);
2523 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2524 		mac_iocb_ptr->total_hdrs_len =
2525 		    cpu_to_le16(skb_transport_offset(skb) +
2526 				(tcp_hdr(skb)->doff << 2));
2527 	} else {
2528 		check = &(udp_hdr(skb)->check);
2529 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2530 		mac_iocb_ptr->total_hdrs_len =
2531 		    cpu_to_le16(skb_transport_offset(skb) +
2532 				sizeof(struct udphdr));
2533 	}
2534 	*check = ~csum_tcpudp_magic(iph->saddr,
2535 				    iph->daddr, len, iph->protocol, 0);
2536 }
2537 
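/* Transmit entry point: build a MAC IOCB for the skb, map its
 * fragments and ring the producer doorbell.
 */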
2538 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2539 {
2540 	struct tx_ring_desc *tx_ring_desc;
2541 	struct ob_mac_iocb_req *mac_iocb_ptr;
2542 	struct ql_adapter *qdev = netdev_priv(ndev);
2543 	int tso;
2544 	struct tx_ring *tx_ring;
2545 	u32 tx_ring_idx = (u32)skb->queue_mapping;
2546 
2547 	tx_ring = &qdev->tx_ring[tx_ring_idx];
2548 
2549 	if (skb_padto(skb, ETH_ZLEN))
2550 		return NETDEV_TX_OK;
2551 
2552 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2553 		netif_info(qdev, tx_queued, qdev->ndev,
2554 			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2555 			   __func__, tx_ring_idx);
2556 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2557 		tx_ring->tx_errors++;
2558 		return NETDEV_TX_BUSY;
2559 	}
2560 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2561 	mac_iocb_ptr = tx_ring_desc->queue_entry;
2562 	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2563 
2564 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2565 	mac_iocb_ptr->tid = tx_ring_desc->index;
2566 	/* Store the tx queue index for this IO so the completion
2567 	 * can be routed back to the right tx_ring.
2568 	 */
2569 	mac_iocb_ptr->txq_idx = tx_ring_idx;
2570 	tx_ring_desc->skb = skb;
2571 
2572 	mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2573 
2574 	if (skb_vlan_tag_present(skb)) {
2575 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2576 			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2577 		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2578 		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2579 	}
2580 	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2581 	if (tso < 0) {
2582 		dev_kfree_skb_any(skb);
2583 		return NETDEV_TX_OK;
2584 	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2585 		ql_hw_csum_setup(skb,
2586 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2587 	}
2588 	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2589 			NETDEV_TX_OK) {
2590 		netif_err(qdev, tx_queued, qdev->ndev,
2591 			  "Could not map the segments.\n");
2592 		tx_ring->tx_errors++;
2593 		return NETDEV_TX_BUSY;
2594 	}
2595 	QL_DUMP_OB_MAC_IOCB(qdev, mac_iocb_ptr);
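	/* Advance the producer index, wrapping at the end of the work queue. */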
2596 	tx_ring->prod_idx++;
2597 	if (tx_ring->prod_idx == tx_ring->wq_len)
2598 		tx_ring->prod_idx = 0;
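	/* Make sure the IOCB is written to memory before the doorbell
	 * write makes it visible to the hardware.
	 */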
2599 	wmb();
2600 
2601 	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2602 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2603 		     "tx queued, slot %d, len %d\n",
2604 		     tx_ring->prod_idx, skb->len);
2605 
2606 	atomic_dec(&tx_ring->tx_count);
2607 
2608 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2609 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2610 		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2611 			/*
2612 			 * The queue got stopped because the tx_ring was full.
2613 			 * Wake it up, because it's now at least 25% empty.
2614 			 */
2615 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2616 	}
2617 	return NETDEV_TX_OK;
2618 }
2619 
2620 static void ql_free_shadow_space(struct ql_adapter *qdev)
2621 {
2622 	if (qdev->rx_ring_shadow_reg_area) {
2623 		dma_free_coherent(&qdev->pdev->dev,
2624 				  PAGE_SIZE,
2625 				  qdev->rx_ring_shadow_reg_area,
2626 				  qdev->rx_ring_shadow_reg_dma);
2627 		qdev->rx_ring_shadow_reg_area = NULL;
2628 	}
2629 	if (qdev->tx_ring_shadow_reg_area) {
2630 		dma_free_coherent(&qdev->pdev->dev,
2631 				  PAGE_SIZE,
2632 				  qdev->tx_ring_shadow_reg_area,
2633 				  qdev->tx_ring_shadow_reg_dma);
2634 		qdev->tx_ring_shadow_reg_area = NULL;
2635 	}
2636 }
2637 
2638 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2639 {
2640 	qdev->rx_ring_shadow_reg_area =
2641 		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2642 				   &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2643 	if (!qdev->rx_ring_shadow_reg_area) {
2644 		netif_err(qdev, ifup, qdev->ndev,
2645 			  "Allocation of RX shadow space failed.\n");
2646 		return -ENOMEM;
2647 	}
2648 
2649 	qdev->tx_ring_shadow_reg_area =
2650 		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2651 				   &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2652 	if (!qdev->tx_ring_shadow_reg_area) {
2653 		netif_err(qdev, ifup, qdev->ndev,
2654 			  "Allocation of TX shadow space failed.\n");
2655 		goto err_wqp_sh_area;
2656 	}
2657 	return 0;
2658 
2659 err_wqp_sh_area:
2660 	dma_free_coherent(&qdev->pdev->dev,
2661 			  PAGE_SIZE,
2662 			  qdev->rx_ring_shadow_reg_area,
2663 			  qdev->rx_ring_shadow_reg_dma);
2664 	return -ENOMEM;
2665 }
2666 
2667 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2668 {
2669 	struct tx_ring_desc *tx_ring_desc;
2670 	int i;
2671 	struct ob_mac_iocb_req *mac_iocb_ptr;
2672 
2673 	mac_iocb_ptr = tx_ring->wq_base;
2674 	tx_ring_desc = tx_ring->q;
2675 	for (i = 0; i < tx_ring->wq_len; i++) {
2676 		tx_ring_desc->index = i;
2677 		tx_ring_desc->skb = NULL;
2678 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2679 		mac_iocb_ptr++;
2680 		tx_ring_desc++;
2681 	}
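	/* The ring starts empty, so every descriptor is available. */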
2682 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2683 }
2684 
2685 static void ql_free_tx_resources(struct ql_adapter *qdev,
2686 				 struct tx_ring *tx_ring)
2687 {
2688 	if (tx_ring->wq_base) {
2689 		dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2690 				  tx_ring->wq_base, tx_ring->wq_base_dma);
2691 		tx_ring->wq_base = NULL;
2692 	}
2693 	kfree(tx_ring->q);
2694 	tx_ring->q = NULL;
2695 }
2696 
2697 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2698 				 struct tx_ring *tx_ring)
2699 {
2700 	tx_ring->wq_base =
2701 	    dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2702 			       &tx_ring->wq_base_dma, GFP_ATOMIC);
2703 
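	/* The chip requires the work queue base address to meet
	 * WQ_ADDR_ALIGN alignment.
	 */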
2704 	if (!tx_ring->wq_base ||
2705 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2706 		goto pci_alloc_err;
2707 
2708 	tx_ring->q =
2709 	    kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2710 			  GFP_KERNEL);
2711 	if (!tx_ring->q)
2712 		goto err;
2713 
2714 	return 0;
2715 err:
2716 	dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2717 			  tx_ring->wq_base, tx_ring->wq_base_dma);
2718 	tx_ring->wq_base = NULL;
2719 pci_alloc_err:
2720 	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2721 	return -ENOMEM;
2722 }
2723 
2724 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2725 {
2726 	struct qlge_bq *lbq = &rx_ring->lbq;
2727 	unsigned int last_offset;
2728 
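	/* Only the chunk at the last offset of a block carries the DMA
	 * mapping for the whole block, so unmap when that chunk is freed.
	 */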
2729 	last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
2730 	while (lbq->next_to_clean != lbq->next_to_use) {
2731 		struct qlge_bq_desc *lbq_desc =
2732 			&lbq->queue[lbq->next_to_clean];
2733 
2734 		if (lbq_desc->p.pg_chunk.offset == last_offset)
2735 			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2736 				       ql_lbq_block_size(qdev),
2737 				       DMA_FROM_DEVICE);
2738 		put_page(lbq_desc->p.pg_chunk.page);
2739 
2740 		lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2741 	}
2742 
2743 	if (rx_ring->master_chunk.page) {
2744 		dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2745 			       ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
2746 		put_page(rx_ring->master_chunk.page);
2747 		rx_ring->master_chunk.page = NULL;
2748 	}
2749 }
2750 
2751 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2752 {
2753 	int i;
2754 
2755 	for (i = 0; i < QLGE_BQ_LEN; i++) {
2756 		struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2757 
2758 		if (!sbq_desc) {
2759 			netif_err(qdev, ifup, qdev->ndev,
2760 				  "sbq_desc %d is NULL.\n", i);
2761 			return;
2762 		}
2763 		if (sbq_desc->p.skb) {
2764 			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2765 					 SMALL_BUF_MAP_SIZE,
2766 					 DMA_FROM_DEVICE);
2767 			dev_kfree_skb(sbq_desc->p.skb);
2768 			sbq_desc->p.skb = NULL;
2769 		}
2770 	}
2771 }
2772 
2773 /* Free all large and small rx buffers associated
2774  * with the completion queues for this device.
2775  */
2776 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2777 {
2778 	int i;
2779 
2780 	for (i = 0; i < qdev->rx_ring_count; i++) {
2781 		struct rx_ring *rx_ring = &qdev->rx_ring[i];
2782 
2783 		if (rx_ring->lbq.queue)
2784 			ql_free_lbq_buffers(qdev, rx_ring);
2785 		if (rx_ring->sbq.queue)
2786 			ql_free_sbq_buffers(qdev, rx_ring);
2787 	}
2788 }
2789 
2790 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2791 {
2792 	int i;
2793 
2794 	for (i = 0; i < qdev->rss_ring_count; i++)
2795 		ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2796 					HZ / 2);
2797 }
2798 
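/* Allocate the DMA ring and the descriptor array for a small or
 * large buffer queue.
 */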
2799 static int qlge_init_bq(struct qlge_bq *bq)
2800 {
2801 	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2802 	struct ql_adapter *qdev = rx_ring->qdev;
2803 	struct qlge_bq_desc *bq_desc;
2804 	__le64 *buf_ptr;
2805 	int i;
2806 
2807 	bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2808 				      &bq->base_dma, GFP_ATOMIC);
2809 	if (!bq->base) {
2810 		netif_err(qdev, ifup, qdev->ndev,
2811 			  "ring %u %s allocation failed.\n", rx_ring->cq_id,
2812 			  bq_type_name[bq->type]);
2813 		return -ENOMEM;
2814 	}
2815 
2816 	bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2817 				  GFP_KERNEL);
2818 	if (!bq->queue)
2819 		return -ENOMEM;
2820 
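	/* Point each descriptor at its slot in the DMA buffer queue. */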
2821 	buf_ptr = bq->base;
2822 	bq_desc = &bq->queue[0];
2823 	for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2824 		bq_desc->p.skb = NULL;
2825 		bq_desc->index = i;
2826 		bq_desc->buf_ptr = buf_ptr;
2827 	}
2828 
2829 	return 0;
2830 }
2831 
2832 static void ql_free_rx_resources(struct ql_adapter *qdev,
2833 				 struct rx_ring *rx_ring)
2834 {
2835 	/* Free the small buffer queue. */
2836 	if (rx_ring->sbq.base) {
2837 		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2838 				  rx_ring->sbq.base, rx_ring->sbq.base_dma);
2839 		rx_ring->sbq.base = NULL;
2840 	}
2841 
2842 	/* Free the small buffer queue control blocks. */
2843 	kfree(rx_ring->sbq.queue);
2844 	rx_ring->sbq.queue = NULL;
2845 
2846 	/* Free the large buffer queue. */
2847 	if (rx_ring->lbq.base) {
2848 		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2849 				  rx_ring->lbq.base, rx_ring->lbq.base_dma);
2850 		rx_ring->lbq.base = NULL;
2851 	}
2852 
2853 	/* Free the large buffer queue control blocks. */
2854 	kfree(rx_ring->lbq.queue);
2855 	rx_ring->lbq.queue = NULL;
2856 
2857 	/* Free the rx queue. */
2858 	if (rx_ring->cq_base) {
2859 		dma_free_coherent(&qdev->pdev->dev,
2860 				  rx_ring->cq_size,
2861 				  rx_ring->cq_base, rx_ring->cq_base_dma);
2862 		rx_ring->cq_base = NULL;
2863 	}
2864 }
2865 
2866 /* Allocate queues and buffers for this completions queue based
2867  * on the values in the parameter structure.
2868  */
2869 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2870 				 struct rx_ring *rx_ring)
2871 {
2872 
2873 	/*
2874 	 * Allocate the completion queue for this rx_ring.
2875 	 */
2876 	rx_ring->cq_base =
2877 	    dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2878 			       &rx_ring->cq_base_dma, GFP_ATOMIC);
2879 
2880 	if (!rx_ring->cq_base) {
2881 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2882 		return -ENOMEM;
2883 	}
2884 
2885 	if (rx_ring->cq_id < qdev->rss_ring_count &&
2886 	    (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2887 		ql_free_rx_resources(qdev, rx_ring);
2888 		return -ENOMEM;
2889 	}
2890 
2891 	return 0;
2892 }
2893 
2894 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2895 {
2896 	struct tx_ring *tx_ring;
2897 	struct tx_ring_desc *tx_ring_desc;
2898 	int i, j;
2899 
2900 	/*
2901 	 * Loop through all queues and free
2902 	 * any resources.
2903 	 */
2904 	for (j = 0; j < qdev->tx_ring_count; j++) {
2905 		tx_ring = &qdev->tx_ring[j];
2906 		for (i = 0; i < tx_ring->wq_len; i++) {
2907 			tx_ring_desc = &tx_ring->q[i];
2908 			if (tx_ring_desc && tx_ring_desc->skb) {
2909 				netif_err(qdev, ifdown, qdev->ndev,
2910 					  "Freeing lost SKB %p, from queue %d, index %d.\n",
2911 					  tx_ring_desc->skb, j,
2912 					  tx_ring_desc->index);
2913 				ql_unmap_send(qdev, tx_ring_desc,
2914 					      tx_ring_desc->map_cnt);
2915 				dev_kfree_skb(tx_ring_desc->skb);
2916 				tx_ring_desc->skb = NULL;
2917 			}
2918 		}
2919 	}
2920 }
2921 
2922 static void ql_free_mem_resources(struct ql_adapter *qdev)
2923 {
2924 	int i;
2925 
2926 	for (i = 0; i < qdev->tx_ring_count; i++)
2927 		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2928 	for (i = 0; i < qdev->rx_ring_count; i++)
2929 		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2930 	ql_free_shadow_space(qdev);
2931 }
2932 
2933 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2934 {
2935 	int i;
2936 
2937 	/* Allocate space for our shadow registers and such. */
2938 	if (ql_alloc_shadow_space(qdev))
2939 		return -ENOMEM;
2940 
2941 	for (i = 0; i < qdev->rx_ring_count; i++) {
2942 		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2943 			netif_err(qdev, ifup, qdev->ndev,
2944 				  "RX resource allocation failed.\n");
2945 			goto err_mem;
2946 		}
2947 	}
2948 	/* Allocate tx queue resources */
2949 	for (i = 0; i < qdev->tx_ring_count; i++) {
2950 		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2951 			netif_err(qdev, ifup, qdev->ndev,
2952 				  "TX resource allocation failed.\n");
2953 			goto err_mem;
2954 		}
2955 	}
2956 	return 0;
2957 
2958 err_mem:
2959 	ql_free_mem_resources(qdev);
2960 	return -ENOMEM;
2961 }
2962 
2963 /* Set up the rx ring control block and pass it to the chip.
2964  * The control block is defined as
2965  * "Completion Queue Initialization Control Block", or cqicb.
2966  */
2967 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2968 {
2969 	struct cqicb *cqicb = &rx_ring->cqicb;
2970 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2971 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2972 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2973 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2974 	void __iomem *doorbell_area =
2975 	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2976 	int err = 0;
2977 	u64 tmp;
2978 	__le64 *base_indirect_ptr;
2979 	int page_entries;
2980 
2981 	/* Set up the shadow registers for this ring. */
2982 	rx_ring->prod_idx_sh_reg = shadow_reg;
2983 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2984 	*rx_ring->prod_idx_sh_reg = 0;
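	/* The lbq and sbq indirection tables follow the producer index
	 * in this ring's shadow area.
	 */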
2985 	shadow_reg += sizeof(u64);
2986 	shadow_reg_dma += sizeof(u64);
2987 	rx_ring->lbq.base_indirect = shadow_reg;
2988 	rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2989 	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2990 	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2991 	rx_ring->sbq.base_indirect = shadow_reg;
2992 	rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
2993 
2994 	/* PCI doorbell mem area + 0x00 for consumer index register */
2995 	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2996 	rx_ring->cnsmr_idx = 0;
2997 	rx_ring->curr_entry = rx_ring->cq_base;
2998 
2999 	/* PCI doorbell mem area + 0x04 for valid register */
3000 	rx_ring->valid_db_reg = doorbell_area + 0x04;
3001 
3002 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
3003 	rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
3004 
3005 	/* PCI doorbell mem area + 0x1c */
3006 	rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
3007 
3008 	memset((void *)cqicb, 0, sizeof(struct cqicb));
3009 	cqicb->msix_vect = rx_ring->irq;
3010 
3011 	cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
3012 				 LEN_CPP_CONT);
3013 
3014 	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3015 
3016 	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3017 
3018 	/*
3019 	 * Set up the control block load flags.
3020 	 */
3021 	cqicb->flags = FLAGS_LC |	/* Load queue base address */
3022 	    FLAGS_LV |		/* Load MSI-X vector */
3023 	    FLAGS_LI;		/* Load irq delay values */
3024 	if (rx_ring->cq_id < qdev->rss_ring_count) {
3025 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
3026 		tmp = (u64)rx_ring->lbq.base_dma;
3027 		base_indirect_ptr = rx_ring->lbq.base_indirect;
3028 		page_entries = 0;
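		/* Fill the indirection table with the DMA address of each
		 * doorbell-page-sized chunk of the large buffer queue.
		 */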
3029 		do {
3030 			*base_indirect_ptr = cpu_to_le64(tmp);
3031 			tmp += DB_PAGE_SIZE;
3032 			base_indirect_ptr++;
3033 			page_entries++;
3034 		} while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3035 		cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3036 		cqicb->lbq_buf_size =
3037 			cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3038 		cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3039 		rx_ring->lbq.next_to_use = 0;
3040 		rx_ring->lbq.next_to_clean = 0;
3041 
3042 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
3043 		tmp = (u64)rx_ring->sbq.base_dma;
3044 		base_indirect_ptr = rx_ring->sbq.base_indirect;
3045 		page_entries = 0;
3046 		do {
3047 			*base_indirect_ptr = cpu_to_le64(tmp);
3048 			tmp += DB_PAGE_SIZE;
3049 			base_indirect_ptr++;
3050 			page_entries++;
3051 		} while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3052 		cqicb->sbq_addr =
3053 		    cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3054 		cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3055 		cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3056 		rx_ring->sbq.next_to_use = 0;
3057 		rx_ring->sbq.next_to_clean = 0;
3058 	}
3059 	if (rx_ring->cq_id < qdev->rss_ring_count) {
3060 		/* Inbound completion handling rx_rings run in
3061 		 * separate NAPI contexts.
3062 		 */
3063 		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3064 			       64);
3065 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3066 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3067 	} else {
3068 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3069 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3070 	}
3071 	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3072 			   CFG_LCQ, rx_ring->cq_id);
3073 	if (err) {
3074 		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3075 		return err;
3076 	}
3077 	return err;
3078 }
3079 
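/* Set up the tx ring (work queue) control block, or wqicb, and
 * pass it to the chip.
 */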
3080 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3081 {
3082 	struct wqicb *wqicb = (struct wqicb *)tx_ring;
3083 	void __iomem *doorbell_area =
3084 	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3085 	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3086 	    (tx_ring->wq_id * sizeof(u64));
3087 	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3088 	    (tx_ring->wq_id * sizeof(u64));
3089 	int err = 0;
3090 
3091 	/*
3092 	 * Assign doorbell registers for this tx_ring.
3093 	 */
3094 	/* TX PCI doorbell mem area for tx producer index */
3095 	tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3096 	tx_ring->prod_idx = 0;
3097 	/* TX PCI doorbell mem area + 0x04 */
3098 	tx_ring->valid_db_reg = doorbell_area + 0x04;
3099 
3100 	/*
3101 	 * Assign shadow registers for this tx_ring.
3102 	 */
3103 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3104 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3105 
3106 	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3107 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3108 				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3109 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3110 	wqicb->rid = 0;
3111 	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3112 
3113 	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3114 
3115 	ql_init_tx_ring(qdev, tx_ring);
3116 
3117 	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3118 			   (u16)tx_ring->wq_id);
3119 	if (err) {
3120 		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3121 		return err;
3122 	}
3123 	return err;
3124 }
3125 
3126 static void ql_disable_msix(struct ql_adapter *qdev)
3127 {
3128 	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3129 		pci_disable_msix(qdev->pdev);
3130 		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3131 		kfree(qdev->msi_x_entry);
3132 		qdev->msi_x_entry = NULL;
3133 	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3134 		pci_disable_msi(qdev->pdev);
3135 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3136 	}
3137 }
3138 
3139 /* We start by trying to get the number of vectors
3140  * stored in qdev->intr_count. If we don't get that
3141  * many then we reduce the count and try again.
3142  */
3143 static void ql_enable_msix(struct ql_adapter *qdev)
3144 {
3145 	int i, err;
3146 
3147 	/* Get the MSIX vectors. */
3148 	if (qlge_irq_type == MSIX_IRQ) {
3149 		/* Try to alloc space for the msix struct,
3150 		 * if it fails then go to MSI/legacy.
3151 		 */
3152 		qdev->msi_x_entry = kcalloc(qdev->intr_count,
3153 					    sizeof(struct msix_entry),
3154 					    GFP_KERNEL);
3155 		if (!qdev->msi_x_entry) {
3156 			qlge_irq_type = MSI_IRQ;
3157 			goto msi;
3158 		}
3159 
3160 		for (i = 0; i < qdev->intr_count; i++)
3161 			qdev->msi_x_entry[i].entry = i;
3162 
3163 		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3164 					    1, qdev->intr_count);
3165 		if (err < 0) {
3166 			kfree(qdev->msi_x_entry);
3167 			qdev->msi_x_entry = NULL;
3168 			netif_warn(qdev, ifup, qdev->ndev,
3169 				   "MSI-X Enable failed, trying MSI.\n");
3170 			qlge_irq_type = MSI_IRQ;
3171 		} else {
3172 			qdev->intr_count = err;
3173 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
3174 			netif_info(qdev, ifup, qdev->ndev,
3175 				   "MSI-X Enabled, got %d vectors.\n",
3176 				   qdev->intr_count);
3177 			return;
3178 		}
3179 	}
3180 msi:
3181 	qdev->intr_count = 1;
3182 	if (qlge_irq_type == MSI_IRQ) {
3183 		if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3184 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3185 			netif_info(qdev, ifup, qdev->ndev,
3186 				   "Running with MSI interrupts.\n");
3187 			return;
3188 		}
3189 	}
3190 	qlge_irq_type = LEG_IRQ;
3191 	set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3192 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3193 		     "Running with legacy interrupts.\n");
3194 }
3195 
3196 /* Each vector services 1 RSS ring and 1 or more
3197  * TX completion rings.  This function loops through
3198  * the TX completion rings and assigns the vector that
3199  * will service it.  An example would be if there are
3200  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3201  * This would mean that vector 0 would service RSS ring 0
3202  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3203  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3204  */
3205 static void ql_set_tx_vect(struct ql_adapter *qdev)
3206 {
3207 	int i, j, vect;
3208 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3209 
3210 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3211 		/* Assign irq vectors to TX rx_rings. */
3212 		for (vect = 0, j = 0, i = qdev->rss_ring_count;
3213 					 i < qdev->rx_ring_count; i++) {
3214 			if (j == tx_rings_per_vector) {
3215 				vect++;
3216 				j = 0;
3217 			}
3218 			qdev->rx_ring[i].irq = vect;
3219 			j++;
3220 		}
3221 	} else {
3222 		/* For single vector all rings have an irq
3223 		 * of zero.
3224 		 */
3225 		for (i = 0; i < qdev->rx_ring_count; i++)
3226 			qdev->rx_ring[i].irq = 0;
3227 	}
3228 }
3229 
3230 /* Set the interrupt mask for this vector.  Each vector
3231  * will service 1 RSS ring and 1 or more TX completion
3232  * rings.  This function sets up a bit mask per vector
3233  * that indicates which rings it services.
3234  */
3235 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3236 {
3237 	int j, vect = ctx->intr;
3238 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3239 
3240 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3241 		/* Add the RSS ring serviced by this vector
3242 		 * to the mask.
3243 		 */
3244 		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3245 		/* Add the TX ring(s) serviced by this vector
3246 		 * to the mask.
3247 		 */
3248 		for (j = 0; j < tx_rings_per_vector; j++) {
3249 			ctx->irq_mask |=
3250 			(1 << qdev->rx_ring[qdev->rss_ring_count +
3251 			(vect * tx_rings_per_vector) + j].cq_id);
3252 		}
3253 	} else {
3254 		/* For single vector we just shift each queue's
3255 		 * ID into the mask.
3256 		 */
3257 		for (j = 0; j < qdev->rx_ring_count; j++)
3258 			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3259 	}
3260 }
3261 
3262 /*
3263  * Here we build the intr_context structures based on
3264  * our rx_ring count and intr vector count.
3265  * The intr_context structure is used to hook each vector
3266  * to possibly different handlers.
3267  */
3268 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3269 {
3270 	int i = 0;
3271 	struct intr_context *intr_context = &qdev->intr_context[0];
3272 
3273 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3274 		/* Each rx_ring has its
3275 		 * own intr_context since we have separate
3276 		 * vectors for each queue.
3277 		 */
3278 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3279 			qdev->rx_ring[i].irq = i;
3280 			intr_context->intr = i;
3281 			intr_context->qdev = qdev;
3282 			/* Set up this vector's bit-mask that indicates
3283 			 * which queues it services.
3284 			 */
3285 			ql_set_irq_mask(qdev, intr_context);
3286 			/*
3287 			 * We set up each vector's enable/disable/read bits so
3288 			 * there are no bit/mask calculations in the critical path.
3289 			 */
3290 			intr_context->intr_en_mask =
3291 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3292 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3293 			    | i;
3294 			intr_context->intr_dis_mask =
3295 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3296 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3297 			    INTR_EN_IHD | i;
3298 			intr_context->intr_read_mask =
3299 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3300 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3301 			    i;
3302 			if (i == 0) {
3303 				/* The first vector/queue handles
3304 				 * broadcast/multicast, fatal errors,
3305 				 * and firmware events.  This is in addition
3306 				 * to normal inbound NAPI processing.
3307 				 */
3308 				intr_context->handler = qlge_isr;
3309 				sprintf(intr_context->name, "%s-rx-%d",
3310 					qdev->ndev->name, i);
3311 			} else {
3312 				/*
3313 				 * Inbound queues handle unicast frames only.
3314 				 */
3315 				intr_context->handler = qlge_msix_rx_isr;
3316 				sprintf(intr_context->name, "%s-rx-%d",
3317 					qdev->ndev->name, i);
3318 			}
3319 		}
3320 	} else {
3321 		/*
3322 		 * All rx_rings use the same intr_context since
3323 		 * there is only one vector.
3324 		 */
3325 		intr_context->intr = 0;
3326 		intr_context->qdev = qdev;
3327 		/*
3328 		 * We set up each vector's enable/disable/read bits so
3329 		 * there are no bit/mask calculations in the critical path.
3330 		 */
3331 		intr_context->intr_en_mask =
3332 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3333 		intr_context->intr_dis_mask =
3334 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3335 		    INTR_EN_TYPE_DISABLE;
3336 		if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3337 			/* Experience shows that when using INTx interrupts,
3338 			 * the device does not always auto-mask INTR_EN_EN.
3339 			 * Moreover, masking INTR_EN_EN manually does not
3340 			 * immediately prevent interrupt generation.
3341 			 */
3342 			intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3343 				INTR_EN_EI;
3344 			intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3345 		}
3346 		intr_context->intr_read_mask =
3347 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3348 		/*
3349 		 * Single interrupt means one handler for all rings.
3350 		 */
3351 		intr_context->handler = qlge_isr;
3352 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3353 		/* Set up this vector's bit-mask that indicates
3354 		 * which queues it services. In this case there is
3355 		 * a single vector so it will service all RSS and
3356 		 * TX completion rings.
3357 		 */
3358 		ql_set_irq_mask(qdev, intr_context);
3359 	}
3360 	/* Tell the TX completion rings which MSIx vector
3361 	 * they will be using.
3362 	 */
3363 	ql_set_tx_vect(qdev);
3364 }
3365 
3366 static void ql_free_irq(struct ql_adapter *qdev)
3367 {
3368 	int i;
3369 	struct intr_context *intr_context = &qdev->intr_context[0];
3370 
3371 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3372 		if (intr_context->hooked) {
3373 			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3374 				free_irq(qdev->msi_x_entry[i].vector,
3375 					 &qdev->rx_ring[i]);
3376 			} else {
3377 				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3378 			}
3379 		}
3380 	}
3381 	ql_disable_msix(qdev);
3382 }
3383 
3384 static int ql_request_irq(struct ql_adapter *qdev)
3385 {
3386 	int i;
3387 	int status = 0;
3388 	struct pci_dev *pdev = qdev->pdev;
3389 	struct intr_context *intr_context = &qdev->intr_context[0];
3390 
3391 	ql_resolve_queues_to_irqs(qdev);
3392 
3393 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3394 		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3395 			status = request_irq(qdev->msi_x_entry[i].vector,
3396 					     intr_context->handler,
3397 					     0,
3398 					     intr_context->name,
3399 					     &qdev->rx_ring[i]);
3400 			if (status) {
3401 				netif_err(qdev, ifup, qdev->ndev,
3402 					  "Failed request for MSIX interrupt %d.\n",
3403 					  i);
3404 				goto err_irq;
3405 			}
3406 		} else {
3407 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3408 				     "trying msi or legacy interrupts.\n");
3409 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3410 				     "%s: irq = %d.\n", __func__, pdev->irq);
3411 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3412 				     "%s: context->name = %s.\n", __func__,
3413 				     intr_context->name);
3414 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3415 				     "%s: dev_id = 0x%p.\n", __func__,
3416 				     &qdev->rx_ring[0]);
3417 			status =
3418 			    request_irq(pdev->irq, qlge_isr,
3419 					test_bit(QL_MSI_ENABLED, &qdev->flags)
3420 						? 0
3421 						: IRQF_SHARED,
3422 					intr_context->name, &qdev->rx_ring[0]);
3423 			if (status)
3424 				goto err_irq;
3425 
3426 			netif_err(qdev, ifup, qdev->ndev,
3427 				  "Hooked intr 0, queue type RX_Q, with name %s.\n",
3428 				  intr_context->name);
3429 		}
3430 		intr_context->hooked = 1;
3431 	}
3432 	return status;
3433 err_irq:
3434 	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3435 	ql_free_irq(qdev);
3436 	return status;
3437 }
3438 
3439 static int ql_start_rss(struct ql_adapter *qdev)
3440 {
3441 	static const u8 init_hash_seed[] = {
3442 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3443 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3444 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3445 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3446 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3447 	};
3448 	struct ricb *ricb = &qdev->ricb;
3449 	int status = 0;
3450 	int i;
3451 	u8 *hash_id = (u8 *)ricb->hash_cq_id;
3452 
3453 	memset((void *)ricb, 0, sizeof(*ricb));
3454 
3455 	ricb->base_cq = RSS_L4K;
3456 	ricb->flags =
3457 		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3458 	ricb->mask = cpu_to_le16((u16)(0x3ff));
3459 
3460 	/*
3461 	 * Fill out the Indirection Table.
3462 	 */
3463 	for (i = 0; i < 1024; i++)
3464 		hash_id[i] = (i & (qdev->rss_ring_count - 1));
3465 
3466 	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3467 	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3468 
3469 	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3470 	if (status) {
3471 		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3472 		return status;
3473 	}
3474 	return status;
3475 }
3476 
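/* Editor's note: illustrative sketch (not driver code) of how the 1024
 * indirection-table entries filled in ql_start_rss() spread hash values
 * across the RSS rings.  The AND with (rss_ring_count - 1) distributes
 * entries evenly only when rss_ring_count is a power of two.
 */
#if 0	/* never compiled with the driver */
#include <stdio.h>

int main(void)
{
	int rss_ring_count = 4;		/* hypothetical */
	int i;

	/* print the first 16 of the 1024 entries: 0,1,2,3,0,1,2,3,... */
	for (i = 0; i < 16; i++)
		printf("hash_cq_id[%d] = %d\n", i, i & (rss_ring_count - 1));
	return 0;
}
#endif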
3477 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3478 {
3479 	int i, status = 0;
3480 
3481 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3482 	if (status)
3483 		return status;
3484 	/* Clear all the entries in the routing table. */
3485 	for (i = 0; i < 16; i++) {
3486 		status = ql_set_routing_reg(qdev, i, 0, 0);
3487 		if (status) {
3488 			netif_err(qdev, ifup, qdev->ndev,
3489 				  "Failed to init routing register for CAM packets.\n");
3490 			break;
3491 		}
3492 	}
3493 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3494 	return status;
3495 }
3496 
3497 /* Initialize the frame-to-queue routing. */
3498 static int ql_route_initialize(struct ql_adapter *qdev)
3499 {
3500 	int status = 0;
3501 
3502 	/* Clear all the entries in the routing table. */
3503 	status = ql_clear_routing_entries(qdev);
3504 	if (status)
3505 		return status;
3506 
3507 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3508 	if (status)
3509 		return status;
3510 
3511 	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3512 				    RT_IDX_IP_CSUM_ERR, 1);
3513 	if (status) {
3514 		netif_err(qdev, ifup, qdev->ndev,
3515 			  "Failed to init routing register for IP CSUM error packets.\n");
3516 		goto exit;
3517 	}
3518 	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3519 				    RT_IDX_TU_CSUM_ERR, 1);
3520 	if (status) {
3521 		netif_err(qdev, ifup, qdev->ndev,
3522 			  "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3523 		goto exit;
3524 	}
3525 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3526 	if (status) {
3527 		netif_err(qdev, ifup, qdev->ndev,
3528 			  "Failed to init routing register for broadcast packets.\n");
3529 		goto exit;
3530 	}
3531 	/* If we have more than one inbound queue, then turn on RSS in the
3532 	 * routing block.
3533 	 */
3534 	if (qdev->rss_ring_count > 1) {
3535 		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3536 					    RT_IDX_RSS_MATCH, 1);
3537 		if (status) {
3538 			netif_err(qdev, ifup, qdev->ndev,
3539 				  "Failed to init routing register for MATCH RSS packets.\n");
3540 			goto exit;
3541 		}
3542 	}
3543 
3544 	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3545 				    RT_IDX_CAM_HIT, 1);
3546 	if (status)
3547 		netif_err(qdev, ifup, qdev->ndev,
3548 			  "Failed to init routing register for CAM packets.\n");
3549 exit:
3550 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3551 	return status;
3552 }
3553 
3554 int ql_cam_route_initialize(struct ql_adapter *qdev)
3555 {
3556 	int status, set;
3557 
3558 	/* Check if the link is up and use that to
3559 	 * determine if we are setting or clearing
3560 	 * the MAC address in the CAM.
3561 	 */
3562 	set = ql_read32(qdev, STS);
3563 	set &= qdev->port_link_up;
3564 	status = ql_set_mac_addr(qdev, set);
3565 	if (status) {
3566 		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3567 		return status;
3568 	}
3569 
3570 	status = ql_route_initialize(qdev);
3571 	if (status)
3572 		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3573 
3574 	return status;
3575 }
3576 
3577 static int ql_adapter_initialize(struct ql_adapter *qdev)
3578 {
3579 	u32 value, mask;
3580 	int i;
3581 	int status = 0;
3582 
3583 	/*
3584 	 * Set up the System register to halt on errors.
3585 	 */
3586 	value = SYS_EFE | SYS_FAE;
3587 	mask = value << 16;
3588 	ql_write32(qdev, SYS, mask | value);
3589 
3590 	/* Set the default queue, and VLAN behavior. */
3591 	value = NIC_RCV_CFG_DFQ;
3592 	mask = NIC_RCV_CFG_DFQ_MASK;
3593 	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3594 		value |= NIC_RCV_CFG_RV;
3595 		mask |= (NIC_RCV_CFG_RV << 16);
3596 	}
3597 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3598 
3599 	/* Set the MPI interrupt to enabled. */
3600 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3601 
3602 	/* Enable the function, set pagesize, enable error checking. */
3603 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3604 	    FSC_EC | FSC_VM_PAGE_4K;
3605 	value |= SPLT_SETTING;
3606 
3607 	/* Set/clear header splitting. */
3608 	mask = FSC_VM_PAGESIZE_MASK |
3609 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3610 	ql_write32(qdev, FSC, mask | value);
3611 
3612 	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3613 
3614 	/* Set RX packet routing to use port/pci function on which the
3615 	 * packet arrived, in addition to usual frame routing.
3616 	 * This is helpful on bonding where both interfaces can have
3617 	 * the same MAC address.
3618 	 */
3619 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3620 	/* Reroute all packets to our Interface.
3621 	 * They may have been routed to MPI firmware
3622 	 * due to WOL.
3623 	 */
3624 	value = ql_read32(qdev, MGMT_RCV_CFG);
3625 	value &= ~MGMT_RCV_CFG_RM;
3626 	mask = 0xffff0000;
3627 
3628 	/* Sticky reg needs clearing due to WOL. */
3629 	ql_write32(qdev, MGMT_RCV_CFG, mask);
3630 	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3631 
3632 	/* Default WOL is enabled on Mezz cards. */
3633 	if (qdev->pdev->subsystem_device == 0x0068 ||
3634 	    qdev->pdev->subsystem_device == 0x0180)
3635 		qdev->wol = WAKE_MAGIC;
3636 
3637 	/* Start up the rx queues. */
3638 	for (i = 0; i < qdev->rx_ring_count; i++) {
3639 		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3640 		if (status) {
3641 			netif_err(qdev, ifup, qdev->ndev,
3642 				  "Failed to start rx ring[%d].\n", i);
3643 			return status;
3644 		}
3645 	}
3646 
3647 	/* If there is more than one inbound completion queue
3648 	 * then download a RICB to configure RSS.
3649 	 */
3650 	if (qdev->rss_ring_count > 1) {
3651 		status = ql_start_rss(qdev);
3652 		if (status) {
3653 			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3654 			return status;
3655 		}
3656 	}
3657 
3658 	/* Start up the tx queues. */
3659 	for (i = 0; i < qdev->tx_ring_count; i++) {
3660 		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3661 		if (status) {
3662 			netif_err(qdev, ifup, qdev->ndev,
3663 				  "Failed to start tx ring[%d].\n", i);
3664 			return status;
3665 		}
3666 	}
3667 
3668 	/* Initialize the port and set the max framesize. */
3669 	status = qdev->nic_ops->port_initialize(qdev);
3670 	if (status)
3671 		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3672 
3673 	/* Set up the MAC address and frame routing filter. */
3674 	status = ql_cam_route_initialize(qdev);
3675 	if (status) {
3676 		netif_err(qdev, ifup, qdev->ndev,
3677 			  "Failed to init CAM/Routing tables.\n");
3678 		return status;
3679 	}
3680 
3681 	/* Start NAPI for the RSS queues. */
3682 	for (i = 0; i < qdev->rss_ring_count; i++)
3683 		napi_enable(&qdev->rx_ring[i].napi);
3684 
3685 	return status;
3686 }
3687 
3688 /* Issue soft reset to chip. */
3689 static int ql_adapter_reset(struct ql_adapter *qdev)
3690 {
3691 	u32 value;
3692 	int status = 0;
3693 	unsigned long end_jiffies;
3694 
3695 	/* Clear all the entries in the routing table. */
3696 	status = ql_clear_routing_entries(qdev);
3697 	if (status) {
3698 		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3699 		return status;
3700 	}
3701 
3702 	/* If the recovery bit is set, skip the mailbox command and
3703 	 * clear the bit; otherwise we are in the normal reset process.
3704 	 */
3705 	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3706 		/* Stop management traffic. */
3707 		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3708 
3709 		/* Wait for the NIC and MGMNT FIFOs to empty. */
3710 		ql_wait_fifo_empty(qdev);
3711 	} else {
3712 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3713 	}
3714 
3715 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3716 
3717 	end_jiffies = jiffies + usecs_to_jiffies(30);
3718 	do {
3719 		value = ql_read32(qdev, RST_FO);
3720 		if ((value & RST_FO_FR) == 0)
3721 			break;
3722 		cpu_relax();
3723 	} while (time_before(jiffies, end_jiffies));
3724 
3725 	if (value & RST_FO_FR) {
3726 		netif_err(qdev, ifdown, qdev->ndev,
3727 			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3728 		status = -ETIMEDOUT;
3729 	}
3730 
3731 	/* Resume management traffic. */
3732 	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3733 	return status;
3734 }
3735 
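/* Editor's note: a hypothetical user-space analogue (not driver code) of
 * the bounded-poll pattern in ql_adapter_reset() above: re-read a status
 * bit until it clears or a deadline passes.  example_reset_done() stands
 * in for reading RST_FO.
 */
#if 0	/* never compiled with the driver */
#include <stdbool.h>
#include <time.h>

static bool example_reset_done(void)
{
	return true;	/* stand-in for (ql_read32(qdev, RST_FO) & RST_FO_FR) == 0 */
}

static int example_poll(long timeout_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (example_reset_done())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L >= timeout_us)
			return -1;	/* analogous to -ETIMEDOUT */
	}
}
#endif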
3736 static void ql_display_dev_info(struct net_device *ndev)
3737 {
3738 	struct ql_adapter *qdev = netdev_priv(ndev);
3739 
3740 	netif_info(qdev, probe, qdev->ndev,
3741 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3742 		   qdev->func,
3743 		   qdev->port,
3744 		   qdev->chip_rev_id & 0x0000000f,
3745 		   qdev->chip_rev_id >> 4 & 0x0000000f,
3746 		   qdev->chip_rev_id >> 8 & 0x0000000f,
3747 		   qdev->chip_rev_id >> 12 & 0x0000000f);
3748 	netif_info(qdev, probe, qdev->ndev,
3749 		   "MAC address %pM\n", ndev->dev_addr);
3750 }
3751 
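/* Editor's note: tiny standalone sketch (not driver code) of the nibble
 * decoding done by ql_display_dev_info() above, using a made-up REV_ID
 * value of 0x4321.
 */
#if 0	/* never compiled with the driver */
#include <stdio.h>

int main(void)
{
	unsigned int chip_rev_id = 0x4321;	/* hypothetical */

	printf("NIC Roll %u, NIC Rev %u, XG Roll %u, XG Rev %u\n",
	       chip_rev_id & 0xf,		/* -> 1 */
	       (chip_rev_id >> 4) & 0xf,	/* -> 2 */
	       (chip_rev_id >> 8) & 0xf,	/* -> 3 */
	       (chip_rev_id >> 12) & 0xf);	/* -> 4 */
	return 0;
}
#endif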
3752 static int ql_wol(struct ql_adapter *qdev)
3753 {
3754 	int status = 0;
3755 	u32 wol = MB_WOL_DISABLE;
3756 
3757 	/* The CAM is still intact after a reset, but if we
3758 	 * are doing WOL, then we may need to program the
3759 	 * routing regs. We would also need to issue the mailbox
3760 	 * commands to instruct the MPI what to do per the ethtool
3761 	 * settings.
3762 	 */
3763 
3764 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3765 			WAKE_MCAST | WAKE_BCAST)) {
3766 		netif_err(qdev, ifdown, qdev->ndev,
3767 			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3768 			  qdev->wol);
3769 		return -EINVAL;
3770 	}
3771 
3772 	if (qdev->wol & WAKE_MAGIC) {
3773 		status = ql_mb_wol_set_magic(qdev, 1);
3774 		if (status) {
3775 			netif_err(qdev, ifdown, qdev->ndev,
3776 				  "Failed to set magic packet on %s.\n",
3777 				  qdev->ndev->name);
3778 			return status;
3779 		}
3780 		netif_info(qdev, drv, qdev->ndev,
3781 			   "Enabled magic packet successfully on %s.\n",
3782 			   qdev->ndev->name);
3783 
3784 		wol |= MB_WOL_MAGIC_PKT;
3785 	}
3786 
3787 	if (qdev->wol) {
3788 		wol |= MB_WOL_MODE_ON;
3789 		status = ql_mb_wol_mode(qdev, wol);
3790 		netif_err(qdev, drv, qdev->ndev,
3791 			  "WOL %s (wol code 0x%x) on %s\n",
3792 			  (status == 0) ? "Successfully set" : "Failed",
3793 			  wol, qdev->ndev->name);
3794 	}
3795 
3796 	return status;
3797 }
3798 
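/* Editor's note: minimal sketch (not driver code) of the capability check
 * in ql_wol() above: every wake flag except WAKE_MAGIC is rejected.  The
 * WAKE_* values are reproduced here, as defined in
 * include/uapi/linux/ethtool.h, so the example stands alone.
 */
#if 0	/* never compiled with the driver */
#include <stdio.h>

#define WAKE_PHY		(1 << 0)
#define WAKE_UCAST		(1 << 1)
#define WAKE_MCAST		(1 << 2)
#define WAKE_BCAST		(1 << 3)
#define WAKE_ARP		(1 << 4)
#define WAKE_MAGIC		(1 << 5)
#define WAKE_MAGICSECURE	(1 << 6)

static int example_wol_supported(unsigned int wol)
{
	return !(wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY |
			WAKE_UCAST | WAKE_MCAST | WAKE_BCAST));
}

int main(void)
{
	printf("WAKE_MAGIC supported: %d\n", example_wol_supported(WAKE_MAGIC));
	printf("WAKE_ARP supported:   %d\n", example_wol_supported(WAKE_ARP));
	return 0;
}
#endif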
3799 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3800 {
3801 
3802 	/* Don't kill the reset worker thread if we
3803 	 * are in the process of recovery.
3804 	 */
3805 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3806 		cancel_delayed_work_sync(&qdev->asic_reset_work);
3807 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3808 	cancel_delayed_work_sync(&qdev->mpi_work);
3809 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3810 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3811 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3812 }
3813 
3814 static int ql_adapter_down(struct ql_adapter *qdev)
3815 {
3816 	int i, status = 0;
3817 
3818 	ql_link_off(qdev);
3819 
3820 	ql_cancel_all_work_sync(qdev);
3821 
3822 	for (i = 0; i < qdev->rss_ring_count; i++)
3823 		napi_disable(&qdev->rx_ring[i].napi);
3824 
3825 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3826 
3827 	ql_disable_interrupts(qdev);
3828 
3829 	ql_tx_ring_clean(qdev);
3830 
3831 	/* Call netif_napi_del() from a common point.
3832 	 */
3833 	for (i = 0; i < qdev->rss_ring_count; i++)
3834 		netif_napi_del(&qdev->rx_ring[i].napi);
3835 
3836 	status = ql_adapter_reset(qdev);
3837 	if (status)
3838 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3839 			  qdev->func);
3840 	ql_free_rx_buffers(qdev);
3841 
3842 	return status;
3843 }
3844 
3845 static int ql_adapter_up(struct ql_adapter *qdev)
3846 {
3847 	int err = 0;
3848 
3849 	err = ql_adapter_initialize(qdev);
3850 	if (err) {
3851 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3852 		goto err_init;
3853 	}
3854 	set_bit(QL_ADAPTER_UP, &qdev->flags);
3855 	ql_alloc_rx_buffers(qdev);
3856 	/* If the port is initialized and the
3857 	 * link is up, then turn on the carrier.
3858 	 */
3859 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
3860 	    (ql_read32(qdev, STS) & qdev->port_link_up))
3861 		ql_link_on(qdev);
3862 	/* Restore rx mode. */
3863 	clear_bit(QL_ALLMULTI, &qdev->flags);
3864 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
3865 	qlge_set_multicast_list(qdev->ndev);
3866 
3867 	/* Restore vlan setting. */
3868 	qlge_restore_vlan(qdev);
3869 
3870 	ql_enable_interrupts(qdev);
3871 	ql_enable_all_completion_interrupts(qdev);
3872 	netif_tx_start_all_queues(qdev->ndev);
3873 
3874 	return 0;
3875 err_init:
3876 	ql_adapter_reset(qdev);
3877 	return err;
3878 }
3879 
3880 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3881 {
3882 	ql_free_mem_resources(qdev);
3883 	ql_free_irq(qdev);
3884 }
3885 
3886 static int ql_get_adapter_resources(struct ql_adapter *qdev)
3887 {
3888 	if (ql_alloc_mem_resources(qdev)) {
3889 		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3890 		return -ENOMEM;
3891 	}
3892 	return ql_request_irq(qdev);
3893 }
3894 
3895 static int qlge_close(struct net_device *ndev)
3896 {
3897 	struct ql_adapter *qdev = netdev_priv(ndev);
3898 	int i;
3899 
3900 	/* If we hit the pci_channel_io_perm_failure
3901 	 * condition, then we already
3902 	 * brought the adapter down.
3903 	 */
3904 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3905 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3906 		clear_bit(QL_EEH_FATAL, &qdev->flags);
3907 		return 0;
3908 	}
3909 
3910 	/*
3911 	 * Wait for device to recover from a reset.
3912 	 * (Rarely happens, but possible.)
3913 	 */
3914 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3915 		msleep(1);
3916 
3917 	/* Make sure refill_work doesn't re-enable napi */
3918 	for (i = 0; i < qdev->rss_ring_count; i++)
3919 		cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3920 
3921 	ql_adapter_down(qdev);
3922 	ql_release_adapter_resources(qdev);
3923 	return 0;
3924 }
3925 
3926 static void qlge_set_lb_size(struct ql_adapter *qdev)
3927 {
3928 	if (qdev->ndev->mtu <= 1500)
3929 		qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3930 	else
3931 		qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3932 	qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3933 }
3934 
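/* Editor's note: hypothetical sketch (not driver code) of what
 * get_order(), used by qlge_set_lb_size() above, computes for a given
 * buffer size: the smallest n such that (PAGE_SIZE << n) >= size.
 * A 4096-byte page and example sizes are assumed.
 */
#if 0	/* never compiled with the driver */
#include <stdio.h>

static int example_get_order(unsigned long size, unsigned long page_size)
{
	int order = 0;

	while ((page_size << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("order(2048)  = %d\n", example_get_order(2048, 4096));  /* 0 */
	printf("order(16384) = %d\n", example_get_order(16384, 4096)); /* 2 */
	return 0;
}
#endif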
3935 static int ql_configure_rings(struct ql_adapter *qdev)
3936 {
3937 	int i;
3938 	struct rx_ring *rx_ring;
3939 	struct tx_ring *tx_ring;
3940 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3941 
3942 	/* In a perfect world we have one RSS ring for each CPU
3943 	 * and each has its own vector.  To do that we ask for
3944 	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
3945 	 * vector count to what we actually get.  We then
3946 	 * allocate an RSS ring for each.
3947 	 * Essentially, we are doing min(cpu_count, msix_vector_count).
3948 	 */
3949 	qdev->intr_count = cpu_cnt;
3950 	ql_enable_msix(qdev);
3951 	/* Adjust the RSS ring count to the actual vector count. */
3952 	qdev->rss_ring_count = qdev->intr_count;
3953 	qdev->tx_ring_count = cpu_cnt;
3954 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3955 
3956 	for (i = 0; i < qdev->tx_ring_count; i++) {
3957 		tx_ring = &qdev->tx_ring[i];
3958 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
3959 		tx_ring->qdev = qdev;
3960 		tx_ring->wq_id = i;
3961 		tx_ring->wq_len = qdev->tx_ring_size;
3962 		tx_ring->wq_size =
3963 		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3964 
3965 		/*
3966 		 * The completion queue IDs for the tx rings start
3967 		 * immediately after the rss rings.
3968 		 */
3969 		tx_ring->cq_id = qdev->rss_ring_count + i;
3970 	}
3971 
3972 	for (i = 0; i < qdev->rx_ring_count; i++) {
3973 		rx_ring = &qdev->rx_ring[i];
3974 		memset((void *)rx_ring, 0, sizeof(*rx_ring));
3975 		rx_ring->qdev = qdev;
3976 		rx_ring->cq_id = i;
3977 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
3978 		if (i < qdev->rss_ring_count) {
3979 			/*
3980 			 * Inbound (RSS) queues.
3981 			 */
3982 			rx_ring->cq_len = qdev->rx_ring_size;
3983 			rx_ring->cq_size =
3984 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3985 			rx_ring->lbq.type = QLGE_LB;
3986 			rx_ring->sbq.type = QLGE_SB;
3987 			INIT_DELAYED_WORK(&rx_ring->refill_work,
3988 					  &qlge_slow_refill);
3989 		} else {
3990 			/*
3991 			 * Outbound queue handles outbound completions only.
3992 			 */
3993 			/* outbound cq is same size as tx_ring it services. */
3994 			rx_ring->cq_len = qdev->tx_ring_size;
3995 			rx_ring->cq_size =
3996 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3997 		}
3998 	}
3999 	return 0;
4000 }
4001 
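/* Editor's note: worked example (not driver code) of the ring counts set
 * up by ql_configure_rings() above, assuming 8 online CPUs and 8 MSI-X
 * vectors granted: 8 RSS rings, 8 TX rings, 16 rx_ring slots in total,
 * with TX completion cq_ids occupying indices 8..15.
 */
#if 0	/* never compiled with the driver */
#include <stdio.h>

int main(void)
{
	int cpu_cnt = 8;			/* hypothetical */
	int intr_count = cpu_cnt;		/* vectors actually granted */
	int rss_ring_count = intr_count;
	int tx_ring_count = cpu_cnt;
	int rx_ring_count = tx_ring_count + rss_ring_count;
	int i;

	printf("rx_ring_count = %d\n", rx_ring_count);
	for (i = 0; i < tx_ring_count; i++)
		printf("tx_ring[%d] completes on cq_id %d\n",
		       i, rss_ring_count + i);
	return 0;
}
#endif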
4002 static int qlge_open(struct net_device *ndev)
4003 {
4004 	int err = 0;
4005 	struct ql_adapter *qdev = netdev_priv(ndev);
4006 
4007 	err = ql_adapter_reset(qdev);
4008 	if (err)
4009 		return err;
4010 
4011 	qlge_set_lb_size(qdev);
4012 	err = ql_configure_rings(qdev);
4013 	if (err)
4014 		return err;
4015 
4016 	err = ql_get_adapter_resources(qdev);
4017 	if (err)
4018 		goto error_up;
4019 
4020 	err = ql_adapter_up(qdev);
4021 	if (err)
4022 		goto error_up;
4023 
4024 	return err;
4025 
4026 error_up:
4027 	ql_release_adapter_resources(qdev);
4028 	return err;
4029 }
4030 
4031 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4032 {
4033 	int status;
4034 
4035 	/* Wait for an outstanding reset to complete. */
4036 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4037 		int i = 4;
4038 
4039 		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4040 			netif_err(qdev, ifup, qdev->ndev,
4041 				  "Waiting for adapter UP...\n");
4042 			ssleep(1);
4043 		}
4044 
4045 		if (!i) {
4046 			netif_err(qdev, ifup, qdev->ndev,
4047 				  "Timed out waiting for adapter UP\n");
4048 			return -ETIMEDOUT;
4049 		}
4050 	}
4051 
4052 	status = ql_adapter_down(qdev);
4053 	if (status)
4054 		goto error;
4055 
4056 	qlge_set_lb_size(qdev);
4057 
4058 	status = ql_adapter_up(qdev);
4059 	if (status)
4060 		goto error;
4061 
4062 	return status;
4063 error:
4064 	netif_alert(qdev, ifup, qdev->ndev,
4065 		    "Driver up/down cycle failed, closing device.\n");
4066 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4067 	dev_close(qdev->ndev);
4068 	return status;
4069 }
4070 
4071 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4072 {
4073 	struct ql_adapter *qdev = netdev_priv(ndev);
4074 	int status;
4075 
4076 	if (ndev->mtu == 1500 && new_mtu == 9000)
4077 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4078 	else if (ndev->mtu == 9000 && new_mtu == 1500)
4079 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4080 	else
4081 		return -EINVAL;
4082 
4083 	queue_delayed_work(qdev->workqueue,
4084 			   &qdev->mpi_port_cfg_work, 3 * HZ);
4085 
4086 	ndev->mtu = new_mtu;
4087 
4088 	if (!netif_running(qdev->ndev))
4089 		return 0;
4090 
4091 	status = ql_change_rx_buffers(qdev);
4092 	if (status) {
4093 		netif_err(qdev, ifup, qdev->ndev,
4094 			  "Changing MTU failed.\n");
4095 	}
4096 
4097 	return status;
4098 }
4099 
4100 static struct net_device_stats *qlge_get_stats(struct net_device
4101 					       *ndev)
4102 {
4103 	struct ql_adapter *qdev = netdev_priv(ndev);
4104 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4105 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4106 	unsigned long pkts, mcast, dropped, errors, bytes;
4107 	int i;
4108 
4109 	/* Get RX stats. */
4110 	pkts = mcast = dropped = errors = bytes = 0;
4111 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4112 		pkts += rx_ring->rx_packets;
4113 		bytes += rx_ring->rx_bytes;
4114 		dropped += rx_ring->rx_dropped;
4115 		errors += rx_ring->rx_errors;
4116 		mcast += rx_ring->rx_multicast;
4117 	}
4118 	ndev->stats.rx_packets = pkts;
4119 	ndev->stats.rx_bytes = bytes;
4120 	ndev->stats.rx_dropped = dropped;
4121 	ndev->stats.rx_errors = errors;
4122 	ndev->stats.multicast = mcast;
4123 
4124 	/* Get TX stats. */
4125 	pkts = errors = bytes = 0;
4126 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4127 		pkts += tx_ring->tx_packets;
4128 		bytes += tx_ring->tx_bytes;
4129 		errors += tx_ring->tx_errors;
4130 	}
4131 	ndev->stats.tx_packets = pkts;
4132 	ndev->stats.tx_bytes = bytes;
4133 	ndev->stats.tx_errors = errors;
4134 	return &ndev->stats;
4135 }
4136 
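/* Editor's note: minimal sketch (not driver code) of the aggregation
 * qlge_get_stats() performs above: per-ring counters are summed into
 * device-wide totals.  The example_ring struct is hypothetical.
 */
#if 0	/* never compiled with the driver */
#include <stdio.h>

struct example_ring { unsigned long rx_packets, rx_bytes; };

int main(void)
{
	struct example_ring rings[4] = {
		{ 10, 1000 }, { 20, 2000 }, { 30, 3000 }, { 40, 4000 }
	};
	unsigned long pkts = 0, bytes = 0;
	int i;

	for (i = 0; i < 4; i++) {
		pkts += rings[i].rx_packets;
		bytes += rings[i].rx_bytes;
	}
	printf("rx_packets=%lu rx_bytes=%lu\n", pkts, bytes);
	return 0;
}
#endif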
4137 static void qlge_set_multicast_list(struct net_device *ndev)
4138 {
4139 	struct ql_adapter *qdev = netdev_priv(ndev);
4140 	struct netdev_hw_addr *ha;
4141 	int i, status;
4142 
4143 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4144 	if (status)
4145 		return;
4146 	/*
4147 	 * Set or clear promiscuous mode if a
4148 	 * transition is taking place.
4149 	 */
4150 	if (ndev->flags & IFF_PROMISC) {
4151 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4152 			if (ql_set_routing_reg
4153 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4154 				netif_err(qdev, hw, qdev->ndev,
4155 					  "Failed to set promiscuous mode.\n");
4156 			} else {
4157 				set_bit(QL_PROMISCUOUS, &qdev->flags);
4158 			}
4159 		}
4160 	} else {
4161 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4162 			if (ql_set_routing_reg
4163 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4164 				netif_err(qdev, hw, qdev->ndev,
4165 					  "Failed to clear promiscuous mode.\n");
4166 			} else {
4167 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4168 			}
4169 		}
4170 	}
4171 
4172 	/*
4173 	 * Set or clear all multicast mode if a
4174 	 * transition is taking place.
4175 	 */
4176 	if ((ndev->flags & IFF_ALLMULTI) ||
4177 	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4178 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4179 			if (ql_set_routing_reg
4180 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4181 				netif_err(qdev, hw, qdev->ndev,
4182 					  "Failed to set all-multi mode.\n");
4183 			} else {
4184 				set_bit(QL_ALLMULTI, &qdev->flags);
4185 			}
4186 		}
4187 	} else {
4188 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4189 			if (ql_set_routing_reg
4190 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4191 				netif_err(qdev, hw, qdev->ndev,
4192 					  "Failed to clear all-multi mode.\n");
4193 			} else {
4194 				clear_bit(QL_ALLMULTI, &qdev->flags);
4195 			}
4196 		}
4197 	}
4198 
4199 	if (!netdev_mc_empty(ndev)) {
4200 		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4201 		if (status)
4202 			goto exit;
4203 		i = 0;
4204 		netdev_for_each_mc_addr(ha, ndev) {
4205 			if (ql_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4206 						MAC_ADDR_TYPE_MULTI_MAC, i)) {
4207 				netif_err(qdev, hw, qdev->ndev,
4208 					  "Failed to load multicast address.\n");
4209 				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4210 				goto exit;
4211 			}
4212 			i++;
4213 		}
4214 		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4215 		if (ql_set_routing_reg
4216 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4217 			netif_err(qdev, hw, qdev->ndev,
4218 				  "Failed to set multicast match mode.\n");
4219 		} else {
4220 			set_bit(QL_ALLMULTI, &qdev->flags);
4221 		}
4222 	}
4223 exit:
4224 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4225 }
4226 
4227 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4228 {
4229 	struct ql_adapter *qdev = netdev_priv(ndev);
4230 	struct sockaddr *addr = p;
4231 	int status;
4232 
4233 	if (!is_valid_ether_addr(addr->sa_data))
4234 		return -EADDRNOTAVAIL;
4235 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4236 	/* Update local copy of current mac address. */
4237 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4238 
4239 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4240 	if (status)
4241 		return status;
4242 	status = ql_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
4243 				     MAC_ADDR_TYPE_CAM_MAC,
4244 				     qdev->func * MAX_CQ);
4245 	if (status)
4246 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4247 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4248 	return status;
4249 }
4250 
4251 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4252 {
4253 	struct ql_adapter *qdev = netdev_priv(ndev);
4254 
4255 	ql_queue_asic_error(qdev);
4256 }
4257 
4258 static void ql_asic_reset_work(struct work_struct *work)
4259 {
4260 	struct ql_adapter *qdev =
4261 	    container_of(work, struct ql_adapter, asic_reset_work.work);
4262 	int status;
4263 
4264 	rtnl_lock();
4265 	status = ql_adapter_down(qdev);
4266 	if (status)
4267 		goto error;
4268 
4269 	status = ql_adapter_up(qdev);
4270 	if (status)
4271 		goto error;
4272 
4273 	/* Restore rx mode. */
4274 	clear_bit(QL_ALLMULTI, &qdev->flags);
4275 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4276 	qlge_set_multicast_list(qdev->ndev);
4277 
4278 	rtnl_unlock();
4279 	return;
4280 error:
4281 	netif_alert(qdev, ifup, qdev->ndev,
4282 		    "Driver up/down cycle failed, closing device\n");
4283 
4284 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4285 	dev_close(qdev->ndev);
4286 	rtnl_unlock();
4287 }
4288 
4289 static const struct nic_operations qla8012_nic_ops = {
4290 	.get_flash		= ql_get_8012_flash_params,
4291 	.port_initialize	= ql_8012_port_initialize,
4292 };
4293 
4294 static const struct nic_operations qla8000_nic_ops = {
4295 	.get_flash		= ql_get_8000_flash_params,
4296 	.port_initialize	= ql_8000_port_initialize,
4297 };
4298 
4299 /* Find the pcie function number for the other NIC
4300  * on this chip.  Since both NIC functions share a
4301  * common firmware we have the lowest enabled function
4302  * do any common work.  Examples would be resetting
4303  * after a fatal firmware error, or doing a firmware
4304  * coredump.
4305  */
4306 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4307 {
4308 	int status = 0;
4309 	u32 temp;
4310 	u32 nic_func1, nic_func2;
4311 
4312 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4313 				 &temp);
4314 	if (status)
4315 		return status;
4316 
4317 	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4318 			MPI_TEST_NIC_FUNC_MASK);
4319 	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4320 			MPI_TEST_NIC_FUNC_MASK);
4321 
4322 	if (qdev->func == nic_func1)
4323 		qdev->alt_func = nic_func2;
4324 	else if (qdev->func == nic_func2)
4325 		qdev->alt_func = nic_func1;
4326 	else
4327 		status = -EIO;
4328 
4329 	return status;
4330 }
4331 
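/* Editor's note: hypothetical sketch (not driver code) of the field
 * extraction in ql_get_alt_pcie_func() above.  The shift and mask values
 * here are made up purely for illustration; the real MPI_TEST_* constants
 * live in qlge.h.
 */
#if 0	/* never compiled with the driver */
#include <stdio.h>

#define EX_NIC1_FUNC_SHIFT	0	/* hypothetical */
#define EX_NIC2_FUNC_SHIFT	4	/* hypothetical */
#define EX_NIC_FUNC_MASK	0x3	/* hypothetical */

int main(void)
{
	unsigned int temp = 0x20;	/* pretend register value */
	unsigned int nic_func1 = (temp >> EX_NIC1_FUNC_SHIFT) & EX_NIC_FUNC_MASK;
	unsigned int nic_func2 = (temp >> EX_NIC2_FUNC_SHIFT) & EX_NIC_FUNC_MASK;
	unsigned int func = 0;		/* this function's id */
	unsigned int alt_func;

	/* whichever of the two NIC functions we are not is the alternate */
	alt_func = (func == nic_func1) ? nic_func2 : nic_func1;
	printf("func=%u alt_func=%u\n", func, alt_func);
	return 0;
}
#endif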
4332 static int ql_get_board_info(struct ql_adapter *qdev)
4333 {
4334 	int status;
4335 
4336 	qdev->func =
4337 	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4338 	if (qdev->func > 3)
4339 		return -EIO;
4340 
4341 	status = ql_get_alt_pcie_func(qdev);
4342 	if (status)
4343 		return status;
4344 
4345 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4346 	if (qdev->port) {
4347 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4348 		qdev->port_link_up = STS_PL1;
4349 		qdev->port_init = STS_PI1;
4350 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4351 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4352 	} else {
4353 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4354 		qdev->port_link_up = STS_PL0;
4355 		qdev->port_init = STS_PI0;
4356 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4357 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4358 	}
4359 	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4360 	qdev->device_id = qdev->pdev->device;
4361 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4362 		qdev->nic_ops = &qla8012_nic_ops;
4363 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4364 		qdev->nic_ops = &qla8000_nic_ops;
4365 	return status;
4366 }
4367 
4368 static void ql_release_all(struct pci_dev *pdev)
4369 {
4370 	struct net_device *ndev = pci_get_drvdata(pdev);
4371 	struct ql_adapter *qdev = netdev_priv(ndev);
4372 
4373 	if (qdev->workqueue) {
4374 		destroy_workqueue(qdev->workqueue);
4375 		qdev->workqueue = NULL;
4376 	}
4377 
4378 	if (qdev->reg_base)
4379 		iounmap(qdev->reg_base);
4380 	if (qdev->doorbell_area)
4381 		iounmap(qdev->doorbell_area);
4382 	vfree(qdev->mpi_coredump);
4383 	pci_release_regions(pdev);
4384 }
4385 
4386 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4387 			  int cards_found)
4388 {
4389 	struct ql_adapter *qdev = netdev_priv(ndev);
4390 	int err = 0;
4391 
4392 	memset((void *)qdev, 0, sizeof(*qdev));
4393 	err = pci_enable_device(pdev);
4394 	if (err) {
4395 		dev_err(&pdev->dev, "PCI device enable failed.\n");
4396 		return err;
4397 	}
4398 
4399 	qdev->ndev = ndev;
4400 	qdev->pdev = pdev;
4401 	pci_set_drvdata(pdev, ndev);
4402 
4403 	/* Set PCIe read request size */
4404 	err = pcie_set_readrq(pdev, 4096);
4405 	if (err) {
4406 		dev_err(&pdev->dev, "Set readrq failed.\n");
4407 		goto err_out1;
4408 	}
4409 
4410 	err = pci_request_regions(pdev, DRV_NAME);
4411 	if (err) {
4412 		dev_err(&pdev->dev, "PCI region request failed.\n");
4413 		return err;
4414 	}
4415 
4416 	pci_set_master(pdev);
4417 	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4418 		set_bit(QL_DMA64, &qdev->flags);
4419 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4420 	} else {
4421 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4422 		if (!err)
4423 			err = dma_set_coherent_mask(&pdev->dev,
4424 						    DMA_BIT_MASK(32));
4425 	}
4426 
4427 	if (err) {
4428 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4429 		goto err_out2;
4430 	}
4431 
4432 	/* Set PCIe reset type for EEH to fundamental. */
4433 	pdev->needs_freset = 1;
4434 	pci_save_state(pdev);
4435 	qdev->reg_base =
4436 		ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
4437 	if (!qdev->reg_base) {
4438 		dev_err(&pdev->dev, "Register mapping failed.\n");
4439 		err = -ENOMEM;
4440 		goto err_out2;
4441 	}
4442 
4443 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4444 	qdev->doorbell_area =
4445 		ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
4446 	if (!qdev->doorbell_area) {
4447 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4448 		err = -ENOMEM;
4449 		goto err_out2;
4450 	}
4451 
4452 	err = ql_get_board_info(qdev);
4453 	if (err) {
4454 		dev_err(&pdev->dev, "Register access failed.\n");
4455 		err = -EIO;
4456 		goto err_out2;
4457 	}
4458 	qdev->msg_enable = netif_msg_init(debug, default_msg);
4459 	spin_lock_init(&qdev->stats_lock);
4460 
4461 	if (qlge_mpi_coredump) {
4462 		qdev->mpi_coredump =
4463 			vmalloc(sizeof(struct ql_mpi_coredump));
4464 		if (!qdev->mpi_coredump) {
4465 			err = -ENOMEM;
4466 			goto err_out2;
4467 		}
4468 		if (qlge_force_coredump)
4469 			set_bit(QL_FRC_COREDUMP, &qdev->flags);
4470 	}
4471 	/* make sure the EEPROM is good */
4472 	err = qdev->nic_ops->get_flash(qdev);
4473 	if (err) {
4474 		dev_err(&pdev->dev, "Invalid FLASH.\n");
4475 		goto err_out2;
4476 	}
4477 
4478 	/* Keep local copy of current mac address. */
4479 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4480 
4481 	/* Set up the default ring sizes. */
4482 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4483 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4484 
4485 	/* Set up the coalescing parameters. */
4486 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4487 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4488 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4489 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4490 
4491 	/*
4492 	 * Set up the operating parameters.
4493 	 */
4494 	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4495 						  ndev->name);
4496 	if (!qdev->workqueue) {
4497 		err = -ENOMEM;
4498 		goto err_out2;
4499 	}
4500 
4501 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4502 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4503 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4504 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4505 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4506 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4507 	init_completion(&qdev->ide_completion);
4508 	mutex_init(&qdev->mpi_mutex);
4509 
4510 	if (!cards_found) {
4511 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4512 		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4513 			 DRV_NAME, DRV_VERSION);
4514 	}
4515 	return 0;
4516 err_out2:
4517 	ql_release_all(pdev);
4518 err_out1:
4519 	pci_disable_device(pdev);
4520 	return err;
4521 }
4522 
4523 static const struct net_device_ops qlge_netdev_ops = {
4524 	.ndo_open		= qlge_open,
4525 	.ndo_stop		= qlge_close,
4526 	.ndo_start_xmit		= qlge_send,
4527 	.ndo_change_mtu		= qlge_change_mtu,
4528 	.ndo_get_stats		= qlge_get_stats,
4529 	.ndo_set_rx_mode	= qlge_set_multicast_list,
4530 	.ndo_set_mac_address	= qlge_set_mac_address,
4531 	.ndo_validate_addr	= eth_validate_addr,
4532 	.ndo_tx_timeout		= qlge_tx_timeout,
4533 	.ndo_set_features	= qlge_set_features,
4534 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4535 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
4536 };
4537 
4538 static void ql_timer(struct timer_list *t)
4539 {
4540 	struct ql_adapter *qdev = from_timer(qdev, t, timer);
4541 	u32 var = 0;
4542 
4543 	var = ql_read32(qdev, STS);
4544 	if (pci_channel_offline(qdev->pdev)) {
4545 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4546 		return;
4547 	}
4548 
4549 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4550 }
4551 
4552 static int qlge_probe(struct pci_dev *pdev,
4553 		      const struct pci_device_id *pci_entry)
4554 {
4555 	struct net_device *ndev = NULL;
4556 	struct ql_adapter *qdev = NULL;
4557 	static int cards_found;
4558 	int err = 0;
4559 
4560 	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4561 				 min(MAX_CPUS,
4562 				     netif_get_num_default_rss_queues()));
4563 	if (!ndev)
4564 		return -ENOMEM;
4565 
4566 	err = ql_init_device(pdev, ndev, cards_found);
4567 	if (err < 0) {
4568 		free_netdev(ndev);
4569 		return err;
4570 	}
4571 
4572 	qdev = netdev_priv(ndev);
4573 	SET_NETDEV_DEV(ndev, &pdev->dev);
4574 	ndev->hw_features = NETIF_F_SG |
4575 			    NETIF_F_IP_CSUM |
4576 			    NETIF_F_TSO |
4577 			    NETIF_F_TSO_ECN |
4578 			    NETIF_F_HW_VLAN_CTAG_TX |
4579 			    NETIF_F_HW_VLAN_CTAG_RX |
4580 			    NETIF_F_HW_VLAN_CTAG_FILTER |
4581 			    NETIF_F_RXCSUM;
4582 	ndev->features = ndev->hw_features;
4583 	ndev->vlan_features = ndev->hw_features;
4584 	/* vlan gets same features (minus the vlan offloads themselves) */
4585 	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4586 				 NETIF_F_HW_VLAN_CTAG_TX |
4587 				 NETIF_F_HW_VLAN_CTAG_RX);
4588 
4589 	if (test_bit(QL_DMA64, &qdev->flags))
4590 		ndev->features |= NETIF_F_HIGHDMA;
4591 
4592 	/*
4593 	 * Set up net_device structure.
4594 	 */
4595 	ndev->tx_queue_len = qdev->tx_ring_size;
4596 	ndev->irq = pdev->irq;
4597 
4598 	ndev->netdev_ops = &qlge_netdev_ops;
4599 	ndev->ethtool_ops = &qlge_ethtool_ops;
4600 	ndev->watchdog_timeo = 10 * HZ;
4601 
4602 	/* MTU range: this driver only supports 1500 or 9000, so this only
4603 	 * filters out values above or below, and we'll rely on
4604 	 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4605 	 */
4606 	ndev->min_mtu = ETH_DATA_LEN;
4607 	ndev->max_mtu = 9000;
4608 
4609 	err = register_netdev(ndev);
4610 	if (err) {
4611 		dev_err(&pdev->dev, "net device registration failed.\n");
4612 		ql_release_all(pdev);
4613 		pci_disable_device(pdev);
4614 		free_netdev(ndev);
4615 		return err;
4616 	}
4617 	/* Start up the timer to trigger EEH if
4618 	 * the bus goes dead
4619 	 */
4620 	timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
4621 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4622 	ql_link_off(qdev);
4623 	ql_display_dev_info(ndev);
4624 	atomic_set(&qdev->lb_count, 0);
4625 	cards_found++;
4626 	return 0;
4627 }
4628 
4629 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4630 {
4631 	return qlge_send(skb, ndev);
4632 }
4633 
4634 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4635 {
4636 	return ql_clean_inbound_rx_ring(rx_ring, budget);
4637 }
4638 
4639 static void qlge_remove(struct pci_dev *pdev)
4640 {
4641 	struct net_device *ndev = pci_get_drvdata(pdev);
4642 	struct ql_adapter *qdev = netdev_priv(ndev);
4643 
4644 	del_timer_sync(&qdev->timer);
4645 	ql_cancel_all_work_sync(qdev);
4646 	unregister_netdev(ndev);
4647 	ql_release_all(pdev);
4648 	pci_disable_device(pdev);
4649 	free_netdev(ndev);
4650 }
4651 
4652 /* Clean up resources without touching hardware. */
4653 static void ql_eeh_close(struct net_device *ndev)
4654 {
4655 	int i;
4656 	struct ql_adapter *qdev = netdev_priv(ndev);
4657 
4658 	if (netif_carrier_ok(ndev)) {
4659 		netif_carrier_off(ndev);
4660 		netif_stop_queue(ndev);
4661 	}
4662 
4663 	/* Cancel all outstanding delayed work. */
4664 	ql_cancel_all_work_sync(qdev);
4665 
4666 	for (i = 0; i < qdev->rss_ring_count; i++)
4667 		netif_napi_del(&qdev->rx_ring[i].napi);
4668 
4669 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4670 	ql_tx_ring_clean(qdev);
4671 	ql_free_rx_buffers(qdev);
4672 	ql_release_adapter_resources(qdev);
4673 }
4674 
4675 /*
4676  * This callback is called by the PCI subsystem whenever
4677  * a PCI bus error is detected.
4678  */
4679 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4680 					       pci_channel_state_t state)
4681 {
4682 	struct net_device *ndev = pci_get_drvdata(pdev);
4683 	struct ql_adapter *qdev = netdev_priv(ndev);
4684 
4685 	switch (state) {
4686 	case pci_channel_io_normal:
4687 		return PCI_ERS_RESULT_CAN_RECOVER;
4688 	case pci_channel_io_frozen:
4689 		netif_device_detach(ndev);
4690 		del_timer_sync(&qdev->timer);
4691 		if (netif_running(ndev))
4692 			ql_eeh_close(ndev);
4693 		pci_disable_device(pdev);
4694 		return PCI_ERS_RESULT_NEED_RESET;
4695 	case pci_channel_io_perm_failure:
4696 		dev_err(&pdev->dev,
4697 			"%s: pci_channel_io_perm_failure.\n", __func__);
4698 		del_timer_sync(&qdev->timer);
4699 		ql_eeh_close(ndev);
4700 		set_bit(QL_EEH_FATAL, &qdev->flags);
4701 		return PCI_ERS_RESULT_DISCONNECT;
4702 	}
4703 
4704 	/* Request a slot reset. */
4705 	return PCI_ERS_RESULT_NEED_RESET;
4706 }
4707 
4708 /*
4709  * This callback is called after the PCI bus has been reset.
4710  * Basically, this tries to restart the card from scratch.
4711  * This is a shortened version of the device probe/discovery code;
4712  * it resembles the first half of the () routine.
4713  */
4714 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4715 {
4716 	struct net_device *ndev = pci_get_drvdata(pdev);
4717 	struct ql_adapter *qdev = netdev_priv(ndev);
4718 
4719 	pdev->error_state = pci_channel_io_normal;
4720 
4721 	pci_restore_state(pdev);
4722 	if (pci_enable_device(pdev)) {
4723 		netif_err(qdev, ifup, qdev->ndev,
4724 			  "Cannot re-enable PCI device after reset.\n");
4725 		return PCI_ERS_RESULT_DISCONNECT;
4726 	}
4727 	pci_set_master(pdev);
4728 
4729 	if (ql_adapter_reset(qdev)) {
4730 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4731 		set_bit(QL_EEH_FATAL, &qdev->flags);
4732 		return PCI_ERS_RESULT_DISCONNECT;
4733 	}
4734 
4735 	return PCI_ERS_RESULT_RECOVERED;
4736 }
4737 
4738 static void qlge_io_resume(struct pci_dev *pdev)
4739 {
4740 	struct net_device *ndev = pci_get_drvdata(pdev);
4741 	struct ql_adapter *qdev = netdev_priv(ndev);
4742 	int err = 0;
4743 
4744 	if (netif_running(ndev)) {
4745 		err = qlge_open(ndev);
4746 		if (err) {
4747 			netif_err(qdev, ifup, qdev->ndev,
4748 				  "Device initialization failed after reset.\n");
4749 			return;
4750 		}
4751 	} else {
4752 		netif_err(qdev, ifup, qdev->ndev,
4753 			  "Device was not running prior to EEH.\n");
4754 	}
4755 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4756 	netif_device_attach(ndev);
4757 }
4758 
4759 static const struct pci_error_handlers qlge_err_handler = {
4760 	.error_detected = qlge_io_error_detected,
4761 	.slot_reset = qlge_io_slot_reset,
4762 	.resume = qlge_io_resume,
4763 };
4764 
4765 static int __maybe_unused qlge_suspend(struct device *dev_d)
4766 {
4767 	struct net_device *ndev = dev_get_drvdata(dev_d);
4768 	struct ql_adapter *qdev = netdev_priv(ndev);
4769 	int err;
4770 
4771 	netif_device_detach(ndev);
4772 	del_timer_sync(&qdev->timer);
4773 
4774 	if (netif_running(ndev)) {
4775 		err = ql_adapter_down(qdev);
4776 		if (err)
4777 			return err;
4778 	}
4779 
4780 	ql_wol(qdev);
4781 
4782 	return 0;
4783 }
4784 
4785 static int __maybe_unused qlge_resume(struct device *dev_d)
4786 {
4787 	struct net_device *ndev = dev_get_drvdata(dev_d);
4788 	struct ql_adapter *qdev = netdev_priv(ndev);
4789 	int err;
4790 
4791 	pci_set_master(to_pci_dev(dev_d));
4792 
4793 	device_wakeup_disable(dev_d);
4794 
4795 	if (netif_running(ndev)) {
4796 		err = ql_adapter_up(qdev);
4797 		if (err)
4798 			return err;
4799 	}
4800 
4801 	mod_timer(&qdev->timer, jiffies + (5 * HZ));
4802 	netif_device_attach(ndev);
4803 
4804 	return 0;
4805 }
4806 
4807 static void qlge_shutdown(struct pci_dev *pdev)
4808 {
4809 	qlge_suspend(&pdev->dev);
4810 }
4811 
4812 static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
4813 
4814 static struct pci_driver qlge_driver = {
4815 	.name = DRV_NAME,
4816 	.id_table = qlge_pci_tbl,
4817 	.probe = qlge_probe,
4818 	.remove = qlge_remove,
4819 	.driver.pm = &qlge_pm_ops,
4820 	.shutdown = qlge_shutdown,
4821 	.err_handler = &qlge_err_handler
4822 };
4823 
4824 module_pci_driver(qlge_driver);
4825