1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic qlge NIC HBA Driver
4 * Copyright (c) 2003-2008 QLogic Corporation
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43
44 #include "qlge.h"
45 #include "qlge_devlink.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 NETIF_MSG_IFDOWN |
58 NETIF_MSG_IFUP |
59 NETIF_MSG_RX_ERR |
60 NETIF_MSG_TX_ERR |
61 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
62
63 static int debug = -1; /* defaults above */
64 module_param(debug, int, 0664);
65 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
66
67 #define MSIX_IRQ 0
68 #define MSI_IRQ 1
69 #define LEG_IRQ 2
70 static int qlge_irq_type = MSIX_IRQ;
71 module_param(qlge_irq_type, int, 0664);
72 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
73
74 static int qlge_mpi_coredump;
75 module_param(qlge_mpi_coredump, int, 0);
76 MODULE_PARM_DESC(qlge_mpi_coredump,
77 "Option to enable MPI firmware dump. Default is OFF - Do Not allocate memory. ");
78
79 static int qlge_force_coredump;
80 module_param(qlge_force_coredump, int, 0);
81 MODULE_PARM_DESC(qlge_force_coredump,
82 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");
83
84 static const struct pci_device_id qlge_pci_tbl[] = {
85 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
86 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
87 /* required last entry */
88 {0,}
89 };
90
91 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
92
93 static int qlge_wol(struct qlge_adapter *);
94 static void qlge_set_multicast_list(struct net_device *);
95 static int qlge_adapter_down(struct qlge_adapter *);
96 static int qlge_adapter_up(struct qlge_adapter *);
97
98 /* This hardware semaphore provides exclusive access to
99 * resources shared between the NIC driver, MPI firmware,
100 * FCoE firmware and the FC driver.
101 */
102 static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask)
103 {
104 u32 sem_bits = 0;
105
106 switch (sem_mask) {
107 case SEM_XGMAC0_MASK:
108 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
109 break;
110 case SEM_XGMAC1_MASK:
111 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
112 break;
113 case SEM_ICB_MASK:
114 sem_bits = SEM_SET << SEM_ICB_SHIFT;
115 break;
116 case SEM_MAC_ADDR_MASK:
117 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
118 break;
119 case SEM_FLASH_MASK:
120 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
121 break;
122 case SEM_PROBE_MASK:
123 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
124 break;
125 case SEM_RT_IDX_MASK:
126 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
127 break;
128 case SEM_PROC_REG_MASK:
129 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
130 break;
131 default:
132 netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
133 return -EINVAL;
134 }
135
136 qlge_write32(qdev, SEM, sem_bits | sem_mask);
137 return !(qlge_read32(qdev, SEM) & sem_bits);
138 }
139
140 int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask)
141 {
142 unsigned int wait_count = 30;
143
144 do {
145 if (!qlge_sem_trylock(qdev, sem_mask))
146 return 0;
147 udelay(100);
148 } while (--wait_count);
149 return -ETIMEDOUT;
150 }
151
152 void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask)
153 {
154 qlge_write32(qdev, SEM, sem_mask);
155 qlge_read32(qdev, SEM); /* flush */
156 }
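/* Typical usage of the semaphore helpers above, as seen later in this
 * file (e.g. qlge_set_mac_addr() and the flash param readers):
 *
 *	status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *	if (status)
 *		return status;	(fails with -ETIMEDOUT after ~30 tries)
 *	... access the shared resource ...
 *	qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */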
157
158 /* This function waits for a specific bit to come ready
159 * in a given register. It is used mostly during the initialization
160 * process, but is also called from netdev callbacks such as
161 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
162 */
163 int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
164 {
165 u32 temp;
166 int count;
167
168 for (count = 0; count < UDELAY_COUNT; count++) {
169 temp = qlge_read32(qdev, reg);
170
171 /* check for errors */
172 if (temp & err_bit) {
173 netif_alert(qdev, probe, qdev->ndev,
174 "register 0x%.08x access error, value = 0x%.08x!.\n",
175 reg, temp);
176 return -EIO;
177 } else if (temp & bit) {
178 return 0;
179 }
180 udelay(UDELAY_DELAY);
181 }
182 netif_alert(qdev, probe, qdev->ndev,
183 "Timed out waiting for reg %x to come ready.\n", reg);
184 return -ETIMEDOUT;
185 }
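/* Note: the total poll time in qlge_wait_reg_rdy() is UDELAY_COUNT
 * iterations of udelay(UDELAY_DELAY); both constants are defined in
 * the driver headers.
 */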
186
187 /* The CFG register is used to download TX and RX control blocks
188 * to the chip. This function waits for an operation to complete.
189 */
190 static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit)
191 {
192 int count;
193 u32 temp;
194
195 for (count = 0; count < UDELAY_COUNT; count++) {
196 temp = qlge_read32(qdev, CFG);
197 if (temp & CFG_LE)
198 return -EIO;
199 if (!(temp & bit))
200 return 0;
201 udelay(UDELAY_DELAY);
202 }
203 return -ETIMEDOUT;
204 }
205
206 /* Used to issue init control blocks to hw. Maps control block,
207 * sets address, triggers download, waits for completion.
208 */
209 int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit,
210 u16 q_id)
211 {
212 u64 map;
213 int status = 0;
214 int direction;
215 u32 mask;
216 u32 value;
217
218 if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
219 direction = DMA_TO_DEVICE;
220 else
221 direction = DMA_FROM_DEVICE;
222
223 map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
224 if (dma_mapping_error(&qdev->pdev->dev, map)) {
225 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
226 return -ENOMEM;
227 }
228
229 status = qlge_sem_spinlock(qdev, SEM_ICB_MASK);
230 if (status)
231 goto lock_failed;
232
233 status = qlge_wait_cfg(qdev, bit);
234 if (status) {
235 netif_err(qdev, ifup, qdev->ndev,
236 "Timed out waiting for CFG to come ready.\n");
237 goto exit;
238 }
239
240 qlge_write32(qdev, ICB_L, (u32)map);
241 qlge_write32(qdev, ICB_H, (u32)(map >> 32));
242
243 mask = CFG_Q_MASK | (bit << 16);
244 value = bit | (q_id << CFG_Q_SHIFT);
245 qlge_write32(qdev, CFG, (mask | value));
246
247 /*
248 * Wait for the bit to clear after signaling hw.
249 */
250 status = qlge_wait_cfg(qdev, bit);
251 exit:
252 qlge_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
253 lock_failed:
254 dma_unmap_single(&qdev->pdev->dev, map, size, direction);
255 return status;
256 }
257
258 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
259 int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
260 u32 *value)
261 {
262 u32 offset = 0;
263 int status;
264
265 switch (type) {
266 case MAC_ADDR_TYPE_MULTI_MAC:
267 case MAC_ADDR_TYPE_CAM_MAC: {
268 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
269 if (status)
270 break;
271 qlge_write32(qdev, MAC_ADDR_IDX,
272 (offset++) | /* offset */
273 (index << MAC_ADDR_IDX_SHIFT) | /* index */
274 MAC_ADDR_ADR | MAC_ADDR_RS |
275 type); /* type */
276 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
277 if (status)
278 break;
279 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
280 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
281 if (status)
282 break;
283 qlge_write32(qdev, MAC_ADDR_IDX,
284 (offset++) | /* offset */
285 (index << MAC_ADDR_IDX_SHIFT) | /* index */
286 MAC_ADDR_ADR | MAC_ADDR_RS |
287 type); /* type */
288 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
289 if (status)
290 break;
291 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
292 if (type == MAC_ADDR_TYPE_CAM_MAC) {
293 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
294 MAC_ADDR_MW, 0);
295 if (status)
296 break;
297 qlge_write32(qdev, MAC_ADDR_IDX,
298 (offset++) | /* offset */
299 (index
300 << MAC_ADDR_IDX_SHIFT) | /* index */
301 MAC_ADDR_ADR |
302 MAC_ADDR_RS | type); /* type */
303 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX,
304 MAC_ADDR_MR, 0);
305 if (status)
306 break;
307 *value++ = qlge_read32(qdev, MAC_ADDR_DATA);
308 }
309 break;
310 }
311 case MAC_ADDR_TYPE_VLAN:
312 case MAC_ADDR_TYPE_MULTI_FLTR:
313 default:
314 netif_crit(qdev, ifup, qdev->ndev,
315 "Address type %d not yet supported.\n", type);
316 status = -EPERM;
317 }
318 return status;
319 }
320
321 /* Set up a MAC, multicast or VLAN address for the
322 * inbound frame matching.
323 */
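/* All of the entry types below are programmed through the same
 * indirect register pair: wait for MAC_ADDR_IDX to report MAC_ADDR_MW,
 * write MAC_ADDR_IDX with the word offset, entry index and type, then
 * write the 32-bit payload to MAC_ADDR_DATA. Multi-word entries repeat
 * the sequence with an incremented offset.
 */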
324 static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, u8 *addr, u32 type,
325 u16 index)
326 {
327 u32 offset = 0;
328 int status = 0;
329
330 switch (type) {
331 case MAC_ADDR_TYPE_MULTI_MAC: {
332 u32 upper = (addr[0] << 8) | addr[1];
333 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
334 (addr[5]);
335
336 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
337 if (status)
338 break;
339 qlge_write32(qdev, MAC_ADDR_IDX,
340 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
341 MAC_ADDR_E);
342 qlge_write32(qdev, MAC_ADDR_DATA, lower);
343 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
344 if (status)
345 break;
346 qlge_write32(qdev, MAC_ADDR_IDX,
347 (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
348 MAC_ADDR_E);
349
350 qlge_write32(qdev, MAC_ADDR_DATA, upper);
351 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
352 break;
353 }
354 case MAC_ADDR_TYPE_CAM_MAC: {
355 u32 cam_output;
356 u32 upper = (addr[0] << 8) | addr[1];
357 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
358 (addr[5]);
359 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
360 if (status)
361 break;
362 qlge_write32(qdev, MAC_ADDR_IDX,
363 (offset++) | /* offset */
364 (index << MAC_ADDR_IDX_SHIFT) | /* index */
365 type); /* type */
366 qlge_write32(qdev, MAC_ADDR_DATA, lower);
367 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 if (status)
369 break;
370 qlge_write32(qdev, MAC_ADDR_IDX,
371 (offset++) | /* offset */
372 (index << MAC_ADDR_IDX_SHIFT) | /* index */
373 type); /* type */
374 qlge_write32(qdev, MAC_ADDR_DATA, upper);
375 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
376 if (status)
377 break;
378 qlge_write32(qdev, MAC_ADDR_IDX,
379 (offset) | /* offset */
380 (index << MAC_ADDR_IDX_SHIFT) | /* index */
381 type); /* type */
382 /* This field should also include the queue id
383 * and possibly the function id. Right now we hardcode
384 * the route field to NIC core.
385 */
386 cam_output = (CAM_OUT_ROUTE_NIC |
387 (qdev->func << CAM_OUT_FUNC_SHIFT) |
388 (0 << CAM_OUT_CQ_ID_SHIFT));
389 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
390 cam_output |= CAM_OUT_RV;
391 /* route to NIC core */
392 qlge_write32(qdev, MAC_ADDR_DATA, cam_output);
393 break;
394 }
395 case MAC_ADDR_TYPE_VLAN: {
396 u32 enable_bit = *((u32 *)&addr[0]);
397 /* For VLAN, the addr actually holds a bit that
398 * either enables or disables the vlan id we are
399 * addressing: MAC_ADDR_E (bit 27) is either set
400 * or cleared.
401 */
402 status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
403 if (status)
404 break;
405 qlge_write32(qdev, MAC_ADDR_IDX,
406 offset | /* offset */
407 (index << MAC_ADDR_IDX_SHIFT) | /* index */
408 type | /* type */
409 enable_bit); /* enable/disable */
410 break;
411 }
412 case MAC_ADDR_TYPE_MULTI_FLTR:
413 default:
414 netif_crit(qdev, ifup, qdev->ndev,
415 "Address type %d not yet supported.\n", type);
416 status = -EPERM;
417 }
418 return status;
419 }
420
421 /* Set or clear MAC address in hardware. We sometimes
422 * have to clear it to prevent wrong frame routing
423 * especially in a bonding environment.
424 */
425 static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
426 {
427 int status;
428 char zero_mac_addr[ETH_ALEN];
429 char *addr;
430
431 if (set) {
432 addr = &qdev->current_mac_addr[0];
433 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
434 "Set Mac addr %pM\n", addr);
435 } else {
436 eth_zero_addr(zero_mac_addr);
437 addr = &zero_mac_addr[0];
438 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
439 "Clearing MAC address\n");
440 }
441 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
442 if (status)
443 return status;
444 status = qlge_set_mac_addr_reg(qdev, (u8 *)addr,
445 MAC_ADDR_TYPE_CAM_MAC,
446 qdev->func * MAX_CQ);
447 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
448 if (status)
449 netif_err(qdev, ifup, qdev->ndev,
450 "Failed to init mac address.\n");
451 return status;
452 }
453
454 void qlge_link_on(struct qlge_adapter *qdev)
455 {
456 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
457 netif_carrier_on(qdev->ndev);
458 qlge_set_mac_addr(qdev, 1);
459 }
460
461 void qlge_link_off(struct qlge_adapter *qdev)
462 {
463 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
464 netif_carrier_off(qdev->ndev);
465 qlge_set_mac_addr(qdev, 0);
466 }
467
468 /* Get a specific frame routing value from the CAM.
469 * Used for debug and reg dump.
470 */
471 int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value)
472 {
473 int status = 0;
474
475 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
476 if (status)
477 goto exit;
478
479 qlge_write32(qdev, RT_IDX,
480 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
481 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
482 if (status)
483 goto exit;
484 *value = qlge_read32(qdev, RT_DATA);
485 exit:
486 return status;
487 }
488
489 /* The NIC function for this chip has 16 routing indexes. Each one can be used
490 * to route different frame types to various inbound queues. We send broadcast/
491 * multicast/error frames to the default queue for slow handling,
492 * and CAM hit/RSS frames to the fast handling queues.
493 */
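/* Slot assignments programmed by qlge_set_routing_reg() below:
 * CAM hits are steered to the CAM-selected queue (RT_IDX_DST_CAM_Q),
 * RSS matches to the RSS-selected queue (RT_IDX_DST_RSS), and all
 * other slots (promiscuous, error, broadcast, multicast) to the
 * default queue (RT_IDX_DST_DFLT_Q).
 */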
494 static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask,
495 int enable)
496 {
497 int status = -EINVAL; /* Return error if no mask match. */
498 u32 value = 0;
499
500 switch (mask) {
501 case RT_IDX_CAM_HIT:
502 {
503 value = RT_IDX_DST_CAM_Q | /* dest */
504 RT_IDX_TYPE_NICQ | /* type */
505 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
506 break;
507 }
508 case RT_IDX_VALID: /* Promiscuous Mode frames. */
509 {
510 value = RT_IDX_DST_DFLT_Q | /* dest */
511 RT_IDX_TYPE_NICQ | /* type */
512 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
513 break;
514 }
515 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
516 {
517 value = RT_IDX_DST_DFLT_Q | /* dest */
518 RT_IDX_TYPE_NICQ | /* type */
519 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
520 break;
521 }
522 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
523 {
524 value = RT_IDX_DST_DFLT_Q | /* dest */
525 RT_IDX_TYPE_NICQ | /* type */
526 (RT_IDX_IP_CSUM_ERR_SLOT <<
527 RT_IDX_IDX_SHIFT); /* index */
528 break;
529 }
530 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
531 {
532 value = RT_IDX_DST_DFLT_Q | /* dest */
533 RT_IDX_TYPE_NICQ | /* type */
534 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
535 RT_IDX_IDX_SHIFT); /* index */
536 break;
537 }
538 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
539 {
540 value = RT_IDX_DST_DFLT_Q | /* dest */
541 RT_IDX_TYPE_NICQ | /* type */
542 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
543 break;
544 }
545 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
546 {
547 value = RT_IDX_DST_DFLT_Q | /* dest */
548 RT_IDX_TYPE_NICQ | /* type */
549 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
550 break;
551 }
552 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
553 {
554 value = RT_IDX_DST_DFLT_Q | /* dest */
555 RT_IDX_TYPE_NICQ | /* type */
556 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
557 break;
558 }
559 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
560 {
561 value = RT_IDX_DST_RSS | /* dest */
562 RT_IDX_TYPE_NICQ | /* type */
563 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
564 break;
565 }
566 case 0: /* Clear the E-bit on an entry. */
567 {
568 value = RT_IDX_DST_DFLT_Q | /* dest */
569 RT_IDX_TYPE_NICQ | /* type */
570 (index << RT_IDX_IDX_SHIFT);/* index */
571 break;
572 }
573 default:
574 netif_err(qdev, ifup, qdev->ndev,
575 "Mask type %d not yet supported.\n", mask);
576 status = -EPERM;
577 goto exit;
578 }
579
580 if (value) {
581 status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
582 if (status)
583 goto exit;
584 value |= (enable ? RT_IDX_E : 0);
585 qlge_write32(qdev, RT_IDX, value);
586 qlge_write32(qdev, RT_DATA, enable ? mask : 0);
587 }
588 exit:
589 return status;
590 }
591
592 static void qlge_enable_interrupts(struct qlge_adapter *qdev)
593 {
594 qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
595 }
596
597 static void qlge_disable_interrupts(struct qlge_adapter *qdev)
598 {
599 qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
600 }
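/* Note: the INTR_EN writes above appear to use the driver-wide
 * "(mask << 16) | value" convention (see also the STS write in
 * qlge_8012_port_initialize()): the upper halfword selects which bits
 * are affected, so enable writes (INTR_EN_EI << 16) | INTR_EN_EI while
 * disable writes only the mask half with the value bit left clear.
 */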
601
602 static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
603 {
604 struct intr_context *ctx = &qdev->intr_context[intr];
605
606 qlge_write32(qdev, INTR_EN, ctx->intr_en_mask);
607 }
608
609 static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr)
610 {
611 struct intr_context *ctx = &qdev->intr_context[intr];
612
613 qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask);
614 }
615
616 static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev)
617 {
618 int i;
619
620 for (i = 0; i < qdev->intr_count; i++)
621 qlge_enable_completion_interrupt(qdev, i);
622 }
623
624 static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str)
625 {
626 int status, i;
627 u16 csum = 0;
628 __le16 *flash = (__le16 *)&qdev->flash;
629
630 status = strncmp((char *)&qdev->flash, str, 4);
631 if (status) {
632 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
633 return status;
634 }
635
636 for (i = 0; i < size; i++)
637 csum += le16_to_cpu(*flash++);
638
639 if (csum)
640 netif_err(qdev, ifup, qdev->ndev,
641 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
642
643 return csum;
644 }
645
646 static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data)
647 {
648 int status = 0;
649 /* wait for reg to come ready */
650 status = qlge_wait_reg_rdy(qdev,
651 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
652 if (status)
653 goto exit;
654 /* set up for reg read */
655 qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
656 /* wait for reg to come ready */
657 status = qlge_wait_reg_rdy(qdev,
658 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
659 if (status)
660 goto exit;
661 /* This data is stored on flash as an array of
662 * __le32. Since qlge_read32() returns cpu endian
663 * we need to swap it back.
664 */
665 *data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA));
666 exit:
667 return status;
668 }
669
670 static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
671 {
672 u32 i, size;
673 int status;
674 __le32 *p = (__le32 *)&qdev->flash;
675 u32 offset;
676 u8 mac_addr[6];
677
678 /* Get flash offset for function and adjust
679 * for dword access.
680 */
681 if (!qdev->port)
682 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
683 else
684 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
685
686 if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
687 return -ETIMEDOUT;
688
689 size = sizeof(struct flash_params_8000) / sizeof(u32);
690 for (i = 0; i < size; i++, p++) {
691 status = qlge_read_flash_word(qdev, i + offset, p);
692 if (status) {
693 netif_err(qdev, ifup, qdev->ndev,
694 "Error reading flash.\n");
695 goto exit;
696 }
697 }
698
699 status = qlge_validate_flash(qdev,
700 sizeof(struct flash_params_8000) /
701 sizeof(u16),
702 "8000");
703 if (status) {
704 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
705 status = -EINVAL;
706 goto exit;
707 }
708
709 /* Extract either manufacturer or BOFM modified
710 * MAC address.
711 */
712 if (qdev->flash.flash_params_8000.data_type1 == 2)
713 memcpy(mac_addr,
714 qdev->flash.flash_params_8000.mac_addr1,
715 qdev->ndev->addr_len);
716 else
717 memcpy(mac_addr,
718 qdev->flash.flash_params_8000.mac_addr,
719 qdev->ndev->addr_len);
720
721 if (!is_valid_ether_addr(mac_addr)) {
722 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
723 status = -EINVAL;
724 goto exit;
725 }
726
727 memcpy(qdev->ndev->dev_addr,
728 mac_addr,
729 qdev->ndev->addr_len);
730
731 exit:
732 qlge_sem_unlock(qdev, SEM_FLASH_MASK);
733 return status;
734 }
735
736 static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
737 {
738 int i;
739 int status;
740 __le32 *p = (__le32 *)&qdev->flash;
741 u32 offset = 0;
742 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
743
744 /* Second function's parameters follow the first
745 * function's.
746 */
747 if (qdev->port)
748 offset = size;
749
750 if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK))
751 return -ETIMEDOUT;
752
753 for (i = 0; i < size; i++, p++) {
754 status = qlge_read_flash_word(qdev, i + offset, p);
755 if (status) {
756 netif_err(qdev, ifup, qdev->ndev,
757 "Error reading flash.\n");
758 goto exit;
759 }
760 }
761
762 status = qlge_validate_flash(qdev,
763 sizeof(struct flash_params_8012) /
764 sizeof(u16),
765 "8012");
766 if (status) {
767 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
768 status = -EINVAL;
769 goto exit;
770 }
771
772 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
773 status = -EINVAL;
774 goto exit;
775 }
776
777 memcpy(qdev->ndev->dev_addr,
778 qdev->flash.flash_params_8012.mac_addr,
779 qdev->ndev->addr_len);
780
781 exit:
782 qlge_sem_unlock(qdev, SEM_FLASH_MASK);
783 return status;
784 }
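/* Flash layout used by the two readers above: on 8000 chips each
 * function has its own parameter block at FUNC0_FLASH_OFFSET or
 * FUNC1_FLASH_OFFSET, while on 8012 chips the second function's
 * parameters immediately follow the first function's block.
 */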
785
786 /* xgmac registers are located behind the xgmac_addr and xgmac_data
787 * register pair. Each read/write requires us to wait for the ready
788 * bit before reading/writing the data.
789 */
790 static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data)
791 {
792 int status;
793 /* wait for reg to come ready */
794 status = qlge_wait_reg_rdy(qdev,
795 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
796 if (status)
797 return status;
798 /* write the data to the data reg */
799 qlge_write32(qdev, XGMAC_DATA, data);
800 /* trigger the write */
801 qlge_write32(qdev, XGMAC_ADDR, reg);
802 return status;
803 }
804
805 /* xgmac registers are located behind the xgmac_addr and xgmac_data
806 * register pair. Each read/write requires us to wait for the ready
807 * bit before reading/writing the data.
808 */
809 int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data)
810 {
811 int status = 0;
812 /* wait for reg to come ready */
813 status = qlge_wait_reg_rdy(qdev,
814 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
815 if (status)
816 goto exit;
817 /* set up for reg read */
818 qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
819 /* wait for reg to come ready */
820 status = qlge_wait_reg_rdy(qdev,
821 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
822 if (status)
823 goto exit;
824 /* get the data */
825 *data = qlge_read32(qdev, XGMAC_DATA);
826 exit:
827 return status;
828 }
829
830 /* This is used for reading the 64-bit statistics regs. */
831 int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data)
832 {
833 int status = 0;
834 u32 hi = 0;
835 u32 lo = 0;
836
837 status = qlge_read_xgmac_reg(qdev, reg, &lo);
838 if (status)
839 goto exit;
840
841 status = qlge_read_xgmac_reg(qdev, reg + 4, &hi);
842 if (status)
843 goto exit;
844
845 *data = (u64)lo | ((u64)hi << 32);
846
847 exit:
848 return status;
849 }
850
851 static int qlge_8000_port_initialize(struct qlge_adapter *qdev)
852 {
853 int status;
854 /*
855 * Get MPI firmware version for driver banner
856 * and ethtool info.
857 */
858 status = qlge_mb_about_fw(qdev);
859 if (status)
860 goto exit;
861 status = qlge_mb_get_fw_state(qdev);
862 if (status)
863 goto exit;
864 /* Wake up a worker to get/set the TX/RX frame sizes. */
865 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
866 exit:
867 return status;
868 }
869
870 /* Take the MAC Core out of reset.
871 * Enable statistics counting.
872 * Take the transmitter/receiver out of reset.
873 * This functionality may be done in the MPI firmware at a
874 * later date.
875 */
876 static int qlge_8012_port_initialize(struct qlge_adapter *qdev)
877 {
878 int status = 0;
879 u32 data;
880
881 if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) {
882 /* Another function has the semaphore, so
883 * wait for the port init bit to come ready.
884 */
885 netif_info(qdev, link, qdev->ndev,
886 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
887 status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
888 if (status) {
889 netif_crit(qdev, link, qdev->ndev,
890 "Port initialize timed out.\n");
891 }
892 return status;
893 }
894
895 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
896 /* Set the core reset. */
897 status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
898 if (status)
899 goto end;
900 data |= GLOBAL_CFG_RESET;
901 status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
902 if (status)
903 goto end;
904
905 /* Clear the core reset and turn on jumbo for receiver. */
906 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
907 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
908 data |= GLOBAL_CFG_TX_STAT_EN;
909 data |= GLOBAL_CFG_RX_STAT_EN;
910 status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data);
911 if (status)
912 goto end;
913
914 /* Enable the transmitter and clear its reset. */
915 status = qlge_read_xgmac_reg(qdev, TX_CFG, &data);
916 if (status)
917 goto end;
918 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
919 data |= TX_CFG_EN; /* Enable the transmitter. */
920 status = qlge_write_xgmac_reg(qdev, TX_CFG, data);
921 if (status)
922 goto end;
923
924 /* Enable the receiver and clear its reset. */
925 status = qlge_read_xgmac_reg(qdev, RX_CFG, &data);
926 if (status)
927 goto end;
928 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
929 data |= RX_CFG_EN; /* Enable the receiver. */
930 status = qlge_write_xgmac_reg(qdev, RX_CFG, data);
931 if (status)
932 goto end;
933
934 /* Turn on jumbo. */
935 status =
936 qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
937 if (status)
938 goto end;
939 status =
940 qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
941 if (status)
942 goto end;
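	/* Note: 0x2580 above is 9600 decimal; presumably the maximum
	 * frame size programmed into the TX/RX parameter registers,
	 * large enough for the 9000-byte jumbo frames referenced
	 * elsewhere in this driver.
	 */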
943
944 /* Signal to the world that the port is enabled. */
945 qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
946 end:
947 qlge_sem_unlock(qdev, qdev->xg_sem_mask);
948 return status;
949 }
950
951 static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev)
952 {
953 return PAGE_SIZE << qdev->lbq_buf_order;
954 }
955
956 static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
957 {
958 struct qlge_bq_desc *bq_desc;
959
960 bq_desc = &bq->queue[bq->next_to_clean];
961 bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);
962
963 return bq_desc;
964 }
965
966 static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev,
967 struct rx_ring *rx_ring)
968 {
969 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
970
971 dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
972 qdev->lbq_buf_size, DMA_FROM_DEVICE);
973
974 if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
975 qlge_lbq_block_size(qdev)) {
976 /* last chunk of the master page */
977 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
978 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
979 }
980
981 return lbq_desc;
982 }
983
984 /* Update an rx ring index. */
985 static void qlge_update_cq(struct rx_ring *rx_ring)
986 {
987 rx_ring->cnsmr_idx++;
988 rx_ring->curr_entry++;
989 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
990 rx_ring->cnsmr_idx = 0;
991 rx_ring->curr_entry = rx_ring->cq_base;
992 }
993 }
994
995 static void qlge_write_cq_idx(struct rx_ring *rx_ring)
996 {
997 qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
998 }
999
1000 static const char * const bq_type_name[] = {
1001 [QLGE_SB] = "sbq",
1002 [QLGE_LB] = "lbq",
1003 };
1004
1005 /* return 0 or negative error */
1006 static int qlge_refill_sb(struct rx_ring *rx_ring,
1007 struct qlge_bq_desc *sbq_desc, gfp_t gfp)
1008 {
1009 struct qlge_adapter *qdev = rx_ring->qdev;
1010 struct sk_buff *skb;
1011
1012 if (sbq_desc->p.skb)
1013 return 0;
1014
1015 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1016 "ring %u sbq: getting new skb for index %d.\n",
1017 rx_ring->cq_id, sbq_desc->index);
1018
1019 skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
1020 if (!skb)
1021 return -ENOMEM;
1022 skb_reserve(skb, QLGE_SB_PAD);
1023
1024 sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
1025 SMALL_BUF_MAP_SIZE,
1026 DMA_FROM_DEVICE);
1027 if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
1028 netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
1029 dev_kfree_skb_any(skb);
1030 return -EIO;
1031 }
1032 *sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);
1033
1034 sbq_desc->p.skb = skb;
1035 return 0;
1036 }
1037
1038 /* return 0 or negative error */
1039 static int qlge_refill_lb(struct rx_ring *rx_ring,
1040 struct qlge_bq_desc *lbq_desc, gfp_t gfp)
1041 {
1042 struct qlge_adapter *qdev = rx_ring->qdev;
1043 struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;
1044
1045 if (!master_chunk->page) {
1046 struct page *page;
1047 dma_addr_t dma_addr;
1048
1049 page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
1050 if (unlikely(!page))
1051 return -ENOMEM;
1052 dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
1053 qlge_lbq_block_size(qdev),
1054 DMA_FROM_DEVICE);
1055 if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
1056 __free_pages(page, qdev->lbq_buf_order);
1057 netif_err(qdev, drv, qdev->ndev,
1058 "PCI mapping failed.\n");
1059 return -EIO;
1060 }
1061 master_chunk->page = page;
1062 master_chunk->va = page_address(page);
1063 master_chunk->offset = 0;
1064 rx_ring->chunk_dma_addr = dma_addr;
1065 }
1066
1067 lbq_desc->p.pg_chunk = *master_chunk;
1068 lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
1069 *lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
1070 lbq_desc->p.pg_chunk.offset);
1071
1072 /* Adjust the master page chunk for next
1073 * buffer get.
1074 */
1075 master_chunk->offset += qdev->lbq_buf_size;
1076 if (master_chunk->offset == qlge_lbq_block_size(qdev)) {
1077 master_chunk->page = NULL;
1078 } else {
1079 master_chunk->va += qdev->lbq_buf_size;
1080 get_page(master_chunk->page);
1081 }
1082
1083 return 0;
1084 }
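/* Note on the page accounting above: the master page holds one
 * reference per outstanding chunk - get_page() is taken for every
 * chunk except the last, which inherits the original reference when
 * master_chunk->page is set back to NULL.
 */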
1085
1086 /* return 0 or negative error */
1087 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
1088 {
1089 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
1090 struct qlge_adapter *qdev = rx_ring->qdev;
1091 struct qlge_bq_desc *bq_desc;
1092 int refill_count;
1093 int retval;
1094 int i;
1095
1096 refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
1097 bq->next_to_use);
1098 if (!refill_count)
1099 return 0;
1100
1101 i = bq->next_to_use;
1102 bq_desc = &bq->queue[i];
1103 i -= QLGE_BQ_LEN;
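	/* 'i' is kept biased by -QLGE_BQ_LEN so that running off the end
	 * of the ring can be detected with the simple "!i" test below;
	 * the bias is removed again right after the loop.
	 */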
1104 do {
1105 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1106 "ring %u %s: try cleaning idx %d\n",
1107 rx_ring->cq_id, bq_type_name[bq->type], i);
1108
1109 if (bq->type == QLGE_SB)
1110 retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
1111 else
1112 retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
1113 if (retval < 0) {
1114 netif_err(qdev, ifup, qdev->ndev,
1115 "ring %u %s: Could not get a page chunk, idx %d\n",
1116 rx_ring->cq_id, bq_type_name[bq->type], i);
1117 break;
1118 }
1119
1120 bq_desc++;
1121 i++;
1122 if (unlikely(!i)) {
1123 bq_desc = &bq->queue[0];
1124 i -= QLGE_BQ_LEN;
1125 }
1126 refill_count--;
1127 } while (refill_count);
1128 i += QLGE_BQ_LEN;
1129
1130 if (bq->next_to_use != i) {
1131 if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
1132 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1133 "ring %u %s: updating prod idx = %d.\n",
1134 rx_ring->cq_id, bq_type_name[bq->type],
1135 i);
1136 qlge_write_db_reg(i, bq->prod_idx_db_reg);
1137 }
1138 bq->next_to_use = i;
1139 }
1140
1141 return retval;
1142 }
1143
1144 static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
1145 unsigned long delay)
1146 {
1147 bool sbq_fail, lbq_fail;
1148
1149 sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
1150 lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);
1151
1152 /* Minimum number of buffers needed to be able to receive at least one
1153 * frame of any format:
1154 * sbq: 1 for header + 1 for data
1155 * lbq: mtu 9000 / lb size
1156 * Below this, the queue might stall.
1157 */
1158 if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
1159 (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
1160 DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
1161 /* Allocations can take a long time in certain cases (ex.
1162 * reclaim). Therefore, use a workqueue for long-running
1163 * work items.
1164 */
1165 queue_delayed_work_on(smp_processor_id(), system_long_wq,
1166 &rx_ring->refill_work, delay);
1167 }
1168
1169 static void qlge_slow_refill(struct work_struct *work)
1170 {
1171 struct rx_ring *rx_ring = container_of(work, struct rx_ring,
1172 refill_work.work);
1173 struct napi_struct *napi = &rx_ring->napi;
1174
1175 napi_disable(napi);
1176 qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
1177 napi_enable(napi);
1178
1179 local_bh_disable();
1180 /* napi_disable() might have prevented incomplete napi work from being
1181 * rescheduled.
1182 */
1183 napi_schedule(napi);
1184 /* trigger softirq processing */
1185 local_bh_enable();
1186 }
1187
1188 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1189 * fails at some stage, or from the interrupt when a tx completes.
1190 */
1191 static void qlge_unmap_send(struct qlge_adapter *qdev,
1192 struct tx_ring_desc *tx_ring_desc, int mapped)
1193 {
1194 int i;
1195
1196 for (i = 0; i < mapped; i++) {
1197 if (i == 0 || (i == 7 && mapped > 7)) {
1198 /*
1199 * Unmap the skb->data area, or the
1200 * external sglist (AKA the Outbound
1201 * Address List (OAL)).
1202 * If it's the zeroth element, then it's
1203 * the skb->data area. If it's the 7th
1204 * element and there are more than 6 frags,
1205 * then it's an OAL.
1206 */
1207 if (i == 7) {
1208 netif_printk(qdev, tx_done, KERN_DEBUG,
1209 qdev->ndev,
1210 "unmapping OAL area.\n");
1211 }
1212 dma_unmap_single(&qdev->pdev->dev,
1213 dma_unmap_addr(&tx_ring_desc->map[i],
1214 mapaddr),
1215 dma_unmap_len(&tx_ring_desc->map[i],
1216 maplen),
1217 DMA_TO_DEVICE);
1218 } else {
1219 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1220 "unmapping frag %d.\n", i);
1221 dma_unmap_page(&qdev->pdev->dev,
1222 dma_unmap_addr(&tx_ring_desc->map[i],
1223 mapaddr),
1224 dma_unmap_len(&tx_ring_desc->map[i],
1225 maplen), DMA_TO_DEVICE);
1226 }
1227 }
1228 }
1229
1230 /* Map the buffers for this transmit. This will return
1231 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1232 */
1233 static int qlge_map_send(struct qlge_adapter *qdev,
1234 struct qlge_ob_mac_iocb_req *mac_iocb_ptr,
1235 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1236 {
1237 int len = skb_headlen(skb);
1238 dma_addr_t map;
1239 int frag_idx, err, map_idx = 0;
1240 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1241 int frag_cnt = skb_shinfo(skb)->nr_frags;
1242
1243 if (frag_cnt) {
1244 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1245 "frag_cnt = %d.\n", frag_cnt);
1246 }
1247 /*
1248 * Map the skb buffer first.
1249 */
1250 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
1251
1252 err = dma_mapping_error(&qdev->pdev->dev, map);
1253 if (err) {
1254 netif_err(qdev, tx_queued, qdev->ndev,
1255 "PCI mapping failed with error: %d\n", err);
1256
1257 return NETDEV_TX_BUSY;
1258 }
1259
1260 tbd->len = cpu_to_le32(len);
1261 tbd->addr = cpu_to_le64(map);
1262 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1263 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1264 map_idx++;
1265
1266 /*
1267 * This loop fills the remainder of the 8 address descriptors
1268 * in the IOCB. If there are more than 7 fragments, then the
1269 * eighth address desc will point to an external list (OAL).
1270 * When this happens, the remainder of the frags will be stored
1271 * in this list.
1272 */
1273 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1274 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1275
1276 tbd++;
1277 if (frag_idx == 6 && frag_cnt > 7) {
1278 /* Let's tack on an sglist.
1279 * Our control block will now
1280 * look like this:
1281 * iocb->seg[0] = skb->data
1282 * iocb->seg[1] = frag[0]
1283 * iocb->seg[2] = frag[1]
1284 * iocb->seg[3] = frag[2]
1285 * iocb->seg[4] = frag[3]
1286 * iocb->seg[5] = frag[4]
1287 * iocb->seg[6] = frag[5]
1288 * iocb->seg[7] = ptr to OAL (external sglist)
1289 * oal->seg[0] = frag[6]
1290 * oal->seg[1] = frag[7]
1291 * oal->seg[2] = frag[8]
1292 * oal->seg[3] = frag[9]
1293 * oal->seg[4] = frag[10]
1294 * etc...
1295 */
1296 /* Tack on the OAL in the eighth segment of IOCB. */
1297 map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
1298 sizeof(struct qlge_oal),
1299 DMA_TO_DEVICE);
1300 err = dma_mapping_error(&qdev->pdev->dev, map);
1301 if (err) {
1302 netif_err(qdev, tx_queued, qdev->ndev,
1303 "PCI mapping outbound address list with error: %d\n",
1304 err);
1305 goto map_error;
1306 }
1307
1308 tbd->addr = cpu_to_le64(map);
1309 /*
1310 * The length is the number of fragments
1311 * that remain to be mapped times the length
1312 * of our sglist (OAL).
1313 */
1314 tbd->len =
1315 cpu_to_le32((sizeof(struct tx_buf_desc) *
1316 (frag_cnt - frag_idx)) | TX_DESC_C);
1317 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1318 map);
1319 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1320 sizeof(struct qlge_oal));
1321 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1322 map_idx++;
1323 }
1324
1325 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1326 DMA_TO_DEVICE);
1327
1328 err = dma_mapping_error(&qdev->pdev->dev, map);
1329 if (err) {
1330 netif_err(qdev, tx_queued, qdev->ndev,
1331 "PCI mapping frags failed with error: %d.\n",
1332 err);
1333 goto map_error;
1334 }
1335
1336 tbd->addr = cpu_to_le64(map);
1337 tbd->len = cpu_to_le32(skb_frag_size(frag));
1338 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1339 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1340 skb_frag_size(frag));
1341 }
1342 /* Save the number of segments we've mapped. */
1343 tx_ring_desc->map_cnt = map_idx;
1344 /* Terminate the last segment. */
1345 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1346 return NETDEV_TX_OK;
1347
1348 map_error:
1349 /*
1350 * If the first frag mapping failed, then i will be zero.
1351 * This causes the unmap of the skb->data area. Otherwise
1352 * we pass in the number of frags that mapped successfully
1353 * so they can be unmapped.
1354 */
1355 qlge_unmap_send(qdev, tx_ring_desc, map_idx);
1356 return NETDEV_TX_BUSY;
1357 }
1358
1359 /* Categorizing receive firmware frame errors */
1360 static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err,
1361 struct rx_ring *rx_ring)
1362 {
1363 struct nic_stats *stats = &qdev->nic_stats;
1364
1365 stats->rx_err_count++;
1366 rx_ring->rx_errors++;
1367
1368 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1369 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1370 stats->rx_code_err++;
1371 break;
1372 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1373 stats->rx_oversize_err++;
1374 break;
1375 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1376 stats->rx_undersize_err++;
1377 break;
1378 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1379 stats->rx_preamble_err++;
1380 break;
1381 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1382 stats->rx_frame_len_err++;
1383 break;
1384 case IB_MAC_IOCB_RSP_ERR_CRC:
1385 stats->rx_crc_err++;
1386 break;
1387 default:
1388 break;
1389 }
1390 }
1391
1392 /*
1393 * qlge_update_mac_hdr_len - helper routine to update the mac header length
1394 * based on vlan tags if present
1395 */
1396 static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev,
1397 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1398 void *page, size_t *len)
1399 {
1400 u16 *tags;
1401
1402 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1403 return;
1404 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1405 tags = (u16 *)page;
1406 /* Look for stacked vlan tags in ethertype field */
1407 if (tags[6] == ETH_P_8021Q &&
1408 tags[8] == ETH_P_8021Q)
1409 *len += 2 * VLAN_HLEN;
1410 else
1411 *len += VLAN_HLEN;
1412 }
1413 }
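/* In the helper above, 'tags' indexes the frame as 16-bit words, so
 * tags[6] is the outer EtherType at byte offset 12 and tags[8] is the
 * next EtherType at byte offset 16, which is how a stacked (QinQ) tag
 * is detected.
 */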
1414
1415 /* Process an inbound completion from an rx ring. */
1416 static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev,
1417 struct rx_ring *rx_ring,
1418 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1419 u32 length, u16 vlan_id)
1420 {
1421 struct sk_buff *skb;
1422 struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1423 struct napi_struct *napi = &rx_ring->napi;
1424
1425 /* Frame error, so drop the packet. */
1426 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1427 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1428 put_page(lbq_desc->p.pg_chunk.page);
1429 return;
1430 }
1431 napi->dev = qdev->ndev;
1432
1433 skb = napi_get_frags(napi);
1434 if (!skb) {
1435 netif_err(qdev, drv, qdev->ndev,
1436 "Couldn't get an skb, exiting.\n");
1437 rx_ring->rx_dropped++;
1438 put_page(lbq_desc->p.pg_chunk.page);
1439 return;
1440 }
1441 prefetch(lbq_desc->p.pg_chunk.va);
1442 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1443 lbq_desc->p.pg_chunk.page,
1444 lbq_desc->p.pg_chunk.offset,
1445 length);
1446
1447 skb->len += length;
1448 skb->data_len += length;
1449 skb->truesize += length;
1450 skb_shinfo(skb)->nr_frags++;
1451
1452 rx_ring->rx_packets++;
1453 rx_ring->rx_bytes += length;
1454 skb->ip_summed = CHECKSUM_UNNECESSARY;
1455 skb_record_rx_queue(skb, rx_ring->cq_id);
1456 if (vlan_id != 0xffff)
1457 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1458 napi_gro_frags(napi);
1459 }
1460
1461 /* Process an inbound completion from an rx ring. */
1462 static void qlge_process_mac_rx_page(struct qlge_adapter *qdev,
1463 struct rx_ring *rx_ring,
1464 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1465 u32 length, u16 vlan_id)
1466 {
1467 struct net_device *ndev = qdev->ndev;
1468 struct sk_buff *skb = NULL;
1469 void *addr;
1470 struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1471 struct napi_struct *napi = &rx_ring->napi;
1472 size_t hlen = ETH_HLEN;
1473
1474 skb = netdev_alloc_skb(ndev, length);
1475 if (!skb) {
1476 rx_ring->rx_dropped++;
1477 put_page(lbq_desc->p.pg_chunk.page);
1478 return;
1479 }
1480
1481 addr = lbq_desc->p.pg_chunk.va;
1482 prefetch(addr);
1483
1484 /* Frame error, so drop the packet. */
1485 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1486 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1487 goto err_out;
1488 }
1489
1490 /* Update the MAC header length*/
1491 qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1492
1493 /* The max framesize filter on this chip is set higher than
1494 * MTU since FCoE uses 2k frames.
1495 */
1496 if (skb->len > ndev->mtu + hlen) {
1497 netif_err(qdev, drv, qdev->ndev,
1498 "Segment too small, dropping.\n");
1499 rx_ring->rx_dropped++;
1500 goto err_out;
1501 }
1502 skb_put_data(skb, addr, hlen);
1503 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1504 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1505 length);
1506 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1507 lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
1508 skb->len += length - hlen;
1509 skb->data_len += length - hlen;
1510 skb->truesize += length - hlen;
1511
1512 rx_ring->rx_packets++;
1513 rx_ring->rx_bytes += skb->len;
1514 skb->protocol = eth_type_trans(skb, ndev);
1515 skb_checksum_none_assert(skb);
1516
1517 if ((ndev->features & NETIF_F_RXCSUM) &&
1518 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1519 /* TCP frame. */
1520 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1521 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1522 "TCP checksum done!\n");
1523 skb->ip_summed = CHECKSUM_UNNECESSARY;
1524 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1525 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1526 /* Unfragmented ipv4 UDP frame. */
1527 struct iphdr *iph =
1528 (struct iphdr *)((u8 *)addr + hlen);
1529 if (!(iph->frag_off &
1530 htons(IP_MF | IP_OFFSET))) {
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 netif_printk(qdev, rx_status, KERN_DEBUG,
1533 qdev->ndev,
1534 "UDP checksum done!\n");
1535 }
1536 }
1537 }
1538
1539 skb_record_rx_queue(skb, rx_ring->cq_id);
1540 if (vlan_id != 0xffff)
1541 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1542 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1543 napi_gro_receive(napi, skb);
1544 else
1545 netif_receive_skb(skb);
1546 return;
1547 err_out:
1548 dev_kfree_skb_any(skb);
1549 put_page(lbq_desc->p.pg_chunk.page);
1550 }
1551
1552 /* Process an inbound completion from an rx ring. */
1553 static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev,
1554 struct rx_ring *rx_ring,
1555 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1556 u32 length, u16 vlan_id)
1557 {
1558 struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1559 struct net_device *ndev = qdev->ndev;
1560 struct sk_buff *skb, *new_skb;
1561
1562 skb = sbq_desc->p.skb;
1563 /* Allocate new_skb and copy */
1564 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1565 if (!new_skb) {
1566 rx_ring->rx_dropped++;
1567 return;
1568 }
1569 skb_reserve(new_skb, NET_IP_ALIGN);
1570
1571 dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
1572 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1573
1574 skb_put_data(new_skb, skb->data, length);
1575
1576 skb = new_skb;
1577
1578 /* Frame error, so drop the packet. */
1579 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1580 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1581 dev_kfree_skb_any(skb);
1582 return;
1583 }
1584
1585 /* loopback self test for ethtool */
1586 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1587 qlge_check_lb_frame(qdev, skb);
1588 dev_kfree_skb_any(skb);
1589 return;
1590 }
1591
1592 /* The max framesize filter on this chip is set higher than
1593 * MTU since FCoE uses 2k frames.
1594 */
1595 if (skb->len > ndev->mtu + ETH_HLEN) {
1596 dev_kfree_skb_any(skb);
1597 rx_ring->rx_dropped++;
1598 return;
1599 }
1600
1601 prefetch(skb->data);
1602 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1603 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1604 "%s Multicast.\n",
1605 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1606 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1607 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1608 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1609 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1610 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1611 }
1612 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1613 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1614 "Promiscuous Packet.\n");
1615
1616 rx_ring->rx_packets++;
1617 rx_ring->rx_bytes += skb->len;
1618 skb->protocol = eth_type_trans(skb, ndev);
1619 skb_checksum_none_assert(skb);
1620
1621 /* If rx checksum is on, and there are no
1622 * csum or frame errors.
1623 */
1624 if ((ndev->features & NETIF_F_RXCSUM) &&
1625 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1626 /* TCP frame. */
1627 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1628 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1629 "TCP checksum done!\n");
1630 skb->ip_summed = CHECKSUM_UNNECESSARY;
1631 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1632 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1633 /* Unfragmented ipv4 UDP frame. */
1634 struct iphdr *iph = (struct iphdr *)skb->data;
1635
1636 if (!(iph->frag_off &
1637 htons(IP_MF | IP_OFFSET))) {
1638 skb->ip_summed = CHECKSUM_UNNECESSARY;
1639 netif_printk(qdev, rx_status, KERN_DEBUG,
1640 qdev->ndev,
1641 "UDP checksum done!\n");
1642 }
1643 }
1644 }
1645
1646 skb_record_rx_queue(skb, rx_ring->cq_id);
1647 if (vlan_id != 0xffff)
1648 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1649 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1650 napi_gro_receive(&rx_ring->napi, skb);
1651 else
1652 netif_receive_skb(skb);
1653 }
1654
1655 static void qlge_realign_skb(struct sk_buff *skb, int len)
1656 {
1657 void *temp_addr = skb->data;
1658
1659 /* Undo the skb_reserve(skb, QLGE_SB_PAD) we did before
1660 * giving to hardware, and realign data on
1661 * a 2-byte boundary.
1662 */
1663 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1664 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1665 memmove(skb->data, temp_addr, len);
1666 }
1667
1668 /*
1669 * This function builds an skb for the given inbound
1670 * completion. It will be rewritten for readability in the near
1671 * future, but for now it works well.
1672 */
1673 static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev,
1674 struct rx_ring *rx_ring,
1675 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1676 {
1677 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1678 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1679 struct qlge_bq_desc *lbq_desc, *sbq_desc;
1680 struct sk_buff *skb = NULL;
1681 size_t hlen = ETH_HLEN;
1682
1683 /*
1684 * Handle the header buffer if present.
1685 */
1686 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1687 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1688 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1689 "Header of %d bytes in small buffer.\n", hdr_len);
1690 /*
1691 * Headers fit nicely into a small buffer.
1692 */
1693 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1694 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1695 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1696 skb = sbq_desc->p.skb;
1697 qlge_realign_skb(skb, hdr_len);
1698 skb_put(skb, hdr_len);
1699 sbq_desc->p.skb = NULL;
1700 }
1701
1702 /*
1703 * Handle the data buffer(s).
1704 */
1705 if (unlikely(!length)) { /* Is there data too? */
1706 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1707 "No Data buffer in this packet.\n");
1708 return skb;
1709 }
1710
1711 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1712 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1713 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1714 "Headers in small, data of %d bytes in small, combine them.\n",
1715 length);
1716 /*
1717 * Data is less than small buffer size so it's
1718 * stuffed in a small buffer.
1719 * For this case we append the data
1720 * from the "data" small buffer to the "header" small
1721 * buffer.
1722 */
1723 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1724 dma_sync_single_for_cpu(&qdev->pdev->dev,
1725 sbq_desc->dma_addr,
1726 SMALL_BUF_MAP_SIZE,
1727 DMA_FROM_DEVICE);
1728 skb_put_data(skb, sbq_desc->p.skb->data, length);
1729 } else {
1730 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1731 "%d bytes in a single small buffer.\n",
1732 length);
1733 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1734 skb = sbq_desc->p.skb;
1735 qlge_realign_skb(skb, length);
1736 skb_put(skb, length);
1737 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1738 SMALL_BUF_MAP_SIZE,
1739 DMA_FROM_DEVICE);
1740 sbq_desc->p.skb = NULL;
1741 }
1742 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1743 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1744 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1745 "Header in small, %d bytes in large. Chain large to small!\n",
1746 length);
1747 /*
1748 * The data is in a single large buffer. We
1749 * chain it to the header buffer's skb and let
1750 * it rip.
1751 */
1752 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1753 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1754 "Chaining page at offset = %d, for %d bytes to skb.\n",
1755 lbq_desc->p.pg_chunk.offset, length);
1756 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1757 lbq_desc->p.pg_chunk.offset, length);
1758 skb->len += length;
1759 skb->data_len += length;
1760 skb->truesize += length;
1761 } else {
1762 /*
1763 * The headers and data are in a single large buffer. We
1764 * copy it to a new skb and let it go. This can happen with
1765 * jumbo mtu on a non-TCP/UDP frame.
1766 */
1767 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1768 skb = netdev_alloc_skb(qdev->ndev, length);
1769 if (!skb) {
1770 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1771 "No skb available, drop the packet.\n");
1772 return NULL;
1773 }
1774 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
1775 qdev->lbq_buf_size,
1776 DMA_FROM_DEVICE);
1777 skb_reserve(skb, NET_IP_ALIGN);
1778 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1779 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1780 length);
1781 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1782 lbq_desc->p.pg_chunk.offset,
1783 length);
1784 skb->len += length;
1785 skb->data_len += length;
1786 skb->truesize += length;
1787 qlge_update_mac_hdr_len(qdev, ib_mac_rsp,
1788 lbq_desc->p.pg_chunk.va,
1789 &hlen);
1790 __pskb_pull_tail(skb, hlen);
1791 }
1792 } else {
1793 /*
1794 * The data is in a chain of large buffers
1795 * pointed to by a small buffer. We loop
1796 * through and chain them to our small header
1797 * buffer's skb.
1798 * frags: there are at most 18 frags per skb and our small
1799 * buffer will hold 32 of them. In practice we use at
1800 * most 3 for our 9000 byte jumbo frames, but if the
1801 * MTU goes up we could eventually run out of
1802 * frags.
1803 */
1804 int size, i = 0;
1805
1806 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
1807 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
1808 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
1809 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1810 /*
1811 * This is a non-TCP/UDP IP frame, so
1812 * the headers aren't split into a small
1813 * buffer. We have to use the small buffer
1814 * that contains our sg list as our skb to
1815 * send upstairs. Copy the sg list here to
1816 * a local buffer and use it to find the
1817 * pages to chain.
1818 */
1819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820 "%d bytes of headers & data in chain of large.\n",
1821 length);
1822 skb = sbq_desc->p.skb;
1823 sbq_desc->p.skb = NULL;
1824 skb_reserve(skb, NET_IP_ALIGN);
1825 }
1826 do {
1827 lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring);
1828 size = min(length, qdev->lbq_buf_size);
1829
1830 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1831 "Adding page %d to skb for %d bytes.\n",
1832 i, size);
1833 skb_fill_page_desc(skb, i,
1834 lbq_desc->p.pg_chunk.page,
1835 lbq_desc->p.pg_chunk.offset, size);
1836 skb->len += size;
1837 skb->data_len += size;
1838 skb->truesize += size;
1839 length -= size;
1840 i++;
1841 } while (length > 0);
1842 qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1843 &hlen);
1844 __pskb_pull_tail(skb, hlen);
1845 }
1846 return skb;
1847 }
1848
1849 /* Process an inbound completion from an rx ring. */
1850 static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev,
1851 struct rx_ring *rx_ring,
1852 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp,
1853 u16 vlan_id)
1854 {
1855 struct net_device *ndev = qdev->ndev;
1856 struct sk_buff *skb = NULL;
1857
1858 skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1859 if (unlikely(!skb)) {
1860 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1861 "No skb available, drop packet.\n");
1862 rx_ring->rx_dropped++;
1863 return;
1864 }
1865
1866 /* Frame error, so drop the packet. */
1867 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1868 qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1869 dev_kfree_skb_any(skb);
1870 return;
1871 }
1872
1873 /* The max framesize filter on this chip is set higher than
1874 * MTU since FCoE uses 2k frames.
1875 */
1876 if (skb->len > ndev->mtu + ETH_HLEN) {
1877 dev_kfree_skb_any(skb);
1878 rx_ring->rx_dropped++;
1879 return;
1880 }
1881
1882 /* loopback self test for ethtool */
1883 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1884 qlge_check_lb_frame(qdev, skb);
1885 dev_kfree_skb_any(skb);
1886 return;
1887 }
1888
1889 prefetch(skb->data);
1890 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1891 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1892 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1893 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1894 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1895 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1896 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1897 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1898 rx_ring->rx_multicast++;
1899 }
1900 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1901 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1902 "Promiscuous Packet.\n");
1903 }
1904
1905 skb->protocol = eth_type_trans(skb, ndev);
1906 skb_checksum_none_assert(skb);
1907
1908 /* If rx checksum is on, and there are no
1909 * csum or frame errors.
1910 */
1911 if ((ndev->features & NETIF_F_RXCSUM) &&
1912 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1913 /* TCP frame. */
1914 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1915 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1916 "TCP checksum done!\n");
1917 skb->ip_summed = CHECKSUM_UNNECESSARY;
1918 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1919 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1920 /* Unfragmented ipv4 UDP frame. */
1921 struct iphdr *iph = (struct iphdr *)skb->data;
1922
1923 if (!(iph->frag_off &
1924 htons(IP_MF | IP_OFFSET))) {
1925 skb->ip_summed = CHECKSUM_UNNECESSARY;
1926 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1927 "TCP checksum done!\n");
1928 }
1929 }
1930 }
1931
1932 rx_ring->rx_packets++;
1933 rx_ring->rx_bytes += skb->len;
1934 skb_record_rx_queue(skb, rx_ring->cq_id);
1935 if (vlan_id != 0xffff)
1936 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1937 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1938 napi_gro_receive(&rx_ring->napi, skb);
1939 else
1940 netif_receive_skb(skb);
1941 }
1942
1943 /* Process an inbound completion from an rx ring. */
1944 static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
1945 struct rx_ring *rx_ring,
1946 struct qlge_ib_mac_iocb_rsp *ib_mac_rsp)
1947 {
1948 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
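/* A vlan_id of 0xffff means "no tag": either the frame carried no VLAN or CTAG RX offload is disabled. */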
1949 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1950 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
1951 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
1952 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
1953
1954 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1955 /* The data and headers are split into
1956 * separate buffers.
1957 */
1958 qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1959 vlan_id);
1960 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1961 /* The data fit in a single small buffer.
1962 * Allocate a new skb, copy the data and
1963 * return the buffer to the free pool.
1964 */
1965 qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
1966 vlan_id);
1967 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
1968 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
1969 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
1970 /* TCP packet in a page chunk that's been checksummed.
1971 * Tack it on to our GRO skb and let it go.
1972 */
1973 qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
1974 vlan_id);
1975 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1976 /* Non-TCP packet in a page chunk. Allocate an
1977 * skb, tack it on frags, and send it up.
1978 */
1979 qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
1980 vlan_id);
1981 } else {
1982 /* Non-TCP/UDP large frames that span multiple buffers
1983 * can be processed correctly by the split frame logic.
1984 */
1985 qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
1986 vlan_id);
1987 }
1988
1989 return (unsigned long)length;
1990 }
1991
1992 /* Process an outbound completion from an rx ring. */
1993 static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev,
1994 struct qlge_ob_mac_iocb_rsp *mac_rsp)
1995 {
1996 struct tx_ring *tx_ring;
1997 struct tx_ring_desc *tx_ring_desc;
1998
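/* The completion echoes the txq_idx and tid we stored in the request, identifying the originating ring and descriptor. */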
1999 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2000 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2001 qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2002 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2003 tx_ring->tx_packets++;
2004 dev_kfree_skb(tx_ring_desc->skb);
2005 tx_ring_desc->skb = NULL;
2006
2007 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2008 OB_MAC_IOCB_RSP_S |
2009 OB_MAC_IOCB_RSP_L |
2010 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2011 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2012 netif_warn(qdev, tx_done, qdev->ndev,
2013 "Total descriptor length did not match transfer length.\n");
2014 }
2015 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2016 netif_warn(qdev, tx_done, qdev->ndev,
2017 "Frame too short to be valid, not sent.\n");
2018 }
2019 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2020 netif_warn(qdev, tx_done, qdev->ndev,
2021 "Frame too long, but sent anyway.\n");
2022 }
2023 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2024 netif_warn(qdev, tx_done, qdev->ndev,
2025 "PCI backplane error. Frame not sent.\n");
2026 }
2027 }
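/* This send is done; return its slot to the pool of free TX descriptors. */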
2028 atomic_inc(&tx_ring->tx_count);
2029 }
2030
2031 /* Fire up a handler to reset the MPI processor. */
2032 void qlge_queue_fw_error(struct qlge_adapter *qdev)
2033 {
2034 qlge_link_off(qdev);
2035 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2036 }
2037
2038 void qlge_queue_asic_error(struct qlge_adapter *qdev)
2039 {
2040 qlge_link_off(qdev);
2041 qlge_disable_interrupts(qdev);
2042 /* Clear adapter up bit to signal the recovery
2043 * process that it shouldn't kill the reset worker
2044 * thread
2045 */
2046 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2047 /* Set the asic recovery bit to tell the reset process that we
2048 * are in fatal error recovery rather than a normal close
2049 */
2050 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2051 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2052 }
2053
2054 static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev,
2055 struct qlge_ib_ae_iocb_rsp *ib_ae_rsp)
2056 {
2057 switch (ib_ae_rsp->event) {
2058 case MGMT_ERR_EVENT:
2059 netif_err(qdev, rx_err, qdev->ndev,
2060 "Management Processor Fatal Error.\n");
2061 qlge_queue_fw_error(qdev);
2062 return;
2063
2064 case CAM_LOOKUP_ERR_EVENT:
2065 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2066 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2067 qlge_queue_asic_error(qdev);
2068 return;
2069
2070 case SOFT_ECC_ERROR_EVENT:
2071 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2072 qlge_queue_asic_error(qdev);
2073 break;
2074
2075 case PCI_ERR_ANON_BUF_RD:
2076 netdev_err(qdev->ndev,
2077 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2078 ib_ae_rsp->q_id);
2079 qlge_queue_asic_error(qdev);
2080 break;
2081
2082 default:
2083 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2084 ib_ae_rsp->event);
2085 qlge_queue_asic_error(qdev);
2086 break;
2087 }
2088 }
2089
2090 static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2091 {
2092 struct qlge_adapter *qdev = rx_ring->qdev;
2093 u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2094 struct qlge_ob_mac_iocb_rsp *net_rsp = NULL;
2095 int count = 0;
2096
2097 struct tx_ring *tx_ring;
2098 /* While there are entries in the completion queue. */
2099 while (prod != rx_ring->cnsmr_idx) {
2100 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2101 "cq_id = %d, prod = %d, cnsmr = %d\n",
2102 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2103
2104 net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry;
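/* Order the read of the completion entry after the producer index read above. */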
2105 rmb();
2106 switch (net_rsp->opcode) {
2107 case OPCODE_OB_MAC_TSO_IOCB:
2108 case OPCODE_OB_MAC_IOCB:
2109 qlge_process_mac_tx_intr(qdev, net_rsp);
2110 break;
2111 default:
2112 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2113 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2114 net_rsp->opcode);
2115 }
2116 count++;
2117 qlge_update_cq(rx_ring);
2118 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2119 }
2120 if (!net_rsp)
2121 return 0;
2122 qlge_write_cq_idx(rx_ring);
2123 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2124 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2125 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2126 /*
2127 * The queue got stopped because the tx_ring was full.
2128 * Wake it up, because it's now at least 25% empty.
2129 */
2130 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2131 }
2132
2133 return count;
2134 }
2135
2136 static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2137 {
2138 struct qlge_adapter *qdev = rx_ring->qdev;
2139 u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2140 struct qlge_net_rsp_iocb *net_rsp;
2141 int count = 0;
2142
2143 /* While there are entries in the completion queue. */
2144 while (prod != rx_ring->cnsmr_idx) {
2145 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2146 "cq_id = %d, prod = %d, cnsmr = %d\n",
2147 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2148
2149 net_rsp = rx_ring->curr_entry;
2150 rmb();
2151 switch (net_rsp->opcode) {
2152 case OPCODE_IB_MAC_IOCB:
2153 qlge_process_mac_rx_intr(qdev, rx_ring,
2154 (struct qlge_ib_mac_iocb_rsp *)
2155 net_rsp);
2156 break;
2157
2158 case OPCODE_IB_AE_IOCB:
2159 qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *)
2160 net_rsp);
2161 break;
2162 default:
2163 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2164 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2165 net_rsp->opcode);
2166 break;
2167 }
2168 count++;
2169 qlge_update_cq(rx_ring);
2170 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg);
2171 if (count == budget)
2172 break;
2173 }
2174 qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
2175 qlge_write_cq_idx(rx_ring);
2176 return count;
2177 }
2178
2179 static int qlge_napi_poll_msix(struct napi_struct *napi, int budget)
2180 {
2181 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2182 struct qlge_adapter *qdev = rx_ring->qdev;
2183 struct rx_ring *trx_ring;
2184 int i, work_done = 0;
2185 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2186
2187 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2188 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2189
2190 /* Service the TX rings first. They start
2191 * right after the RSS rings.
2192 */
2193 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2194 trx_ring = &qdev->rx_ring[i];
2195 /* If this TX completion ring belongs to this vector and
2196 * it's not empty then service it.
2197 */
2198 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2199 (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2200 trx_ring->cnsmr_idx)) {
2201 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2202 "%s: Servicing TX completion ring %d.\n",
2203 __func__, trx_ring->cq_id);
2204 qlge_clean_outbound_rx_ring(trx_ring);
2205 }
2206 }
2207
2208 /*
2209 * Now service the RSS ring if it's active.
2210 */
2211 if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2212 rx_ring->cnsmr_idx) {
2213 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2214 "%s: Servicing RX completion ring %d.\n",
2215 __func__, rx_ring->cq_id);
2216 work_done = qlge_clean_inbound_rx_ring(rx_ring, budget);
2217 }
2218
2219 if (work_done < budget) {
2220 napi_complete_done(napi, work_done);
2221 qlge_enable_completion_interrupt(qdev, rx_ring->irq);
2222 }
2223 return work_done;
2224 }
2225
2226 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2227 {
2228 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2229
2230 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2231 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2232 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2233 } else {
2234 qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2235 }
2236 }
2237
2238 /*
2239 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2240 * based on the features to enable/disable hardware vlan accel
2241 */
2242 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2243 netdev_features_t features)
2244 {
2245 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2246 bool need_restart = netif_running(ndev);
2247 int status = 0;
2248
2249 if (need_restart) {
2250 status = qlge_adapter_down(qdev);
2251 if (status) {
2252 netif_err(qdev, link, qdev->ndev,
2253 "Failed to bring down the adapter\n");
2254 return status;
2255 }
2256 }
2257
2258 /* Update the features with the recent change */
2259 ndev->features = features;
2260
2261 if (need_restart) {
2262 status = qlge_adapter_up(qdev);
2263 if (status) {
2264 netif_err(qdev, link, qdev->ndev,
2265 "Failed to bring up the adapter\n");
2266 return status;
2267 }
2268 }
2269
2270 return status;
2271 }
2272
2273 static int qlge_set_features(struct net_device *ndev,
2274 netdev_features_t features)
2275 {
2276 netdev_features_t changed = ndev->features ^ features;
2277 int err;
2278
2279 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2280 /* Update the behavior of vlan accel in the adapter */
2281 err = qlge_update_hw_vlan_features(ndev, features);
2282 if (err)
2283 return err;
2284
2285 qlge_vlan_mode(ndev, features);
2286 }
2287
2288 return 0;
2289 }
2290
2291 static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid)
2292 {
2293 u32 enable_bit = MAC_ADDR_E;
2294 int err;
2295
2296 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2297 MAC_ADDR_TYPE_VLAN, vid);
2298 if (err)
2299 netif_err(qdev, ifup, qdev->ndev,
2300 "Failed to init vlan address.\n");
2301 return err;
2302 }
2303
2304 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2305 {
2306 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2307 int status;
2308 int err;
2309
2310 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2311 if (status)
2312 return status;
2313
2314 err = __qlge_vlan_rx_add_vid(qdev, vid);
2315 set_bit(vid, qdev->active_vlans);
2316
2317 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2318
2319 return err;
2320 }
2321
2322 static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid)
2323 {
2324 u32 enable_bit = 0;
2325 int err;
2326
2327 err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2328 MAC_ADDR_TYPE_VLAN, vid);
2329 if (err)
2330 netif_err(qdev, ifup, qdev->ndev,
2331 "Failed to clear vlan address.\n");
2332 return err;
2333 }
2334
2335 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2336 {
2337 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2338 int status;
2339 int err;
2340
2341 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2342 if (status)
2343 return status;
2344
2345 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2346 clear_bit(vid, qdev->active_vlans);
2347
2348 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2349
2350 return err;
2351 }
2352
2353 static void qlge_restore_vlan(struct qlge_adapter *qdev)
2354 {
2355 int status;
2356 u16 vid;
2357
2358 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2359 if (status)
2360 return;
2361
2362 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2363 __qlge_vlan_rx_add_vid(qdev, vid);
2364
2365 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2366 }
2367
2368 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2369 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2370 {
2371 struct rx_ring *rx_ring = dev_id;
2372
2373 napi_schedule(&rx_ring->napi);
2374 return IRQ_HANDLED;
2375 }
2376
2377 /* This handles a fatal error, MPI activity, and the default
2378 * rx_ring in an MSI-X multiple vector environment.
2379 * In an MSI/Legacy environment it also processes the rest of
2380 * the rx_rings.
2381 */
2382 static irqreturn_t qlge_isr(int irq, void *dev_id)
2383 {
2384 struct rx_ring *rx_ring = dev_id;
2385 struct qlge_adapter *qdev = rx_ring->qdev;
2386 struct intr_context *intr_context = &qdev->intr_context[0];
2387 u32 var;
2388 int work_done = 0;
2389
2390 /* Experience shows that when using INTx interrupts, interrupts must
2391 * be masked manually.
2392 * When using MSI mode, INTR_EN_EN must be explicitly disabled
2393 * (even though it is auto-masked), otherwise a later command to
2394 * enable it is not effective.
2395 */
2396 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
2397 qlge_disable_completion_interrupt(qdev, 0);
2398
2399 var = qlge_read32(qdev, STS);
2400
2401 /*
2402 * Check for fatal error.
2403 */
2404 if (var & STS_FE) {
2405 qlge_disable_completion_interrupt(qdev, 0);
2406 qlge_queue_asic_error(qdev);
2407 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2408 var = qlge_read32(qdev, ERR_STS);
2409 netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var);
2410 return IRQ_HANDLED;
2411 }
2412
2413 /*
2414 * Check MPI processor activity.
2415 */
2416 if ((var & STS_PI) &&
2417 (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2418 /*
2419 * We've got an async event or mailbox completion.
2420 * Handle it and clear the source of the interrupt.
2421 */
2422 netif_err(qdev, intr, qdev->ndev,
2423 "Got MPI processor interrupt.\n");
2424 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2425 queue_delayed_work_on(smp_processor_id(),
2426 qdev->workqueue, &qdev->mpi_work, 0);
2427 work_done++;
2428 }
2429
2430 /*
2431 * Get the bit-mask that shows the active queues for this
2432 * pass. Compare it to the queues that this irq services
2433 * and call napi if there's a match.
2434 */
2435 var = qlge_read32(qdev, ISR1);
2436 if (var & intr_context->irq_mask) {
2437 netif_info(qdev, intr, qdev->ndev,
2438 "Waking handler for rx_ring[0].\n");
2439 napi_schedule(&rx_ring->napi);
2440 work_done++;
2441 } else {
2442 /* Experience shows that the device sometimes signals an
2443 * interrupt but no work is scheduled from this function.
2444 * Nevertheless, the interrupt is auto-masked. Therefore, we
2445 * systematically re-enable the interrupt if we didn't
2446 * schedule napi.
2447 */
2448 qlge_enable_completion_interrupt(qdev, 0);
2449 }
2450
2451 return work_done ? IRQ_HANDLED : IRQ_NONE;
2452 }
2453
2454 static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2455 {
2456 if (skb_is_gso(skb)) {
2457 int err;
2458 __be16 l3_proto = vlan_get_protocol(skb);
2459
2460 err = skb_cow_head(skb, 0);
2461 if (err < 0)
2462 return err;
2463
2464 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2465 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2466 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2467 mac_iocb_ptr->total_hdrs_len =
2468 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2469 mac_iocb_ptr->net_trans_offset =
2470 cpu_to_le16(skb_network_offset(skb) |
2471 skb_transport_offset(skb)
2472 << OB_MAC_TRANSPORT_HDR_SHIFT);
2473 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2474 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
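/* Prime the checksum with the pseudo-header only; the hardware computes the per-segment TCP checksums. */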
2475 if (likely(l3_proto == htons(ETH_P_IP))) {
2476 struct iphdr *iph = ip_hdr(skb);
2477
2478 iph->check = 0;
2479 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2480 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2481 iph->daddr, 0,
2482 IPPROTO_TCP,
2483 0);
2484 } else if (l3_proto == htons(ETH_P_IPV6)) {
2485 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2486 tcp_hdr(skb)->check =
2487 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2488 &ipv6_hdr(skb)->daddr,
2489 0, IPPROTO_TCP, 0);
2490 }
2491 return 1;
2492 }
2493 return 0;
2494 }
2495
2496 static void qlge_hw_csum_setup(struct sk_buff *skb,
2497 struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr)
2498 {
2499 int len;
2500 struct iphdr *iph = ip_hdr(skb);
2501 __sum16 *check;
2502
2503 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2504 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
2505 mac_iocb_ptr->net_trans_offset =
2506 cpu_to_le16(skb_network_offset(skb) |
2507 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2508
2509 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2510 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
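/* Point at the TCP or UDP checksum field and seed it with the pseudo-header checksum; the hardware finishes the rest. */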
2511 if (likely(iph->protocol == IPPROTO_TCP)) {
2512 check = &(tcp_hdr(skb)->check);
2513 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2514 mac_iocb_ptr->total_hdrs_len =
2515 cpu_to_le16(skb_transport_offset(skb) +
2516 (tcp_hdr(skb)->doff << 2));
2517 } else {
2518 check = &(udp_hdr(skb)->check);
2519 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2520 mac_iocb_ptr->total_hdrs_len =
2521 cpu_to_le16(skb_transport_offset(skb) +
2522 sizeof(struct udphdr));
2523 }
2524 *check = ~csum_tcpudp_magic(iph->saddr,
2525 iph->daddr, len, iph->protocol, 0);
2526 }
2527
2528 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2529 {
2530 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
2531 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2532 struct tx_ring_desc *tx_ring_desc;
2533 int tso;
2534 struct tx_ring *tx_ring;
2535 u32 tx_ring_idx = (u32)skb->queue_mapping;
2536
2537 tx_ring = &qdev->tx_ring[tx_ring_idx];
2538
2539 if (skb_padto(skb, ETH_ZLEN))
2540 return NETDEV_TX_OK;
2541
2542 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2543 netif_info(qdev, tx_queued, qdev->ndev,
2544 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2545 __func__, tx_ring_idx);
2546 netif_stop_subqueue(ndev, tx_ring->wq_id);
2547 tx_ring->tx_errors++;
2548 return NETDEV_TX_BUSY;
2549 }
2550 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2551 mac_iocb_ptr = tx_ring_desc->queue_entry;
2552 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2553
2554 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2555 mac_iocb_ptr->tid = tx_ring_desc->index;
2556 /* We use the upper 32-bits to store the tx queue for this IO.
2557 * When we get the completion we can use it to establish the context.
2558 */
2559 mac_iocb_ptr->txq_idx = tx_ring_idx;
2560 tx_ring_desc->skb = skb;
2561
2562 mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);
2563
2564 if (skb_vlan_tag_present(skb)) {
2565 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2566 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2567 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2568 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2569 }
2570 tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2571 if (tso < 0) {
2572 dev_kfree_skb_any(skb);
2573 return NETDEV_TX_OK;
2574 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2575 qlge_hw_csum_setup(skb,
2576 (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr);
2577 }
2578 if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2579 NETDEV_TX_OK) {
2580 netif_err(qdev, tx_queued, qdev->ndev,
2581 "Could not map the segments.\n");
2582 tx_ring->tx_errors++;
2583 return NETDEV_TX_BUSY;
2584 }
2585
2586 tx_ring->prod_idx++;
2587 if (tx_ring->prod_idx == tx_ring->wq_len)
2588 tx_ring->prod_idx = 0;
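/* Make sure the IOCB writes are visible to the device before ringing the doorbell below. */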
2589 wmb();
2590
2591 qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2592 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2593 "tx queued, slot %d, len %d\n",
2594 tx_ring->prod_idx, skb->len);
2595
2596 atomic_dec(&tx_ring->tx_count);
2597
2598 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2599 netif_stop_subqueue(ndev, tx_ring->wq_id);
2600 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2601 /*
2602 * The queue got stopped because the tx_ring was full.
2603 * Wake it up, because it's now at least 25% empty.
2604 */
2605 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2606 }
2607 return NETDEV_TX_OK;
2608 }
2609
2610 static void qlge_free_shadow_space(struct qlge_adapter *qdev)
2611 {
2612 if (qdev->rx_ring_shadow_reg_area) {
2613 dma_free_coherent(&qdev->pdev->dev,
2614 PAGE_SIZE,
2615 qdev->rx_ring_shadow_reg_area,
2616 qdev->rx_ring_shadow_reg_dma);
2617 qdev->rx_ring_shadow_reg_area = NULL;
2618 }
2619 if (qdev->tx_ring_shadow_reg_area) {
2620 dma_free_coherent(&qdev->pdev->dev,
2621 PAGE_SIZE,
2622 qdev->tx_ring_shadow_reg_area,
2623 qdev->tx_ring_shadow_reg_dma);
2624 qdev->tx_ring_shadow_reg_area = NULL;
2625 }
2626 }
2627
2628 static int qlge_alloc_shadow_space(struct qlge_adapter *qdev)
2629 {
2630 qdev->rx_ring_shadow_reg_area =
2631 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2632 &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
2633 if (!qdev->rx_ring_shadow_reg_area) {
2634 netif_err(qdev, ifup, qdev->ndev,
2635 "Allocation of RX shadow space failed.\n");
2636 return -ENOMEM;
2637 }
2638
2639 qdev->tx_ring_shadow_reg_area =
2640 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
2641 &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
2642 if (!qdev->tx_ring_shadow_reg_area) {
2643 netif_err(qdev, ifup, qdev->ndev,
2644 "Allocation of TX shadow space failed.\n");
2645 goto err_wqp_sh_area;
2646 }
2647 return 0;
2648
2649 err_wqp_sh_area:
2650 dma_free_coherent(&qdev->pdev->dev,
2651 PAGE_SIZE,
2652 qdev->rx_ring_shadow_reg_area,
2653 qdev->rx_ring_shadow_reg_dma);
2654 return -ENOMEM;
2655 }
2656
2657 static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
2658 {
2659 struct tx_ring_desc *tx_ring_desc;
2660 int i;
2661 struct qlge_ob_mac_iocb_req *mac_iocb_ptr;
2662
2663 mac_iocb_ptr = tx_ring->wq_base;
2664 tx_ring_desc = tx_ring->q;
2665 for (i = 0; i < tx_ring->wq_len; i++) {
2666 tx_ring_desc->index = i;
2667 tx_ring_desc->skb = NULL;
2668 tx_ring_desc->queue_entry = mac_iocb_ptr;
2669 mac_iocb_ptr++;
2670 tx_ring_desc++;
2671 }
2672 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2673 }
2674
2675 static void qlge_free_tx_resources(struct qlge_adapter *qdev,
2676 struct tx_ring *tx_ring)
2677 {
2678 if (tx_ring->wq_base) {
2679 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2680 tx_ring->wq_base, tx_ring->wq_base_dma);
2681 tx_ring->wq_base = NULL;
2682 }
2683 kfree(tx_ring->q);
2684 tx_ring->q = NULL;
2685 }
2686
2687 static int qlge_alloc_tx_resources(struct qlge_adapter *qdev,
2688 struct tx_ring *tx_ring)
2689 {
2690 tx_ring->wq_base =
2691 dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2692 &tx_ring->wq_base_dma, GFP_ATOMIC);
2693
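/* A misaligned work queue base (see WQ_ADDR_ALIGN) is treated the same as an allocation failure. */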
2694 if (!tx_ring->wq_base ||
2695 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2696 goto pci_alloc_err;
2697
2698 tx_ring->q =
2699 kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
2700 GFP_KERNEL);
2701 if (!tx_ring->q)
2702 goto err;
2703
2704 return 0;
2705 err:
2706 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
2707 tx_ring->wq_base, tx_ring->wq_base_dma);
2708 tx_ring->wq_base = NULL;
2709 pci_alloc_err:
2710 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2711 return -ENOMEM;
2712 }
2713
2714 static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2715 {
2716 struct qlge_bq *lbq = &rx_ring->lbq;
2717 unsigned int last_offset;
2718
2719 last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size;
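/* A page block is DMA-mapped once for all of its chunks; unmap it only when freeing the chunk at the block's last offset. */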
2720 while (lbq->next_to_clean != lbq->next_to_use) {
2721 struct qlge_bq_desc *lbq_desc =
2722 &lbq->queue[lbq->next_to_clean];
2723
2724 if (lbq_desc->p.pg_chunk.offset == last_offset)
2725 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
2726 qlge_lbq_block_size(qdev),
2727 DMA_FROM_DEVICE);
2728 put_page(lbq_desc->p.pg_chunk.page);
2729
2730 lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
2731 }
2732
2733 if (rx_ring->master_chunk.page) {
2734 dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
2735 qlge_lbq_block_size(qdev), DMA_FROM_DEVICE);
2736 put_page(rx_ring->master_chunk.page);
2737 rx_ring->master_chunk.page = NULL;
2738 }
2739 }
2740
2741 static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2742 {
2743 int i;
2744
2745 for (i = 0; i < QLGE_BQ_LEN; i++) {
2746 struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];
2747
2748 if (!sbq_desc) {
2749 netif_err(qdev, ifup, qdev->ndev,
2750 "sbq_desc %d is NULL.\n", i);
2751 return;
2752 }
2753 if (sbq_desc->p.skb) {
2754 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
2755 SMALL_BUF_MAP_SIZE,
2756 DMA_FROM_DEVICE);
2757 dev_kfree_skb(sbq_desc->p.skb);
2758 sbq_desc->p.skb = NULL;
2759 }
2760 }
2761 }
2762
2763 /* Free all large and small rx buffers associated
2764 * with the completion queues for this device.
2765 */
2766 static void qlge_free_rx_buffers(struct qlge_adapter *qdev)
2767 {
2768 int i;
2769
2770 for (i = 0; i < qdev->rx_ring_count; i++) {
2771 struct rx_ring *rx_ring = &qdev->rx_ring[i];
2772
2773 if (rx_ring->lbq.queue)
2774 qlge_free_lbq_buffers(qdev, rx_ring);
2775 if (rx_ring->sbq.queue)
2776 qlge_free_sbq_buffers(qdev, rx_ring);
2777 }
2778 }
2779
2780 static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev)
2781 {
2782 int i;
2783
2784 for (i = 0; i < qdev->rss_ring_count; i++)
2785 qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
2786 HZ / 2);
2787 }
2788
2789 static int qlge_init_bq(struct qlge_bq *bq)
2790 {
2791 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
2792 struct qlge_adapter *qdev = rx_ring->qdev;
2793 struct qlge_bq_desc *bq_desc;
2794 __le64 *buf_ptr;
2795 int i;
2796
2797 bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2798 &bq->base_dma, GFP_ATOMIC);
2799 if (!bq->base)
2800 return -ENOMEM;
2801
2802 bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
2803 GFP_KERNEL);
2804 if (!bq->queue)
2805 return -ENOMEM;
2806
2807 buf_ptr = bq->base;
2808 bq_desc = &bq->queue[0];
2809 for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
2810 bq_desc->p.skb = NULL;
2811 bq_desc->index = i;
2812 bq_desc->buf_ptr = buf_ptr;
2813 }
2814
2815 return 0;
2816 }
2817
2818 static void qlge_free_rx_resources(struct qlge_adapter *qdev,
2819 struct rx_ring *rx_ring)
2820 {
2821 /* Free the small buffer queue. */
2822 if (rx_ring->sbq.base) {
2823 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2824 rx_ring->sbq.base, rx_ring->sbq.base_dma);
2825 rx_ring->sbq.base = NULL;
2826 }
2827
2828 /* Free the small buffer queue control blocks. */
2829 kfree(rx_ring->sbq.queue);
2830 rx_ring->sbq.queue = NULL;
2831
2832 /* Free the large buffer queue. */
2833 if (rx_ring->lbq.base) {
2834 dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
2835 rx_ring->lbq.base, rx_ring->lbq.base_dma);
2836 rx_ring->lbq.base = NULL;
2837 }
2838
2839 /* Free the large buffer queue control blocks. */
2840 kfree(rx_ring->lbq.queue);
2841 rx_ring->lbq.queue = NULL;
2842
2843 /* Free the rx queue. */
2844 if (rx_ring->cq_base) {
2845 dma_free_coherent(&qdev->pdev->dev,
2846 rx_ring->cq_size,
2847 rx_ring->cq_base, rx_ring->cq_base_dma);
2848 rx_ring->cq_base = NULL;
2849 }
2850 }
2851
2852 /* Allocate queues and buffers for this completions queue based
2853 * on the values in the parameter structure.
2854 */
2855 static int qlge_alloc_rx_resources(struct qlge_adapter *qdev,
2856 struct rx_ring *rx_ring)
2857 {
2858 /*
2859 * Allocate the completion queue for this rx_ring.
2860 */
2861 rx_ring->cq_base =
2862 dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
2863 &rx_ring->cq_base_dma, GFP_ATOMIC);
2864
2865 if (!rx_ring->cq_base) {
2866 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2867 return -ENOMEM;
2868 }
2869
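/* Only the RSS (inbound) completion queues need small/large buffer queues; TX completion rings do not. */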
2870 if (rx_ring->cq_id < qdev->rss_ring_count &&
2871 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
2872 qlge_free_rx_resources(qdev, rx_ring);
2873 return -ENOMEM;
2874 }
2875
2876 return 0;
2877 }
2878
2879 static void qlge_tx_ring_clean(struct qlge_adapter *qdev)
2880 {
2881 struct tx_ring *tx_ring;
2882 struct tx_ring_desc *tx_ring_desc;
2883 int i, j;
2884
2885 /*
2886 * Loop through all queues and free
2887 * any resources.
2888 */
2889 for (j = 0; j < qdev->tx_ring_count; j++) {
2890 tx_ring = &qdev->tx_ring[j];
2891 for (i = 0; i < tx_ring->wq_len; i++) {
2892 tx_ring_desc = &tx_ring->q[i];
2893 if (tx_ring_desc && tx_ring_desc->skb) {
2894 netif_err(qdev, ifdown, qdev->ndev,
2895 "Freeing lost SKB %p, from queue %d, index %d.\n",
2896 tx_ring_desc->skb, j,
2897 tx_ring_desc->index);
2898 qlge_unmap_send(qdev, tx_ring_desc,
2899 tx_ring_desc->map_cnt);
2900 dev_kfree_skb(tx_ring_desc->skb);
2901 tx_ring_desc->skb = NULL;
2902 }
2903 }
2904 }
2905 }
2906
2907 static void qlge_free_mem_resources(struct qlge_adapter *qdev)
2908 {
2909 int i;
2910
2911 for (i = 0; i < qdev->tx_ring_count; i++)
2912 qlge_free_tx_resources(qdev, &qdev->tx_ring[i]);
2913 for (i = 0; i < qdev->rx_ring_count; i++)
2914 qlge_free_rx_resources(qdev, &qdev->rx_ring[i]);
2915 qlge_free_shadow_space(qdev);
2916 }
2917
2918 static int qlge_alloc_mem_resources(struct qlge_adapter *qdev)
2919 {
2920 int i;
2921
2922 /* Allocate space for our shadow registers and such. */
2923 if (qlge_alloc_shadow_space(qdev))
2924 return -ENOMEM;
2925
2926 for (i = 0; i < qdev->rx_ring_count; i++) {
2927 if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2928 netif_err(qdev, ifup, qdev->ndev,
2929 "RX resource allocation failed.\n");
2930 goto err_mem;
2931 }
2932 }
2933 /* Allocate tx queue resources */
2934 for (i = 0; i < qdev->tx_ring_count; i++) {
2935 if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2936 netif_err(qdev, ifup, qdev->ndev,
2937 "TX resource allocation failed.\n");
2938 goto err_mem;
2939 }
2940 }
2941 return 0;
2942
2943 err_mem:
2944 qlge_free_mem_resources(qdev);
2945 return -ENOMEM;
2946 }
2947
2948 /* Set up the rx ring control block and pass it to the chip.
2949 * The control block is defined as
2950 * "Completion Queue Initialization Control Block", or cqicb.
2951 */
2952 static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring)
2953 {
2954 struct cqicb *cqicb = &rx_ring->cqicb;
2955 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
2956 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2957 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
2958 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
2959 void __iomem *doorbell_area =
2960 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2961 int err = 0;
2962 u64 tmp;
2963 __le64 *base_indirect_ptr;
2964 int page_entries;
2965
2966 /* Set up the shadow registers for this ring. */
2967 rx_ring->prod_idx_sh_reg = shadow_reg;
2968 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2969 *rx_ring->prod_idx_sh_reg = 0;
2970 shadow_reg += sizeof(u64);
2971 shadow_reg_dma += sizeof(u64);
2972 rx_ring->lbq.base_indirect = shadow_reg;
2973 rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
2974 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2975 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
2976 rx_ring->sbq.base_indirect = shadow_reg;
2977 rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
2978
2979 /* PCI doorbell mem area + 0x00 for consumer index register */
2980 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
2981 rx_ring->cnsmr_idx = 0;
2982 rx_ring->curr_entry = rx_ring->cq_base;
2983
2984 /* PCI doorbell mem area + 0x04 for valid register */
2985 rx_ring->valid_db_reg = doorbell_area + 0x04;
2986
2987 /* PCI doorbell mem area + 0x18 for large buffer consumer */
2988 rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);
2989
2990 /* PCI doorbell mem area + 0x1c */
2991 rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);
2992
2993 memset((void *)cqicb, 0, sizeof(struct cqicb));
2994 cqicb->msix_vect = rx_ring->irq;
2995
2996 cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
2997 LEN_CPP_CONT);
2998
2999 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3000
3001 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3002
3003 /*
3004 * Set up the control block load flags.
3005 */
3006 cqicb->flags = FLAGS_LC | /* Load queue base address */
3007 FLAGS_LV | /* Load MSI-X vector */
3008 FLAGS_LI; /* Load irq delay values */
3009 if (rx_ring->cq_id < qdev->rss_ring_count) {
3010 cqicb->flags |= FLAGS_LL; /* Load lbq values */
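/* List the large buffer queue DMA area in DB_PAGE_SIZE pieces; the chip follows this indirection list via cqicb->lbq_addr. */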
3011 tmp = (u64)rx_ring->lbq.base_dma;
3012 base_indirect_ptr = rx_ring->lbq.base_indirect;
3013 page_entries = 0;
3014 do {
3015 *base_indirect_ptr = cpu_to_le64(tmp);
3016 tmp += DB_PAGE_SIZE;
3017 base_indirect_ptr++;
3018 page_entries++;
3019 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3020 cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
3021 cqicb->lbq_buf_size =
3022 cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
3023 cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3024 rx_ring->lbq.next_to_use = 0;
3025 rx_ring->lbq.next_to_clean = 0;
3026
3027 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3028 tmp = (u64)rx_ring->sbq.base_dma;
3029 base_indirect_ptr = rx_ring->sbq.base_indirect;
3030 page_entries = 0;
3031 do {
3032 *base_indirect_ptr = cpu_to_le64(tmp);
3033 tmp += DB_PAGE_SIZE;
3034 base_indirect_ptr++;
3035 page_entries++;
3036 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
3037 cqicb->sbq_addr =
3038 cpu_to_le64(rx_ring->sbq.base_indirect_dma);
3039 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
3040 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
3041 rx_ring->sbq.next_to_use = 0;
3042 rx_ring->sbq.next_to_clean = 0;
3043 }
3044 if (rx_ring->cq_id < qdev->rss_ring_count) {
3045 /* Inbound completion handling rx_rings run in
3046 * separate NAPI contexts.
3047 */
3048 netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix,
3049 64);
3050 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3051 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3052 } else {
3053 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3054 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3055 }
3056 err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3057 CFG_LCQ, rx_ring->cq_id);
3058 if (err) {
3059 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3060 return err;
3061 }
3062 return err;
3063 }
3064
3065 static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring)
3066 {
3067 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3068 void __iomem *doorbell_area =
3069 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3070 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3071 (tx_ring->wq_id * sizeof(u64));
3072 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3073 (tx_ring->wq_id * sizeof(u64));
3074 int err = 0;
3075
3076 /*
3077 * Assign doorbell registers for this tx_ring.
3078 */
3079 /* TX PCI doorbell mem area for tx producer index */
3080 tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
3081 tx_ring->prod_idx = 0;
3082 /* TX PCI doorbell mem area + 0x04 */
3083 tx_ring->valid_db_reg = doorbell_area + 0x04;
3084
3085 /*
3086 * Assign shadow registers for this tx_ring.
3087 */
3088 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3089 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3090
3091 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3092 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3093 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3094 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3095 wqicb->rid = 0;
3096 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3097
3098 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3099
3100 qlge_init_tx_ring(qdev, tx_ring);
3101
3102 err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3103 (u16)tx_ring->wq_id);
3104 if (err) {
3105 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3106 return err;
3107 }
3108 return err;
3109 }
3110
3111 static void qlge_disable_msix(struct qlge_adapter *qdev)
3112 {
3113 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3114 pci_disable_msix(qdev->pdev);
3115 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3116 kfree(qdev->msi_x_entry);
3117 qdev->msi_x_entry = NULL;
3118 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3119 pci_disable_msi(qdev->pdev);
3120 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3121 }
3122 }
3123
3124 /* We start by asking for the number of vectors stored in
3125 * qdev->intr_count. If we can't get that many, we accept the
3126 * smaller count pci_enable_msix_range() gives us, else fall back to MSI/legacy.
3127 */
3128 static void qlge_enable_msix(struct qlge_adapter *qdev)
3129 {
3130 int i, err;
3131
3132 /* Get the MSIX vectors. */
3133 if (qlge_irq_type == MSIX_IRQ) {
3134 /* Try to alloc space for the msix struct,
3135 * if it fails then go to MSI/legacy.
3136 */
3137 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3138 sizeof(struct msix_entry),
3139 GFP_KERNEL);
3140 if (!qdev->msi_x_entry) {
3141 qlge_irq_type = MSI_IRQ;
3142 goto msi;
3143 }
3144
3145 for (i = 0; i < qdev->intr_count; i++)
3146 qdev->msi_x_entry[i].entry = i;
3147
3148 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3149 1, qdev->intr_count);
3150 if (err < 0) {
3151 kfree(qdev->msi_x_entry);
3152 qdev->msi_x_entry = NULL;
3153 netif_warn(qdev, ifup, qdev->ndev,
3154 "MSI-X Enable failed, trying MSI.\n");
3155 qlge_irq_type = MSI_IRQ;
3156 } else {
3157 qdev->intr_count = err;
3158 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3159 netif_info(qdev, ifup, qdev->ndev,
3160 "MSI-X Enabled, got %d vectors.\n",
3161 qdev->intr_count);
3162 return;
3163 }
3164 }
3165 msi:
3166 qdev->intr_count = 1;
3167 if (qlge_irq_type == MSI_IRQ) {
3168 if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
3169 set_bit(QL_MSI_ENABLED, &qdev->flags);
3170 netif_info(qdev, ifup, qdev->ndev,
3171 "Running with MSI interrupts.\n");
3172 return;
3173 }
3174 }
3175 qlge_irq_type = LEG_IRQ;
3176 set_bit(QL_LEGACY_ENABLED, &qdev->flags);
3177 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3178 "Running with legacy interrupts.\n");
3179 }
3180
3181 /* Each vector services 1 RSS ring and 1 or more
3182 * TX completion rings. This function loops through
3183 * the TX completion rings and assigns the vector that
3184 * will service it. An example would be if there are
3185 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3186 * This would mean that vector 0 would service RSS ring 0
3187 * and TX completion rings 0,1,2 and 3. Vector 1 would
3188 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3189 */
3190 static void qlge_set_tx_vect(struct qlge_adapter *qdev)
3191 {
3192 int i, j, vect;
3193 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3194
3195 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3196 /* Assign irq vectors to TX rx_rings.*/
3197 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3198 i < qdev->rx_ring_count; i++) {
3199 if (j == tx_rings_per_vector) {
3200 vect++;
3201 j = 0;
3202 }
3203 qdev->rx_ring[i].irq = vect;
3204 j++;
3205 }
3206 } else {
3207 /* For single vector all rings have an irq
3208 * of zero.
3209 */
3210 for (i = 0; i < qdev->rx_ring_count; i++)
3211 qdev->rx_ring[i].irq = 0;
3212 }
3213 }
3214
3215 /* Set the interrupt mask for this vector. Each vector
3216 * will service 1 RSS ring and 1 or more TX completion
3217 * rings. This function sets up a bit mask per vector
3218 * that indicates which rings it services.
3219 */
3220 static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx)
3221 {
3222 int j, vect = ctx->intr;
3223 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3224
3225 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3226 /* Add the RSS ring serviced by this vector
3227 * to the mask.
3228 */
3229 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3230 /* Add the TX ring(s) serviced by this vector
3231 * to the mask.
3232 */
3233 for (j = 0; j < tx_rings_per_vector; j++) {
3234 ctx->irq_mask |=
3235 (1 << qdev->rx_ring[qdev->rss_ring_count +
3236 (vect * tx_rings_per_vector) + j].cq_id);
3237 }
3238 } else {
3239 /* For single vector we just shift each queue's
3240 * ID into the mask.
3241 */
3242 for (j = 0; j < qdev->rx_ring_count; j++)
3243 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3244 }
3245 }
3246
3247 /*
3248 * Here we build the intr_context structures based on
3249 * our rx_ring count and intr vector count.
3250 * The intr_context structure is used to hook each vector
3251 * to possibly different handlers.
3252 */
3253 static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev)
3254 {
3255 int i = 0;
3256 struct intr_context *intr_context = &qdev->intr_context[0];
3257
3258 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3259 /* Each rx_ring has its
3260 * own intr_context since we have separate
3261 * vectors for each queue.
3262 */
3263 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3264 qdev->rx_ring[i].irq = i;
3265 intr_context->intr = i;
3266 intr_context->qdev = qdev;
3267 /* Set up this vector's bit-mask that indicates
3268 * which queues it services.
3269 */
3270 qlge_set_irq_mask(qdev, intr_context);
3271 /*
3272 * We set up each vector's enable/disable/read bits so
3273 * there's no bit/mask calculations in the critical path.
3274 */
3275 intr_context->intr_en_mask =
3276 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3277 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3278 | i;
3279 intr_context->intr_dis_mask =
3280 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3281 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3282 INTR_EN_IHD | i;
3283 intr_context->intr_read_mask =
3284 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3285 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3286 i;
3287 if (i == 0) {
3288 /* The first vector/queue handles
3289 * broadcast/multicast, fatal errors,
3290 * and firmware events. This in addition
3291 * to normal inbound NAPI processing.
3292 */
3293 intr_context->handler = qlge_isr;
3294 sprintf(intr_context->name, "%s-rx-%d",
3295 qdev->ndev->name, i);
3296 } else {
3297 /*
3298 * Inbound queues handle unicast frames only.
3299 */
3300 intr_context->handler = qlge_msix_rx_isr;
3301 sprintf(intr_context->name, "%s-rx-%d",
3302 qdev->ndev->name, i);
3303 }
3304 }
3305 } else {
3306 /*
3307 * All rx_rings use the same intr_context since
3308 * there is only one vector.
3309 */
3310 intr_context->intr = 0;
3311 intr_context->qdev = qdev;
3312 /*
3313 * We set up each vector's enable/disable/read bits so
3314 * there's no bit/mask calculations in the critical path.
3315 */
3316 intr_context->intr_en_mask =
3317 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3318 intr_context->intr_dis_mask =
3319 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3320 INTR_EN_TYPE_DISABLE;
3321 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
3322 /* Experience shows that when using INTx interrupts,
3323 * the device does not always auto-mask INTR_EN_EN.
3324 * Moreover, masking INTR_EN_EN manually does not
3325 * immediately prevent interrupt generation.
3326 */
3327 intr_context->intr_en_mask |= INTR_EN_EI << 16 |
3328 INTR_EN_EI;
3329 intr_context->intr_dis_mask |= INTR_EN_EI << 16;
3330 }
3331 intr_context->intr_read_mask =
3332 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3333 /*
3334 * Single interrupt means one handler for all rings.
3335 */
3336 intr_context->handler = qlge_isr;
3337 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3338 /* Set up this vector's bit-mask that indicates
3339 * which queues it services. In this case there is
3340 * a single vector so it will service all RSS and
3341 * TX completion rings.
3342 */
3343 qlge_set_irq_mask(qdev, intr_context);
3344 }
3345 /* Tell the TX completion rings which MSIx vector
3346 * they will be using.
3347 */
3348 qlge_set_tx_vect(qdev);
3349 }
3350
3351 static void qlge_free_irq(struct qlge_adapter *qdev)
3352 {
3353 int i;
3354 struct intr_context *intr_context = &qdev->intr_context[0];
3355
3356 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3357 if (intr_context->hooked) {
3358 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3359 free_irq(qdev->msi_x_entry[i].vector,
3360 &qdev->rx_ring[i]);
3361 } else {
3362 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3363 }
3364 }
3365 }
3366 qlge_disable_msix(qdev);
3367 }
3368
3369 static int qlge_request_irq(struct qlge_adapter *qdev)
3370 {
3371 int i;
3372 int status = 0;
3373 struct pci_dev *pdev = qdev->pdev;
3374 struct intr_context *intr_context = &qdev->intr_context[0];
3375
3376 qlge_resolve_queues_to_irqs(qdev);
3377
3378 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3379 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3380 status = request_irq(qdev->msi_x_entry[i].vector,
3381 intr_context->handler,
3382 0,
3383 intr_context->name,
3384 &qdev->rx_ring[i]);
3385 if (status) {
3386 netif_err(qdev, ifup, qdev->ndev,
3387 "Failed request for MSIX interrupt %d.\n",
3388 i);
3389 goto err_irq;
3390 }
3391 } else {
3392 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3393 "trying msi or legacy interrupts.\n");
3394 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3395 "%s: irq = %d.\n", __func__, pdev->irq);
3396 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3397 "%s: context->name = %s.\n", __func__,
3398 intr_context->name);
3399 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3400 "%s: dev_id = 0x%p.\n", __func__,
3401 &qdev->rx_ring[0]);
3402 status =
3403 request_irq(pdev->irq, qlge_isr,
3404 test_bit(QL_MSI_ENABLED, &qdev->flags)
3405 ? 0
3406 : IRQF_SHARED,
3407 intr_context->name, &qdev->rx_ring[0]);
3408 if (status)
3409 goto err_irq;
3410
3411 netif_err(qdev, ifup, qdev->ndev,
3412 "Hooked intr 0, queue type RX_Q, with name %s.\n",
3413 intr_context->name);
3414 }
3415 intr_context->hooked = 1;
3416 }
3417 return status;
3418 err_irq:
3419 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3420 qlge_free_irq(qdev);
3421 return status;
3422 }
3423
3424 static int qlge_start_rss(struct qlge_adapter *qdev)
3425 {
3426 static const u8 init_hash_seed[] = {
3427 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3428 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3429 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3430 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3431 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3432 };
3433 struct ricb *ricb = &qdev->ricb;
3434 int status = 0;
3435 int i;
3436 u8 *hash_id = (u8 *)ricb->hash_cq_id;
3437
3438 memset((void *)ricb, 0, sizeof(*ricb));
3439
3440 ricb->base_cq = RSS_L4K;
3441 ricb->flags =
3442 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
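/* Mask hash results to 10 bits, i.e. the 1024 indirection table entries filled below. */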
3443 ricb->mask = cpu_to_le16((u16)(0x3ff));
3444
3445 /*
3446 * Fill out the Indirection Table.
3447 */
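/* Spread the 1024 hash buckets round-robin across the RSS rings; the mask arithmetic below assumes rss_ring_count is a power of two. */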
3448 for (i = 0; i < 1024; i++)
3449 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3450
3451 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3452 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3453
3454 status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3455 if (status) {
3456 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3457 return status;
3458 }
3459 return status;
3460 }
3461
3462 static int qlge_clear_routing_entries(struct qlge_adapter *qdev)
3463 {
3464 int i, status = 0;
3465
3466 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3467 if (status)
3468 return status;
3469 /* Clear all the entries in the routing table. */
3470 for (i = 0; i < 16; i++) {
3471 status = qlge_set_routing_reg(qdev, i, 0, 0);
3472 if (status) {
3473 netif_err(qdev, ifup, qdev->ndev,
3474 "Failed to init routing register for CAM packets.\n");
3475 break;
3476 }
3477 }
3478 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3479 return status;
3480 }
3481
3482 /* Initialize the frame-to-queue routing. */
3483 static int qlge_route_initialize(struct qlge_adapter *qdev)
3484 {
3485 int status = 0;
3486
3487 /* Clear all the entries in the routing table. */
3488 status = qlge_clear_routing_entries(qdev);
3489 if (status)
3490 return status;
3491
3492 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3493 if (status)
3494 return status;
3495
3496 status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3497 RT_IDX_IP_CSUM_ERR, 1);
3498 if (status) {
3499 netif_err(qdev, ifup, qdev->ndev,
3500 "Failed to init routing register for IP CSUM error packets.\n");
3501 goto exit;
3502 }
3503 status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3504 RT_IDX_TU_CSUM_ERR, 1);
3505 if (status) {
3506 netif_err(qdev, ifup, qdev->ndev,
3507 "Failed to init routing register for TCP/UDP CSUM error packets.\n");
3508 goto exit;
3509 }
3510 status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3511 if (status) {
3512 netif_err(qdev, ifup, qdev->ndev,
3513 "Failed to init routing register for broadcast packets.\n");
3514 goto exit;
3515 }
3516 /* If we have more than one inbound queue, then turn on RSS in the
3517 * routing block.
3518 */
3519 if (qdev->rss_ring_count > 1) {
3520 status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3521 RT_IDX_RSS_MATCH, 1);
3522 if (status) {
3523 netif_err(qdev, ifup, qdev->ndev,
3524 "Failed to init routing register for MATCH RSS packets.\n");
3525 goto exit;
3526 }
3527 }
3528
3529 status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3530 RT_IDX_CAM_HIT, 1);
3531 if (status)
3532 netif_err(qdev, ifup, qdev->ndev,
3533 "Failed to init routing register for CAM packets.\n");
3534 exit:
3535 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
3536 return status;
3537 }
3538
3539 int qlge_cam_route_initialize(struct qlge_adapter *qdev)
3540 {
3541 int status, set;
3542
3543 /* Check if the link is up and use that to
3544 * determine if we are setting or clearing
3545 * the MAC address in the CAM.
3546 */
3547 set = qlge_read32(qdev, STS);
3548 set &= qdev->port_link_up;
3549 status = qlge_set_mac_addr(qdev, set);
3550 if (status) {
3551 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3552 return status;
3553 }
3554
3555 status = qlge_route_initialize(qdev);
3556 if (status)
3557 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3558
3559 return status;
3560 }
3561
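/* One-shot hardware bring-up: program the system, receive and function
 * control registers, start every rx/tx ring, download the RSS config if
 * more than one inbound queue is in use, initialize the port, set up the
 * CAM/routing filters and finally enable NAPI on the RSS queues.
 */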
3562 static int qlge_adapter_initialize(struct qlge_adapter *qdev)
3563 {
3564 u32 value, mask;
3565 int i;
3566 int status = 0;
3567
3568 /*
3569 * Set up the System register to halt on errors.
3570 */
3571 value = SYS_EFE | SYS_FAE;
3572 mask = value << 16;
3573 qlge_write32(qdev, SYS, mask | value);
3574
3575 /* Set the default queue, and VLAN behavior. */
3576 value = NIC_RCV_CFG_DFQ;
3577 mask = NIC_RCV_CFG_DFQ_MASK;
3578 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3579 value |= NIC_RCV_CFG_RV;
3580 mask |= (NIC_RCV_CFG_RV << 16);
3581 }
3582 qlge_write32(qdev, NIC_RCV_CFG, (mask | value));
3583
3584 /* Set the MPI interrupt to enabled. */
3585 qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3586
3587 /* Enable the function, set pagesize, enable error checking. */
3588 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3589 FSC_EC | FSC_VM_PAGE_4K;
3590 value |= SPLT_SETTING;
3591
3592 /* Set/clear header splitting. */
3593 mask = FSC_VM_PAGESIZE_MASK |
3594 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3595 qlge_write32(qdev, FSC, mask | value);
3596
3597 qlge_write32(qdev, SPLT_HDR, SPLT_LEN);
3598
3599 /* Set RX packet routing to use the port/PCI function on which the
3600 * packet arrived, in addition to the usual frame routing.
3601 * This is helpful with bonding where both interfaces can have
3602 * the same MAC address.
3603 */
3604 qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3605 /* Reroute all packets to our Interface.
3606 * They may have been routed to MPI firmware
3607 * due to WOL.
3608 */
3609 value = qlge_read32(qdev, MGMT_RCV_CFG);
3610 value &= ~MGMT_RCV_CFG_RM;
3611 mask = 0xffff0000;
3612
3613 /* Sticky reg needs clearing due to WOL. */
3614 qlge_write32(qdev, MGMT_RCV_CFG, mask);
3615 qlge_write32(qdev, MGMT_RCV_CFG, mask | value);
3616
3617 /* Default WOL is enabled on Mezz cards */
3618 if (qdev->pdev->subsystem_device == 0x0068 ||
3619 qdev->pdev->subsystem_device == 0x0180)
3620 qdev->wol = WAKE_MAGIC;
3621
3622 /* Start up the rx queues. */
3623 for (i = 0; i < qdev->rx_ring_count; i++) {
3624 status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]);
3625 if (status) {
3626 netif_err(qdev, ifup, qdev->ndev,
3627 "Failed to start rx ring[%d].\n", i);
3628 return status;
3629 }
3630 }
3631
3632 /* If there is more than one inbound completion queue
3633 * then download a RICB to configure RSS.
3634 */
3635 if (qdev->rss_ring_count > 1) {
3636 status = qlge_start_rss(qdev);
3637 if (status) {
3638 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3639 return status;
3640 }
3641 }
3642
3643 /* Start up the tx queues. */
3644 for (i = 0; i < qdev->tx_ring_count; i++) {
3645 status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]);
3646 if (status) {
3647 netif_err(qdev, ifup, qdev->ndev,
3648 "Failed to start tx ring[%d].\n", i);
3649 return status;
3650 }
3651 }
3652
3653 /* Initialize the port and set the max framesize. */
3654 status = qdev->nic_ops->port_initialize(qdev);
3655 if (status)
3656 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3657
3658 /* Set up the MAC address and frame routing filter. */
3659 status = qlge_cam_route_initialize(qdev);
3660 if (status) {
3661 netif_err(qdev, ifup, qdev->ndev,
3662 "Failed to init CAM/Routing tables.\n");
3663 return status;
3664 }
3665
3666 /* Start NAPI for the RSS queues. */
3667 for (i = 0; i < qdev->rss_ring_count; i++)
3668 napi_enable(&qdev->rx_ring[i].napi);
3669
3670 return status;
3671 }
3672
3673 /* Issue soft reset to chip. */
3674 static int qlge_adapter_reset(struct qlge_adapter *qdev)
3675 {
3676 u32 value;
3677 int status = 0;
3678 unsigned long end_jiffies;
3679
3680 /* Clear all the entries in the routing table. */
3681 status = qlge_clear_routing_entries(qdev);
3682 if (status) {
3683 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3684 return status;
3685 }
3686
3687 /* If the recovery bit is set, skip the mailbox command and
3688 * just clear the bit; otherwise we are in the normal reset process.
3689 */
3690 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3691 /* Stop management traffic. */
3692 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3693
3694 /* Wait for the NIC and MGMNT FIFOs to empty. */
3695 qlge_wait_fifo_empty(qdev);
3696 } else {
3697 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3698 }
3699
3700 qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3701
3702 end_jiffies = jiffies + usecs_to_jiffies(30);
3703 do {
3704 value = qlge_read32(qdev, RST_FO);
3705 if ((value & RST_FO_FR) == 0)
3706 break;
3707 cpu_relax();
3708 } while (time_before(jiffies, end_jiffies));
3709
3710 if (value & RST_FO_FR) {
3711 netif_err(qdev, ifdown, qdev->ndev,
3712 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3713 status = -ETIMEDOUT;
3714 }
3715
3716 /* Resume management traffic. */
3717 qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3718 return status;
3719 }
3720
3721 static void qlge_display_dev_info(struct net_device *ndev)
3722 {
3723 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3724
3725 netif_info(qdev, probe, qdev->ndev,
3726 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n",
3727 qdev->func,
3728 qdev->port,
3729 qdev->chip_rev_id & 0x0000000f,
3730 qdev->chip_rev_id >> 4 & 0x0000000f,
3731 qdev->chip_rev_id >> 8 & 0x0000000f,
3732 qdev->chip_rev_id >> 12 & 0x0000000f);
3733 netif_info(qdev, probe, qdev->ndev,
3734 "MAC address %pM\n", ndev->dev_addr);
3735 }
3736
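/* Program Wake-on-LAN in the MPI firmware according to the current
 * ethtool settings.  Only WAKE_MAGIC is supported; any other WOL flag
 * is rejected with -EINVAL.
 */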
3737 static int qlge_wol(struct qlge_adapter *qdev)
3738 {
3739 int status = 0;
3740 u32 wol = MB_WOL_DISABLE;
3741
3742 /* The CAM is still intact after a reset, but if we
3743 * are doing WOL, then we may need to program the
3744 * routing regs. We would also need to issue the mailbox
3745 * commands to instruct the MPI what to do per the ethtool
3746 * settings.
3747 */
3748
3749 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3750 WAKE_MCAST | WAKE_BCAST)) {
3751 netif_err(qdev, ifdown, qdev->ndev,
3752 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3753 qdev->wol);
3754 return -EINVAL;
3755 }
3756
3757 if (qdev->wol & WAKE_MAGIC) {
3758 status = qlge_mb_wol_set_magic(qdev, 1);
3759 if (status) {
3760 netif_err(qdev, ifdown, qdev->ndev,
3761 "Failed to set magic packet on %s.\n",
3762 qdev->ndev->name);
3763 return status;
3764 }
3765 netif_info(qdev, drv, qdev->ndev,
3766 "Enabled magic packet successfully on %s.\n",
3767 qdev->ndev->name);
3768
3769 wol |= MB_WOL_MAGIC_PKT;
3770 }
3771
3772 if (qdev->wol) {
3773 wol |= MB_WOL_MODE_ON;
3774 status = qlge_mb_wol_mode(qdev, wol);
3775 netif_err(qdev, drv, qdev->ndev,
3776 "WOL %s (wol code 0x%x) on %s\n",
3777 (status == 0) ? "Successfully set" : "Failed",
3778 wol, qdev->ndev->name);
3779 }
3780
3781 return status;
3782 }
3783
3784 static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev)
3785 {
3786 /* Don't kill the reset worker thread if we
3787 * are in the process of recovery.
3788 */
3789 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3790 cancel_delayed_work_sync(&qdev->asic_reset_work);
3791 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3792 cancel_delayed_work_sync(&qdev->mpi_work);
3793 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3794 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3795 }
3796
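/* Quiesce the interface: drop the link, cancel the worker threads,
 * disable NAPI and interrupts, drain the tx rings, soft-reset the chip
 * and release the rx buffers.
 */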
3797 static int qlge_adapter_down(struct qlge_adapter *qdev)
3798 {
3799 int i, status = 0;
3800
3801 qlge_link_off(qdev);
3802
3803 qlge_cancel_all_work_sync(qdev);
3804
3805 for (i = 0; i < qdev->rss_ring_count; i++)
3806 napi_disable(&qdev->rx_ring[i].napi);
3807
3808 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3809
3810 qlge_disable_interrupts(qdev);
3811
3812 qlge_tx_ring_clean(qdev);
3813
3814 /* Call netif_napi_del() from common point. */
3815 for (i = 0; i < qdev->rss_ring_count; i++)
3816 netif_napi_del(&qdev->rx_ring[i].napi);
3817
3818 status = qlge_adapter_reset(qdev);
3819 if (status)
3820 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3821 qdev->func);
3822 qlge_free_rx_buffers(qdev);
3823
3824 return status;
3825 }
3826
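/* Bring the interface up: initialize the hardware, repost rx buffers,
 * restore the carrier, rx-mode and VLAN state, and re-enable interrupts
 * before waking the tx queues.
 */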
3827 static int qlge_adapter_up(struct qlge_adapter *qdev)
3828 {
3829 int err = 0;
3830
3831 err = qlge_adapter_initialize(qdev);
3832 if (err) {
3833 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3834 goto err_init;
3835 }
3836 set_bit(QL_ADAPTER_UP, &qdev->flags);
3837 qlge_alloc_rx_buffers(qdev);
3838 /* If the port is initialized and the
3839 * link is up, then turn on the carrier.
3840 */
3841 if ((qlge_read32(qdev, STS) & qdev->port_init) &&
3842 (qlge_read32(qdev, STS) & qdev->port_link_up))
3843 qlge_link_on(qdev);
3844 /* Restore rx mode. */
3845 clear_bit(QL_ALLMULTI, &qdev->flags);
3846 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3847 qlge_set_multicast_list(qdev->ndev);
3848
3849 /* Restore vlan setting. */
3850 qlge_restore_vlan(qdev);
3851
3852 qlge_enable_interrupts(qdev);
3853 qlge_enable_all_completion_interrupts(qdev);
3854 netif_tx_start_all_queues(qdev->ndev);
3855
3856 return 0;
3857 err_init:
3858 qlge_adapter_reset(qdev);
3859 return err;
3860 }
3861
3862 static void qlge_release_adapter_resources(struct qlge_adapter *qdev)
3863 {
3864 qlge_free_mem_resources(qdev);
3865 qlge_free_irq(qdev);
3866 }
3867
3868 static int qlge_get_adapter_resources(struct qlge_adapter *qdev)
3869 {
3870 if (qlge_alloc_mem_resources(qdev)) {
3871 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
3872 return -ENOMEM;
3873 }
3874 return qlge_request_irq(qdev);
3875 }
3876
3877 static int qlge_close(struct net_device *ndev)
3878 {
3879 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3880 int i;
3881
3882 /* If we hit the pci_channel_io_perm_failure
3883 * condition, then we already
3884 * brought the adapter down.
3885 */
3886 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
3887 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
3888 clear_bit(QL_EEH_FATAL, &qdev->flags);
3889 return 0;
3890 }
3891
3892 /*
3893 * Wait for device to recover from a reset.
3894 * (Rarely happens, but possible.)
3895 */
3896 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3897 msleep(1);
3898
3899 /* Make sure refill_work doesn't re-enable napi */
3900 for (i = 0; i < qdev->rss_ring_count; i++)
3901 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);
3902
3903 qlge_adapter_down(qdev);
3904 qlge_release_adapter_resources(qdev);
3905 return 0;
3906 }
3907
3908 static void qlge_set_lb_size(struct qlge_adapter *qdev)
3909 {
3910 if (qdev->ndev->mtu <= 1500)
3911 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
3912 else
3913 qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
3914 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
3915 }
3916
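/* Work out the ring topology for this bring-up: one RSS (inbound) ring
 * per MSI-X vector actually granted, one tx ring per online CPU (capped
 * at MAX_CPUS), and one outbound completion ring per tx ring.
 */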
3917 static int qlge_configure_rings(struct qlge_adapter *qdev)
3918 {
3919 int i;
3920 struct rx_ring *rx_ring;
3921 struct tx_ring *tx_ring;
3922 int cpu_cnt = min_t(int, MAX_CPUS, num_online_cpus());
3923
3924 /* In a perfect world we have one RSS ring for each CPU
3925 * and each has its own vector. To do that we ask for
3926 * cpu_cnt vectors. qlge_enable_msix() will adjust the
3927 * vector count to what we actually get. We then
3928 * allocate an RSS ring for each.
3929 * Essentially, we are doing min(cpu_count, msix_vector_count).
3930 */
3931 qdev->intr_count = cpu_cnt;
3932 qlge_enable_msix(qdev);
3933 /* Adjust the RSS ring count to the actual vector count. */
3934 qdev->rss_ring_count = qdev->intr_count;
3935 qdev->tx_ring_count = cpu_cnt;
3936 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
3937
3938 for (i = 0; i < qdev->tx_ring_count; i++) {
3939 tx_ring = &qdev->tx_ring[i];
3940 memset((void *)tx_ring, 0, sizeof(*tx_ring));
3941 tx_ring->qdev = qdev;
3942 tx_ring->wq_id = i;
3943 tx_ring->wq_len = qdev->tx_ring_size;
3944 tx_ring->wq_size =
3945 tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req);
3946
3947 /*
3948 * The completion queue IDs for the tx rings start
3949 * immediately after the rss rings.
3950 */
3951 tx_ring->cq_id = qdev->rss_ring_count + i;
3952 }
3953
3954 for (i = 0; i < qdev->rx_ring_count; i++) {
3955 rx_ring = &qdev->rx_ring[i];
3956 memset((void *)rx_ring, 0, sizeof(*rx_ring));
3957 rx_ring->qdev = qdev;
3958 rx_ring->cq_id = i;
3959 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3960 if (i < qdev->rss_ring_count) {
3961 /*
3962 * Inbound (RSS) queues.
3963 */
3964 rx_ring->cq_len = qdev->rx_ring_size;
3965 rx_ring->cq_size =
3966 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3967 rx_ring->lbq.type = QLGE_LB;
3968 rx_ring->sbq.type = QLGE_SB;
3969 INIT_DELAYED_WORK(&rx_ring->refill_work,
3970 &qlge_slow_refill);
3971 } else {
3972 /*
3973 * Outbound queue handles outbound completions only.
3974 */
3975 /* outbound cq is same size as tx_ring it services. */
3976 rx_ring->cq_len = qdev->tx_ring_size;
3977 rx_ring->cq_size =
3978 rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb);
3979 }
3980 }
3981 return 0;
3982 }
3983
3984 static int qlge_open(struct net_device *ndev)
3985 {
3986 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
3987 int err = 0;
3988
3989 err = qlge_adapter_reset(qdev);
3990 if (err)
3991 return err;
3992
3993 qlge_set_lb_size(qdev);
3994 err = qlge_configure_rings(qdev);
3995 if (err)
3996 return err;
3997
3998 err = qlge_get_adapter_resources(qdev);
3999 if (err)
4000 goto error_up;
4001
4002 err = qlge_adapter_up(qdev);
4003 if (err)
4004 goto error_up;
4005
4006 return err;
4007
4008 error_up:
4009 qlge_release_adapter_resources(qdev);
4010 return err;
4011 }
4012
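/* Re-size the large receive buffers after an MTU change by cycling the
 * adapter down and back up.  Waits briefly for any in-flight reset to
 * finish before touching the hardware.
 */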
4013 static int qlge_change_rx_buffers(struct qlge_adapter *qdev)
4014 {
4015 int status;
4016
4017 /* Wait for an outstanding reset to complete. */
4018 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4019 int i = 4;
4020
4021 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4022 netif_err(qdev, ifup, qdev->ndev,
4023 "Waiting for adapter UP...\n");
4024 ssleep(1);
4025 }
4026
4027 if (!i) {
4028 netif_err(qdev, ifup, qdev->ndev,
4029 "Timed out waiting for adapter UP\n");
4030 return -ETIMEDOUT;
4031 }
4032 }
4033
4034 status = qlge_adapter_down(qdev);
4035 if (status)
4036 goto error;
4037
4038 qlge_set_lb_size(qdev);
4039
4040 status = qlge_adapter_up(qdev);
4041 if (status)
4042 goto error;
4043
4044 return status;
4045 error:
4046 netif_alert(qdev, ifup, qdev->ndev,
4047 "Driver up/down cycle failed, closing device.\n");
4048 set_bit(QL_ADAPTER_UP, &qdev->flags);
4049 dev_close(qdev->ndev);
4050 return status;
4051 }
4052
4053 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4054 {
4055 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4056 int status;
4057
4058 if (ndev->mtu == 1500 && new_mtu == 9000)
4059 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4060 else if (ndev->mtu == 9000 && new_mtu == 1500)
4061 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4062 else
4063 return -EINVAL;
4064
4065 queue_delayed_work(qdev->workqueue,
4066 &qdev->mpi_port_cfg_work, 3 * HZ);
4067
4068 ndev->mtu = new_mtu;
4069
4070 if (!netif_running(qdev->ndev))
4071 return 0;
4072
4073 status = qlge_change_rx_buffers(qdev);
4074 if (status) {
4075 netif_err(qdev, ifup, qdev->ndev,
4076 "Changing MTU failed.\n");
4077 }
4078
4079 return status;
4080 }
4081
4082 static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4084 {
4085 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4086 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4087 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4088 unsigned long pkts, mcast, dropped, errors, bytes;
4089 int i;
4090
4091 /* Get RX stats. */
4092 pkts = mcast = dropped = errors = bytes = 0;
4093 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4094 pkts += rx_ring->rx_packets;
4095 bytes += rx_ring->rx_bytes;
4096 dropped += rx_ring->rx_dropped;
4097 errors += rx_ring->rx_errors;
4098 mcast += rx_ring->rx_multicast;
4099 }
4100 ndev->stats.rx_packets = pkts;
4101 ndev->stats.rx_bytes = bytes;
4102 ndev->stats.rx_dropped = dropped;
4103 ndev->stats.rx_errors = errors;
4104 ndev->stats.multicast = mcast;
4105
4106 /* Get TX stats. */
4107 pkts = errors = bytes = 0;
4108 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4109 pkts += tx_ring->tx_packets;
4110 bytes += tx_ring->tx_bytes;
4111 errors += tx_ring->tx_errors;
4112 }
4113 ndev->stats.tx_packets = pkts;
4114 ndev->stats.tx_bytes = bytes;
4115 ndev->stats.tx_errors = errors;
4116 return &ndev->stats;
4117 }
4118
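/* Update the hardware rx filters to match the net_device flags:
 * toggle the promiscuous and all-multicast routing slots on
 * transitions, and reload the multicast address list into the CAM
 * when it is non-empty.
 */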
4119 static void qlge_set_multicast_list(struct net_device *ndev)
4120 {
4121 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4122 struct netdev_hw_addr *ha;
4123 int i, status;
4124
4125 status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4126 if (status)
4127 return;
4128 /*
4129 * Set or clear promiscuous mode if a
4130 * transition is taking place.
4131 */
4132 if (ndev->flags & IFF_PROMISC) {
4133 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4134 if (qlge_set_routing_reg
4135 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4136 netif_err(qdev, hw, qdev->ndev,
4137 "Failed to set promiscuous mode.\n");
4138 } else {
4139 set_bit(QL_PROMISCUOUS, &qdev->flags);
4140 }
4141 }
4142 } else {
4143 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4144 if (qlge_set_routing_reg
4145 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4146 netif_err(qdev, hw, qdev->ndev,
4147 "Failed to clear promiscuous mode.\n");
4148 } else {
4149 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4150 }
4151 }
4152 }
4153
4154 /*
4155 * Set or clear all multicast mode if a
4156 * transition is taking place.
4157 */
4158 if ((ndev->flags & IFF_ALLMULTI) ||
4159 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4160 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4161 if (qlge_set_routing_reg
4162 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4163 netif_err(qdev, hw, qdev->ndev,
4164 "Failed to set all-multi mode.\n");
4165 } else {
4166 set_bit(QL_ALLMULTI, &qdev->flags);
4167 }
4168 }
4169 } else {
4170 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4171 if (qlge_set_routing_reg
4172 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4173 netif_err(qdev, hw, qdev->ndev,
4174 "Failed to clear all-multi mode.\n");
4175 } else {
4176 clear_bit(QL_ALLMULTI, &qdev->flags);
4177 }
4178 }
4179 }
4180
4181 if (!netdev_mc_empty(ndev)) {
4182 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4183 if (status)
4184 goto exit;
4185 i = 0;
4186 netdev_for_each_mc_addr(ha, ndev) {
4187 if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr,
4188 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4189 netif_err(qdev, hw, qdev->ndev,
4190 "Failed to loadmulticast address.\n");
4191 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4192 goto exit;
4193 }
4194 i++;
4195 }
4196 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4197 if (qlge_set_routing_reg
4198 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4199 netif_err(qdev, hw, qdev->ndev,
4200 "Failed to set multicast match mode.\n");
4201 } else {
4202 set_bit(QL_ALLMULTI, &qdev->flags);
4203 }
4204 }
4205 exit:
4206 qlge_sem_unlock(qdev, SEM_RT_IDX_MASK);
4207 }
4208
4209 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4210 {
4211 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4212 struct sockaddr *addr = p;
4213 int status;
4214
4215 if (!is_valid_ether_addr(addr->sa_data))
4216 return -EADDRNOTAVAIL;
4217 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4218 /* Update local copy of current mac address. */
4219 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4220
4221 status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4222 if (status)
4223 return status;
4224 status = qlge_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
4225 MAC_ADDR_TYPE_CAM_MAC,
4226 qdev->func * MAX_CQ);
4227 if (status)
4228 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4229 qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4230 return status;
4231 }
4232
4233 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4234 {
4235 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4236
4237 qlge_queue_asic_error(qdev);
4238 }
4239
4240 static void qlge_asic_reset_work(struct work_struct *work)
4241 {
4242 struct qlge_adapter *qdev =
4243 container_of(work, struct qlge_adapter, asic_reset_work.work);
4244 int status;
4245
4246 rtnl_lock();
4247 status = qlge_adapter_down(qdev);
4248 if (status)
4249 goto error;
4250
4251 status = qlge_adapter_up(qdev);
4252 if (status)
4253 goto error;
4254
4255 /* Restore rx mode. */
4256 clear_bit(QL_ALLMULTI, &qdev->flags);
4257 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4258 qlge_set_multicast_list(qdev->ndev);
4259
4260 rtnl_unlock();
4261 return;
4262 error:
4263 netif_alert(qdev, ifup, qdev->ndev,
4264 "Driver up/down cycle failed, closing device\n");
4265
4266 set_bit(QL_ADAPTER_UP, &qdev->flags);
4267 dev_close(qdev->ndev);
4268 rtnl_unlock();
4269 }
4270
4271 static const struct nic_operations qla8012_nic_ops = {
4272 .get_flash = qlge_get_8012_flash_params,
4273 .port_initialize = qlge_8012_port_initialize,
4274 };
4275
4276 static const struct nic_operations qla8000_nic_ops = {
4277 .get_flash = qlge_get_8000_flash_params,
4278 .port_initialize = qlge_8000_port_initialize,
4279 };
4280
4281 /* Find the pcie function number for the other NIC
4282 * on this chip. Since both NIC functions share a
4283 * common firmware we have the lowest enabled function
4284 * do any common work. Examples would be resetting
4285 * after a fatal firmware error, or doing a firmware
4286 * coredump.
4287 */
4288 static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev)
4289 {
4290 int status = 0;
4291 u32 temp;
4292 u32 nic_func1, nic_func2;
4293
4294 status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4295 &temp);
4296 if (status)
4297 return status;
4298
4299 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4300 MPI_TEST_NIC_FUNC_MASK);
4301 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4302 MPI_TEST_NIC_FUNC_MASK);
4303
4304 if (qdev->func == nic_func1)
4305 qdev->alt_func = nic_func2;
4306 else if (qdev->func == nic_func2)
4307 qdev->alt_func = nic_func1;
4308 else
4309 status = -EIO;
4310
4311 return status;
4312 }
4313
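/* Work out which PCI function and port this instance is driving and
 * derive the function-specific resources from that: XGMAC semaphore
 * mask, link/init status bits, mailbox register addresses and the
 * nic_ops for the 8012 or 8000 device.
 */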
4314 static int qlge_get_board_info(struct qlge_adapter *qdev)
4315 {
4316 int status;
4317
4318 qdev->func =
4319 (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4320 if (qdev->func > 3)
4321 return -EIO;
4322
4323 status = qlge_get_alt_pcie_func(qdev);
4324 if (status)
4325 return status;
4326
4327 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4328 if (qdev->port) {
4329 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4330 qdev->port_link_up = STS_PL1;
4331 qdev->port_init = STS_PI1;
4332 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4333 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4334 } else {
4335 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4336 qdev->port_link_up = STS_PL0;
4337 qdev->port_init = STS_PI0;
4338 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4339 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4340 }
4341 qdev->chip_rev_id = qlge_read32(qdev, REV_ID);
4342 qdev->device_id = qdev->pdev->device;
4343 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4344 qdev->nic_ops = &qla8012_nic_ops;
4345 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4346 qdev->nic_ops = &qla8000_nic_ops;
4347 return status;
4348 }
4349
4350 static void qlge_release_all(struct pci_dev *pdev)
4351 {
4352 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4353
4354 if (qdev->workqueue) {
4355 destroy_workqueue(qdev->workqueue);
4356 qdev->workqueue = NULL;
4357 }
4358
4359 if (qdev->reg_base)
4360 iounmap(qdev->reg_base);
4361 if (qdev->doorbell_area)
4362 iounmap(qdev->doorbell_area);
4363 vfree(qdev->mpi_coredump);
4364 pci_release_regions(pdev);
4365 }
4366
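/* Early PCI/device setup shared by the probe path: enable the device,
 * map the control and doorbell BARs, pick a DMA mask, read the board
 * configuration from flash, and create the per-device workqueue and
 * delayed work items.
 */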
4367 static int qlge_init_device(struct pci_dev *pdev, struct qlge_adapter *qdev,
4368 int cards_found)
4369 {
4370 struct net_device *ndev = qdev->ndev;
4371 int err = 0;
4372
4373 err = pci_enable_device(pdev);
4374 if (err) {
4375 dev_err(&pdev->dev, "PCI device enable failed.\n");
4376 return err;
4377 }
4378
4379 qdev->pdev = pdev;
4380 pci_set_drvdata(pdev, qdev);
4381
4382 /* Set PCIe read request size */
4383 err = pcie_set_readrq(pdev, 4096);
4384 if (err) {
4385 dev_err(&pdev->dev, "Set readrq failed.\n");
4386 goto err_disable_pci;
4387 }
4388
4389 err = pci_request_regions(pdev, DRV_NAME);
4390 if (err) {
4391 dev_err(&pdev->dev, "PCI region request failed.\n");
4392 goto err_disable_pci;
4393 }
4394
4395 pci_set_master(pdev);
4396 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4397 set_bit(QL_DMA64, &qdev->flags);
4398 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4399 } else {
4400 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4401 if (!err)
4402 err = dma_set_coherent_mask(&pdev->dev,
4403 DMA_BIT_MASK(32));
4404 }
4405
4406 if (err) {
4407 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4408 goto err_release_pci;
4409 }
4410
4411 /* Set PCIe reset type for EEH to fundamental. */
4412 pdev->needs_freset = 1;
4413 pci_save_state(pdev);
4414 qdev->reg_base =
4415 ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
4416 if (!qdev->reg_base) {
4417 dev_err(&pdev->dev, "Register mapping failed.\n");
4418 err = -ENOMEM;
4419 goto err_release_pci;
4420 }
4421
4422 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4423 qdev->doorbell_area =
4424 ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
4425 if (!qdev->doorbell_area) {
4426 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4427 err = -ENOMEM;
4428 goto err_iounmap_base;
4429 }
4430
4431 err = qlge_get_board_info(qdev);
4432 if (err) {
4433 dev_err(&pdev->dev, "Register access failed.\n");
4434 err = -EIO;
4435 goto err_iounmap_doorbell;
4436 }
4437 qdev->msg_enable = netif_msg_init(debug, default_msg);
4438 spin_lock_init(&qdev->stats_lock);
4439
4440 if (qlge_mpi_coredump) {
4441 qdev->mpi_coredump =
4442 vmalloc(sizeof(struct qlge_mpi_coredump));
4443 if (!qdev->mpi_coredump) {
4444 err = -ENOMEM;
4445 goto err_iounmap_doorbell;
4446 }
4447 if (qlge_force_coredump)
4448 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4449 }
4450 /* make sure the flash contents are good */
4451 err = qdev->nic_ops->get_flash(qdev);
4452 if (err) {
4453 dev_err(&pdev->dev, "Invalid FLASH.\n");
4454 goto err_free_mpi_coredump;
4455 }
4456
4457 /* Keep local copy of current mac address. */
4458 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4459
4460 /* Set up the default ring sizes. */
4461 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4462 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4463
4464 /* Set up the coalescing parameters. */
4465 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4466 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4467 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4468 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4469
4470 /*
4471 * Set up the operating parameters.
4472 */
4473 qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4474 ndev->name);
4475 if (!qdev->workqueue) {
4476 err = -ENOMEM;
4477 goto err_free_mpi_coredump;
4478 }
4479
4480 INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work);
4481 INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work);
4482 INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work);
4483 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work);
4484 INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work);
4485 init_completion(&qdev->ide_completion);
4486 mutex_init(&qdev->mpi_mutex);
4487
4488 if (!cards_found) {
4489 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4490 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4491 DRV_NAME, DRV_VERSION);
4492 }
4493 return 0;
4494
4495 err_free_mpi_coredump:
4496 vfree(qdev->mpi_coredump);
4497 err_iounmap_doorbell:
4498 iounmap(qdev->doorbell_area);
4499 err_iounmap_base:
4500 iounmap(qdev->reg_base);
4501 err_release_pci:
4502 pci_release_regions(pdev);
4503 err_disable_pci:
4504 pci_disable_device(pdev);
4505
4506 return err;
4507 }
4508
4509 static const struct net_device_ops qlge_netdev_ops = {
4510 .ndo_open = qlge_open,
4511 .ndo_stop = qlge_close,
4512 .ndo_start_xmit = qlge_send,
4513 .ndo_change_mtu = qlge_change_mtu,
4514 .ndo_get_stats = qlge_get_stats,
4515 .ndo_set_rx_mode = qlge_set_multicast_list,
4516 .ndo_set_mac_address = qlge_set_mac_address,
4517 .ndo_validate_addr = eth_validate_addr,
4518 .ndo_tx_timeout = qlge_tx_timeout,
4519 .ndo_set_features = qlge_set_features,
4520 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4521 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4522 };
4523
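/* Deferrable watchdog used to detect a dead PCI bus (EEH): read the
 * status register every 5 seconds and stop rearming the timer once the
 * channel is reported offline.
 */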
4524 static void qlge_timer(struct timer_list *t)
4525 {
4526 struct qlge_adapter *qdev = from_timer(qdev, t, timer);
4527 u32 var = 0;
4528
4529 var = qlge_read32(qdev, STS);
4530 if (pci_channel_offline(qdev->pdev)) {
4531 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4532 return;
4533 }
4534
4535 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4536 }
4537
4538 static const struct devlink_ops qlge_devlink_ops;
4539
4540 static int qlge_probe(struct pci_dev *pdev,
4541 const struct pci_device_id *pci_entry)
4542 {
4543 struct qlge_netdev_priv *ndev_priv;
4544 struct qlge_adapter *qdev = NULL;
4545 struct net_device *ndev = NULL;
4546 struct devlink *devlink;
4547 static int cards_found;
4548 int err;
4549
4550 devlink = devlink_alloc(&qlge_devlink_ops, sizeof(struct qlge_adapter),
4551 &pdev->dev);
4552 if (!devlink)
4553 return -ENOMEM;
4554
4555 qdev = devlink_priv(devlink);
4556
4557 ndev = alloc_etherdev_mq(sizeof(struct qlge_netdev_priv),
4558 min(MAX_CPUS,
4559 netif_get_num_default_rss_queues()));
4560 if (!ndev) {
4561 err = -ENOMEM;
4562 goto devlink_free;
4563 }
4564
4565 ndev_priv = netdev_priv(ndev);
4566 ndev_priv->qdev = qdev;
4567 ndev_priv->ndev = ndev;
4568 qdev->ndev = ndev;
4569 err = qlge_init_device(pdev, qdev, cards_found);
4570 if (err < 0)
4571 goto netdev_free;
4572
4573 SET_NETDEV_DEV(ndev, &pdev->dev);
4574 ndev->hw_features = NETIF_F_SG |
4575 NETIF_F_IP_CSUM |
4576 NETIF_F_TSO |
4577 NETIF_F_TSO_ECN |
4578 NETIF_F_HW_VLAN_CTAG_TX |
4579 NETIF_F_HW_VLAN_CTAG_RX |
4580 NETIF_F_HW_VLAN_CTAG_FILTER |
4581 NETIF_F_RXCSUM;
4582 ndev->features = ndev->hw_features;
4583 ndev->vlan_features = ndev->hw_features;
4584 /* vlan gets same features (except the vlan tag offloads and filter) */
4585 ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4586 NETIF_F_HW_VLAN_CTAG_TX |
4587 NETIF_F_HW_VLAN_CTAG_RX);
4588
4589 if (test_bit(QL_DMA64, &qdev->flags))
4590 ndev->features |= NETIF_F_HIGHDMA;
4591
4592 /*
4593 * Set up net_device structure.
4594 */
4595 ndev->tx_queue_len = qdev->tx_ring_size;
4596 ndev->irq = pdev->irq;
4597
4598 ndev->netdev_ops = &qlge_netdev_ops;
4599 ndev->ethtool_ops = &qlge_ethtool_ops;
4600 ndev->watchdog_timeo = 10 * HZ;
4601
4602 /* MTU range: this driver only supports 1500 or 9000, so this only
4603 * filters out values above or below, and we'll rely on
4604 * qlge_change_mtu to make sure only 1500 or 9000 are allowed
4605 */
4606 ndev->min_mtu = ETH_DATA_LEN;
4607 ndev->max_mtu = 9000;
4608
4609 err = register_netdev(ndev);
4610 if (err) {
4611 dev_err(&pdev->dev, "net device registration failed.\n");
4612 qlge_release_all(pdev);
4613 pci_disable_device(pdev);
4614 goto netdev_free;
4615 }
4616
4617 err = devlink_register(devlink);
4618 if (err)
4619 goto netdev_free;
4620
4621 err = qlge_health_create_reporters(qdev);
4622
4623 if (err)
4624 goto devlink_unregister;
4625
4626 /* Start up the timer to trigger EEH if
4627 * the bus goes dead
4628 */
4629 timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE);
4630 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4631 qlge_link_off(qdev);
4632 qlge_display_dev_info(ndev);
4633 atomic_set(&qdev->lb_count, 0);
4634 cards_found++;
4635 return 0;
4636
4637 devlink_unregister:
4638 devlink_unregister(devlink);
4639 netdev_free:
4640 free_netdev(ndev);
4641 devlink_free:
4642 devlink_free(devlink);
4643
4644 return err;
4645 }
4646
4647 netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev)
4648 {
4649 return qlge_send(skb, ndev);
4650 }
4651
4652 int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4653 {
4654 return qlge_clean_inbound_rx_ring(rx_ring, budget);
4655 }
4656
4657 static void qlge_remove(struct pci_dev *pdev)
4658 {
4659 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4660 struct net_device *ndev = qdev->ndev;
4661 struct devlink *devlink = priv_to_devlink(qdev);
4662
4663 del_timer_sync(&qdev->timer);
4664 qlge_cancel_all_work_sync(qdev);
4665 unregister_netdev(ndev);
4666 qlge_release_all(pdev);
4667 pci_disable_device(pdev);
4668 devlink_health_reporter_destroy(qdev->reporter);
4669 devlink_unregister(devlink);
4670 devlink_free(devlink);
4671 free_netdev(ndev);
4672 }
4673
4674 /* Clean up resources without touching hardware. */
4675 static void qlge_eeh_close(struct net_device *ndev)
4676 {
4677 struct qlge_adapter *qdev = netdev_to_qdev(ndev);
4678 int i;
4679
4680 if (netif_carrier_ok(ndev)) {
4681 netif_carrier_off(ndev);
4682 netif_stop_queue(ndev);
4683 }
4684
4685 /* Cancel all outstanding work; the caller has already stopped the timer. */
4686 qlge_cancel_all_work_sync(qdev);
4687
4688 for (i = 0; i < qdev->rss_ring_count; i++)
4689 netif_napi_del(&qdev->rx_ring[i].napi);
4690
4691 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4692 qlge_tx_ring_clean(qdev);
4693 qlge_free_rx_buffers(qdev);
4694 qlge_release_adapter_resources(qdev);
4695 }
4696
4697 /*
4698 * This callback is called by the PCI subsystem whenever
4699 * a PCI bus error is detected.
4700 */
4701 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4702 pci_channel_state_t state)
4703 {
4704 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4705 struct net_device *ndev = qdev->ndev;
4706
4707 switch (state) {
4708 case pci_channel_io_normal:
4709 return PCI_ERS_RESULT_CAN_RECOVER;
4710 case pci_channel_io_frozen:
4711 netif_device_detach(ndev);
4712 del_timer_sync(&qdev->timer);
4713 if (netif_running(ndev))
4714 qlge_eeh_close(ndev);
4715 pci_disable_device(pdev);
4716 return PCI_ERS_RESULT_NEED_RESET;
4717 case pci_channel_io_perm_failure:
4718 dev_err(&pdev->dev,
4719 "%s: pci_channel_io_perm_failure.\n", __func__);
4720 del_timer_sync(&qdev->timer);
4721 qlge_eeh_close(ndev);
4722 set_bit(QL_EEH_FATAL, &qdev->flags);
4723 return PCI_ERS_RESULT_DISCONNECT;
4724 }
4725
4726 /* Request a slot reset. */
4727 return PCI_ERS_RESULT_NEED_RESET;
4728 }
4729
4730 /*
4731 * This callback is called after the PCI bus has been reset.
4732 * Basically, this tries to restart the card from scratch.
4733 * This is a shortened version of the device probe/discovery code,
4734 * it resembles the first half of the qlge_probe() routine.
4735 */
4736 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4737 {
4738 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4739
4740 pdev->error_state = pci_channel_io_normal;
4741
4742 pci_restore_state(pdev);
4743 if (pci_enable_device(pdev)) {
4744 netif_err(qdev, ifup, qdev->ndev,
4745 "Cannot re-enable PCI device after reset.\n");
4746 return PCI_ERS_RESULT_DISCONNECT;
4747 }
4748 pci_set_master(pdev);
4749
4750 if (qlge_adapter_reset(qdev)) {
4751 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4752 set_bit(QL_EEH_FATAL, &qdev->flags);
4753 return PCI_ERS_RESULT_DISCONNECT;
4754 }
4755
4756 return PCI_ERS_RESULT_RECOVERED;
4757 }
4758
4759 static void qlge_io_resume(struct pci_dev *pdev)
4760 {
4761 struct qlge_adapter *qdev = pci_get_drvdata(pdev);
4762 struct net_device *ndev = qdev->ndev;
4763 int err = 0;
4764
4765 if (netif_running(ndev)) {
4766 err = qlge_open(ndev);
4767 if (err) {
4768 netif_err(qdev, ifup, qdev->ndev,
4769 "Device initialization failed after reset.\n");
4770 return;
4771 }
4772 } else {
4773 netif_err(qdev, ifup, qdev->ndev,
4774 "Device was not running prior to EEH.\n");
4775 }
4776 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4777 netif_device_attach(ndev);
4778 }
4779
4780 static const struct pci_error_handlers qlge_err_handler = {
4781 .error_detected = qlge_io_error_detected,
4782 .slot_reset = qlge_io_slot_reset,
4783 .resume = qlge_io_resume,
4784 };
4785
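/* PM suspend handler (also used by shutdown): detach the netdev, stop
 * the watchdog timer, bring the adapter down if it was running and arm
 * Wake-on-LAN before the device loses power.
 */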
4786 static int __maybe_unused qlge_suspend(struct device *dev_d)
4787 {
4788 struct pci_dev *pdev = to_pci_dev(dev_d);
4789 struct qlge_adapter *qdev;
4790 struct net_device *ndev;
4791 int err;
4792
4793 qdev = pci_get_drvdata(pdev);
4794 ndev = qdev->ndev;
4795 netif_device_detach(ndev);
4796 del_timer_sync(&qdev->timer);
4797
4798 if (netif_running(ndev)) {
4799 err = qlge_adapter_down(qdev);
4800 if (err)
4801 return err;
4802 }
4803
4804 qlge_wol(qdev);
4805
4806 return 0;
4807 }
4808
4809 static int __maybe_unused qlge_resume(struct device *dev_d)
4810 {
4811 struct pci_dev *pdev = to_pci_dev(dev_d);
4812 struct qlge_adapter *qdev;
4813 struct net_device *ndev;
4814 int err;
4815
4816 qdev = pci_get_drvdata(pdev);
4817 ndev = qdev->ndev;
4818
4819 pci_set_master(pdev);
4820
4821 device_wakeup_disable(dev_d);
4822
4823 if (netif_running(ndev)) {
4824 err = qlge_adapter_up(qdev);
4825 if (err)
4826 return err;
4827 }
4828
4829 mod_timer(&qdev->timer, jiffies + (5 * HZ));
4830 netif_device_attach(ndev);
4831
4832 return 0;
4833 }
4834
4835 static void qlge_shutdown(struct pci_dev *pdev)
4836 {
4837 qlge_suspend(&pdev->dev);
4838 }
4839
4840 static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);
4841
4842 static struct pci_driver qlge_driver = {
4843 .name = DRV_NAME,
4844 .id_table = qlge_pci_tbl,
4845 .probe = qlge_probe,
4846 .remove = qlge_remove,
4847 .driver.pm = &qlge_pm_ops,
4848 .shutdown = qlge_shutdown,
4849 .err_handler = &qlge_err_handler
4850 };
4851
4852 module_pci_driver(qlge_driver);
4853