1 /*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <net/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
39 #include <linux/mm.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
43
44 #include "qlge.h"
45
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
48
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
53
54 static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER | */
57 NETIF_MSG_IFDOWN |
58 NETIF_MSG_IFUP |
59 NETIF_MSG_RX_ERR |
60 NETIF_MSG_TX_ERR |
61 /* NETIF_MSG_TX_QUEUED | */
62 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
65
66 static int debug = -1; /* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70 #define MSIX_IRQ 0
71 #define MSI_IRQ 1
72 #define LEG_IRQ 2
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80 "Option to enable MPI firmware dump. "
81 "Default is OFF - Do Not allocate memory. ");
82
83 static int qlge_force_coredump;
84 module_param(qlge_force_coredump, int, 0);
85 MODULE_PARM_DESC(qlge_force_coredump,
86 "Option to allow force of firmware core dump. "
87 "Default is OFF - Do not allow.");
88
89 static const struct pci_device_id qlge_pci_tbl[] = {
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92 /* required last entry */
93 {0,}
94 };
95
96 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
97
98 static int ql_wol(struct ql_adapter *);
99 static void qlge_set_multicast_list(struct net_device *);
100 static int ql_adapter_down(struct ql_adapter *);
101 static int ql_adapter_up(struct ql_adapter *);
102
103 /* This hardware semaphore provides exclusive access to
104 * resources shared between the NIC driver, MPI firmware,
105 * FCoE firmware and the FC driver.
106 */
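/* Typical usage, as seen later in this file (a sketch, not a required
 * sequence):
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... access the shared resource ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */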
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
108 {
109 u32 sem_bits = 0;
110
111 switch (sem_mask) {
112 case SEM_XGMAC0_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
114 break;
115 case SEM_XGMAC1_MASK:
116 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
117 break;
118 case SEM_ICB_MASK:
119 sem_bits = SEM_SET << SEM_ICB_SHIFT;
120 break;
121 case SEM_MAC_ADDR_MASK:
122 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
123 break;
124 case SEM_FLASH_MASK:
125 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
126 break;
127 case SEM_PROBE_MASK:
128 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
129 break;
130 case SEM_RT_IDX_MASK:
131 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
132 break;
133 case SEM_PROC_REG_MASK:
134 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
135 break;
136 default:
137 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
138 return -EINVAL;
139 }
140
141 ql_write32(qdev, SEM, sem_bits | sem_mask);
142 return !(ql_read32(qdev, SEM) & sem_bits);
143 }
144
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
146 {
147 unsigned int wait_count = 30;
148 do {
149 if (!ql_sem_trylock(qdev, sem_mask))
150 return 0;
151 udelay(100);
152 } while (--wait_count);
153 return -ETIMEDOUT;
154 }
155
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
157 {
158 ql_write32(qdev, SEM, sem_mask);
159 ql_read32(qdev, SEM); /* flush */
160 }
161
162 /* This function waits for a specific bit to come ready
163 * in a given register. It is used mostly by the initialization
164 * process, but is also used in kernel thread APIs such as
165 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
166 */
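/* Returns 0 once 'bit' is set, -EIO if 'err_bit' is seen first, and
 * -ETIMEDOUT if neither happens within UDELAY_COUNT polls.
 */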
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
168 {
169 u32 temp;
170 int count = UDELAY_COUNT;
171
172 while (count) {
173 temp = ql_read32(qdev, reg);
174
175 /* check for errors */
176 if (temp & err_bit) {
177 netif_alert(qdev, probe, qdev->ndev,
178 "register 0x%.08x access error, value = 0x%.08x!.\n",
179 reg, temp);
180 return -EIO;
181 } else if (temp & bit)
182 return 0;
183 udelay(UDELAY_DELAY);
184 count--;
185 }
186 netif_alert(qdev, probe, qdev->ndev,
187 "Timed out waiting for reg %x to come ready.\n", reg);
188 return -ETIMEDOUT;
189 }
190
191 /* The CFG register is used to download TX and RX control blocks
192 * to the chip. This function waits for an operation to complete.
193 */
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
195 {
196 int count = UDELAY_COUNT;
197 u32 temp;
198
199 while (count) {
200 temp = ql_read32(qdev, CFG);
201 if (temp & CFG_LE)
202 return -EIO;
203 if (!(temp & bit))
204 return 0;
205 udelay(UDELAY_DELAY);
206 count--;
207 }
208 return -ETIMEDOUT;
209 }
210
211
212 /* Used to issue init control blocks to hw. Maps control block,
213 * sets address, triggers download, waits for completion.
214 */
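/* In outline (a sketch of the sequence implemented below): map the control
 * block for DMA, grab SEM_ICB_MASK, wait for any prior CFG load to finish,
 * write the DMA address to ICB_L/ICB_H, write CFG with the load bit (and
 * its mask in the upper 16 bits) plus the queue id, then wait for hardware
 * to clear the load bit again.
 */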
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
216 u16 q_id)
217 {
218 u64 map;
219 int status = 0;
220 int direction;
221 u32 mask;
222 u32 value;
223
224 direction =
225 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
226 PCI_DMA_FROMDEVICE;
227
228 map = pci_map_single(qdev->pdev, ptr, size, direction);
229 if (pci_dma_mapping_error(qdev->pdev, map)) {
230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
231 return -ENOMEM;
232 }
233
234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
235 if (status)
236 return status;
237
238 status = ql_wait_cfg(qdev, bit);
239 if (status) {
240 netif_err(qdev, ifup, qdev->ndev,
241 "Timed out waiting for CFG to come ready.\n");
242 goto exit;
243 }
244
245 ql_write32(qdev, ICB_L, (u32) map);
246 ql_write32(qdev, ICB_H, (u32) (map >> 32));
247
248 mask = CFG_Q_MASK | (bit << 16);
249 value = bit | (q_id << CFG_Q_SHIFT);
250 ql_write32(qdev, CFG, (mask | value));
251
252 /*
253 * Wait for the bit to clear after signaling hw.
254 */
255 status = ql_wait_cfg(qdev, bit);
256 exit:
257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
258 pci_unmap_single(qdev->pdev, map, size, direction);
259 return status;
260 }
261
262 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
264 u32 *value)
265 {
266 u32 offset = 0;
267 int status;
268
269 switch (type) {
270 case MAC_ADDR_TYPE_MULTI_MAC:
271 case MAC_ADDR_TYPE_CAM_MAC:
272 {
273 status =
274 ql_wait_reg_rdy(qdev,
275 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
276 if (status)
277 goto exit;
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
281 status =
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
284 if (status)
285 goto exit;
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
287 status =
288 ql_wait_reg_rdy(qdev,
289 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
290 if (status)
291 goto exit;
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
295 status =
296 ql_wait_reg_rdy(qdev,
297 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
298 if (status)
299 goto exit;
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 if (type == MAC_ADDR_TYPE_CAM_MAC) {
302 status =
303 ql_wait_reg_rdy(qdev,
304 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
305 if (status)
306 goto exit;
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
310 status =
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
312 MAC_ADDR_MR, 0);
313 if (status)
314 goto exit;
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
316 }
317 break;
318 }
319 case MAC_ADDR_TYPE_VLAN:
320 case MAC_ADDR_TYPE_MULTI_FLTR:
321 default:
322 netif_crit(qdev, ifup, qdev->ndev,
323 "Address type %d not yet supported.\n", type);
324 status = -EPERM;
325 }
326 exit:
327 return status;
328 }
329
330 /* Set up a MAC, multicast or VLAN address for the
331 * inbound frame matching.
332 */
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
334 u16 index)
335 {
336 u32 offset = 0;
337 int status = 0;
338
339 switch (type) {
340 case MAC_ADDR_TYPE_MULTI_MAC:
341 {
342 u32 upper = (addr[0] << 8) | addr[1];
343 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 (addr[4] << 8) | (addr[5]);
345
346 status =
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
349 if (status)
350 goto exit;
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 (index << MAC_ADDR_IDX_SHIFT) |
353 type | MAC_ADDR_E);
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
355 status =
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
358 if (status)
359 goto exit;
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 (index << MAC_ADDR_IDX_SHIFT) |
362 type | MAC_ADDR_E);
363
364 ql_write32(qdev, MAC_ADDR_DATA, upper);
365 status =
366 ql_wait_reg_rdy(qdev,
367 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
368 if (status)
369 goto exit;
370 break;
371 }
372 case MAC_ADDR_TYPE_CAM_MAC:
373 {
374 u32 cam_output;
375 u32 upper = (addr[0] << 8) | addr[1];
376 u32 lower =
377 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
378 (addr[5]);
379 status =
380 ql_wait_reg_rdy(qdev,
381 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
382 if (status)
383 goto exit;
384 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
385 (index << MAC_ADDR_IDX_SHIFT) | /* index */
386 type); /* type */
387 ql_write32(qdev, MAC_ADDR_DATA, lower);
388 status =
389 ql_wait_reg_rdy(qdev,
390 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
391 if (status)
392 goto exit;
393 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
394 (index << MAC_ADDR_IDX_SHIFT) | /* index */
395 type); /* type */
396 ql_write32(qdev, MAC_ADDR_DATA, upper);
397 status =
398 ql_wait_reg_rdy(qdev,
399 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
400 if (status)
401 goto exit;
402 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
403 (index << MAC_ADDR_IDX_SHIFT) | /* index */
404 type); /* type */
405 /* This field should also include the queue id
406 * and possibly the function id. Right now we hardcode
407 * the route field to NIC core.
408 */
409 cam_output = (CAM_OUT_ROUTE_NIC |
410 (qdev->
411 func << CAM_OUT_FUNC_SHIFT) |
412 (0 << CAM_OUT_CQ_ID_SHIFT));
413 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
414 cam_output |= CAM_OUT_RV;
415 /* route to NIC core */
416 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
417 break;
418 }
419 case MAC_ADDR_TYPE_VLAN:
420 {
421 u32 enable_bit = *((u32 *) &addr[0]);
422 /* For VLAN, the addr actually holds a bit that
423 * either enables or disables the vlan id we are
424 * addressing. It's either MAC_ADDR_E on or off.
425 * That's bit-27 we're talking about.
426 */
427 status =
428 ql_wait_reg_rdy(qdev,
429 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
430 if (status)
431 goto exit;
432 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
433 (index << MAC_ADDR_IDX_SHIFT) | /* index */
434 type | /* type */
435 enable_bit); /* enable/disable */
436 break;
437 }
438 case MAC_ADDR_TYPE_MULTI_FLTR:
439 default:
440 netif_crit(qdev, ifup, qdev->ndev,
441 "Address type %d not yet supported.\n", type);
442 status = -EPERM;
443 }
444 exit:
445 return status;
446 }
447
448 /* Set or clear MAC address in hardware. We sometimes
449 * have to clear it to prevent wrong frame routing,
450 * especially in a bonding environment.
451 */
452 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
453 {
454 int status;
455 char zero_mac_addr[ETH_ALEN];
456 char *addr;
457
458 if (set) {
459 addr = &qdev->current_mac_addr[0];
460 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
461 "Set Mac addr %pM\n", addr);
462 } else {
463 eth_zero_addr(zero_mac_addr);
464 addr = &zero_mac_addr[0];
465 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
466 "Clearing MAC address\n");
467 }
468 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
469 if (status)
470 return status;
471 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
472 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
473 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
474 if (status)
475 netif_err(qdev, ifup, qdev->ndev,
476 "Failed to init mac address.\n");
477 return status;
478 }
479
480 void ql_link_on(struct ql_adapter *qdev)
481 {
482 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
483 netif_carrier_on(qdev->ndev);
484 ql_set_mac_addr(qdev, 1);
485 }
486
487 void ql_link_off(struct ql_adapter *qdev)
488 {
489 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
490 netif_carrier_off(qdev->ndev);
491 ql_set_mac_addr(qdev, 0);
492 }
493
494 /* Get a specific frame routing value from the CAM.
495 * Used for debug and reg dump.
496 */
497 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
498 {
499 int status = 0;
500
501 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
502 if (status)
503 goto exit;
504
505 ql_write32(qdev, RT_IDX,
506 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
507 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
508 if (status)
509 goto exit;
510 *value = ql_read32(qdev, RT_DATA);
511 exit:
512 return status;
513 }
514
515 /* The NIC function for this chip has 16 routing indexes. Each one can be used
516 * to route different frame types to various inbound queues. We send broadcast/
517 * multicast/error frames to the default queue for slow handling,
518 * and CAM hit/RSS frames to the fast handling queues.
519 */
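/* Each case below programs one slot: RT_IDX carries the slot index,
 * destination and enable (E) bit, while RT_DATA is written with the
 * frame-type mask itself (or 0 when disabling).
 */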
520 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
521 int enable)
522 {
523 int status = -EINVAL; /* Return error if no mask match. */
524 u32 value = 0;
525
526 switch (mask) {
527 case RT_IDX_CAM_HIT:
528 {
529 value = RT_IDX_DST_CAM_Q | /* dest */
530 RT_IDX_TYPE_NICQ | /* type */
531 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
532 break;
533 }
534 case RT_IDX_VALID: /* Promiscuous Mode frames. */
535 {
536 value = RT_IDX_DST_DFLT_Q | /* dest */
537 RT_IDX_TYPE_NICQ | /* type */
538 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
539 break;
540 }
541 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
542 {
543 value = RT_IDX_DST_DFLT_Q | /* dest */
544 RT_IDX_TYPE_NICQ | /* type */
545 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
546 break;
547 }
548 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
549 {
550 value = RT_IDX_DST_DFLT_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_IP_CSUM_ERR_SLOT <<
553 RT_IDX_IDX_SHIFT); /* index */
554 break;
555 }
556 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
557 {
558 value = RT_IDX_DST_DFLT_Q | /* dest */
559 RT_IDX_TYPE_NICQ | /* type */
560 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
561 RT_IDX_IDX_SHIFT); /* index */
562 break;
563 }
564 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
565 {
566 value = RT_IDX_DST_DFLT_Q | /* dest */
567 RT_IDX_TYPE_NICQ | /* type */
568 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
569 break;
570 }
571 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
572 {
573 value = RT_IDX_DST_DFLT_Q | /* dest */
574 RT_IDX_TYPE_NICQ | /* type */
575 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
576 break;
577 }
578 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
579 {
580 value = RT_IDX_DST_DFLT_Q | /* dest */
581 RT_IDX_TYPE_NICQ | /* type */
582 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
583 break;
584 }
585 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
586 {
587 value = RT_IDX_DST_RSS | /* dest */
588 RT_IDX_TYPE_NICQ | /* type */
589 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
590 break;
591 }
592 case 0: /* Clear the E-bit on an entry. */
593 {
594 value = RT_IDX_DST_DFLT_Q | /* dest */
595 RT_IDX_TYPE_NICQ | /* type */
596 (index << RT_IDX_IDX_SHIFT);/* index */
597 break;
598 }
599 default:
600 netif_err(qdev, ifup, qdev->ndev,
601 "Mask type %d not yet supported.\n", mask);
602 status = -EPERM;
603 goto exit;
604 }
605
606 if (value) {
607 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
608 if (status)
609 goto exit;
610 value |= (enable ? RT_IDX_E : 0);
611 ql_write32(qdev, RT_IDX, value);
612 ql_write32(qdev, RT_DATA, enable ? mask : 0);
613 }
614 exit:
615 return status;
616 }
617
618 static void ql_enable_interrupts(struct ql_adapter *qdev)
619 {
620 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
621 }
622
623 static void ql_disable_interrupts(struct ql_adapter *qdev)
624 {
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
626 }
627
628 /* If we're running with multiple MSI-X vectors then we enable on the fly.
629 * Otherwise, we may have multiple outstanding workers and don't want to
630 * enable until the last one finishes. In this case, the irq_cnt gets
631 * incremented every time we queue a worker and decremented every time
632 * a worker finishes. Once it hits zero we enable the interrupt.
633 */
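/* In the non-MSI-X (or vector zero) case, irq_cnt works like a reference
 * count and a typical caller pairs the two helpers (a sketch):
 *
 *	ql_disable_completion_interrupt(qdev, intr);	irq_cnt++
 *	... service the completion queue ...
 *	ql_enable_completion_interrupt(qdev, intr);	irq_cnt--, re-enable at 0
 */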
634 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
635 {
636 u32 var = 0;
637 unsigned long hw_flags = 0;
638 struct intr_context *ctx = qdev->intr_context + intr;
639
640 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
641 /* Always enable if we're using multiple MSI-X interrupts and
642 * it's not the default (zeroeth) interrupt.
643 */
644 ql_write32(qdev, INTR_EN,
645 ctx->intr_en_mask);
646 var = ql_read32(qdev, STS);
647 return var;
648 }
649
650 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
651 if (atomic_dec_and_test(&ctx->irq_cnt)) {
652 ql_write32(qdev, INTR_EN,
653 ctx->intr_en_mask);
654 var = ql_read32(qdev, STS);
655 }
656 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
657 return var;
658 }
659
660 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
661 {
662 u32 var = 0;
663 struct intr_context *ctx;
664
665 /* HW disables for us if we're using multiple MSI-X interrupts and
666 * it's not the default (zeroeth) interrupt.
667 */
668 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
669 return 0;
670
671 ctx = qdev->intr_context + intr;
672 spin_lock(&qdev->hw_lock);
673 if (!atomic_read(&ctx->irq_cnt)) {
674 ql_write32(qdev, INTR_EN,
675 ctx->intr_dis_mask);
676 var = ql_read32(qdev, STS);
677 }
678 atomic_inc(&ctx->irq_cnt);
679 spin_unlock(&qdev->hw_lock);
680 return var;
681 }
682
683 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
684 {
685 int i;
686 for (i = 0; i < qdev->intr_count; i++) {
687 /* The enable call does an atomic_dec_and_test
688 * and enables only if the result is zero.
689 * So we precharge it here.
690 */
691 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
692 i == 0))
693 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
694 ql_enable_completion_interrupt(qdev, i);
695 }
696
697 }
698
699 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
700 {
701 int status, i;
702 u16 csum = 0;
703 __le16 *flash = (__le16 *)&qdev->flash;
704
705 status = strncmp((char *)&qdev->flash, str, 4);
706 if (status) {
707 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
708 return status;
709 }
710
711 for (i = 0; i < size; i++)
712 csum += le16_to_cpu(*flash++);
713
714 if (csum)
715 netif_err(qdev, ifup, qdev->ndev,
716 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
717
718 return csum;
719 }
720
721 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
722 {
723 int status = 0;
724 /* wait for reg to come ready */
725 status = ql_wait_reg_rdy(qdev,
726 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
727 if (status)
728 goto exit;
729 /* set up for reg read */
730 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
731 /* wait for reg to come ready */
732 status = ql_wait_reg_rdy(qdev,
733 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
734 if (status)
735 goto exit;
736 /* This data is stored on flash as an array of
737 * __le32. Since ql_read32() returns cpu endian
738 * we need to swap it back.
739 */
740 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
741 exit:
742 return status;
743 }
744
745 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
746 {
747 u32 i, size;
748 int status;
749 __le32 *p = (__le32 *)&qdev->flash;
750 u32 offset;
751 u8 mac_addr[6];
752
753 /* Get flash offset for function and adjust
754 * for dword access.
755 */
756 if (!qdev->port)
757 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
758 else
759 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
760
761 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
762 return -ETIMEDOUT;
763
764 size = sizeof(struct flash_params_8000) / sizeof(u32);
765 for (i = 0; i < size; i++, p++) {
766 status = ql_read_flash_word(qdev, i+offset, p);
767 if (status) {
768 netif_err(qdev, ifup, qdev->ndev,
769 "Error reading flash.\n");
770 goto exit;
771 }
772 }
773
774 status = ql_validate_flash(qdev,
775 sizeof(struct flash_params_8000) / sizeof(u16),
776 "8000");
777 if (status) {
778 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
779 status = -EINVAL;
780 goto exit;
781 }
782
783 /* Extract either manufacturer or BOFM modified
784 * MAC address.
785 */
786 if (qdev->flash.flash_params_8000.data_type1 == 2)
787 memcpy(mac_addr,
788 qdev->flash.flash_params_8000.mac_addr1,
789 qdev->ndev->addr_len);
790 else
791 memcpy(mac_addr,
792 qdev->flash.flash_params_8000.mac_addr,
793 qdev->ndev->addr_len);
794
795 if (!is_valid_ether_addr(mac_addr)) {
796 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
797 status = -EINVAL;
798 goto exit;
799 }
800
801 memcpy(qdev->ndev->dev_addr,
802 mac_addr,
803 qdev->ndev->addr_len);
804
805 exit:
806 ql_sem_unlock(qdev, SEM_FLASH_MASK);
807 return status;
808 }
809
810 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
811 {
812 int i;
813 int status;
814 __le32 *p = (__le32 *)&qdev->flash;
815 u32 offset = 0;
816 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
817
818 /* Second function's parameters follow the first
819 * function's.
820 */
821 if (qdev->port)
822 offset = size;
823
824 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
825 return -ETIMEDOUT;
826
827 for (i = 0; i < size; i++, p++) {
828 status = ql_read_flash_word(qdev, i+offset, p);
829 if (status) {
830 netif_err(qdev, ifup, qdev->ndev,
831 "Error reading flash.\n");
832 goto exit;
833 }
834
835 }
836
837 status = ql_validate_flash(qdev,
838 sizeof(struct flash_params_8012) / sizeof(u16),
839 "8012");
840 if (status) {
841 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
842 status = -EINVAL;
843 goto exit;
844 }
845
846 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
847 status = -EINVAL;
848 goto exit;
849 }
850
851 memcpy(qdev->ndev->dev_addr,
852 qdev->flash.flash_params_8012.mac_addr,
853 qdev->ndev->addr_len);
854
855 exit:
856 ql_sem_unlock(qdev, SEM_FLASH_MASK);
857 return status;
858 }
859
860 /* xgmac registers are located behind the xgmac_addr and xgmac_data
861 * register pair. Each read/write requires us to wait for the ready
862 * bit before reading/writing the data.
863 */
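/* Access pattern (a sketch): wait for XGMAC_ADDR_RDY, write XGMAC_DATA
 * first for a write, then write the register offset to XGMAC_ADDR to start
 * the transaction; a read needs a second ready-wait before the result can
 * be pulled out of XGMAC_DATA.
 */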
864 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
865 {
866 int status;
867 /* wait for reg to come ready */
868 status = ql_wait_reg_rdy(qdev,
869 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
870 if (status)
871 return status;
872 /* write the data to the data reg */
873 ql_write32(qdev, XGMAC_DATA, data);
874 /* trigger the write */
875 ql_write32(qdev, XGMAC_ADDR, reg);
876 return status;
877 }
878
879 /* xgmac registers are located behind the xgmac_addr and xgmac_data
880 * register pair. Each read/write requires us to wait for the ready
881 * bit before reading/writing the data.
882 */
883 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
884 {
885 int status = 0;
886 /* wait for reg to come ready */
887 status = ql_wait_reg_rdy(qdev,
888 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
889 if (status)
890 goto exit;
891 /* set up for reg read */
892 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
893 /* wait for reg to come ready */
894 status = ql_wait_reg_rdy(qdev,
895 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
896 if (status)
897 goto exit;
898 /* get the data */
899 *data = ql_read32(qdev, XGMAC_DATA);
900 exit:
901 return status;
902 }
903
904 /* This is used for reading the 64-bit statistics regs. */
905 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
906 {
907 int status = 0;
908 u32 hi = 0;
909 u32 lo = 0;
910
911 status = ql_read_xgmac_reg(qdev, reg, &lo);
912 if (status)
913 goto exit;
914
915 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
916 if (status)
917 goto exit;
918
919 *data = (u64) lo | ((u64) hi << 32);
920
921 exit:
922 return status;
923 }
924
925 static int ql_8000_port_initialize(struct ql_adapter *qdev)
926 {
927 int status;
928 /*
929 * Get MPI firmware version for driver banner
930 * and ethtool info.
931 */
932 status = ql_mb_about_fw(qdev);
933 if (status)
934 goto exit;
935 status = ql_mb_get_fw_state(qdev);
936 if (status)
937 goto exit;
938 /* Wake up a worker to get/set the TX/RX frame sizes. */
939 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
940 exit:
941 return status;
942 }
943
944 /* Take the MAC Core out of reset.
945 * Enable statistics counting.
946 * Take the transmitter/receiver out of reset.
947 * This functionality may be done in the MPI firmware at a
948 * later date.
949 */
950 static int ql_8012_port_initialize(struct ql_adapter *qdev)
951 {
952 int status = 0;
953 u32 data;
954
955 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
956 /* Another function has the semaphore, so
957 * wait for the port init bit to come ready.
958 */
959 netif_info(qdev, link, qdev->ndev,
960 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
961 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
962 if (status) {
963 netif_crit(qdev, link, qdev->ndev,
964 "Port initialize timed out.\n");
965 }
966 return status;
967 }
968
969 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
970 /* Set the core reset. */
971 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
972 if (status)
973 goto end;
974 data |= GLOBAL_CFG_RESET;
975 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
976 if (status)
977 goto end;
978
979 /* Clear the core reset and turn on jumbo for receiver. */
980 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
981 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
982 data |= GLOBAL_CFG_TX_STAT_EN;
983 data |= GLOBAL_CFG_RX_STAT_EN;
984 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
985 if (status)
986 goto end;
987
988 /* Enable the transmitter and clear its reset. */
989 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
990 if (status)
991 goto end;
992 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
993 data |= TX_CFG_EN; /* Enable the transmitter. */
994 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
995 if (status)
996 goto end;
997
998 /* Enable the receiver and clear its reset. */
999 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1000 if (status)
1001 goto end;
1002 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1003 data |= RX_CFG_EN; /* Enable the receiver. */
1004 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1005 if (status)
1006 goto end;
1007
1008 /* Turn on jumbo. */
1009 status =
1010 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1011 if (status)
1012 goto end;
1013 status =
1014 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1015 if (status)
1016 goto end;
1017
1018 /* Signal to the world that the port is enabled. */
1019 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1020 end:
1021 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1022 return status;
1023 }
1024
1025 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1026 {
1027 return PAGE_SIZE << qdev->lbq_buf_order;
1028 }
1029
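/* Large receive buffers are slices of a single "master" page of
 * ql_lbq_block_size() bytes. ql_get_next_chunk() hands out lbq_buf_size
 * sized chunks of that page (taking a page reference for all but the last
 * chunk), and ql_get_curr_lchunk() unmaps the page only when its final
 * chunk is consumed.
 */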
1030 /* Get the next large buffer. */
1031 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1032 {
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034 rx_ring->lbq_curr_idx++;
1035 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036 rx_ring->lbq_curr_idx = 0;
1037 rx_ring->lbq_free_cnt++;
1038 return lbq_desc;
1039 }
1040
1041 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042 struct rx_ring *rx_ring)
1043 {
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1045
1046 pci_dma_sync_single_for_cpu(qdev->pdev,
1047 dma_unmap_addr(lbq_desc, mapaddr),
1048 rx_ring->lbq_buf_size,
1049 PCI_DMA_FROMDEVICE);
1050
1051 /* If it's the last chunk of our master page then
1052 * we unmap it.
1053 */
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055 == ql_lbq_block_size(qdev))
1056 pci_unmap_page(qdev->pdev,
1057 lbq_desc->p.pg_chunk.map,
1058 ql_lbq_block_size(qdev),
1059 PCI_DMA_FROMDEVICE);
1060 return lbq_desc;
1061 }
1062
1063 /* Get the next small buffer. */
1064 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1065 {
1066 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067 rx_ring->sbq_curr_idx++;
1068 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069 rx_ring->sbq_curr_idx = 0;
1070 rx_ring->sbq_free_cnt++;
1071 return sbq_desc;
1072 }
1073
1074 /* Update an rx ring index. */
1075 static void ql_update_cq(struct rx_ring *rx_ring)
1076 {
1077 rx_ring->cnsmr_idx++;
1078 rx_ring->curr_entry++;
1079 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080 rx_ring->cnsmr_idx = 0;
1081 rx_ring->curr_entry = rx_ring->cq_base;
1082 }
1083 }
1084
1085 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1086 {
1087 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1088 }
1089
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091 struct bq_desc *lbq_desc)
1092 {
1093 if (!rx_ring->pg_chunk.page) {
1094 u64 map;
1095 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1096 GFP_ATOMIC,
1097 qdev->lbq_buf_order);
1098 if (unlikely(!rx_ring->pg_chunk.page)) {
1099 netif_err(qdev, drv, qdev->ndev,
1100 "page allocation failed.\n");
1101 return -ENOMEM;
1102 }
1103 rx_ring->pg_chunk.offset = 0;
1104 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1105 0, ql_lbq_block_size(qdev),
1106 PCI_DMA_FROMDEVICE);
1107 if (pci_dma_mapping_error(qdev->pdev, map)) {
1108 __free_pages(rx_ring->pg_chunk.page,
1109 qdev->lbq_buf_order);
1110 rx_ring->pg_chunk.page = NULL;
1111 netif_err(qdev, drv, qdev->ndev,
1112 "PCI mapping failed.\n");
1113 return -ENOMEM;
1114 }
1115 rx_ring->pg_chunk.map = map;
1116 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1117 }
1118
1119 /* Copy the current master pg_chunk info
1120 * to the current descriptor.
1121 */
1122 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1123
1124 /* Adjust the master page chunk for next
1125 * buffer get.
1126 */
1127 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1128 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1129 rx_ring->pg_chunk.page = NULL;
1130 lbq_desc->p.pg_chunk.last_flag = 1;
1131 } else {
1132 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1133 get_page(rx_ring->pg_chunk.page);
1134 lbq_desc->p.pg_chunk.last_flag = 0;
1135 }
1136 return 0;
1137 }
1138 /* Process (refill) a large buffer queue. */
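/* Buffers are replenished 16 at a time, and the producer index doorbell is
 * written once at the end rather than per buffer.
 */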
1139 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1140 {
1141 u32 clean_idx = rx_ring->lbq_clean_idx;
1142 u32 start_idx = clean_idx;
1143 struct bq_desc *lbq_desc;
1144 u64 map;
1145 int i;
1146
1147 while (rx_ring->lbq_free_cnt > 32) {
1148 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1149 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1150 "lbq: try cleaning clean_idx = %d.\n",
1151 clean_idx);
1152 lbq_desc = &rx_ring->lbq[clean_idx];
1153 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1154 rx_ring->lbq_clean_idx = clean_idx;
1155 netif_err(qdev, ifup, qdev->ndev,
1156 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1157 i, clean_idx);
1158 return;
1159 }
1160
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
1163 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1164 dma_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
1166 *lbq_desc->addr = cpu_to_le64(map);
1167
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
1171 clean_idx++;
1172 if (clean_idx == rx_ring->lbq_len)
1173 clean_idx = 0;
1174 }
1175
1176 rx_ring->lbq_clean_idx = clean_idx;
1177 rx_ring->lbq_prod_idx += 16;
1178 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179 rx_ring->lbq_prod_idx = 0;
1180 rx_ring->lbq_free_cnt -= 16;
1181 }
1182
1183 if (start_idx != clean_idx) {
1184 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185 "lbq: updating prod idx = %d.\n",
1186 rx_ring->lbq_prod_idx);
1187 ql_write_db_reg(rx_ring->lbq_prod_idx,
1188 rx_ring->lbq_prod_idx_db_reg);
1189 }
1190 }
1191
1192 /* Process (refill) a small buffer queue. */
1193 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1194 {
1195 u32 clean_idx = rx_ring->sbq_clean_idx;
1196 u32 start_idx = clean_idx;
1197 struct bq_desc *sbq_desc;
1198 u64 map;
1199 int i;
1200
1201 while (rx_ring->sbq_free_cnt > 16) {
1202 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1203 sbq_desc = &rx_ring->sbq[clean_idx];
1204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205 "sbq: try cleaning clean_idx = %d.\n",
1206 clean_idx);
1207 if (sbq_desc->p.skb == NULL) {
1208 netif_printk(qdev, rx_status, KERN_DEBUG,
1209 qdev->ndev,
1210 "sbq: getting new skb for index %d.\n",
1211 sbq_desc->index);
1212 sbq_desc->p.skb =
1213 netdev_alloc_skb(qdev->ndev,
1214 SMALL_BUFFER_SIZE);
1215 if (sbq_desc->p.skb == NULL) {
1216 rx_ring->sbq_clean_idx = clean_idx;
1217 return;
1218 }
1219 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 map = pci_map_single(qdev->pdev,
1221 sbq_desc->p.skb->data,
1222 rx_ring->sbq_buf_size,
1223 PCI_DMA_FROMDEVICE);
1224 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 netif_err(qdev, ifup, qdev->ndev,
1226 "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
1230 return;
1231 }
1232 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 dma_unmap_len_set(sbq_desc, maplen,
1234 rx_ring->sbq_buf_size);
1235 *sbq_desc->addr = cpu_to_le64(map);
1236 }
1237
1238 clean_idx++;
1239 if (clean_idx == rx_ring->sbq_len)
1240 clean_idx = 0;
1241 }
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
1246 rx_ring->sbq_free_cnt -= 16;
1247 }
1248
1249 if (start_idx != clean_idx) {
1250 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
1255 }
1256 }
1257
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1260 {
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
1263 }
1264
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1267 */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1270 {
1271 int i;
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
1274 /*
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
1278 * If it's the zeroeth element, then it's
1279 * the skb->data area. If it's the 7th
1280 * element and there are more than 6 frags,
1281 * then it's an OAL.
1282 */
1283 if (i == 7) {
1284 netif_printk(qdev, tx_done, KERN_DEBUG,
1285 qdev->ndev,
1286 "unmapping OAL area.\n");
1287 }
1288 pci_unmap_single(qdev->pdev,
1289 dma_unmap_addr(&tx_ring_desc->map[i],
1290 mapaddr),
1291 dma_unmap_len(&tx_ring_desc->map[i],
1292 maplen),
1293 PCI_DMA_TODEVICE);
1294 } else {
1295 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 "unmapping frag %d.\n", i);
1297 pci_unmap_page(qdev->pdev,
1298 dma_unmap_addr(&tx_ring_desc->map[i],
1299 mapaddr),
1300 dma_unmap_len(&tx_ring_desc->map[i],
1301 maplen), PCI_DMA_TODEVICE);
1302 }
1303 }
1304
1305 }
1306
1307 /* Map the buffers for this transmit. This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309 */
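/* Every DMA mapping taken here is also recorded in tx_ring_desc->map[] so
 * that ql_unmap_send() can undo it, either on a mapping error below or
 * later when the transmit completes.
 */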
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 struct ob_mac_iocb_req *mac_iocb_ptr,
1312 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 {
1314 int len = skb_headlen(skb);
1315 dma_addr_t map;
1316 int frag_idx, err, map_idx = 0;
1317 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320 if (frag_cnt) {
1321 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 "frag_cnt = %d.\n", frag_cnt);
1323 }
1324 /*
1325 * Map the skb buffer first.
1326 */
1327 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329 err = pci_dma_mapping_error(qdev->pdev, map);
1330 if (err) {
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping failed with error: %d\n", err);
1333
1334 return NETDEV_TX_BUSY;
1335 }
1336
1337 tbd->len = cpu_to_le32(len);
1338 tbd->addr = cpu_to_le64(map);
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341 map_idx++;
1342
1343 /*
1344 * This loop fills the remainder of the 8 address descriptors
1345 * in the IOCB. If there are more than 7 fragments, then the
1346 * eighth address desc will point to an external list (OAL).
1347 * When this happens, the remainder of the frags will be stored
1348 * in this list.
1349 */
1350 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352 tbd++;
1353 if (frag_idx == 6 && frag_cnt > 7) {
1354 /* Let's tack on an sglist.
1355 * Our control block will now
1356 * look like this:
1357 * iocb->seg[0] = skb->data
1358 * iocb->seg[1] = frag[0]
1359 * iocb->seg[2] = frag[1]
1360 * iocb->seg[3] = frag[2]
1361 * iocb->seg[4] = frag[3]
1362 * iocb->seg[5] = frag[4]
1363 * iocb->seg[6] = frag[5]
1364 * iocb->seg[7] = ptr to OAL (external sglist)
1365 * oal->seg[0] = frag[6]
1366 * oal->seg[1] = frag[7]
1367 * oal->seg[2] = frag[8]
1368 * oal->seg[3] = frag[9]
1369 * oal->seg[4] = frag[10]
1370 * etc...
1371 */
1372 /* Tack on the OAL in the eighth segment of IOCB. */
1373 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374 sizeof(struct oal),
1375 PCI_DMA_TODEVICE);
1376 err = pci_dma_mapping_error(qdev->pdev, map);
1377 if (err) {
1378 netif_err(qdev, tx_queued, qdev->ndev,
1379 "PCI mapping outbound address list with error: %d\n",
1380 err);
1381 goto map_error;
1382 }
1383
1384 tbd->addr = cpu_to_le64(map);
1385 /*
1386 * The length is the number of fragments
1387 * that remain to be mapped times the size
1388 * of a single descriptor in our sglist (OAL).
1389 */
1390 tbd->len =
1391 cpu_to_le32((sizeof(struct tx_buf_desc) *
1392 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394 map);
1395 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 sizeof(struct oal));
1397 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398 map_idx++;
1399 }
1400
1401 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402 DMA_TO_DEVICE);
1403
1404 err = dma_mapping_error(&qdev->pdev->dev, map);
1405 if (err) {
1406 netif_err(qdev, tx_queued, qdev->ndev,
1407 "PCI mapping frags failed with error: %d.\n",
1408 err);
1409 goto map_error;
1410 }
1411
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 skb_frag_size(frag));
1417
1418 }
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
1424
1425 map_error:
1426 /*
1427 * If a mapping failed, map_idx is the number of segments
1428 * that were mapped successfully (the skb->data area plus
1429 * any frags and the OAL), so ql_unmap_send() can unmap
1430 * exactly those.
1431 */
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1434 }
1435
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438 struct rx_ring *rx_ring)
1439 {
1440 struct nic_stats *stats = &qdev->nic_stats;
1441
1442 stats->rx_err_count++;
1443 rx_ring->rx_errors++;
1444
1445 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447 stats->rx_code_err++;
1448 break;
1449 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450 stats->rx_oversize_err++;
1451 break;
1452 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453 stats->rx_undersize_err++;
1454 break;
1455 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456 stats->rx_preamble_err++;
1457 break;
1458 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459 stats->rx_frame_len_err++;
1460 break;
1461 case IB_MAC_IOCB_RSP_ERR_CRC:
1462 stats->rx_crc_err++;
1463 default:
1464 break;
1465 }
1466 }
1467
1468 /**
1469 * ql_update_mac_hdr_len - helper routine to update the mac header length
1470 * based on vlan tags if present
1471 */
1472 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1473 struct ib_mac_iocb_rsp *ib_mac_rsp,
1474 void *page, size_t *len)
1475 {
1476 u16 *tags;
1477
1478 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1479 return;
1480 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1481 tags = (u16 *)page;
1482 /* Look for stacked vlan tags in ethertype field */
1483 if (tags[6] == ETH_P_8021Q &&
1484 tags[8] == ETH_P_8021Q)
1485 *len += 2 * VLAN_HLEN;
1486 else
1487 *len += VLAN_HLEN;
1488 }
1489 }
1490
1491 /* Process an inbound completion from an rx ring. */
1492 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1495 u32 length,
1496 u16 vlan_id)
1497 {
1498 struct sk_buff *skb;
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1500 struct napi_struct *napi = &rx_ring->napi;
1501
1502 /* Frame error, so drop the packet. */
1503 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505 put_page(lbq_desc->p.pg_chunk.page);
1506 return;
1507 }
1508 napi->dev = qdev->ndev;
1509
1510 skb = napi_get_frags(napi);
1511 if (!skb) {
1512 netif_err(qdev, drv, qdev->ndev,
1513 "Couldn't get an skb, exiting.\n");
1514 rx_ring->rx_dropped++;
1515 put_page(lbq_desc->p.pg_chunk.page);
1516 return;
1517 }
1518 prefetch(lbq_desc->p.pg_chunk.va);
1519 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520 lbq_desc->p.pg_chunk.page,
1521 lbq_desc->p.pg_chunk.offset,
1522 length);
1523
1524 skb->len += length;
1525 skb->data_len += length;
1526 skb->truesize += length;
1527 skb_shinfo(skb)->nr_frags++;
1528
1529 rx_ring->rx_packets++;
1530 rx_ring->rx_bytes += length;
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 skb_record_rx_queue(skb, rx_ring->cq_id);
1533 if (vlan_id != 0xffff)
1534 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1535 napi_gro_frags(napi);
1536 }
1537
1538 /* Process an inbound completion from an rx ring. */
1539 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540 struct rx_ring *rx_ring,
1541 struct ib_mac_iocb_rsp *ib_mac_rsp,
1542 u32 length,
1543 u16 vlan_id)
1544 {
1545 struct net_device *ndev = qdev->ndev;
1546 struct sk_buff *skb = NULL;
1547 void *addr;
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549 struct napi_struct *napi = &rx_ring->napi;
1550 size_t hlen = ETH_HLEN;
1551
1552 skb = netdev_alloc_skb(ndev, length);
1553 if (!skb) {
1554 rx_ring->rx_dropped++;
1555 put_page(lbq_desc->p.pg_chunk.page);
1556 return;
1557 }
1558
1559 addr = lbq_desc->p.pg_chunk.va;
1560 prefetch(addr);
1561
1562 /* Frame error, so drop the packet. */
1563 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1565 goto err_out;
1566 }
1567
1568 /* Update the MAC header length */
1569 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1570
1571 /* The max framesize filter on this chip is set higher than
1572 * MTU since FCoE uses 2k frames.
1573 */
1574 if (skb->len > ndev->mtu + hlen) {
1575 netif_err(qdev, drv, qdev->ndev,
1576 "Segment too small, dropping.\n");
1577 rx_ring->rx_dropped++;
1578 goto err_out;
1579 }
1580 memcpy(skb_put(skb, hlen), addr, hlen);
1581 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1583 length);
1584 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1585 lbq_desc->p.pg_chunk.offset + hlen,
1586 length - hlen);
1587 skb->len += length - hlen;
1588 skb->data_len += length - hlen;
1589 skb->truesize += length - hlen;
1590
1591 rx_ring->rx_packets++;
1592 rx_ring->rx_bytes += skb->len;
1593 skb->protocol = eth_type_trans(skb, ndev);
1594 skb_checksum_none_assert(skb);
1595
1596 if ((ndev->features & NETIF_F_RXCSUM) &&
1597 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1598 /* TCP frame. */
1599 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1600 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601 "TCP checksum done!\n");
1602 skb->ip_summed = CHECKSUM_UNNECESSARY;
1603 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605 /* Unfragmented ipv4 UDP frame. */
1606 struct iphdr *iph =
1607 (struct iphdr *)((u8 *)addr + hlen);
1608 if (!(iph->frag_off &
1609 htons(IP_MF|IP_OFFSET))) {
1610 skb->ip_summed = CHECKSUM_UNNECESSARY;
1611 netif_printk(qdev, rx_status, KERN_DEBUG,
1612 qdev->ndev,
1613 "UDP checksum done!\n");
1614 }
1615 }
1616 }
1617
1618 skb_record_rx_queue(skb, rx_ring->cq_id);
1619 if (vlan_id != 0xffff)
1620 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1621 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622 napi_gro_receive(napi, skb);
1623 else
1624 netif_receive_skb(skb);
1625 return;
1626 err_out:
1627 dev_kfree_skb_any(skb);
1628 put_page(lbq_desc->p.pg_chunk.page);
1629 }
1630
1631 /* Process an inbound completion from an rx ring. */
1632 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633 struct rx_ring *rx_ring,
1634 struct ib_mac_iocb_rsp *ib_mac_rsp,
1635 u32 length,
1636 u16 vlan_id)
1637 {
1638 struct net_device *ndev = qdev->ndev;
1639 struct sk_buff *skb = NULL;
1640 struct sk_buff *new_skb = NULL;
1641 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1642
1643 skb = sbq_desc->p.skb;
1644 /* Allocate new_skb and copy */
1645 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646 if (new_skb == NULL) {
1647 rx_ring->rx_dropped++;
1648 return;
1649 }
1650 skb_reserve(new_skb, NET_IP_ALIGN);
1651
1652 pci_dma_sync_single_for_cpu(qdev->pdev,
1653 dma_unmap_addr(sbq_desc, mapaddr),
1654 dma_unmap_len(sbq_desc, maplen),
1655 PCI_DMA_FROMDEVICE);
1656
1657 memcpy(skb_put(new_skb, length), skb->data, length);
1658
1659 pci_dma_sync_single_for_device(qdev->pdev,
1660 dma_unmap_addr(sbq_desc, mapaddr),
1661 dma_unmap_len(sbq_desc, maplen),
1662 PCI_DMA_FROMDEVICE);
1663 skb = new_skb;
1664
1665 /* Frame error, so drop the packet. */
1666 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1667 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1668 dev_kfree_skb_any(skb);
1669 return;
1670 }
1671
1672 /* loopback self test for ethtool */
1673 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1674 ql_check_lb_frame(qdev, skb);
1675 dev_kfree_skb_any(skb);
1676 return;
1677 }
1678
1679 /* The max framesize filter on this chip is set higher than
1680 * MTU since FCoE uses 2k frames.
1681 */
1682 if (skb->len > ndev->mtu + ETH_HLEN) {
1683 dev_kfree_skb_any(skb);
1684 rx_ring->rx_dropped++;
1685 return;
1686 }
1687
1688 prefetch(skb->data);
1689 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1690 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1691 "%s Multicast.\n",
1692 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1693 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1694 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1695 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1696 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1697 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1698 }
1699 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1700 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1701 "Promiscuous Packet.\n");
1702
1703 rx_ring->rx_packets++;
1704 rx_ring->rx_bytes += skb->len;
1705 skb->protocol = eth_type_trans(skb, ndev);
1706 skb_checksum_none_assert(skb);
1707
1708 /* If rx checksum is on, and there are no
1709 * csum or frame errors.
1710 */
1711 if ((ndev->features & NETIF_F_RXCSUM) &&
1712 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1713 /* TCP frame. */
1714 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1715 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1716 "TCP checksum done!\n");
1717 skb->ip_summed = CHECKSUM_UNNECESSARY;
1718 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1719 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1720 /* Unfragmented ipv4 UDP frame. */
1721 struct iphdr *iph = (struct iphdr *) skb->data;
1722 if (!(iph->frag_off &
1723 htons(IP_MF|IP_OFFSET))) {
1724 skb->ip_summed = CHECKSUM_UNNECESSARY;
1725 netif_printk(qdev, rx_status, KERN_DEBUG,
1726 qdev->ndev,
1727 "UDP checksum done!\n");
1728 }
1729 }
1730 }
1731
1732 skb_record_rx_queue(skb, rx_ring->cq_id);
1733 if (vlan_id != 0xffff)
1734 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1735 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1736 napi_gro_receive(&rx_ring->napi, skb);
1737 else
1738 netif_receive_skb(skb);
1739 }
1740
1741 static void ql_realign_skb(struct sk_buff *skb, int len)
1742 {
1743 void *temp_addr = skb->data;
1744
1745 /* Undo the skb_reserve(skb,32) we did before
1746 * giving to hardware, and realign data on
1747 * a 2-byte boundary.
1748 */
1749 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1750 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1751 skb_copy_to_linear_data(skb, temp_addr,
1752 (unsigned int)len);
1753 }
1754
1755 /*
1756 * This function builds an skb for the given inbound
1757 * completion. It will be rewritten for readability in the near
1758 * future, but for now it works well.
1759 */
1760 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1761 struct rx_ring *rx_ring,
1762 struct ib_mac_iocb_rsp *ib_mac_rsp)
1763 {
1764 struct bq_desc *lbq_desc;
1765 struct bq_desc *sbq_desc;
1766 struct sk_buff *skb = NULL;
1767 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1768 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1769 size_t hlen = ETH_HLEN;
1770
1771 /*
1772 * Handle the header buffer if present.
1773 */
1774 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1775 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1776 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1777 "Header of %d bytes in small buffer.\n", hdr_len);
1778 /*
1779 * Headers fit nicely into a small buffer.
1780 */
1781 sbq_desc = ql_get_curr_sbuf(rx_ring);
1782 pci_unmap_single(qdev->pdev,
1783 dma_unmap_addr(sbq_desc, mapaddr),
1784 dma_unmap_len(sbq_desc, maplen),
1785 PCI_DMA_FROMDEVICE);
1786 skb = sbq_desc->p.skb;
1787 ql_realign_skb(skb, hdr_len);
1788 skb_put(skb, hdr_len);
1789 sbq_desc->p.skb = NULL;
1790 }
1791
1792 /*
1793 * Handle the data buffer(s).
1794 */
1795 if (unlikely(!length)) { /* Is there data too? */
1796 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797 "No Data buffer in this packet.\n");
1798 return skb;
1799 }
1800
1801 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1802 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1803 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1804 "Headers in small, data of %d bytes in small, combine them.\n",
1805 length);
1806 /*
1807 * Data is less than small buffer size so it's
1808 * stuffed in a small buffer.
1809 * For this case we append the data
1810 * from the "data" small buffer to the "header" small
1811 * buffer.
1812 */
1813 sbq_desc = ql_get_curr_sbuf(rx_ring);
1814 pci_dma_sync_single_for_cpu(qdev->pdev,
1815 dma_unmap_addr
1816 (sbq_desc, mapaddr),
1817 dma_unmap_len
1818 (sbq_desc, maplen),
1819 PCI_DMA_FROMDEVICE);
1820 memcpy(skb_put(skb, length),
1821 sbq_desc->p.skb->data, length);
1822 pci_dma_sync_single_for_device(qdev->pdev,
1823 dma_unmap_addr
1824 (sbq_desc,
1825 mapaddr),
1826 dma_unmap_len
1827 (sbq_desc,
1828 maplen),
1829 PCI_DMA_FROMDEVICE);
1830 } else {
1831 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1832 "%d bytes in a single small buffer.\n",
1833 length);
1834 sbq_desc = ql_get_curr_sbuf(rx_ring);
1835 skb = sbq_desc->p.skb;
1836 ql_realign_skb(skb, length);
1837 skb_put(skb, length);
1838 pci_unmap_single(qdev->pdev,
1839 dma_unmap_addr(sbq_desc,
1840 mapaddr),
1841 dma_unmap_len(sbq_desc,
1842 maplen),
1843 PCI_DMA_FROMDEVICE);
1844 sbq_desc->p.skb = NULL;
1845 }
1846 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1847 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1848 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1849 "Header in small, %d bytes in large. Chain large to small!\n",
1850 length);
1851 /*
1852 * The data is in a single large buffer. We
1853 * chain it to the header buffer's skb and let
1854 * it rip.
1855 */
1856 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1857 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1858 "Chaining page at offset = %d, for %d bytes to skb.\n",
1859 lbq_desc->p.pg_chunk.offset, length);
1860 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1861 lbq_desc->p.pg_chunk.offset,
1862 length);
1863 skb->len += length;
1864 skb->data_len += length;
1865 skb->truesize += length;
1866 } else {
1867 /*
1868 * The headers and data are in a single large buffer. We
1869 * copy it to a new skb and let it go. This can happen with
1870 * jumbo mtu on a non-TCP/UDP frame.
1871 */
1872 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1873 skb = netdev_alloc_skb(qdev->ndev, length);
1874 if (skb == NULL) {
1875 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1876 "No skb available, drop the packet.\n");
1877 return NULL;
1878 }
1879 pci_unmap_page(qdev->pdev,
1880 dma_unmap_addr(lbq_desc,
1881 mapaddr),
1882 dma_unmap_len(lbq_desc, maplen),
1883 PCI_DMA_FROMDEVICE);
1884 skb_reserve(skb, NET_IP_ALIGN);
1885 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1886 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1887 length);
1888 skb_fill_page_desc(skb, 0,
1889 lbq_desc->p.pg_chunk.page,
1890 lbq_desc->p.pg_chunk.offset,
1891 length);
1892 skb->len += length;
1893 skb->data_len += length;
1894 skb->truesize += length;
1895 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1896 lbq_desc->p.pg_chunk.va,
1897 &hlen);
1898 __pskb_pull_tail(skb, hlen);
1899 }
1900 } else {
1901 /*
1902 * The data is in a chain of large buffers
1903 * pointed to by a small buffer. We loop
1904 		 * through and chain them to our small header
1905 * buffer's skb.
1906 * frags: There are 18 max frags and our small
1907 * buffer will hold 32 of them. The thing is,
1908 * we'll use 3 max for our 9000 byte jumbo
1909 * frames. If the MTU goes up we could
1910 * eventually be in trouble.
1911 */
1912 int size, i = 0;
1913 sbq_desc = ql_get_curr_sbuf(rx_ring);
1914 pci_unmap_single(qdev->pdev,
1915 dma_unmap_addr(sbq_desc, mapaddr),
1916 dma_unmap_len(sbq_desc, maplen),
1917 PCI_DMA_FROMDEVICE);
1918 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1919 /*
1920 			 * This is a non-TCP/UDP IP frame, so
1921 * the headers aren't split into a small
1922 * buffer. We have to use the small buffer
1923 * that contains our sg list as our skb to
1924 * send upstairs. Copy the sg list here to
1925 * a local buffer and use it to find the
1926 * pages to chain.
1927 */
1928 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1929 "%d bytes of headers & data in chain of large.\n",
1930 length);
1931 skb = sbq_desc->p.skb;
1932 sbq_desc->p.skb = NULL;
1933 skb_reserve(skb, NET_IP_ALIGN);
1934 }
1935 do {
1936 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1937 size = (length < rx_ring->lbq_buf_size) ? length :
1938 rx_ring->lbq_buf_size;
1939
1940 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1941 "Adding page %d to skb for %d bytes.\n",
1942 i, size);
1943 skb_fill_page_desc(skb, i,
1944 lbq_desc->p.pg_chunk.page,
1945 lbq_desc->p.pg_chunk.offset,
1946 size);
1947 skb->len += size;
1948 skb->data_len += size;
1949 skb->truesize += size;
1950 length -= size;
1951 i++;
1952 } while (length > 0);
1953 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1954 &hlen);
1955 __pskb_pull_tail(skb, hlen);
1956 }
1957 return skb;
1958 }
1959
1960 /* Process an inbound completion from an rx ring. */
1961 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1962 struct rx_ring *rx_ring,
1963 struct ib_mac_iocb_rsp *ib_mac_rsp,
1964 u16 vlan_id)
1965 {
1966 struct net_device *ndev = qdev->ndev;
1967 struct sk_buff *skb = NULL;
1968
1969 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1970
1971 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1972 if (unlikely(!skb)) {
1973 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1974 "No skb available, drop packet.\n");
1975 rx_ring->rx_dropped++;
1976 return;
1977 }
1978
1979 /* Frame error, so drop the packet. */
1980 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1981 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1982 dev_kfree_skb_any(skb);
1983 return;
1984 }
1985
1986 /* The max framesize filter on this chip is set higher than
1987 * MTU since FCoE uses 2k frames.
1988 */
1989 if (skb->len > ndev->mtu + ETH_HLEN) {
1990 dev_kfree_skb_any(skb);
1991 rx_ring->rx_dropped++;
1992 return;
1993 }
1994
1995 /* loopback self test for ethtool */
1996 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1997 ql_check_lb_frame(qdev, skb);
1998 dev_kfree_skb_any(skb);
1999 return;
2000 }
2001
2002 prefetch(skb->data);
2003 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
2004 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
2005 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2006 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
2007 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2008 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
2009 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2010 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2011 rx_ring->rx_multicast++;
2012 }
2013 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2014 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2015 "Promiscuous Packet.\n");
2016 }
2017
2018 skb->protocol = eth_type_trans(skb, ndev);
2019 skb_checksum_none_assert(skb);
2020
2021 /* If rx checksum is on, and there are no
2022 * csum or frame errors.
2023 */
2024 if ((ndev->features & NETIF_F_RXCSUM) &&
2025 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2026 /* TCP frame. */
2027 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2028 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2029 "TCP checksum done!\n");
2030 skb->ip_summed = CHECKSUM_UNNECESSARY;
2031 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2032 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2033 /* Unfragmented ipv4 UDP frame. */
2034 struct iphdr *iph = (struct iphdr *) skb->data;
2035 if (!(iph->frag_off &
2036 htons(IP_MF|IP_OFFSET))) {
2037 skb->ip_summed = CHECKSUM_UNNECESSARY;
2038 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2039 					     "UDP checksum done!\n");
2040 }
2041 }
2042 }
2043
2044 rx_ring->rx_packets++;
2045 rx_ring->rx_bytes += skb->len;
2046 skb_record_rx_queue(skb, rx_ring->cq_id);
2047 if (vlan_id != 0xffff)
2048 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2049 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2050 napi_gro_receive(&rx_ring->napi, skb);
2051 else
2052 netif_receive_skb(skb);
2053 }
2054
2055 /* Process an inbound completion from an rx ring. */
2056 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2057 struct rx_ring *rx_ring,
2058 struct ib_mac_iocb_rsp *ib_mac_rsp)
2059 {
2060 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2061 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2062 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2063 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2064 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2065
2066 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2067
2068 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2069 /* The data and headers are split into
2070 * separate buffers.
2071 */
2072 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2073 vlan_id);
2074 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2075 /* The data fit in a single small buffer.
2076 * Allocate a new skb, copy the data and
2077 * return the buffer to the free pool.
2078 */
2079 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2080 length, vlan_id);
2081 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2082 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2083 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2084 /* TCP packet in a page chunk that's been checksummed.
2085 * Tack it on to our GRO skb and let it go.
2086 */
2087 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2088 length, vlan_id);
2089 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2090 /* Non-TCP packet in a page chunk. Allocate an
2091 * skb, tack it on frags, and send it up.
2092 */
2093 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2094 length, vlan_id);
2095 } else {
2096 /* Non-TCP/UDP large frames that span multiple buffers
2097 		 * can be processed correctly by the split frame logic.
2098 */
2099 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2100 vlan_id);
2101 }
2102
2103 return (unsigned long)length;
2104 }
2105
2106 /* Process an outbound completion from an rx ring. */
2107 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2108 struct ob_mac_iocb_rsp *mac_rsp)
2109 {
2110 struct tx_ring *tx_ring;
2111 struct tx_ring_desc *tx_ring_desc;
2112
2113 QL_DUMP_OB_MAC_RSP(mac_rsp);
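	/* txq_idx and tid were stashed in the request IOCB by qlge_send();
	 * they identify the tx ring and descriptor that originated this
	 * completion.
	 */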
2114 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2115 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2116 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2117 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2118 tx_ring->tx_packets++;
2119 dev_kfree_skb(tx_ring_desc->skb);
2120 tx_ring_desc->skb = NULL;
2121
2122 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2123 OB_MAC_IOCB_RSP_S |
2124 OB_MAC_IOCB_RSP_L |
2125 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2126 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2127 netif_warn(qdev, tx_done, qdev->ndev,
2128 "Total descriptor length did not match transfer length.\n");
2129 }
2130 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2131 netif_warn(qdev, tx_done, qdev->ndev,
2132 "Frame too short to be valid, not sent.\n");
2133 }
2134 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2135 netif_warn(qdev, tx_done, qdev->ndev,
2136 "Frame too long, but sent anyway.\n");
2137 }
2138 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2139 netif_warn(qdev, tx_done, qdev->ndev,
2140 "PCI backplane error. Frame not sent.\n");
2141 }
2142 }
2143 atomic_inc(&tx_ring->tx_count);
2144 }
2145
2146 /* Fire up a handler to reset the MPI processor. */
2147 void ql_queue_fw_error(struct ql_adapter *qdev)
2148 {
2149 ql_link_off(qdev);
2150 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2151 }
2152
2153 void ql_queue_asic_error(struct ql_adapter *qdev)
2154 {
2155 ql_link_off(qdev);
2156 ql_disable_interrupts(qdev);
2157 /* Clear adapter up bit to signal the recovery
2158 * process that it shouldn't kill the reset worker
2159 * thread
2160 */
2161 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2162 	/* Set the asic recovery bit to tell the reset process that we are
2163 	 * in fatal error recovery rather than a normal close.
2164 	 */
2165 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2166 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2167 }
2168
2169 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2170 struct ib_ae_iocb_rsp *ib_ae_rsp)
2171 {
2172 switch (ib_ae_rsp->event) {
2173 case MGMT_ERR_EVENT:
2174 netif_err(qdev, rx_err, qdev->ndev,
2175 "Management Processor Fatal Error.\n");
2176 ql_queue_fw_error(qdev);
2177 return;
2178
2179 case CAM_LOOKUP_ERR_EVENT:
2180 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2181 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2182 ql_queue_asic_error(qdev);
2183 return;
2184
2185 case SOFT_ECC_ERROR_EVENT:
2186 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2187 ql_queue_asic_error(qdev);
2188 break;
2189
2190 case PCI_ERR_ANON_BUF_RD:
2191 netdev_err(qdev->ndev, "PCI error occurred when reading "
2192 "anonymous buffers from rx_ring %d.\n",
2193 ib_ae_rsp->q_id);
2194 ql_queue_asic_error(qdev);
2195 break;
2196
2197 default:
2198 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2199 ib_ae_rsp->event);
2200 ql_queue_asic_error(qdev);
2201 break;
2202 }
2203 }
2204
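/* "Outbound" completions are TX-completion IOCBs that the chip posts to a
 * dedicated completion queue.  This routine reclaims the transmitted skbs
 * and wakes the TX subqueue once enough descriptors have been freed.
 */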
2205 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2206 {
2207 struct ql_adapter *qdev = rx_ring->qdev;
2208 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2209 struct ob_mac_iocb_rsp *net_rsp = NULL;
2210 int count = 0;
2211
2212 struct tx_ring *tx_ring;
2213 /* While there are entries in the completion queue. */
2214 while (prod != rx_ring->cnsmr_idx) {
2215
2216 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2217 			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2218 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2219
2220 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2221 rmb();
2222 switch (net_rsp->opcode) {
2223
2224 case OPCODE_OB_MAC_TSO_IOCB:
2225 case OPCODE_OB_MAC_IOCB:
2226 ql_process_mac_tx_intr(qdev, net_rsp);
2227 break;
2228 default:
2229 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2230 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2231 net_rsp->opcode);
2232 }
2233 count++;
2234 ql_update_cq(rx_ring);
2235 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2236 }
2237 if (!net_rsp)
2238 return 0;
2239 ql_write_cq_idx(rx_ring);
2240 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2241 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2242 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2243 /*
2244 * The queue got stopped because the tx_ring was full.
2245 * Wake it up, because it's now at least 25% empty.
2246 */
2247 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2248 }
2249
2250 return count;
2251 }
2252
2253 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2254 {
2255 struct ql_adapter *qdev = rx_ring->qdev;
2256 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2257 struct ql_net_rsp_iocb *net_rsp;
2258 int count = 0;
2259
2260 /* While there are entries in the completion queue. */
2261 while (prod != rx_ring->cnsmr_idx) {
2262
2263 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2264 			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2265 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2266
2267 net_rsp = rx_ring->curr_entry;
2268 rmb();
2269 switch (net_rsp->opcode) {
2270 case OPCODE_IB_MAC_IOCB:
2271 ql_process_mac_rx_intr(qdev, rx_ring,
2272 (struct ib_mac_iocb_rsp *)
2273 net_rsp);
2274 break;
2275
2276 case OPCODE_IB_AE_IOCB:
2277 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2278 net_rsp);
2279 break;
2280 default:
2281 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2282 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2283 net_rsp->opcode);
2284 break;
2285 }
2286 count++;
2287 ql_update_cq(rx_ring);
2288 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
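		/* Honor the NAPI budget so one busy completion queue
		 * cannot monopolize the CPU.
		 */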
2289 if (count == budget)
2290 break;
2291 }
2292 ql_update_buffer_queues(qdev, rx_ring);
2293 ql_write_cq_idx(rx_ring);
2294 return count;
2295 }
2296
2297 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2298 {
2299 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2300 struct ql_adapter *qdev = rx_ring->qdev;
2301 struct rx_ring *trx_ring;
2302 int i, work_done = 0;
2303 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2304
2305 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2306 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2307
2308 /* Service the TX rings first. They start
2309 * right after the RSS rings. */
2310 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2311 trx_ring = &qdev->rx_ring[i];
2312 /* If this TX completion ring belongs to this vector and
2313 * it's not empty then service it.
2314 */
2315 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2316 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2317 trx_ring->cnsmr_idx)) {
2318 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2319 "%s: Servicing TX completion ring %d.\n",
2320 __func__, trx_ring->cq_id);
2321 ql_clean_outbound_rx_ring(trx_ring);
2322 }
2323 }
2324
2325 /*
2326 * Now service the RSS ring if it's active.
2327 */
2328 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2329 rx_ring->cnsmr_idx) {
2330 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2331 "%s: Servicing RX completion ring %d.\n",
2332 __func__, rx_ring->cq_id);
2333 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2334 }
2335
2336 if (work_done < budget) {
2337 napi_complete(napi);
2338 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2339 }
2340 return work_done;
2341 }
2342
2343 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2344 {
2345 struct ql_adapter *qdev = netdev_priv(ndev);
2346
2347 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2348 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2349 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2350 } else {
2351 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2352 }
2353 }
2354
2355 /**
2356 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2357 * based on the features to enable/disable hardware vlan accel
2358 */
2359 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2360 netdev_features_t features)
2361 {
2362 struct ql_adapter *qdev = netdev_priv(ndev);
2363 int status = 0;
2364 bool need_restart = netif_running(ndev);
2365
2366 if (need_restart) {
2367 status = ql_adapter_down(qdev);
2368 if (status) {
2369 netif_err(qdev, link, qdev->ndev,
2370 "Failed to bring down the adapter\n");
2371 return status;
2372 }
2373 }
2374
2375 	/* Update the features with the requested change. */
2376 ndev->features = features;
2377
2378 if (need_restart) {
2379 status = ql_adapter_up(qdev);
2380 if (status) {
2381 netif_err(qdev, link, qdev->ndev,
2382 "Failed to bring up the adapter\n");
2383 return status;
2384 }
2385 }
2386
2387 return status;
2388 }
2389
2390 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2391 netdev_features_t features)
2392 {
2393 int err;
2394
2395 /* Update the behavior of vlan accel in the adapter */
2396 err = qlge_update_hw_vlan_features(ndev, features);
2397 if (err)
2398 return err;
2399
2400 return features;
2401 }
2402
2403 static int qlge_set_features(struct net_device *ndev,
2404 netdev_features_t features)
2405 {
2406 netdev_features_t changed = ndev->features ^ features;
2407
2408 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2409 qlge_vlan_mode(ndev, features);
2410
2411 return 0;
2412 }
2413
2414 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2415 {
2416 u32 enable_bit = MAC_ADDR_E;
2417 int err;
2418
2419 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2420 MAC_ADDR_TYPE_VLAN, vid);
2421 if (err)
2422 netif_err(qdev, ifup, qdev->ndev,
2423 "Failed to init vlan address.\n");
2424 return err;
2425 }
2426
2427 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2428 {
2429 struct ql_adapter *qdev = netdev_priv(ndev);
2430 int status;
2431 int err;
2432
2433 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2434 if (status)
2435 return status;
2436
2437 err = __qlge_vlan_rx_add_vid(qdev, vid);
2438 set_bit(vid, qdev->active_vlans);
2439
2440 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2441
2442 return err;
2443 }
2444
2445 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2446 {
2447 u32 enable_bit = 0;
2448 int err;
2449
2450 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2451 MAC_ADDR_TYPE_VLAN, vid);
2452 if (err)
2453 netif_err(qdev, ifup, qdev->ndev,
2454 "Failed to clear vlan address.\n");
2455 return err;
2456 }
2457
2458 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2459 {
2460 struct ql_adapter *qdev = netdev_priv(ndev);
2461 int status;
2462 int err;
2463
2464 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2465 if (status)
2466 return status;
2467
2468 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2469 clear_bit(vid, qdev->active_vlans);
2470
2471 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2472
2473 return err;
2474 }
2475
2476 static void qlge_restore_vlan(struct ql_adapter *qdev)
2477 {
2478 int status;
2479 u16 vid;
2480
2481 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2482 if (status)
2483 return;
2484
2485 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2486 __qlge_vlan_rx_add_vid(qdev, vid);
2487
2488 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2489 }
2490
2491 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2492 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2493 {
2494 struct rx_ring *rx_ring = dev_id;
2495 napi_schedule(&rx_ring->napi);
2496 return IRQ_HANDLED;
2497 }
2498
2499 /* This handles a fatal error, MPI activity, and the default
2500 * rx_ring in an MSI-X multiple vector environment.
2501  * In an MSI/Legacy environment it also processes the rest of
2502 * the rx_rings.
2503 */
2504 static irqreturn_t qlge_isr(int irq, void *dev_id)
2505 {
2506 struct rx_ring *rx_ring = dev_id;
2507 struct ql_adapter *qdev = rx_ring->qdev;
2508 struct intr_context *intr_context = &qdev->intr_context[0];
2509 u32 var;
2510 int work_done = 0;
2511
2512 spin_lock(&qdev->hw_lock);
2513 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2514 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2515 "Shared Interrupt, Not ours!\n");
2516 spin_unlock(&qdev->hw_lock);
2517 return IRQ_NONE;
2518 }
2519 spin_unlock(&qdev->hw_lock);
2520
2521 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2522
2523 /*
2524 * Check for fatal error.
2525 */
2526 if (var & STS_FE) {
2527 ql_queue_asic_error(qdev);
2528 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2529 var = ql_read32(qdev, ERR_STS);
2530 netdev_err(qdev->ndev, "Resetting chip. "
2531 "Error Status Register = 0x%x\n", var);
2532 return IRQ_HANDLED;
2533 }
2534
2535 /*
2536 * Check MPI processor activity.
2537 */
2538 if ((var & STS_PI) &&
2539 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2540 /*
2541 * We've got an async event or mailbox completion.
2542 * Handle it and clear the source of the interrupt.
2543 */
2544 netif_err(qdev, intr, qdev->ndev,
2545 "Got MPI processor interrupt.\n");
2546 ql_disable_completion_interrupt(qdev, intr_context->intr);
2547 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2548 queue_delayed_work_on(smp_processor_id(),
2549 qdev->workqueue, &qdev->mpi_work, 0);
2550 work_done++;
2551 }
2552
2553 /*
2554 * Get the bit-mask that shows the active queues for this
2555 * pass. Compare it to the queues that this irq services
2556 * and call napi if there's a match.
2557 */
2558 var = ql_read32(qdev, ISR1);
2559 if (var & intr_context->irq_mask) {
2560 netif_info(qdev, intr, qdev->ndev,
2561 "Waking handler for rx_ring[0].\n");
2562 ql_disable_completion_interrupt(qdev, intr_context->intr);
2563 napi_schedule(&rx_ring->napi);
2564 work_done++;
2565 }
2566 ql_enable_completion_interrupt(qdev, intr_context->intr);
2567 return work_done ? IRQ_HANDLED : IRQ_NONE;
2568 }
2569
2570 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2571 {
2572
2573 if (skb_is_gso(skb)) {
2574 int err;
2575 __be16 l3_proto = vlan_get_protocol(skb);
2576
2577 err = skb_cow_head(skb, 0);
2578 if (err < 0)
2579 return err;
2580
2581 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2582 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2583 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2584 mac_iocb_ptr->total_hdrs_len =
2585 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2586 mac_iocb_ptr->net_trans_offset =
2587 cpu_to_le16(skb_network_offset(skb) |
2588 skb_transport_offset(skb)
2589 << OB_MAC_TRANSPORT_HDR_SHIFT);
2590 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2591 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
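		/* Zero the IP checksum and seed the TCP checksum field with
		 * the pseudo-header checksum so the hardware can finish both
		 * for every segment it emits.
		 */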
2592 if (likely(l3_proto == htons(ETH_P_IP))) {
2593 struct iphdr *iph = ip_hdr(skb);
2594 iph->check = 0;
2595 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2596 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2597 iph->daddr, 0,
2598 IPPROTO_TCP,
2599 0);
2600 } else if (l3_proto == htons(ETH_P_IPV6)) {
2601 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2602 tcp_hdr(skb)->check =
2603 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2604 &ipv6_hdr(skb)->daddr,
2605 0, IPPROTO_TCP, 0);
2606 }
2607 return 1;
2608 }
2609 return 0;
2610 }
2611
2612 static void ql_hw_csum_setup(struct sk_buff *skb,
2613 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2614 {
2615 int len;
2616 struct iphdr *iph = ip_hdr(skb);
2617 __sum16 *check;
2618 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2619 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2620 mac_iocb_ptr->net_trans_offset =
2621 cpu_to_le16(skb_network_offset(skb) |
2622 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2623
2624 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2625 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2626 if (likely(iph->protocol == IPPROTO_TCP)) {
2627 check = &(tcp_hdr(skb)->check);
2628 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2629 mac_iocb_ptr->total_hdrs_len =
2630 cpu_to_le16(skb_transport_offset(skb) +
2631 (tcp_hdr(skb)->doff << 2));
2632 } else {
2633 check = &(udp_hdr(skb)->check);
2634 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2635 mac_iocb_ptr->total_hdrs_len =
2636 cpu_to_le16(skb_transport_offset(skb) +
2637 sizeof(struct udphdr));
2638 }
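	/* check points at the TCP or UDP checksum field.  Stuff it with the
	 * pseudo-header checksum and let the chip compute the final checksum
	 * over the payload.
	 */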
2639 *check = ~csum_tcpudp_magic(iph->saddr,
2640 iph->daddr, len, iph->protocol, 0);
2641 }
2642
2643 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2644 {
2645 struct tx_ring_desc *tx_ring_desc;
2646 struct ob_mac_iocb_req *mac_iocb_ptr;
2647 struct ql_adapter *qdev = netdev_priv(ndev);
2648 int tso;
2649 struct tx_ring *tx_ring;
2650 u32 tx_ring_idx = (u32) skb->queue_mapping;
2651
2652 tx_ring = &qdev->tx_ring[tx_ring_idx];
2653
2654 if (skb_padto(skb, ETH_ZLEN))
2655 return NETDEV_TX_OK;
2656
2657 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2658 netif_info(qdev, tx_queued, qdev->ndev,
2659 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2660 __func__, tx_ring_idx);
2661 netif_stop_subqueue(ndev, tx_ring->wq_id);
2662 tx_ring->tx_errors++;
2663 return NETDEV_TX_BUSY;
2664 }
2665 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2666 mac_iocb_ptr = tx_ring_desc->queue_entry;
2667 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2668
2669 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2670 mac_iocb_ptr->tid = tx_ring_desc->index;
2671 /* We use the upper 32-bits to store the tx queue for this IO.
2672 * When we get the completion we can use it to establish the context.
2673 */
2674 mac_iocb_ptr->txq_idx = tx_ring_idx;
2675 tx_ring_desc->skb = skb;
2676
2677 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2678
2679 if (skb_vlan_tag_present(skb)) {
2680 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2681 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2682 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2683 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2684 }
2685 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2686 if (tso < 0) {
2687 dev_kfree_skb_any(skb);
2688 return NETDEV_TX_OK;
2689 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2690 ql_hw_csum_setup(skb,
2691 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2692 }
2693 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2694 NETDEV_TX_OK) {
2695 netif_err(qdev, tx_queued, qdev->ndev,
2696 "Could not map the segments.\n");
2697 tx_ring->tx_errors++;
2698 return NETDEV_TX_BUSY;
2699 }
2700 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2701 tx_ring->prod_idx++;
2702 if (tx_ring->prod_idx == tx_ring->wq_len)
2703 tx_ring->prod_idx = 0;
2704 wmb();
2705
2706 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2707 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2708 "tx queued, slot %d, len %d\n",
2709 tx_ring->prod_idx, skb->len);
2710
2711 atomic_dec(&tx_ring->tx_count);
2712
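	/* The descriptor just queued may have (nearly) filled the ring.  If
	 * so, stop the subqueue, then re-check in case completions freed
	 * enough space in the meantime that the queue can be woken again.
	 */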
2713 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2714 netif_stop_subqueue(ndev, tx_ring->wq_id);
2715 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2716 /*
2717 * The queue got stopped because the tx_ring was full.
2718 * Wake it up, because it's now at least 25% empty.
2719 */
2720 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2721 }
2722 return NETDEV_TX_OK;
2723 }
2724
2725
2726 static void ql_free_shadow_space(struct ql_adapter *qdev)
2727 {
2728 if (qdev->rx_ring_shadow_reg_area) {
2729 pci_free_consistent(qdev->pdev,
2730 PAGE_SIZE,
2731 qdev->rx_ring_shadow_reg_area,
2732 qdev->rx_ring_shadow_reg_dma);
2733 qdev->rx_ring_shadow_reg_area = NULL;
2734 }
2735 if (qdev->tx_ring_shadow_reg_area) {
2736 pci_free_consistent(qdev->pdev,
2737 PAGE_SIZE,
2738 qdev->tx_ring_shadow_reg_area,
2739 qdev->tx_ring_shadow_reg_dma);
2740 qdev->tx_ring_shadow_reg_area = NULL;
2741 }
2742 }
2743
2744 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2745 {
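	/* The rx and tx shadow areas are each one DMA-coherent page;
	 * ql_start_rx_ring() and ql_start_tx_ring() carve per-ring slices
	 * out of them.
	 */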
2746 qdev->rx_ring_shadow_reg_area =
2747 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2748 &qdev->rx_ring_shadow_reg_dma);
2749 if (qdev->rx_ring_shadow_reg_area == NULL) {
2750 netif_err(qdev, ifup, qdev->ndev,
2751 "Allocation of RX shadow space failed.\n");
2752 return -ENOMEM;
2753 }
2754
2755 qdev->tx_ring_shadow_reg_area =
2756 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2757 &qdev->tx_ring_shadow_reg_dma);
2758 if (qdev->tx_ring_shadow_reg_area == NULL) {
2759 netif_err(qdev, ifup, qdev->ndev,
2760 "Allocation of TX shadow space failed.\n");
2761 goto err_wqp_sh_area;
2762 }
2763 return 0;
2764
2765 err_wqp_sh_area:
2766 pci_free_consistent(qdev->pdev,
2767 PAGE_SIZE,
2768 qdev->rx_ring_shadow_reg_area,
2769 qdev->rx_ring_shadow_reg_dma);
2770 return -ENOMEM;
2771 }
2772
2773 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2774 {
2775 struct tx_ring_desc *tx_ring_desc;
2776 int i;
2777 struct ob_mac_iocb_req *mac_iocb_ptr;
2778
2779 mac_iocb_ptr = tx_ring->wq_base;
2780 tx_ring_desc = tx_ring->q;
2781 for (i = 0; i < tx_ring->wq_len; i++) {
2782 tx_ring_desc->index = i;
2783 tx_ring_desc->skb = NULL;
2784 tx_ring_desc->queue_entry = mac_iocb_ptr;
2785 mac_iocb_ptr++;
2786 tx_ring_desc++;
2787 }
2788 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2789 }
2790
2791 static void ql_free_tx_resources(struct ql_adapter *qdev,
2792 struct tx_ring *tx_ring)
2793 {
2794 if (tx_ring->wq_base) {
2795 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2796 tx_ring->wq_base, tx_ring->wq_base_dma);
2797 tx_ring->wq_base = NULL;
2798 }
2799 kfree(tx_ring->q);
2800 tx_ring->q = NULL;
2801 }
2802
2803 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2804 struct tx_ring *tx_ring)
2805 {
2806 tx_ring->wq_base =
2807 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2808 &tx_ring->wq_base_dma);
2809
2810 if ((tx_ring->wq_base == NULL) ||
2811 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2812 goto pci_alloc_err;
2813
2814 tx_ring->q =
2815 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2816 if (tx_ring->q == NULL)
2817 goto err;
2818
2819 return 0;
2820 err:
2821 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2822 tx_ring->wq_base, tx_ring->wq_base_dma);
2823 tx_ring->wq_base = NULL;
2824 pci_alloc_err:
2825 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2826 return -ENOMEM;
2827 }
2828
2829 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2830 {
2831 struct bq_desc *lbq_desc;
2832
2833 uint32_t curr_idx, clean_idx;
2834
2835 curr_idx = rx_ring->lbq_curr_idx;
2836 clean_idx = rx_ring->lbq_clean_idx;
2837 while (curr_idx != clean_idx) {
2838 lbq_desc = &rx_ring->lbq[curr_idx];
2839
2840 if (lbq_desc->p.pg_chunk.last_flag) {
2841 pci_unmap_page(qdev->pdev,
2842 lbq_desc->p.pg_chunk.map,
2843 ql_lbq_block_size(qdev),
2844 PCI_DMA_FROMDEVICE);
2845 lbq_desc->p.pg_chunk.last_flag = 0;
2846 }
2847
2848 put_page(lbq_desc->p.pg_chunk.page);
2849 lbq_desc->p.pg_chunk.page = NULL;
2850
2851 if (++curr_idx == rx_ring->lbq_len)
2852 curr_idx = 0;
2853
2854 }
2855 if (rx_ring->pg_chunk.page) {
2856 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2857 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2858 put_page(rx_ring->pg_chunk.page);
2859 rx_ring->pg_chunk.page = NULL;
2860 }
2861 }
2862
2863 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2864 {
2865 int i;
2866 struct bq_desc *sbq_desc;
2867
2868 for (i = 0; i < rx_ring->sbq_len; i++) {
2869 sbq_desc = &rx_ring->sbq[i];
2870 if (sbq_desc == NULL) {
2871 netif_err(qdev, ifup, qdev->ndev,
2872 "sbq_desc %d is NULL.\n", i);
2873 return;
2874 }
2875 if (sbq_desc->p.skb) {
2876 pci_unmap_single(qdev->pdev,
2877 dma_unmap_addr(sbq_desc, mapaddr),
2878 dma_unmap_len(sbq_desc, maplen),
2879 PCI_DMA_FROMDEVICE);
2880 dev_kfree_skb(sbq_desc->p.skb);
2881 sbq_desc->p.skb = NULL;
2882 }
2883 }
2884 }
2885
2886 /* Free all large and small rx buffers associated
2887 * with the completion queues for this device.
2888 */
2889 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2890 {
2891 int i;
2892 struct rx_ring *rx_ring;
2893
2894 for (i = 0; i < qdev->rx_ring_count; i++) {
2895 rx_ring = &qdev->rx_ring[i];
2896 if (rx_ring->lbq)
2897 ql_free_lbq_buffers(qdev, rx_ring);
2898 if (rx_ring->sbq)
2899 ql_free_sbq_buffers(qdev, rx_ring);
2900 }
2901 }
2902
2903 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2904 {
2905 struct rx_ring *rx_ring;
2906 int i;
2907
2908 for (i = 0; i < qdev->rx_ring_count; i++) {
2909 rx_ring = &qdev->rx_ring[i];
2910 if (rx_ring->type != TX_Q)
2911 ql_update_buffer_queues(qdev, rx_ring);
2912 }
2913 }
2914
2915 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2916 struct rx_ring *rx_ring)
2917 {
2918 int i;
2919 struct bq_desc *lbq_desc;
2920 __le64 *bq = rx_ring->lbq_base;
2921
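	/* Each descriptor's addr points at its slot in the DMA-visible large
	 * buffer queue, where the buffer's bus address will be written when a
	 * page chunk is later posted to the chip.
	 */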
2922 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2923 for (i = 0; i < rx_ring->lbq_len; i++) {
2924 lbq_desc = &rx_ring->lbq[i];
2925 memset(lbq_desc, 0, sizeof(*lbq_desc));
2926 lbq_desc->index = i;
2927 lbq_desc->addr = bq;
2928 bq++;
2929 }
2930 }
2931
2932 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2933 struct rx_ring *rx_ring)
2934 {
2935 int i;
2936 struct bq_desc *sbq_desc;
2937 __le64 *bq = rx_ring->sbq_base;
2938
2939 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2940 for (i = 0; i < rx_ring->sbq_len; i++) {
2941 sbq_desc = &rx_ring->sbq[i];
2942 memset(sbq_desc, 0, sizeof(*sbq_desc));
2943 sbq_desc->index = i;
2944 sbq_desc->addr = bq;
2945 bq++;
2946 }
2947 }
2948
2949 static void ql_free_rx_resources(struct ql_adapter *qdev,
2950 struct rx_ring *rx_ring)
2951 {
2952 /* Free the small buffer queue. */
2953 if (rx_ring->sbq_base) {
2954 pci_free_consistent(qdev->pdev,
2955 rx_ring->sbq_size,
2956 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2957 rx_ring->sbq_base = NULL;
2958 }
2959
2960 /* Free the small buffer queue control blocks. */
2961 kfree(rx_ring->sbq);
2962 rx_ring->sbq = NULL;
2963
2964 /* Free the large buffer queue. */
2965 if (rx_ring->lbq_base) {
2966 pci_free_consistent(qdev->pdev,
2967 rx_ring->lbq_size,
2968 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2969 rx_ring->lbq_base = NULL;
2970 }
2971
2972 /* Free the large buffer queue control blocks. */
2973 kfree(rx_ring->lbq);
2974 rx_ring->lbq = NULL;
2975
2976 /* Free the rx queue. */
2977 if (rx_ring->cq_base) {
2978 pci_free_consistent(qdev->pdev,
2979 rx_ring->cq_size,
2980 rx_ring->cq_base, rx_ring->cq_base_dma);
2981 rx_ring->cq_base = NULL;
2982 }
2983 }
2984
2985 /* Allocate queues and buffers for this completion queue based
2986 * on the values in the parameter structure. */
2987 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2988 struct rx_ring *rx_ring)
2989 {
2990
2991 /*
2992 * Allocate the completion queue for this rx_ring.
2993 */
2994 rx_ring->cq_base =
2995 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2996 &rx_ring->cq_base_dma);
2997
2998 if (rx_ring->cq_base == NULL) {
2999 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
3000 return -ENOMEM;
3001 }
3002
3003 if (rx_ring->sbq_len) {
3004 /*
3005 * Allocate small buffer queue.
3006 */
3007 rx_ring->sbq_base =
3008 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
3009 &rx_ring->sbq_base_dma);
3010
3011 if (rx_ring->sbq_base == NULL) {
3012 netif_err(qdev, ifup, qdev->ndev,
3013 "Small buffer queue allocation failed.\n");
3014 goto err_mem;
3015 }
3016
3017 /*
3018 * Allocate small buffer queue control blocks.
3019 */
3020 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3021 sizeof(struct bq_desc),
3022 GFP_KERNEL);
3023 if (rx_ring->sbq == NULL)
3024 goto err_mem;
3025
3026 ql_init_sbq_ring(qdev, rx_ring);
3027 }
3028
3029 if (rx_ring->lbq_len) {
3030 /*
3031 * Allocate large buffer queue.
3032 */
3033 rx_ring->lbq_base =
3034 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3035 &rx_ring->lbq_base_dma);
3036
3037 if (rx_ring->lbq_base == NULL) {
3038 netif_err(qdev, ifup, qdev->ndev,
3039 "Large buffer queue allocation failed.\n");
3040 goto err_mem;
3041 }
3042 /*
3043 * Allocate large buffer queue control blocks.
3044 */
3045 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3046 sizeof(struct bq_desc),
3047 GFP_KERNEL);
3048 if (rx_ring->lbq == NULL)
3049 goto err_mem;
3050
3051 ql_init_lbq_ring(qdev, rx_ring);
3052 }
3053
3054 return 0;
3055
3056 err_mem:
3057 ql_free_rx_resources(qdev, rx_ring);
3058 return -ENOMEM;
3059 }
3060
3061 static void ql_tx_ring_clean(struct ql_adapter *qdev)
3062 {
3063 struct tx_ring *tx_ring;
3064 struct tx_ring_desc *tx_ring_desc;
3065 int i, j;
3066
3067 /*
3068 * Loop through all queues and free
3069 * any resources.
3070 */
3071 for (j = 0; j < qdev->tx_ring_count; j++) {
3072 tx_ring = &qdev->tx_ring[j];
3073 for (i = 0; i < tx_ring->wq_len; i++) {
3074 tx_ring_desc = &tx_ring->q[i];
3075 if (tx_ring_desc && tx_ring_desc->skb) {
3076 netif_err(qdev, ifdown, qdev->ndev,
3077 "Freeing lost SKB %p, from queue %d, index %d.\n",
3078 tx_ring_desc->skb, j,
3079 tx_ring_desc->index);
3080 ql_unmap_send(qdev, tx_ring_desc,
3081 tx_ring_desc->map_cnt);
3082 dev_kfree_skb(tx_ring_desc->skb);
3083 tx_ring_desc->skb = NULL;
3084 }
3085 }
3086 }
3087 }
3088
3089 static void ql_free_mem_resources(struct ql_adapter *qdev)
3090 {
3091 int i;
3092
3093 for (i = 0; i < qdev->tx_ring_count; i++)
3094 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3095 for (i = 0; i < qdev->rx_ring_count; i++)
3096 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3097 ql_free_shadow_space(qdev);
3098 }
3099
3100 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3101 {
3102 int i;
3103
3104 /* Allocate space for our shadow registers and such. */
3105 if (ql_alloc_shadow_space(qdev))
3106 return -ENOMEM;
3107
3108 for (i = 0; i < qdev->rx_ring_count; i++) {
3109 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3110 netif_err(qdev, ifup, qdev->ndev,
3111 "RX resource allocation failed.\n");
3112 goto err_mem;
3113 }
3114 }
3115 /* Allocate tx queue resources */
3116 for (i = 0; i < qdev->tx_ring_count; i++) {
3117 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3118 netif_err(qdev, ifup, qdev->ndev,
3119 "TX resource allocation failed.\n");
3120 goto err_mem;
3121 }
3122 }
3123 return 0;
3124
3125 err_mem:
3126 ql_free_mem_resources(qdev);
3127 return -ENOMEM;
3128 }
3129
3130 /* Set up the rx ring control block and pass it to the chip.
3131 * The control block is defined as
3132 * "Completion Queue Initialization Control Block", or cqicb.
3133 */
3134 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3135 {
3136 struct cqicb *cqicb = &rx_ring->cqicb;
3137 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3138 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3139 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3140 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3141 void __iomem *doorbell_area =
3142 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3143 int err = 0;
3144 u16 bq_len;
3145 u64 tmp;
3146 __le64 *base_indirect_ptr;
3147 int page_entries;
3148
3149 /* Set up the shadow registers for this ring. */
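	/* Layout of this ring's RX_RING_SHADOW_SPACE: an 8-byte producer
	 * index shadow, followed by the lbq and then sbq indirect
	 * page-address lists.
	 */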
3150 rx_ring->prod_idx_sh_reg = shadow_reg;
3151 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3152 *rx_ring->prod_idx_sh_reg = 0;
3153 shadow_reg += sizeof(u64);
3154 shadow_reg_dma += sizeof(u64);
3155 rx_ring->lbq_base_indirect = shadow_reg;
3156 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3157 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3158 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3159 rx_ring->sbq_base_indirect = shadow_reg;
3160 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3161
3162 /* PCI doorbell mem area + 0x00 for consumer index register */
3163 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3164 rx_ring->cnsmr_idx = 0;
3165 rx_ring->curr_entry = rx_ring->cq_base;
3166
3167 /* PCI doorbell mem area + 0x04 for valid register */
3168 rx_ring->valid_db_reg = doorbell_area + 0x04;
3169
3170 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3171 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3172
3173 /* PCI doorbell mem area + 0x1c */
3174 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3175
3176 memset((void *)cqicb, 0, sizeof(struct cqicb));
3177 cqicb->msix_vect = rx_ring->irq;
3178
3179 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3180 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3181
3182 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3183
3184 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3185
3186 /*
3187 * Set up the control block load flags.
3188 */
3189 cqicb->flags = FLAGS_LC | /* Load queue base address */
3190 FLAGS_LV | /* Load MSI-X vector */
3191 FLAGS_LI; /* Load irq delay values */
3192 if (rx_ring->lbq_len) {
3193 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3194 tmp = (u64)rx_ring->lbq_base_dma;
3195 base_indirect_ptr = rx_ring->lbq_base_indirect;
3196 page_entries = 0;
3197 do {
3198 *base_indirect_ptr = cpu_to_le64(tmp);
3199 tmp += DB_PAGE_SIZE;
3200 base_indirect_ptr++;
3201 page_entries++;
3202 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3203 cqicb->lbq_addr =
3204 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3205 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3206 (u16) rx_ring->lbq_buf_size;
3207 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3208 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3209 (u16) rx_ring->lbq_len;
3210 cqicb->lbq_len = cpu_to_le16(bq_len);
3211 rx_ring->lbq_prod_idx = 0;
3212 rx_ring->lbq_curr_idx = 0;
3213 rx_ring->lbq_clean_idx = 0;
3214 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3215 }
3216 if (rx_ring->sbq_len) {
3217 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3218 tmp = (u64)rx_ring->sbq_base_dma;
3219 base_indirect_ptr = rx_ring->sbq_base_indirect;
3220 page_entries = 0;
3221 do {
3222 *base_indirect_ptr = cpu_to_le64(tmp);
3223 tmp += DB_PAGE_SIZE;
3224 base_indirect_ptr++;
3225 page_entries++;
3226 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3227 cqicb->sbq_addr =
3228 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3229 cqicb->sbq_buf_size =
3230 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3231 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3232 (u16) rx_ring->sbq_len;
3233 cqicb->sbq_len = cpu_to_le16(bq_len);
3234 rx_ring->sbq_prod_idx = 0;
3235 rx_ring->sbq_curr_idx = 0;
3236 rx_ring->sbq_clean_idx = 0;
3237 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3238 }
3239 switch (rx_ring->type) {
3240 case TX_Q:
3241 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3242 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3243 break;
3244 case RX_Q:
3245 /* Inbound completion handling rx_rings run in
3246 * separate NAPI contexts.
3247 */
3248 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3249 64);
3250 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3251 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3252 break;
3253 default:
3254 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3255 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3256 }
3257 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3258 CFG_LCQ, rx_ring->cq_id);
3259 if (err) {
3260 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3261 return err;
3262 }
3263 return err;
3264 }
3265
3266 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3267 {
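	/* The wqicb is expected to be the first member of struct tx_ring
	 * (see qlge.h), so the ring pointer can be cast directly to the
	 * control block that is handed to the chip.
	 */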
3268 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3269 void __iomem *doorbell_area =
3270 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3271 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3272 (tx_ring->wq_id * sizeof(u64));
3273 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3274 (tx_ring->wq_id * sizeof(u64));
3275 int err = 0;
3276
3277 /*
3278 * Assign doorbell registers for this tx_ring.
3279 */
3280 /* TX PCI doorbell mem area for tx producer index */
3281 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3282 tx_ring->prod_idx = 0;
3283 /* TX PCI doorbell mem area + 0x04 */
3284 tx_ring->valid_db_reg = doorbell_area + 0x04;
3285
3286 /*
3287 * Assign shadow registers for this tx_ring.
3288 */
3289 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3290 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3291
3292 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3293 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3294 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3295 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3296 wqicb->rid = 0;
3297 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3298
3299 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3300
3301 ql_init_tx_ring(qdev, tx_ring);
3302
3303 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3304 (u16) tx_ring->wq_id);
3305 if (err) {
3306 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3307 return err;
3308 }
3309 return err;
3310 }
3311
3312 static void ql_disable_msix(struct ql_adapter *qdev)
3313 {
3314 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3315 pci_disable_msix(qdev->pdev);
3316 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3317 kfree(qdev->msi_x_entry);
3318 qdev->msi_x_entry = NULL;
3319 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3320 pci_disable_msi(qdev->pdev);
3321 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3322 }
3323 }
3324
3325 /* We start by trying to get the number of vectors
3326 * stored in qdev->intr_count. If we don't get that
3327 * many then we reduce the count and try again.
3328 */
3329 static void ql_enable_msix(struct ql_adapter *qdev)
3330 {
3331 int i, err;
3332
3333 /* Get the MSIX vectors. */
3334 if (qlge_irq_type == MSIX_IRQ) {
3335 /* Try to alloc space for the msix struct,
3336 * if it fails then go to MSI/legacy.
3337 */
3338 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3339 sizeof(struct msix_entry),
3340 GFP_KERNEL);
3341 if (!qdev->msi_x_entry) {
3342 qlge_irq_type = MSI_IRQ;
3343 goto msi;
3344 }
3345
3346 for (i = 0; i < qdev->intr_count; i++)
3347 qdev->msi_x_entry[i].entry = i;
3348
3349 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3350 1, qdev->intr_count);
3351 if (err < 0) {
3352 kfree(qdev->msi_x_entry);
3353 qdev->msi_x_entry = NULL;
3354 netif_warn(qdev, ifup, qdev->ndev,
3355 "MSI-X Enable failed, trying MSI.\n");
3356 qlge_irq_type = MSI_IRQ;
3357 } else {
3358 qdev->intr_count = err;
3359 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3360 netif_info(qdev, ifup, qdev->ndev,
3361 "MSI-X Enabled, got %d vectors.\n",
3362 qdev->intr_count);
3363 return;
3364 }
3365 }
3366 msi:
3367 qdev->intr_count = 1;
3368 if (qlge_irq_type == MSI_IRQ) {
3369 if (!pci_enable_msi(qdev->pdev)) {
3370 set_bit(QL_MSI_ENABLED, &qdev->flags);
3371 netif_info(qdev, ifup, qdev->ndev,
3372 "Running with MSI interrupts.\n");
3373 return;
3374 }
3375 }
3376 qlge_irq_type = LEG_IRQ;
3377 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3378 "Running with legacy interrupts.\n");
3379 }
3380
3381 /* Each vector services 1 RSS ring and 1 or more
3382 * TX completion rings. This function loops through
3383 * the TX completion rings and assigns the vector that
3384 * will service it. An example would be if there are
3385 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3386 * This would mean that vector 0 would service RSS ring 0
3387 * and TX completion rings 0,1,2 and 3. Vector 1 would
3388 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3389 */
3390 static void ql_set_tx_vect(struct ql_adapter *qdev)
3391 {
3392 int i, j, vect;
3393 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3394
3395 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3396 /* Assign irq vectors to TX rx_rings.*/
3397 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3398 i < qdev->rx_ring_count; i++) {
3399 if (j == tx_rings_per_vector) {
3400 vect++;
3401 j = 0;
3402 }
3403 qdev->rx_ring[i].irq = vect;
3404 j++;
3405 }
3406 } else {
3407 /* For single vector all rings have an irq
3408 * of zero.
3409 */
3410 for (i = 0; i < qdev->rx_ring_count; i++)
3411 qdev->rx_ring[i].irq = 0;
3412 }
3413 }
3414
3415 /* Set the interrupt mask for this vector. Each vector
3416 * will service 1 RSS ring and 1 or more TX completion
3417 * rings. This function sets up a bit mask per vector
3418 * that indicates which rings it services.
3419 */
3420 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3421 {
3422 int j, vect = ctx->intr;
3423 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3424
3425 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3426 /* Add the RSS ring serviced by this vector
3427 * to the mask.
3428 */
3429 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3430 /* Add the TX ring(s) serviced by this vector
3431 * to the mask. */
3432 for (j = 0; j < tx_rings_per_vector; j++) {
3433 ctx->irq_mask |=
3434 (1 << qdev->rx_ring[qdev->rss_ring_count +
3435 (vect * tx_rings_per_vector) + j].cq_id);
3436 }
3437 } else {
3438 /* For single vector we just shift each queue's
3439 * ID into the mask.
3440 */
3441 for (j = 0; j < qdev->rx_ring_count; j++)
3442 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3443 }
3444 }
3445
3446 /*
3447 * Here we build the intr_context structures based on
3448 * our rx_ring count and intr vector count.
3449 * The intr_context structure is used to hook each vector
3450 * to possibly different handlers.
3451 */
3452 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3453 {
3454 int i = 0;
3455 struct intr_context *intr_context = &qdev->intr_context[0];
3456
3457 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3458 		/* Each rx_ring has its
3459 * own intr_context since we have separate
3460 * vectors for each queue.
3461 */
3462 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3463 qdev->rx_ring[i].irq = i;
3464 intr_context->intr = i;
3465 intr_context->qdev = qdev;
3466 /* Set up this vector's bit-mask that indicates
3467 * which queues it services.
3468 */
3469 ql_set_irq_mask(qdev, intr_context);
3470 /*
3471 * We set up each vectors enable/disable/read bits so
3472 			 * We set up each vector's enable/disable/read bits so
3473 			 * there are no bit/mask calculations in the critical path.
3474 intr_context->intr_en_mask =
3475 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3476 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3477 | i;
3478 intr_context->intr_dis_mask =
3479 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3480 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3481 INTR_EN_IHD | i;
3482 intr_context->intr_read_mask =
3483 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3484 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3485 i;
3486 if (i == 0) {
3487 /* The first vector/queue handles
3488 * broadcast/multicast, fatal errors,
3489 * and firmware events. This in addition
3490 * to normal inbound NAPI processing.
3491 */
3492 intr_context->handler = qlge_isr;
3493 sprintf(intr_context->name, "%s-rx-%d",
3494 qdev->ndev->name, i);
3495 } else {
3496 /*
3497 * Inbound queues handle unicast frames only.
3498 */
3499 intr_context->handler = qlge_msix_rx_isr;
3500 sprintf(intr_context->name, "%s-rx-%d",
3501 qdev->ndev->name, i);
3502 }
3503 }
3504 } else {
3505 /*
3506 * All rx_rings use the same intr_context since
3507 * there is only one vector.
3508 */
3509 intr_context->intr = 0;
3510 intr_context->qdev = qdev;
3511 /*
3512 		 * We set up each vector's enable/disable/read bits so
3513 		 * there are no bit/mask calculations in the critical path.
3514 */
3515 intr_context->intr_en_mask =
3516 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3517 intr_context->intr_dis_mask =
3518 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3519 INTR_EN_TYPE_DISABLE;
3520 intr_context->intr_read_mask =
3521 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3522 /*
3523 * Single interrupt means one handler for all rings.
3524 */
3525 intr_context->handler = qlge_isr;
3526 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3527 /* Set up this vector's bit-mask that indicates
3528 * which queues it services. In this case there is
3529 * a single vector so it will service all RSS and
3530 * TX completion rings.
3531 */
3532 ql_set_irq_mask(qdev, intr_context);
3533 }
3534 /* Tell the TX completion rings which MSIx vector
3535 * they will be using.
3536 */
3537 ql_set_tx_vect(qdev);
3538 }
3539
3540 static void ql_free_irq(struct ql_adapter *qdev)
3541 {
3542 int i;
3543 struct intr_context *intr_context = &qdev->intr_context[0];
3544
3545 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3546 if (intr_context->hooked) {
3547 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3548 free_irq(qdev->msi_x_entry[i].vector,
3549 &qdev->rx_ring[i]);
3550 } else {
3551 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3552 }
3553 }
3554 }
3555 ql_disable_msix(qdev);
3556 }
3557
3558 static int ql_request_irq(struct ql_adapter *qdev)
3559 {
3560 int i;
3561 int status = 0;
3562 struct pci_dev *pdev = qdev->pdev;
3563 struct intr_context *intr_context = &qdev->intr_context[0];
3564
3565 ql_resolve_queues_to_irqs(qdev);
3566
3567 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3568 atomic_set(&intr_context->irq_cnt, 0);
3569 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3570 status = request_irq(qdev->msi_x_entry[i].vector,
3571 intr_context->handler,
3572 0,
3573 intr_context->name,
3574 &qdev->rx_ring[i]);
3575 if (status) {
3576 netif_err(qdev, ifup, qdev->ndev,
3577 "Failed request for MSIX interrupt %d.\n",
3578 i);
3579 goto err_irq;
3580 }
3581 } else {
3582 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3583 "trying msi or legacy interrupts.\n");
3584 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3585 "%s: irq = %d.\n", __func__, pdev->irq);
3586 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3587 "%s: context->name = %s.\n", __func__,
3588 intr_context->name);
3589 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3590 "%s: dev_id = 0x%p.\n", __func__,
3591 &qdev->rx_ring[0]);
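			/* An MSI vector is exclusive to this device, while a
			 * legacy INTx line may be shared with other devices,
			 * so only request IRQF_SHARED in the legacy case.
			 */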
3592 			status =
3593 			    request_irq(pdev->irq, qlge_isr,
3594 					test_bit(QL_MSI_ENABLED, &qdev->flags)
3595 					? 0 : IRQF_SHARED,
3596 					intr_context->name,
3597 					&qdev->rx_ring[0]);
3598 if (status)
3599 goto err_irq;
3600
3601 netif_err(qdev, ifup, qdev->ndev,
3602 "Hooked intr %d, queue type %s, with name %s.\n",
3603 i,
3604 qdev->rx_ring[0].type == DEFAULT_Q ?
3605 "DEFAULT_Q" :
3606 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3607 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3608 intr_context->name);
3609 }
3610 intr_context->hooked = 1;
3611 }
3612 return status;
3613 err_irq:
3614 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3615 ql_free_irq(qdev);
3616 return status;
3617 }
3618
3619 static int ql_start_rss(struct ql_adapter *qdev)
3620 {
3621 static const u8 init_hash_seed[] = {
3622 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3623 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3624 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3625 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3626 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3627 };
3628 struct ricb *ricb = &qdev->ricb;
3629 int status = 0;
3630 int i;
3631 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3632
3633 memset((void *)ricb, 0, sizeof(*ricb));
3634
3635 ricb->base_cq = RSS_L4K;
3636 ricb->flags =
3637 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3638 ricb->mask = cpu_to_le16((u16)(0x3ff));
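	/* The 10-bit hash mask (0x3ff) indexes the 1024-entry
	 * indirection table filled in below.
	 */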
3639
3640 /*
3641 * Fill out the Indirection Table.
3642 */
3643 for (i = 0; i < 1024; i++)
3644 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3645
3646 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3647 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3648
3649 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3650 if (status) {
3651 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3652 return status;
3653 }
3654 return status;
3655 }
3656
3657 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3658 {
3659 int i, status = 0;
3660
3661 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3662 if (status)
3663 return status;
3664 /* Clear all the entries in the routing table. */
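	/* Each of the 16 slots is written with a zero value and its
	 * enable flag clear, which releases the entry.
	 */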
3665 for (i = 0; i < 16; i++) {
3666 status = ql_set_routing_reg(qdev, i, 0, 0);
3667 if (status) {
3668 netif_err(qdev, ifup, qdev->ndev,
3669 "Failed to init routing register for CAM packets.\n");
3670 break;
3671 }
3672 }
3673 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3674 return status;
3675 }
3676
3677 /* Initialize the frame-to-queue routing. */
3678 static int ql_route_initialize(struct ql_adapter *qdev)
3679 {
3680 int status = 0;
3681
3682 /* Clear all the entries in the routing table. */
3683 status = ql_clear_routing_entries(qdev);
3684 if (status)
3685 return status;
3686
3687 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3688 if (status)
3689 return status;
3690
3691 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3692 RT_IDX_IP_CSUM_ERR, 1);
3693 if (status) {
3694 netif_err(qdev, ifup, qdev->ndev,
3695 "Failed to init routing register "
3696 "for IP CSUM error packets.\n");
3697 goto exit;
3698 }
3699 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3700 RT_IDX_TU_CSUM_ERR, 1);
3701 if (status) {
3702 netif_err(qdev, ifup, qdev->ndev,
3703 "Failed to init routing register "
3704 "for TCP/UDP CSUM error packets.\n");
3705 goto exit;
3706 }
3707 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3708 if (status) {
3709 netif_err(qdev, ifup, qdev->ndev,
3710 "Failed to init routing register for broadcast packets.\n");
3711 goto exit;
3712 }
3713 /* If we have more than one inbound queue, then turn on RSS in the
3714 * routing block.
3715 */
3716 if (qdev->rss_ring_count > 1) {
3717 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3718 RT_IDX_RSS_MATCH, 1);
3719 if (status) {
3720 netif_err(qdev, ifup, qdev->ndev,
3721 "Failed to init routing register for MATCH RSS packets.\n");
3722 goto exit;
3723 }
3724 }
3725
3726 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3727 RT_IDX_CAM_HIT, 1);
3728 if (status)
3729 netif_err(qdev, ifup, qdev->ndev,
3730 "Failed to init routing register for CAM packets.\n");
3731 exit:
3732 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3733 return status;
3734 }
3735
3736 int ql_cam_route_initialize(struct ql_adapter *qdev)
3737 {
3738 int status, set;
3739
3740 	/* Check if the link is up and use that to
3741 	 * determine whether we are setting or clearing
3742 	 * the MAC address in the CAM.
3743 */
3744 set = ql_read32(qdev, STS);
3745 set &= qdev->port_link_up;
3746 status = ql_set_mac_addr(qdev, set);
3747 if (status) {
3748 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3749 return status;
3750 }
3751
3752 status = ql_route_initialize(qdev);
3753 if (status)
3754 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3755
3756 return status;
3757 }
3758
3759 static int ql_adapter_initialize(struct ql_adapter *qdev)
3760 {
3761 u32 value, mask;
3762 int i;
3763 int status = 0;
3764
3765 /*
3766 * Set up the System register to halt on errors.
3767 */
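	/* Note: several control registers on this chip (SYS, NIC_RCV_CFG,
	 * FSC, ...) take a write-enable mask in their upper 16 bits; only
	 * the low bits whose mask bit is set are actually modified.
	 */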
3768 value = SYS_EFE | SYS_FAE;
3769 mask = value << 16;
3770 ql_write32(qdev, SYS, mask | value);
3771
3772 	/* Set the default queue and VLAN behavior. */
3773 value = NIC_RCV_CFG_DFQ;
3774 mask = NIC_RCV_CFG_DFQ_MASK;
3775 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3776 value |= NIC_RCV_CFG_RV;
3777 mask |= (NIC_RCV_CFG_RV << 16);
3778 }
3779 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3780
3781 /* Set the MPI interrupt to enabled. */
3782 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3783
3784 /* Enable the function, set pagesize, enable error checking. */
3785 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3786 FSC_EC | FSC_VM_PAGE_4K;
3787 value |= SPLT_SETTING;
3788
3789 /* Set/clear header splitting. */
3790 mask = FSC_VM_PAGESIZE_MASK |
3791 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3792 ql_write32(qdev, FSC, mask | value);
3793
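	/* Program the header split length; received headers up to this size
	 * are presumably placed in small buffers, with the remaining payload
	 * going to the large buffer queue.
	 */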
3794 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3795
3796 	/* Set RX packet routing to use the port/PCI function on which the
3797 	 * packet arrived, in addition to the usual frame routing.
3798 	 * This is helpful with bonding, where both interfaces can have
3799 * the same MAC address.
3800 */
3801 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3802 	/* Reroute all packets to our interface.
3803 * They may have been routed to MPI firmware
3804 * due to WOL.
3805 */
3806 value = ql_read32(qdev, MGMT_RCV_CFG);
3807 value &= ~MGMT_RCV_CFG_RM;
3808 mask = 0xffff0000;
3809
3810 /* Sticky reg needs clearing due to WOL. */
3811 ql_write32(qdev, MGMT_RCV_CFG, mask);
3812 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3813
3814 	/* Default WOL is enabled on Mezz cards */
3815 if (qdev->pdev->subsystem_device == 0x0068 ||
3816 qdev->pdev->subsystem_device == 0x0180)
3817 qdev->wol = WAKE_MAGIC;
3818
3819 /* Start up the rx queues. */
3820 for (i = 0; i < qdev->rx_ring_count; i++) {
3821 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3822 if (status) {
3823 netif_err(qdev, ifup, qdev->ndev,
3824 "Failed to start rx ring[%d].\n", i);
3825 return status;
3826 }
3827 }
3828
3829 /* If there is more than one inbound completion queue
3830 * then download a RICB to configure RSS.
3831 */
3832 if (qdev->rss_ring_count > 1) {
3833 status = ql_start_rss(qdev);
3834 if (status) {
3835 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3836 return status;
3837 }
3838 }
3839
3840 /* Start up the tx queues. */
3841 for (i = 0; i < qdev->tx_ring_count; i++) {
3842 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3843 if (status) {
3844 netif_err(qdev, ifup, qdev->ndev,
3845 "Failed to start tx ring[%d].\n", i);
3846 return status;
3847 }
3848 }
3849
3850 /* Initialize the port and set the max framesize. */
3851 status = qdev->nic_ops->port_initialize(qdev);
3852 if (status)
3853 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3854
3855 /* Set up the MAC address and frame routing filter. */
3856 status = ql_cam_route_initialize(qdev);
3857 if (status) {
3858 netif_err(qdev, ifup, qdev->ndev,
3859 "Failed to init CAM/Routing tables.\n");
3860 return status;
3861 }
3862
3863 /* Start NAPI for the RSS queues. */
3864 for (i = 0; i < qdev->rss_ring_count; i++)
3865 napi_enable(&qdev->rx_ring[i].napi);
3866
3867 return status;
3868 }
3869
3870 /* Issue soft reset to chip. */
3871 static int ql_adapter_reset(struct ql_adapter *qdev)
3872 {
3873 u32 value;
3874 int status = 0;
3875 unsigned long end_jiffies;
3876
3877 /* Clear all the entries in the routing table. */
3878 status = ql_clear_routing_entries(qdev);
3879 if (status) {
3880 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3881 return status;
3882 }
3883
3884 	/* If the recovery bit is set, skip the mailbox command and
3885 	 * clear the bit; otherwise we are in the normal reset process.
3886 */
3887 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3888 /* Stop management traffic. */
3889 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3890
3891 /* Wait for the NIC and MGMNT FIFOs to empty. */
3892 ql_wait_fifo_empty(qdev);
3893 } else
3894 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3895
3896 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3897
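	/* The function reset bit self-clears when the chip completes the
	 * reset; poll briefly and fall through to the timeout check below.
	 */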
3898 end_jiffies = jiffies + usecs_to_jiffies(30);
3899 do {
3900 value = ql_read32(qdev, RST_FO);
3901 if ((value & RST_FO_FR) == 0)
3902 break;
3903 cpu_relax();
3904 } while (time_before(jiffies, end_jiffies));
3905
3906 if (value & RST_FO_FR) {
3907 netif_err(qdev, ifdown, qdev->ndev,
3908 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3909 status = -ETIMEDOUT;
3910 }
3911
3912 /* Resume management traffic. */
3913 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3914 return status;
3915 }
3916
3917 static void ql_display_dev_info(struct net_device *ndev)
3918 {
3919 struct ql_adapter *qdev = netdev_priv(ndev);
3920
3921 netif_info(qdev, probe, qdev->ndev,
3922 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3923 "XG Roll = %d, XG Rev = %d.\n",
3924 qdev->func,
3925 qdev->port,
3926 qdev->chip_rev_id & 0x0000000f,
3927 qdev->chip_rev_id >> 4 & 0x0000000f,
3928 qdev->chip_rev_id >> 8 & 0x0000000f,
3929 qdev->chip_rev_id >> 12 & 0x0000000f);
3930 netif_info(qdev, probe, qdev->ndev,
3931 "MAC address %pM\n", ndev->dev_addr);
3932 }
3933
3934 static int ql_wol(struct ql_adapter *qdev)
3935 {
3936 int status = 0;
3937 u32 wol = MB_WOL_DISABLE;
3938
3939 /* The CAM is still intact after a reset, but if we
3940 * are doing WOL, then we may need to program the
3941 * routing regs. We would also need to issue the mailbox
3942 * commands to instruct the MPI what to do per the ethtool
3943 * settings.
3944 */
3945
3946 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3947 WAKE_MCAST | WAKE_BCAST)) {
3948 netif_err(qdev, ifdown, qdev->ndev,
3949 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3950 qdev->wol);
3951 return -EINVAL;
3952 }
3953
3954 if (qdev->wol & WAKE_MAGIC) {
3955 status = ql_mb_wol_set_magic(qdev, 1);
3956 if (status) {
3957 netif_err(qdev, ifdown, qdev->ndev,
3958 "Failed to set magic packet on %s.\n",
3959 qdev->ndev->name);
3960 return status;
3961 } else
3962 netif_info(qdev, drv, qdev->ndev,
3963 "Enabled magic packet successfully on %s.\n",
3964 qdev->ndev->name);
3965
3966 wol |= MB_WOL_MAGIC_PKT;
3967 }
3968
3969 if (qdev->wol) {
3970 wol |= MB_WOL_MODE_ON;
3971 status = ql_mb_wol_mode(qdev, wol);
3972 netif_err(qdev, drv, qdev->ndev,
3973 "WOL %s (wol code 0x%x) on %s\n",
3974 (status == 0) ? "Successfully set" : "Failed",
3975 wol, qdev->ndev->name);
3976 }
3977
3978 return status;
3979 }
3980
3981 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3982 {
3983
3984 /* Don't kill the reset worker thread if we
3985 * are in the process of recovery.
3986 */
3987 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3988 cancel_delayed_work_sync(&qdev->asic_reset_work);
3989 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3990 cancel_delayed_work_sync(&qdev->mpi_work);
3991 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3992 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3993 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3994 }
3995
3996 static int ql_adapter_down(struct ql_adapter *qdev)
3997 {
3998 int i, status = 0;
3999
4000 ql_link_off(qdev);
4001
4002 ql_cancel_all_work_sync(qdev);
4003
4004 for (i = 0; i < qdev->rss_ring_count; i++)
4005 napi_disable(&qdev->rx_ring[i].napi);
4006
4007 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4008
4009 ql_disable_interrupts(qdev);
4010
4011 ql_tx_ring_clean(qdev);
4012
4013 	/* Call netif_napi_del() from a common point.
4014 */
4015 for (i = 0; i < qdev->rss_ring_count; i++)
4016 netif_napi_del(&qdev->rx_ring[i].napi);
4017
4018 status = ql_adapter_reset(qdev);
4019 if (status)
4020 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4021 qdev->func);
4022 ql_free_rx_buffers(qdev);
4023
4024 return status;
4025 }
4026
4027 static int ql_adapter_up(struct ql_adapter *qdev)
4028 {
4029 int err = 0;
4030
4031 err = ql_adapter_initialize(qdev);
4032 if (err) {
4033 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4034 goto err_init;
4035 }
4036 set_bit(QL_ADAPTER_UP, &qdev->flags);
4037 ql_alloc_rx_buffers(qdev);
4038 	/* If the port is initialized and the
4039 	 * link is up, then turn on the carrier.
4040 */
4041 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4042 (ql_read32(qdev, STS) & qdev->port_link_up))
4043 ql_link_on(qdev);
4044 /* Restore rx mode. */
4045 clear_bit(QL_ALLMULTI, &qdev->flags);
4046 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4047 qlge_set_multicast_list(qdev->ndev);
4048
4049 /* Restore vlan setting. */
4050 qlge_restore_vlan(qdev);
4051
4052 ql_enable_interrupts(qdev);
4053 ql_enable_all_completion_interrupts(qdev);
4054 netif_tx_start_all_queues(qdev->ndev);
4055
4056 return 0;
4057 err_init:
4058 ql_adapter_reset(qdev);
4059 return err;
4060 }
4061
4062 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4063 {
4064 ql_free_mem_resources(qdev);
4065 ql_free_irq(qdev);
4066 }
4067
4068 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4069 {
4070 int status = 0;
4071
4072 if (ql_alloc_mem_resources(qdev)) {
4073 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4074 return -ENOMEM;
4075 }
4076 status = ql_request_irq(qdev);
4077 return status;
4078 }
4079
4080 static int qlge_close(struct net_device *ndev)
4081 {
4082 struct ql_adapter *qdev = netdev_priv(ndev);
4083
4084 	/* If we hit the pci_channel_io_perm_failure
4085 	 * condition, then we have already
4086 * brought the adapter down.
4087 */
4088 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4089 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4090 clear_bit(QL_EEH_FATAL, &qdev->flags);
4091 return 0;
4092 }
4093
4094 /*
4095 * Wait for device to recover from a reset.
4096 * (Rarely happens, but possible.)
4097 */
4098 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4099 msleep(1);
4100 ql_adapter_down(qdev);
4101 ql_release_adapter_resources(qdev);
4102 return 0;
4103 }
4104
4105 static int ql_configure_rings(struct ql_adapter *qdev)
4106 {
4107 int i;
4108 struct rx_ring *rx_ring;
4109 struct tx_ring *tx_ring;
4110 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4111 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4112 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4113
4114 qdev->lbq_buf_order = get_order(lbq_buf_len);
4115
4116 /* In a perfect world we have one RSS ring for each CPU
4117 	 * and each has its own vector. To do that we ask for
4118 * cpu_cnt vectors. ql_enable_msix() will adjust the
4119 * vector count to what we actually get. We then
4120 * allocate an RSS ring for each.
4121 * Essentially, we are doing min(cpu_count, msix_vector_count).
4122 */
4123 qdev->intr_count = cpu_cnt;
4124 ql_enable_msix(qdev);
4125 /* Adjust the RSS ring count to the actual vector count. */
4126 qdev->rss_ring_count = qdev->intr_count;
4127 qdev->tx_ring_count = cpu_cnt;
4128 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4129
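	/* rx_ring[] holds the RSS (inbound) completion queues first,
	 * followed by one outbound completion queue per tx_ring. The tx
	 * loop below points each tx_ring at its outbound CQ; the rx loop
	 * then sizes each CQ according to its type.
	 */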
4130 for (i = 0; i < qdev->tx_ring_count; i++) {
4131 tx_ring = &qdev->tx_ring[i];
4132 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4133 tx_ring->qdev = qdev;
4134 tx_ring->wq_id = i;
4135 tx_ring->wq_len = qdev->tx_ring_size;
4136 tx_ring->wq_size =
4137 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4138
4139 /*
4140 		 * The completion queue IDs for the tx rings start
4141 * immediately after the rss rings.
4142 */
4143 tx_ring->cq_id = qdev->rss_ring_count + i;
4144 }
4145
4146 for (i = 0; i < qdev->rx_ring_count; i++) {
4147 rx_ring = &qdev->rx_ring[i];
4148 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4149 rx_ring->qdev = qdev;
4150 rx_ring->cq_id = i;
4151 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4152 if (i < qdev->rss_ring_count) {
4153 /*
4154 * Inbound (RSS) queues.
4155 */
4156 rx_ring->cq_len = qdev->rx_ring_size;
4157 rx_ring->cq_size =
4158 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4159 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4160 rx_ring->lbq_size =
4161 rx_ring->lbq_len * sizeof(__le64);
4162 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4163 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4164 rx_ring->sbq_size =
4165 rx_ring->sbq_len * sizeof(__le64);
4166 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4167 rx_ring->type = RX_Q;
4168 } else {
4169 /*
4170 * Outbound queue handles outbound completions only.
4171 */
4172 /* outbound cq is same size as tx_ring it services. */
4173 rx_ring->cq_len = qdev->tx_ring_size;
4174 rx_ring->cq_size =
4175 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4176 rx_ring->lbq_len = 0;
4177 rx_ring->lbq_size = 0;
4178 rx_ring->lbq_buf_size = 0;
4179 rx_ring->sbq_len = 0;
4180 rx_ring->sbq_size = 0;
4181 rx_ring->sbq_buf_size = 0;
4182 rx_ring->type = TX_Q;
4183 }
4184 }
4185 return 0;
4186 }
4187
4188 static int qlge_open(struct net_device *ndev)
4189 {
4190 int err = 0;
4191 struct ql_adapter *qdev = netdev_priv(ndev);
4192
4193 err = ql_adapter_reset(qdev);
4194 if (err)
4195 return err;
4196
4197 err = ql_configure_rings(qdev);
4198 if (err)
4199 return err;
4200
4201 err = ql_get_adapter_resources(qdev);
4202 if (err)
4203 goto error_up;
4204
4205 err = ql_adapter_up(qdev);
4206 if (err)
4207 goto error_up;
4208
4209 return err;
4210
4211 error_up:
4212 ql_release_adapter_resources(qdev);
4213 return err;
4214 }
4215
4216 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4217 {
4218 struct rx_ring *rx_ring;
4219 int i, status;
4220 u32 lbq_buf_len;
4221
4222 /* Wait for an outstanding reset to complete. */
4223 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4224 int i = 4;
4225
4226 while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4227 netif_err(qdev, ifup, qdev->ndev,
4228 "Waiting for adapter UP...\n");
4229 ssleep(1);
4230 }
4231
4232 if (!i) {
4233 netif_err(qdev, ifup, qdev->ndev,
4234 "Timed out waiting for adapter UP\n");
4235 return -ETIMEDOUT;
4236 }
4237 }
4238
4239 status = ql_adapter_down(qdev);
4240 if (status)
4241 goto error;
4242
4243 /* Get the new rx buffer size. */
4244 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4245 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4246 qdev->lbq_buf_order = get_order(lbq_buf_len);
4247
4248 for (i = 0; i < qdev->rss_ring_count; i++) {
4249 rx_ring = &qdev->rx_ring[i];
4250 /* Set the new size. */
4251 rx_ring->lbq_buf_size = lbq_buf_len;
4252 }
4253
4254 status = ql_adapter_up(qdev);
4255 if (status)
4256 goto error;
4257
4258 return status;
4259 error:
4260 netif_alert(qdev, ifup, qdev->ndev,
4261 "Driver up/down cycle failed, closing device.\n");
4262 set_bit(QL_ADAPTER_UP, &qdev->flags);
4263 dev_close(qdev->ndev);
4264 return status;
4265 }
4266
4267 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4268 {
4269 struct ql_adapter *qdev = netdev_priv(ndev);
4270 int status;
4271
4272 if (ndev->mtu == 1500 && new_mtu == 9000) {
4273 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4274 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4275 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4276 } else
4277 return -EINVAL;
4278
4279 queue_delayed_work(qdev->workqueue,
4280 &qdev->mpi_port_cfg_work, 3*HZ);
4281
4282 ndev->mtu = new_mtu;
4283
4284 if (!netif_running(qdev->ndev)) {
4285 return 0;
4286 }
4287
4288 status = ql_change_rx_buffers(qdev);
4289 if (status) {
4290 netif_err(qdev, ifup, qdev->ndev,
4291 "Changing MTU failed.\n");
4292 }
4293
4294 return status;
4295 }
4296
4297 static struct net_device_stats *qlge_get_stats(struct net_device
4298 *ndev)
4299 {
4300 struct ql_adapter *qdev = netdev_priv(ndev);
4301 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4302 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4303 unsigned long pkts, mcast, dropped, errors, bytes;
4304 int i;
4305
4306 /* Get RX stats. */
4307 pkts = mcast = dropped = errors = bytes = 0;
4308 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4309 pkts += rx_ring->rx_packets;
4310 bytes += rx_ring->rx_bytes;
4311 dropped += rx_ring->rx_dropped;
4312 errors += rx_ring->rx_errors;
4313 mcast += rx_ring->rx_multicast;
4314 }
4315 ndev->stats.rx_packets = pkts;
4316 ndev->stats.rx_bytes = bytes;
4317 ndev->stats.rx_dropped = dropped;
4318 ndev->stats.rx_errors = errors;
4319 ndev->stats.multicast = mcast;
4320
4321 /* Get TX stats. */
4322 pkts = errors = bytes = 0;
4323 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4324 pkts += tx_ring->tx_packets;
4325 bytes += tx_ring->tx_bytes;
4326 errors += tx_ring->tx_errors;
4327 }
4328 ndev->stats.tx_packets = pkts;
4329 ndev->stats.tx_bytes = bytes;
4330 ndev->stats.tx_errors = errors;
4331 return &ndev->stats;
4332 }
4333
4334 static void qlge_set_multicast_list(struct net_device *ndev)
4335 {
4336 struct ql_adapter *qdev = netdev_priv(ndev);
4337 struct netdev_hw_addr *ha;
4338 int i, status;
4339
4340 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4341 if (status)
4342 return;
4343 /*
4344 * Set or clear promiscuous mode if a
4345 * transition is taking place.
4346 */
4347 if (ndev->flags & IFF_PROMISC) {
4348 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4349 if (ql_set_routing_reg
4350 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4351 netif_err(qdev, hw, qdev->ndev,
4352 "Failed to set promiscuous mode.\n");
4353 } else {
4354 set_bit(QL_PROMISCUOUS, &qdev->flags);
4355 }
4356 }
4357 } else {
4358 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4359 if (ql_set_routing_reg
4360 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4361 netif_err(qdev, hw, qdev->ndev,
4362 "Failed to clear promiscuous mode.\n");
4363 } else {
4364 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4365 }
4366 }
4367 }
4368
4369 /*
4370 * Set or clear all multicast mode if a
4371 * transition is taking place.
4372 */
4373 if ((ndev->flags & IFF_ALLMULTI) ||
4374 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4375 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4376 if (ql_set_routing_reg
4377 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4378 netif_err(qdev, hw, qdev->ndev,
4379 "Failed to set all-multi mode.\n");
4380 } else {
4381 set_bit(QL_ALLMULTI, &qdev->flags);
4382 }
4383 }
4384 } else {
4385 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4386 if (ql_set_routing_reg
4387 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4388 netif_err(qdev, hw, qdev->ndev,
4389 "Failed to clear all-multi mode.\n");
4390 } else {
4391 clear_bit(QL_ALLMULTI, &qdev->flags);
4392 }
4393 }
4394 }
4395
4396 if (!netdev_mc_empty(ndev)) {
4397 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4398 if (status)
4399 goto exit;
4400 i = 0;
4401 netdev_for_each_mc_addr(ha, ndev) {
4402 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4403 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4404 netif_err(qdev, hw, qdev->ndev,
4405 "Failed to loadmulticast address.\n");
4406 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4407 goto exit;
4408 }
4409 i++;
4410 }
4411 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4412 if (ql_set_routing_reg
4413 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4414 netif_err(qdev, hw, qdev->ndev,
4415 "Failed to set multicast match mode.\n");
4416 } else {
4417 set_bit(QL_ALLMULTI, &qdev->flags);
4418 }
4419 }
4420 exit:
4421 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4422 }
4423
4424 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4425 {
4426 struct ql_adapter *qdev = netdev_priv(ndev);
4427 struct sockaddr *addr = p;
4428 int status;
4429
4430 if (!is_valid_ether_addr(addr->sa_data))
4431 return -EADDRNOTAVAIL;
4432 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4433 /* Update local copy of current mac address. */
4434 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4435
4436 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4437 if (status)
4438 return status;
4439 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4440 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4441 if (status)
4442 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4443 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4444 return status;
4445 }
4446
4447 static void qlge_tx_timeout(struct net_device *ndev)
4448 {
4449 struct ql_adapter *qdev = netdev_priv(ndev);
4450 ql_queue_asic_error(qdev);
4451 }
4452
4453 static void ql_asic_reset_work(struct work_struct *work)
4454 {
4455 struct ql_adapter *qdev =
4456 container_of(work, struct ql_adapter, asic_reset_work.work);
4457 int status;
4458 rtnl_lock();
4459 status = ql_adapter_down(qdev);
4460 if (status)
4461 goto error;
4462
4463 status = ql_adapter_up(qdev);
4464 if (status)
4465 goto error;
4466
4467 /* Restore rx mode. */
4468 clear_bit(QL_ALLMULTI, &qdev->flags);
4469 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4470 qlge_set_multicast_list(qdev->ndev);
4471
4472 rtnl_unlock();
4473 return;
4474 error:
4475 netif_alert(qdev, ifup, qdev->ndev,
4476 "Driver up/down cycle failed, closing device\n");
4477
4478 set_bit(QL_ADAPTER_UP, &qdev->flags);
4479 dev_close(qdev->ndev);
4480 rtnl_unlock();
4481 }
4482
4483 static const struct nic_operations qla8012_nic_ops = {
4484 .get_flash = ql_get_8012_flash_params,
4485 .port_initialize = ql_8012_port_initialize,
4486 };
4487
4488 static const struct nic_operations qla8000_nic_ops = {
4489 .get_flash = ql_get_8000_flash_params,
4490 .port_initialize = ql_8000_port_initialize,
4491 };
4492
4493 /* Find the pcie function number for the other NIC
4494 * on this chip. Since both NIC functions share a
4495 * common firmware we have the lowest enabled function
4496 * do any common work. Examples would be resetting
4497 * after a fatal firmware error, or doing a firmware
4498 * coredump.
4499 */
4500 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4501 {
4502 int status = 0;
4503 u32 temp;
4504 u32 nic_func1, nic_func2;
4505
4506 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4507 &temp);
4508 if (status)
4509 return status;
4510
4511 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4512 MPI_TEST_NIC_FUNC_MASK);
4513 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4514 MPI_TEST_NIC_FUNC_MASK);
4515
4516 if (qdev->func == nic_func1)
4517 qdev->alt_func = nic_func2;
4518 else if (qdev->func == nic_func2)
4519 qdev->alt_func = nic_func1;
4520 else
4521 status = -EIO;
4522
4523 return status;
4524 }
4525
4526 static int ql_get_board_info(struct ql_adapter *qdev)
4527 {
4528 int status;
4529 qdev->func =
4530 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4531 if (qdev->func > 3)
4532 return -EIO;
4533
4534 status = ql_get_alt_pcie_func(qdev);
4535 if (status)
4536 return status;
4537
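	/* The lower-numbered of the two NIC functions maps to port 0, the
	 * other to port 1; the port determines which XGMAC semaphore, link
	 * status bits and MPI mailbox addresses this function uses.
	 */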
4538 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4539 if (qdev->port) {
4540 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4541 qdev->port_link_up = STS_PL1;
4542 qdev->port_init = STS_PI1;
4543 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4544 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4545 } else {
4546 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4547 qdev->port_link_up = STS_PL0;
4548 qdev->port_init = STS_PI0;
4549 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4550 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4551 }
4552 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4553 qdev->device_id = qdev->pdev->device;
4554 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4555 qdev->nic_ops = &qla8012_nic_ops;
4556 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4557 qdev->nic_ops = &qla8000_nic_ops;
4558 return status;
4559 }
4560
4561 static void ql_release_all(struct pci_dev *pdev)
4562 {
4563 struct net_device *ndev = pci_get_drvdata(pdev);
4564 struct ql_adapter *qdev = netdev_priv(ndev);
4565
4566 if (qdev->workqueue) {
4567 destroy_workqueue(qdev->workqueue);
4568 qdev->workqueue = NULL;
4569 }
4570
4571 if (qdev->reg_base)
4572 iounmap(qdev->reg_base);
4573 if (qdev->doorbell_area)
4574 iounmap(qdev->doorbell_area);
4575 vfree(qdev->mpi_coredump);
4576 pci_release_regions(pdev);
4577 }
4578
4579 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4580 int cards_found)
4581 {
4582 struct ql_adapter *qdev = netdev_priv(ndev);
4583 int err = 0;
4584
4585 memset((void *)qdev, 0, sizeof(*qdev));
4586 err = pci_enable_device(pdev);
4587 if (err) {
4588 dev_err(&pdev->dev, "PCI device enable failed.\n");
4589 return err;
4590 }
4591
4592 qdev->ndev = ndev;
4593 qdev->pdev = pdev;
4594 pci_set_drvdata(pdev, ndev);
4595
4596 /* Set PCIe read request size */
4597 err = pcie_set_readrq(pdev, 4096);
4598 if (err) {
4599 dev_err(&pdev->dev, "Set readrq failed.\n");
4600 goto err_out1;
4601 }
4602
4603 err = pci_request_regions(pdev, DRV_NAME);
4604 if (err) {
4605 dev_err(&pdev->dev, "PCI region request failed.\n");
4606 		goto err_out1;
4607 }
4608
4609 pci_set_master(pdev);
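	/* Prefer 64-bit DMA and coherent masks; fall back to 32-bit if the
	 * platform cannot support it.
	 */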
4610 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4611 set_bit(QL_DMA64, &qdev->flags);
4612 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4613 } else {
4614 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4615 if (!err)
4616 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4617 }
4618
4619 if (err) {
4620 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4621 goto err_out2;
4622 }
4623
4624 /* Set PCIe reset type for EEH to fundamental. */
4625 pdev->needs_freset = 1;
4626 pci_save_state(pdev);
4627 qdev->reg_base =
4628 ioremap_nocache(pci_resource_start(pdev, 1),
4629 pci_resource_len(pdev, 1));
4630 if (!qdev->reg_base) {
4631 dev_err(&pdev->dev, "Register mapping failed.\n");
4632 err = -ENOMEM;
4633 goto err_out2;
4634 }
4635
4636 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4637 qdev->doorbell_area =
4638 ioremap_nocache(pci_resource_start(pdev, 3),
4639 pci_resource_len(pdev, 3));
4640 if (!qdev->doorbell_area) {
4641 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4642 err = -ENOMEM;
4643 goto err_out2;
4644 }
4645
4646 err = ql_get_board_info(qdev);
4647 if (err) {
4648 dev_err(&pdev->dev, "Register access failed.\n");
4649 err = -EIO;
4650 goto err_out2;
4651 }
4652 qdev->msg_enable = netif_msg_init(debug, default_msg);
4653 spin_lock_init(&qdev->hw_lock);
4654 spin_lock_init(&qdev->stats_lock);
4655
4656 if (qlge_mpi_coredump) {
4657 qdev->mpi_coredump =
4658 vmalloc(sizeof(struct ql_mpi_coredump));
4659 if (qdev->mpi_coredump == NULL) {
4660 err = -ENOMEM;
4661 goto err_out2;
4662 }
4663 if (qlge_force_coredump)
4664 set_bit(QL_FRC_COREDUMP, &qdev->flags);
4665 }
4666 /* make sure the EEPROM is good */
4667 err = qdev->nic_ops->get_flash(qdev);
4668 if (err) {
4669 dev_err(&pdev->dev, "Invalid FLASH.\n");
4670 goto err_out2;
4671 }
4672
4673 /* Keep local copy of current mac address. */
4674 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4675
4676 /* Set up the default ring sizes. */
4677 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4678 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4679
4680 /* Set up the coalescing parameters. */
4681 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4682 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4683 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4684 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4685
4686 /*
4687 * Set up the operating parameters.
4688 */
4689 qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
4690 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4691 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4692 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4693 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4694 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4695 INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4696 init_completion(&qdev->ide_completion);
4697 mutex_init(&qdev->mpi_mutex);
4698
4699 if (!cards_found) {
4700 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4701 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4702 DRV_NAME, DRV_VERSION);
4703 }
4704 return 0;
4705 err_out2:
4706 ql_release_all(pdev);
4707 err_out1:
4708 pci_disable_device(pdev);
4709 return err;
4710 }
4711
4712 static const struct net_device_ops qlge_netdev_ops = {
4713 .ndo_open = qlge_open,
4714 .ndo_stop = qlge_close,
4715 .ndo_start_xmit = qlge_send,
4716 .ndo_change_mtu = qlge_change_mtu,
4717 .ndo_get_stats = qlge_get_stats,
4718 .ndo_set_rx_mode = qlge_set_multicast_list,
4719 .ndo_set_mac_address = qlge_set_mac_address,
4720 .ndo_validate_addr = eth_validate_addr,
4721 .ndo_tx_timeout = qlge_tx_timeout,
4722 .ndo_fix_features = qlge_fix_features,
4723 .ndo_set_features = qlge_set_features,
4724 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4725 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4726 };
4727
4728 static void ql_timer(unsigned long data)
4729 {
4730 struct ql_adapter *qdev = (struct ql_adapter *)data;
4731 u32 var = 0;
4732
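	/* Periodically read a status register so a dead PCI bus is noticed
	 * and EEH error handling can kick in; once the channel is reported
	 * offline, stop rearming the timer.
	 */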
4733 var = ql_read32(qdev, STS);
4734 if (pci_channel_offline(qdev->pdev)) {
4735 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4736 return;
4737 }
4738
4739 mod_timer(&qdev->timer, jiffies + (5*HZ));
4740 }
4741
4742 static int qlge_probe(struct pci_dev *pdev,
4743 const struct pci_device_id *pci_entry)
4744 {
4745 struct net_device *ndev = NULL;
4746 struct ql_adapter *qdev = NULL;
4747 static int cards_found = 0;
4748 int err = 0;
4749
4750 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4751 min(MAX_CPUS, netif_get_num_default_rss_queues()));
4752 if (!ndev)
4753 return -ENOMEM;
4754
4755 err = ql_init_device(pdev, ndev, cards_found);
4756 if (err < 0) {
4757 free_netdev(ndev);
4758 return err;
4759 }
4760
4761 qdev = netdev_priv(ndev);
4762 SET_NETDEV_DEV(ndev, &pdev->dev);
4763 ndev->hw_features = NETIF_F_SG |
4764 NETIF_F_IP_CSUM |
4765 NETIF_F_TSO |
4766 NETIF_F_TSO_ECN |
4767 NETIF_F_HW_VLAN_CTAG_TX |
4768 NETIF_F_HW_VLAN_CTAG_RX |
4769 NETIF_F_HW_VLAN_CTAG_FILTER |
4770 NETIF_F_RXCSUM;
4771 ndev->features = ndev->hw_features;
4772 ndev->vlan_features = ndev->hw_features;
4773 /* vlan gets same features (except vlan filter) */
4774 ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4775 NETIF_F_HW_VLAN_CTAG_TX |
4776 NETIF_F_HW_VLAN_CTAG_RX);
4777
4778 if (test_bit(QL_DMA64, &qdev->flags))
4779 ndev->features |= NETIF_F_HIGHDMA;
4780
4781 /*
4782 * Set up net_device structure.
4783 */
4784 ndev->tx_queue_len = qdev->tx_ring_size;
4785 ndev->irq = pdev->irq;
4786
4787 ndev->netdev_ops = &qlge_netdev_ops;
4788 ndev->ethtool_ops = &qlge_ethtool_ops;
4789 ndev->watchdog_timeo = 10 * HZ;
4790
4791 err = register_netdev(ndev);
4792 if (err) {
4793 dev_err(&pdev->dev, "net device registration failed.\n");
4794 ql_release_all(pdev);
4795 pci_disable_device(pdev);
4796 free_netdev(ndev);
4797 return err;
4798 }
4799 /* Start up the timer to trigger EEH if
4800 * the bus goes dead
4801 */
4802 init_timer_deferrable(&qdev->timer);
4803 qdev->timer.data = (unsigned long)qdev;
4804 qdev->timer.function = ql_timer;
4805 qdev->timer.expires = jiffies + (5*HZ);
4806 add_timer(&qdev->timer);
4807 ql_link_off(qdev);
4808 ql_display_dev_info(ndev);
4809 atomic_set(&qdev->lb_count, 0);
4810 cards_found++;
4811 return 0;
4812 }
4813
4814 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4815 {
4816 return qlge_send(skb, ndev);
4817 }
4818
4819 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4820 {
4821 return ql_clean_inbound_rx_ring(rx_ring, budget);
4822 }
4823
4824 static void qlge_remove(struct pci_dev *pdev)
4825 {
4826 struct net_device *ndev = pci_get_drvdata(pdev);
4827 struct ql_adapter *qdev = netdev_priv(ndev);
4828 del_timer_sync(&qdev->timer);
4829 ql_cancel_all_work_sync(qdev);
4830 unregister_netdev(ndev);
4831 ql_release_all(pdev);
4832 pci_disable_device(pdev);
4833 free_netdev(ndev);
4834 }
4835
4836 /* Clean up resources without touching hardware. */
4837 static void ql_eeh_close(struct net_device *ndev)
4838 {
4839 int i;
4840 struct ql_adapter *qdev = netdev_priv(ndev);
4841
4842 if (netif_carrier_ok(ndev)) {
4843 netif_carrier_off(ndev);
4844 netif_stop_queue(ndev);
4845 }
4846
4847 	/* The caller has already deleted the timer; cancel any outstanding work. */
4848 ql_cancel_all_work_sync(qdev);
4849
4850 for (i = 0; i < qdev->rss_ring_count; i++)
4851 netif_napi_del(&qdev->rx_ring[i].napi);
4852
4853 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4854 ql_tx_ring_clean(qdev);
4855 ql_free_rx_buffers(qdev);
4856 ql_release_adapter_resources(qdev);
4857 }
4858
4859 /*
4860 * This callback is called by the PCI subsystem whenever
4861 * a PCI bus error is detected.
4862 */
4863 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4864 enum pci_channel_state state)
4865 {
4866 struct net_device *ndev = pci_get_drvdata(pdev);
4867 struct ql_adapter *qdev = netdev_priv(ndev);
4868
4869 switch (state) {
4870 case pci_channel_io_normal:
4871 return PCI_ERS_RESULT_CAN_RECOVER;
4872 case pci_channel_io_frozen:
4873 netif_device_detach(ndev);
4874 del_timer_sync(&qdev->timer);
4875 if (netif_running(ndev))
4876 ql_eeh_close(ndev);
4877 pci_disable_device(pdev);
4878 return PCI_ERS_RESULT_NEED_RESET;
4879 case pci_channel_io_perm_failure:
4880 dev_err(&pdev->dev,
4881 "%s: pci_channel_io_perm_failure.\n", __func__);
4882 del_timer_sync(&qdev->timer);
4883 ql_eeh_close(ndev);
4884 set_bit(QL_EEH_FATAL, &qdev->flags);
4885 return PCI_ERS_RESULT_DISCONNECT;
4886 }
4887
4888 /* Request a slot reset. */
4889 return PCI_ERS_RESULT_NEED_RESET;
4890 }
4891
4892 /*
4893  * This callback is called after the PCI bus has been reset.
4894  * Basically, this tries to restart the card from scratch.
4895  * This is a shortened version of the device probe/discovery code;
4896  * it resembles the first half of the () routine.
4897 */
4898 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4899 {
4900 struct net_device *ndev = pci_get_drvdata(pdev);
4901 struct ql_adapter *qdev = netdev_priv(ndev);
4902
4903 pdev->error_state = pci_channel_io_normal;
4904
4905 pci_restore_state(pdev);
4906 if (pci_enable_device(pdev)) {
4907 netif_err(qdev, ifup, qdev->ndev,
4908 "Cannot re-enable PCI device after reset.\n");
4909 return PCI_ERS_RESULT_DISCONNECT;
4910 }
4911 pci_set_master(pdev);
4912
4913 if (ql_adapter_reset(qdev)) {
4914 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4915 set_bit(QL_EEH_FATAL, &qdev->flags);
4916 return PCI_ERS_RESULT_DISCONNECT;
4917 }
4918
4919 return PCI_ERS_RESULT_RECOVERED;
4920 }
4921
4922 static void qlge_io_resume(struct pci_dev *pdev)
4923 {
4924 struct net_device *ndev = pci_get_drvdata(pdev);
4925 struct ql_adapter *qdev = netdev_priv(ndev);
4926 int err = 0;
4927
4928 if (netif_running(ndev)) {
4929 err = qlge_open(ndev);
4930 if (err) {
4931 netif_err(qdev, ifup, qdev->ndev,
4932 "Device initialization failed after reset.\n");
4933 return;
4934 }
4935 } else {
4936 netif_err(qdev, ifup, qdev->ndev,
4937 "Device was not running prior to EEH.\n");
4938 }
4939 mod_timer(&qdev->timer, jiffies + (5*HZ));
4940 netif_device_attach(ndev);
4941 }
4942
4943 static const struct pci_error_handlers qlge_err_handler = {
4944 .error_detected = qlge_io_error_detected,
4945 .slot_reset = qlge_io_slot_reset,
4946 .resume = qlge_io_resume,
4947 };
4948
4949 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4950 {
4951 struct net_device *ndev = pci_get_drvdata(pdev);
4952 struct ql_adapter *qdev = netdev_priv(ndev);
4953 int err;
4954
4955 netif_device_detach(ndev);
4956 del_timer_sync(&qdev->timer);
4957
4958 if (netif_running(ndev)) {
4959 err = ql_adapter_down(qdev);
4960 		if (err)
4961 			return err;
4962 }
4963
4964 ql_wol(qdev);
4965 err = pci_save_state(pdev);
4966 if (err)
4967 return err;
4968
4969 pci_disable_device(pdev);
4970
4971 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4972
4973 return 0;
4974 }
4975
4976 #ifdef CONFIG_PM
4977 static int qlge_resume(struct pci_dev *pdev)
4978 {
4979 struct net_device *ndev = pci_get_drvdata(pdev);
4980 struct ql_adapter *qdev = netdev_priv(ndev);
4981 int err;
4982
4983 pci_set_power_state(pdev, PCI_D0);
4984 pci_restore_state(pdev);
4985 err = pci_enable_device(pdev);
4986 if (err) {
4987 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4988 return err;
4989 }
4990 pci_set_master(pdev);
4991
4992 pci_enable_wake(pdev, PCI_D3hot, 0);
4993 pci_enable_wake(pdev, PCI_D3cold, 0);
4994
4995 if (netif_running(ndev)) {
4996 err = ql_adapter_up(qdev);
4997 if (err)
4998 return err;
4999 }
5000
5001 mod_timer(&qdev->timer, jiffies + (5*HZ));
5002 netif_device_attach(ndev);
5003
5004 return 0;
5005 }
5006 #endif /* CONFIG_PM */
5007
5008 static void qlge_shutdown(struct pci_dev *pdev)
5009 {
5010 qlge_suspend(pdev, PMSG_SUSPEND);
5011 }
5012
5013 static struct pci_driver qlge_driver = {
5014 .name = DRV_NAME,
5015 .id_table = qlge_pci_tbl,
5016 .probe = qlge_probe,
5017 .remove = qlge_remove,
5018 #ifdef CONFIG_PM
5019 .suspend = qlge_suspend,
5020 .resume = qlge_resume,
5021 #endif
5022 .shutdown = qlge_shutdown,
5023 .err_handler = &qlge_err_handler
5024 };
5025
5026 module_pci_driver(qlge_driver);
5027