1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2015 Cavium, Inc.
4 */
5
6 #include <linux/module.h>
7 #include <linux/interrupt.h>
8 #include <linux/pci.h>
9 #include <linux/netdevice.h>
10 #include <linux/if_vlan.h>
11 #include <linux/etherdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/log2.h>
14 #include <linux/prefetch.h>
15 #include <linux/irq.h>
16 #include <linux/iommu.h>
17 #include <linux/bpf.h>
18 #include <linux/bpf_trace.h>
19 #include <linux/filter.h>
20 #include <linux/net_tstamp.h>
21 #include <linux/workqueue.h>
22
23 #include "nic_reg.h"
24 #include "nic.h"
25 #include "nicvf_queues.h"
26 #include "thunder_bgx.h"
27 #include "../common/cavium_ptp.h"
28
29 #define DRV_NAME "nicvf"
30 #define DRV_VERSION "1.0"
31
32 /* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
33 * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
34 * this value, keeping headroom for the 14 byte Ethernet header and two
35 * VLAN tags (for QinQ)
36 */
37 #define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
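/* For reference, with the standard values ETH_HLEN = 14 and VLAN_HLEN = 4
 * this works out to 1530 - 14 - 8 = 1508 bytes, i.e. roughly the usual
 * 1500 byte MTU plus a little slack.
 */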
38
39 /* Supported devices */
40 static const struct pci_device_id nicvf_id_table[] = {
41 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
42 PCI_DEVICE_ID_THUNDER_NIC_VF,
43 PCI_VENDOR_ID_CAVIUM,
44 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
45 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
46 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
47 PCI_VENDOR_ID_CAVIUM,
48 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
49 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
50 PCI_DEVICE_ID_THUNDER_NIC_VF,
51 PCI_VENDOR_ID_CAVIUM,
52 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
53 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
54 PCI_DEVICE_ID_THUNDER_NIC_VF,
55 PCI_VENDOR_ID_CAVIUM,
56 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
57 { 0, } /* end of table */
58 };
59
60 MODULE_AUTHOR("Sunil Goutham");
61 MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
62 MODULE_LICENSE("GPL v2");
63 MODULE_VERSION(DRV_VERSION);
64 MODULE_DEVICE_TABLE(pci, nicvf_id_table);
65
66 static int debug = 0x00;
67 module_param(debug, int, 0644);
68 MODULE_PARM_DESC(debug, "Debug message level bitmap");
69
70 static int cpi_alg = CPI_ALG_NONE;
71 module_param(cpi_alg, int, 0444);
72 MODULE_PARM_DESC(cpi_alg,
73 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
74
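/* Map a queue index local to this Qset to the netdev (stack) queue index.
 * For example, assuming MAX_CMP_QUEUES_PER_QS is 8, queue 0 of the first
 * secondary Qset (sqs_id 0) maps to netdev queue 8, queue 1 to 9, and so
 * on; on the primary VF the index is used as-is.
 */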
75 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
76 {
77 if (nic->sqs_mode)
78 return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
79 else
80 return qidx;
81 }
82
83 /* The Cavium ThunderX network controller can *only* be found in SoCs
84 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
85 * registers on this platform are implicitly strongly ordered with respect
86 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
87 * with no memory barriers in this driver. The readq()/writeq() functions add
88  * explicit ordering operations which in this case are redundant, and only
89 * add overhead.
90 */
91
92 /* Register read/write APIs */
93 void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
94 {
95 writeq_relaxed(val, nic->reg_base + offset);
96 }
97
98 u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
99 {
100 return readq_relaxed(nic->reg_base + offset);
101 }
102
103 void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
104 u64 qidx, u64 val)
105 {
106 void __iomem *addr = nic->reg_base + offset;
107
108 writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
109 }
110
111 u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
112 {
113 void __iomem *addr = nic->reg_base + offset;
114
115 return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
116 }
117
118 /* VF -> PF mailbox communication */
119 static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
120 {
121 u64 *msg = (u64 *)mbx;
122
123 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
124 nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
125 }
126
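/* Post a mailbox message to the PF and poll (via usleep_range()) for an
 * ACK/NACK, serialized on nic->rx_mode_mtx. Typical caller pattern used
 * throughout this file (sketch):
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.msg.msg = NIC_MBOX_MSG_READY;
 *	if (nicvf_send_msg_to_pf(nic, &mbx))
 *		... PF NACKed or did not reply, handle the error ...
 *
 * Returns 0 on ACK, -EINVAL on NACK and -EBUSY on timeout.
 */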
127 int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
128 {
129 unsigned long timeout;
130 int ret = 0;
131
132 mutex_lock(&nic->rx_mode_mtx);
133
134 nic->pf_acked = false;
135 nic->pf_nacked = false;
136
137 nicvf_write_to_mbx(nic, mbx);
138
139 timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);
140 /* Wait for previous message to be acked, timeout 2sec */
141 while (!nic->pf_acked) {
142 if (nic->pf_nacked) {
143 netdev_err(nic->netdev,
144 "PF NACK to mbox msg 0x%02x from VF%d\n",
145 (mbx->msg.msg & 0xFF), nic->vf_id);
146 ret = -EINVAL;
147 break;
148 }
149 usleep_range(8000, 10000);
150 if (nic->pf_acked)
151 break;
152 if (time_after(jiffies, timeout)) {
153 netdev_err(nic->netdev,
154 "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
155 (mbx->msg.msg & 0xFF), nic->vf_id);
156 ret = -EBUSY;
157 break;
158 }
159 }
160 mutex_unlock(&nic->rx_mode_mtx);
161 return ret;
162 }
163
164 /* Checks if VF is able to communicate with PF
165  * and also gets the VNIC number this VF is associated with.
166 */
167 static int nicvf_check_pf_ready(struct nicvf *nic)
168 {
169 union nic_mbx mbx = {};
170
171 mbx.msg.msg = NIC_MBOX_MSG_READY;
172 if (nicvf_send_msg_to_pf(nic, &mbx)) {
173 netdev_err(nic->netdev,
174 "PF didn't respond to READY msg\n");
175 return 0;
176 }
177
178 return 1;
179 }
180
181 static void nicvf_send_cfg_done(struct nicvf *nic)
182 {
183 union nic_mbx mbx = {};
184
185 mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
186 if (nicvf_send_msg_to_pf(nic, &mbx)) {
187 netdev_err(nic->netdev,
188 "PF didn't respond to CFG DONE msg\n");
189 }
190 }
191
192 static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
193 {
194 if (bgx->rx)
195 nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
196 else
197 nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
198 }
199
200 static void nicvf_handle_mbx_intr(struct nicvf *nic)
201 {
202 union nic_mbx mbx = {};
203 u64 *mbx_data;
204 u64 mbx_addr;
205 int i;
206
207 mbx_addr = NIC_VF_PF_MAILBOX_0_1;
208 mbx_data = (u64 *)&mbx;
209
210 for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
211 *mbx_data = nicvf_reg_read(nic, mbx_addr);
212 mbx_data++;
213 mbx_addr += sizeof(u64);
214 }
215
216 netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
217 switch (mbx.msg.msg) {
218 case NIC_MBOX_MSG_READY:
219 nic->pf_acked = true;
220 nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
221 nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
222 nic->node = mbx.nic_cfg.node_id;
223 if (!nic->set_mac_pending)
224 ether_addr_copy(nic->netdev->dev_addr,
225 mbx.nic_cfg.mac_addr);
226 nic->sqs_mode = mbx.nic_cfg.sqs_mode;
227 nic->loopback_supported = mbx.nic_cfg.loopback_supported;
228 nic->link_up = false;
229 nic->duplex = 0;
230 nic->speed = 0;
231 break;
232 case NIC_MBOX_MSG_ACK:
233 nic->pf_acked = true;
234 break;
235 case NIC_MBOX_MSG_NACK:
236 nic->pf_nacked = true;
237 break;
238 case NIC_MBOX_MSG_RSS_SIZE:
239 nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
240 nic->pf_acked = true;
241 break;
242 case NIC_MBOX_MSG_BGX_STATS:
243 nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
244 nic->pf_acked = true;
245 break;
246 case NIC_MBOX_MSG_BGX_LINK_CHANGE:
247 nic->pf_acked = true;
248 if (nic->link_up != mbx.link_status.link_up) {
249 nic->link_up = mbx.link_status.link_up;
250 nic->duplex = mbx.link_status.duplex;
251 nic->speed = mbx.link_status.speed;
252 nic->mac_type = mbx.link_status.mac_type;
253 if (nic->link_up) {
254 netdev_info(nic->netdev,
255 "Link is Up %d Mbps %s duplex\n",
256 nic->speed,
257 nic->duplex == DUPLEX_FULL ?
258 "Full" : "Half");
259 netif_carrier_on(nic->netdev);
260 netif_tx_start_all_queues(nic->netdev);
261 } else {
262 netdev_info(nic->netdev, "Link is Down\n");
263 netif_carrier_off(nic->netdev);
264 netif_tx_stop_all_queues(nic->netdev);
265 }
266 }
267 break;
268 case NIC_MBOX_MSG_ALLOC_SQS:
269 nic->sqs_count = mbx.sqs_alloc.qs_count;
270 nic->pf_acked = true;
271 break;
272 case NIC_MBOX_MSG_SNICVF_PTR:
273 /* Primary VF: make note of secondary VF's pointer
274 * to be used while packet transmission.
275 */
276 nic->snicvf[mbx.nicvf.sqs_id] =
277 (struct nicvf *)mbx.nicvf.nicvf;
278 nic->pf_acked = true;
279 break;
280 case NIC_MBOX_MSG_PNICVF_PTR:
281 /* Secondary VF/Qset: make note of primary VF's pointer
282 * to be used while packet reception, to handover packet
283 * to primary VF's netdev.
284 */
285 nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
286 nic->pf_acked = true;
287 break;
288 case NIC_MBOX_MSG_PFC:
289 nic->pfc.autoneg = mbx.pfc.autoneg;
290 nic->pfc.fc_rx = mbx.pfc.fc_rx;
291 nic->pfc.fc_tx = mbx.pfc.fc_tx;
292 nic->pf_acked = true;
293 break;
294 default:
295 netdev_err(nic->netdev,
296 "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
297 break;
298 }
299 nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
300 }
301
302 static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
303 {
304 union nic_mbx mbx = {};
305
306 mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
307 mbx.mac.vf_id = nic->vf_id;
308 ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
309
310 return nicvf_send_msg_to_pf(nic, &mbx);
311 }
312
313 static void nicvf_config_cpi(struct nicvf *nic)
314 {
315 union nic_mbx mbx = {};
316
317 mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
318 mbx.cpi_cfg.vf_id = nic->vf_id;
319 mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
320 mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
321
322 nicvf_send_msg_to_pf(nic, &mbx);
323 }
324
325 static void nicvf_get_rss_size(struct nicvf *nic)
326 {
327 union nic_mbx mbx = {};
328
329 mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
330 mbx.rss_size.vf_id = nic->vf_id;
331 nicvf_send_msg_to_pf(nic, &mbx);
332 }
333
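/* Send the RSS indirection table to the PF. The table may not fit in a
 * single mailbox message, so it is sent in chunks of
 * RSS_IND_TBL_LEN_PER_MBX_MSG entries: the first chunk uses
 * NIC_MBOX_MSG_RSS_CFG and the remaining chunks NIC_MBOX_MSG_RSS_CFG_CONT.
 * For illustration (values assumed), a 64-entry table with 8 entries per
 * message would be sent as 8 messages.
 */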
334 void nicvf_config_rss(struct nicvf *nic)
335 {
336 union nic_mbx mbx = {};
337 struct nicvf_rss_info *rss = &nic->rss_info;
338 int ind_tbl_len = rss->rss_size;
339 int i, nextq = 0;
340
341 mbx.rss_cfg.vf_id = nic->vf_id;
342 mbx.rss_cfg.hash_bits = rss->hash_bits;
343 while (ind_tbl_len) {
344 mbx.rss_cfg.tbl_offset = nextq;
345 mbx.rss_cfg.tbl_len = min(ind_tbl_len,
346 RSS_IND_TBL_LEN_PER_MBX_MSG);
347 mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
348 NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
349
350 for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
351 mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
352
353 nicvf_send_msg_to_pf(nic, &mbx);
354
355 ind_tbl_len -= mbx.rss_cfg.tbl_len;
356 }
357 }
358
359 void nicvf_set_rss_key(struct nicvf *nic)
360 {
361 struct nicvf_rss_info *rss = &nic->rss_info;
362 u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
363 int idx;
364
365 for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
366 nicvf_reg_write(nic, key_addr, rss->key[idx]);
367 key_addr += sizeof(u64);
368 }
369 }
370
371 static int nicvf_rss_init(struct nicvf *nic)
372 {
373 struct nicvf_rss_info *rss = &nic->rss_info;
374 int idx;
375
376 nicvf_get_rss_size(nic);
377
378 if (cpi_alg != CPI_ALG_NONE) {
379 rss->enable = false;
380 rss->hash_bits = 0;
381 return 0;
382 }
383
384 rss->enable = true;
385
386 netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
387 nicvf_set_rss_key(nic);
388
389 rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
390 nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
391
392 rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
393
394 for (idx = 0; idx < rss->rss_size; idx++)
395 rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
396 nic->rx_queues);
397 nicvf_config_rss(nic);
398 return 1;
399 }
400
401 /* Request PF to allocate additional Qsets */
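/* Example of how queues are spread (assuming MAX_RCV_QUEUES_PER_QS and
 * MAX_SND_QUEUES_PER_QS are both 8): with 12 Rx queues requested, the
 * primary Qset keeps 8 and the first secondary Qset gets the remaining 4.
 * Tx queues (including XDP Tx queues) are distributed the same way.
 */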
402 static void nicvf_request_sqs(struct nicvf *nic)
403 {
404 union nic_mbx mbx = {};
405 int sqs;
406 int sqs_count = nic->sqs_count;
407 int rx_queues = 0, tx_queues = 0;
408
409 /* Only primary VF should request */
410 if (nic->sqs_mode || !nic->sqs_count)
411 return;
412
413 mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
414 mbx.sqs_alloc.vf_id = nic->vf_id;
415 mbx.sqs_alloc.qs_count = nic->sqs_count;
416 if (nicvf_send_msg_to_pf(nic, &mbx)) {
417 /* No response from PF */
418 nic->sqs_count = 0;
419 return;
420 }
421
422 /* Return if no Secondary Qsets available */
423 if (!nic->sqs_count)
424 return;
425
426 if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
427 rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
428
429 tx_queues = nic->tx_queues + nic->xdp_tx_queues;
430 if (tx_queues > MAX_SND_QUEUES_PER_QS)
431 tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;
432
433 /* Set no of Rx/Tx queues in each of the SQsets */
434 for (sqs = 0; sqs < nic->sqs_count; sqs++) {
435 mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
436 mbx.nicvf.vf_id = nic->vf_id;
437 mbx.nicvf.sqs_id = sqs;
438 nicvf_send_msg_to_pf(nic, &mbx);
439
440 nic->snicvf[sqs]->sqs_id = sqs;
441 if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
442 nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
443 rx_queues -= MAX_RCV_QUEUES_PER_QS;
444 } else {
445 nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
446 rx_queues = 0;
447 }
448
449 if (tx_queues > MAX_SND_QUEUES_PER_QS) {
450 nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
451 tx_queues -= MAX_SND_QUEUES_PER_QS;
452 } else {
453 nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
454 tx_queues = 0;
455 }
456
457 nic->snicvf[sqs]->qs->cq_cnt =
458 max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
459
460 /* Initialize secondary Qset's queues and its interrupts */
461 nicvf_open(nic->snicvf[sqs]->netdev);
462 }
463
464 /* Update stack with actual Rx/Tx queue count allocated */
465 if (sqs_count != nic->sqs_count)
466 nicvf_set_real_num_queues(nic->netdev,
467 nic->tx_queues, nic->rx_queues);
468 }
469
470 /* Send this Qset's nicvf pointer to PF.
471  * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
472 * so that packets received by these Qsets can use primary VF's netdev
473 */
474 static void nicvf_send_vf_struct(struct nicvf *nic)
475 {
476 union nic_mbx mbx = {};
477
478 mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
479 mbx.nicvf.sqs_mode = nic->sqs_mode;
480 mbx.nicvf.nicvf = (u64)nic;
481 nicvf_send_msg_to_pf(nic, &mbx);
482 }
483
484 static void nicvf_get_primary_vf_struct(struct nicvf *nic)
485 {
486 union nic_mbx mbx = {};
487
488 mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
489 nicvf_send_msg_to_pf(nic, &mbx);
490 }
491
492 int nicvf_set_real_num_queues(struct net_device *netdev,
493 int tx_queues, int rx_queues)
494 {
495 int err = 0;
496
497 err = netif_set_real_num_tx_queues(netdev, tx_queues);
498 if (err) {
499 netdev_err(netdev,
500 "Failed to set no of Tx queues: %d\n", tx_queues);
501 return err;
502 }
503
504 err = netif_set_real_num_rx_queues(netdev, rx_queues);
505 if (err)
506 netdev_err(netdev,
507 "Failed to set no of Rx queues: %d\n", rx_queues);
508 return err;
509 }
510
511 static int nicvf_init_resources(struct nicvf *nic)
512 {
513 int err;
514
515 /* Enable Qset */
516 nicvf_qset_config(nic, true);
517
518 /* Initialize queues and HW for data transfer */
519 err = nicvf_config_data_transfer(nic, true);
520 if (err) {
521 netdev_err(nic->netdev,
522 "Failed to alloc/config VF's QSet resources\n");
523 return err;
524 }
525
526 return 0;
527 }
528
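/* Run the XDP program on a received buffer.
 *
 * Returns true when the buffer was consumed here (XDP_TX, XDP_DROP,
 * XDP_ABORTED or an unknown action) and the caller must not process it
 * further; returns false when the packet should continue up the stack
 * (XDP_PASS, in which case *skb is built here) or when the IOVA to
 * physical address translation fails.
 */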
529 static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
530 struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
531 struct rcv_queue *rq, struct sk_buff **skb)
532 {
533 struct xdp_buff xdp;
534 struct page *page;
535 u32 action;
536 u16 len, offset = 0;
537 u64 dma_addr, cpu_addr;
538 void *orig_data;
539
540 /* Retrieve packet buffer's DMA address and length */
541 len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
542 dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));
543
544 cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
545 if (!cpu_addr)
546 return false;
547 cpu_addr = (u64)phys_to_virt(cpu_addr);
548 page = virt_to_page((void *)cpu_addr);
549
550 xdp.data_hard_start = page_address(page);
551 xdp.data = (void *)cpu_addr;
552 xdp_set_data_meta_invalid(&xdp);
553 xdp.data_end = xdp.data + len;
554 xdp.rxq = &rq->xdp_rxq;
555 xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
556 orig_data = xdp.data;
557
558 rcu_read_lock();
559 action = bpf_prog_run_xdp(prog, &xdp);
560 rcu_read_unlock();
561
562 len = xdp.data_end - xdp.data;
563 /* Check if XDP program has changed headers */
564 if (orig_data != xdp.data) {
565 offset = orig_data - xdp.data;
566 dma_addr -= offset;
567 }
568
569 switch (action) {
570 case XDP_PASS:
571 /* Check if it's a recycled page, if not
572 * unmap the DMA mapping.
573 *
574 * Recycled page holds an extra reference.
575 */
576 if (page_ref_count(page) == 1) {
577 dma_addr &= PAGE_MASK;
578 dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
579 RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
580 DMA_FROM_DEVICE,
581 DMA_ATTR_SKIP_CPU_SYNC);
582 }
583
584 /* Build SKB and pass on packet to network stack */
585 *skb = build_skb(xdp.data,
586 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
587 if (!*skb)
588 put_page(page);
589 else
590 skb_put(*skb, len);
591 return false;
592 case XDP_TX:
593 nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
594 return true;
595 default:
596 bpf_warn_invalid_xdp_action(action);
597 fallthrough;
598 case XDP_ABORTED:
599 trace_xdp_exception(nic->netdev, prog, action);
600 fallthrough;
601 case XDP_DROP:
602 /* Check if it's a recycled page, if not
603 * unmap the DMA mapping.
604 *
605 * Recycled page holds an extra reference.
606 */
607 if (page_ref_count(page) == 1) {
608 dma_addr &= PAGE_MASK;
609 dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
610 RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
611 DMA_FROM_DEVICE,
612 DMA_ATTR_SKIP_CPU_SYNC);
613 }
614 put_page(page);
615 return true;
616 }
617 return false;
618 }
619
620 static void nicvf_snd_ptp_handler(struct net_device *netdev,
621 struct cqe_send_t *cqe_tx)
622 {
623 struct nicvf *nic = netdev_priv(netdev);
624 struct skb_shared_hwtstamps ts;
625 u64 ns;
626
627 nic = nic->pnicvf;
628
629 /* Sync for 'ptp_skb' */
630 smp_rmb();
631
632 /* New timestamp request can be queued now */
633 atomic_set(&nic->tx_ptp_skbs, 0);
634
635 /* Check for timestamp requested skb */
636 if (!nic->ptp_skb)
637 return;
638
639 	/* Check if timestamping timed out (the timeout is set to 10us) */
640 if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
641 cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
642 goto no_tstamp;
643
644 /* Get the timestamp */
645 memset(&ts, 0, sizeof(ts));
646 ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
647 ts.hwtstamp = ns_to_ktime(ns);
648 skb_tstamp_tx(nic->ptp_skb, &ts);
649
650 no_tstamp:
651 /* Free the original skb */
652 dev_kfree_skb_any(nic->ptp_skb);
653 nic->ptp_skb = NULL;
654 /* Sync 'ptp_skb' */
655 smp_wmb();
656 }
657
658 static void nicvf_snd_pkt_handler(struct net_device *netdev,
659 struct cqe_send_t *cqe_tx,
660 int budget, int *subdesc_cnt,
661 unsigned int *tx_pkts, unsigned int *tx_bytes)
662 {
663 struct sk_buff *skb = NULL;
664 struct page *page;
665 struct nicvf *nic = netdev_priv(netdev);
666 struct snd_queue *sq;
667 struct sq_hdr_subdesc *hdr;
668 struct sq_hdr_subdesc *tso_sqe;
669
670 sq = &nic->qs->sq[cqe_tx->sq_idx];
671
672 hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
673 if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
674 return;
675
676 /* Check for errors */
677 if (cqe_tx->send_status)
678 nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);
679
680 	/* Is this an XDP-designated Tx queue? */
681 if (sq->is_xdp) {
682 page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
683 /* Check if it's recycled page or else unmap DMA mapping */
684 if (page && (page_ref_count(page) == 1))
685 nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
686 hdr->subdesc_cnt);
687
688 /* Release page reference for recycling */
689 if (page)
690 put_page(page);
691 sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
692 *subdesc_cnt += hdr->subdesc_cnt + 1;
693 return;
694 }
695
696 skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
697 if (skb) {
698 /* Check for dummy descriptor used for HW TSO offload on 88xx */
699 if (hdr->dont_send) {
700 /* Get actual TSO descriptors and free them */
701 tso_sqe =
702 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
703 nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
704 tso_sqe->subdesc_cnt);
705 *subdesc_cnt += tso_sqe->subdesc_cnt + 1;
706 } else {
707 nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
708 hdr->subdesc_cnt);
709 }
710 *subdesc_cnt += hdr->subdesc_cnt + 1;
711 prefetch(skb);
712 (*tx_pkts)++;
713 *tx_bytes += skb->len;
714 /* If timestamp is requested for this skb, don't free it */
715 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
716 !nic->pnicvf->ptp_skb)
717 nic->pnicvf->ptp_skb = skb;
718 else
719 napi_consume_skb(skb, budget);
720 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
721 } else {
722 /* In case of SW TSO on 88xx, only last segment will have
723 * a SKB attached, so just free SQEs here.
724 */
725 if (!nic->hw_tso)
726 *subdesc_cnt += hdr->subdesc_cnt + 1;
727 }
728 }
729
730 static inline void nicvf_set_rxhash(struct net_device *netdev,
731 struct cqe_rx_t *cqe_rx,
732 struct sk_buff *skb)
733 {
734 u8 hash_type;
735 u32 hash;
736
737 if (!(netdev->features & NETIF_F_RXHASH))
738 return;
739
740 switch (cqe_rx->rss_alg) {
741 case RSS_ALG_TCP_IP:
742 case RSS_ALG_UDP_IP:
743 hash_type = PKT_HASH_TYPE_L4;
744 hash = cqe_rx->rss_tag;
745 break;
746 case RSS_ALG_IP:
747 hash_type = PKT_HASH_TYPE_L3;
748 hash = cqe_rx->rss_tag;
749 break;
750 default:
751 hash_type = PKT_HASH_TYPE_NONE;
752 hash = 0;
753 }
754
755 skb_set_hash(skb, hash, hash_type);
756 }
757
758 static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
759 {
760 u64 ns;
761
762 if (!nic->ptp_clock || !nic->hw_rx_tstamp)
763 return;
764
765 	/* The first 8 bytes are the timestamp */
766 ns = cavium_ptp_tstamp2time(nic->ptp_clock,
767 be64_to_cpu(*(__be64 *)skb->data));
768 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
769
770 __skb_pull(skb, 8);
771 }
772
773 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
774 struct napi_struct *napi,
775 struct cqe_rx_t *cqe_rx,
776 struct snd_queue *sq, struct rcv_queue *rq)
777 {
778 struct sk_buff *skb = NULL;
779 struct nicvf *nic = netdev_priv(netdev);
780 struct nicvf *snic = nic;
781 int err = 0;
782 int rq_idx;
783
784 rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
785
786 if (nic->sqs_mode) {
787 /* Use primary VF's 'nicvf' struct */
788 nic = nic->pnicvf;
789 netdev = nic->netdev;
790 }
791
792 /* Check for errors */
793 if (cqe_rx->err_level || cqe_rx->err_opcode) {
794 err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
795 if (err && !cqe_rx->rb_cnt)
796 return;
797 }
798
799 /* For XDP, ignore pkts spanning multiple pages */
800 if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
801 /* Packet consumed by XDP */
802 if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
803 return;
804 } else {
805 skb = nicvf_get_rcv_skb(snic, cqe_rx,
806 nic->xdp_prog ? true : false);
807 }
808
809 if (!skb)
810 return;
811
812 if (netif_msg_pktdata(nic)) {
813 netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
814 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
815 skb->data, skb->len, true);
816 }
817
818 /* If error packet, drop it here */
819 if (err) {
820 dev_kfree_skb_any(skb);
821 return;
822 }
823
824 nicvf_set_rxtstamp(nic, skb);
825 nicvf_set_rxhash(netdev, cqe_rx, skb);
826
827 skb_record_rx_queue(skb, rq_idx);
828 if (netdev->hw_features & NETIF_F_RXCSUM) {
829 /* HW by default verifies TCP/UDP/SCTP checksums */
830 skb->ip_summed = CHECKSUM_UNNECESSARY;
831 } else {
832 skb_checksum_none_assert(skb);
833 }
834
835 skb->protocol = eth_type_trans(skb, netdev);
836
837 /* Check for stripped VLAN */
838 if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
839 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
840 ntohs((__force __be16)cqe_rx->vlan_tci));
841
842 if (napi && (netdev->features & NETIF_F_GRO))
843 napi_gro_receive(napi, skb);
844 else
845 netif_receive_skb(skb);
846 }
847
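/* Process completions on a CQ: Rx CQEs are counted against the NAPI budget,
 * Tx (send) CQEs are always drained, and freed SQ descriptors are returned
 * in one go once the loop finishes. Returns the number of Rx packets
 * processed so the NAPI poll routine can decide whether to re-arm the
 * interrupt.
 */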
848 static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
849 struct napi_struct *napi, int budget)
850 {
851 int processed_cqe, work_done = 0, tx_done = 0;
852 int cqe_count, cqe_head;
853 int subdesc_cnt = 0;
854 struct nicvf *nic = netdev_priv(netdev);
855 struct queue_set *qs = nic->qs;
856 struct cmp_queue *cq = &qs->cq[cq_idx];
857 struct cqe_rx_t *cq_desc;
858 struct netdev_queue *txq;
859 struct snd_queue *sq = &qs->sq[cq_idx];
860 struct rcv_queue *rq = &qs->rq[cq_idx];
861 unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
862
863 spin_lock_bh(&cq->lock);
864 loop:
865 processed_cqe = 0;
866 /* Get no of valid CQ entries to process */
867 cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
868 cqe_count &= CQ_CQE_COUNT;
869 if (!cqe_count)
870 goto done;
871
872 /* Get head of the valid CQ entries */
873 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
874 cqe_head &= 0xFFFF;
875
876 while (processed_cqe < cqe_count) {
877 /* Get the CQ descriptor */
878 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
879 cqe_head++;
880 cqe_head &= (cq->dmem.q_len - 1);
881 /* Initiate prefetch for next descriptor */
882 prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
883
884 if ((work_done >= budget) && napi &&
885 (cq_desc->cqe_type != CQE_TYPE_SEND)) {
886 break;
887 }
888
889 switch (cq_desc->cqe_type) {
890 case CQE_TYPE_RX:
891 nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
892 work_done++;
893 break;
894 case CQE_TYPE_SEND:
895 nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
896 budget, &subdesc_cnt,
897 &tx_pkts, &tx_bytes);
898 tx_done++;
899 break;
900 case CQE_TYPE_SEND_PTP:
901 nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
902 break;
903 case CQE_TYPE_INVALID:
904 case CQE_TYPE_RX_SPLIT:
905 case CQE_TYPE_RX_TCP:
906 /* Ignore for now */
907 break;
908 }
909 processed_cqe++;
910 }
911
912 /* Ring doorbell to inform H/W to reuse processed CQEs */
913 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
914 cq_idx, processed_cqe);
915
916 if ((work_done < budget) && napi)
917 goto loop;
918
919 done:
920 /* Update SQ's descriptor free count */
921 if (subdesc_cnt)
922 nicvf_put_sq_desc(sq, subdesc_cnt);
923
924 txq_idx = nicvf_netdev_qidx(nic, cq_idx);
925 /* Handle XDP TX queues */
926 if (nic->pnicvf->xdp_prog) {
927 if (txq_idx < nic->pnicvf->xdp_tx_queues) {
928 nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
929 goto out;
930 }
931 nic = nic->pnicvf;
932 txq_idx -= nic->pnicvf->xdp_tx_queues;
933 }
934
935 	/* Wake up TXQ if it was stopped earlier due to SQ full */
936 if (tx_done ||
937 (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
938 netdev = nic->pnicvf->netdev;
939 txq = netdev_get_tx_queue(netdev, txq_idx);
940 if (tx_pkts)
941 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
942
943 /* To read updated queue and carrier status */
944 smp_mb();
945 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
946 netif_tx_wake_queue(txq);
947 nic = nic->pnicvf;
948 this_cpu_inc(nic->drv_stats->txq_wake);
949 netif_warn(nic, tx_err, netdev,
950 "Transmit queue wakeup SQ%d\n", txq_idx);
951 }
952 }
953
954 out:
955 spin_unlock_bh(&cq->lock);
956 return work_done;
957 }
958
959 static int nicvf_poll(struct napi_struct *napi, int budget)
960 {
961 u64 cq_head;
962 int work_done = 0;
963 struct net_device *netdev = napi->dev;
964 struct nicvf *nic = netdev_priv(netdev);
965 struct nicvf_cq_poll *cq;
966
967 cq = container_of(napi, struct nicvf_cq_poll, napi);
968 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
969
970 if (work_done < budget) {
971 /* Slow packet rate, exit polling */
972 napi_complete_done(napi, work_done);
973 /* Re-enable interrupts */
974 cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
975 cq->cq_idx);
976 nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
977 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
978 cq->cq_idx, cq_head);
979 nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
980 }
981 return work_done;
982 }
983
984 /* Qset error interrupt handler
985 *
986 * As of now only CQ errors are handled
987 */
988 static void nicvf_handle_qs_err(struct tasklet_struct *t)
989 {
990 struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
991 struct queue_set *qs = nic->qs;
992 int qidx;
993 u64 status;
994
995 netif_tx_disable(nic->netdev);
996
997 /* Check if it is CQ err */
998 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
999 status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
1000 qidx);
1001 if (!(status & CQ_ERR_MASK))
1002 continue;
1003 /* Process already queued CQEs and reconfig CQ */
1004 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
1005 nicvf_sq_disable(nic, qidx);
1006 nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
1007 nicvf_cmp_queue_config(nic, qs, qidx, true);
1008 nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
1009 nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
1010
1011 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
1012 }
1013
1014 netif_tx_start_all_queues(nic->netdev);
1015 /* Re-enable Qset error interrupt */
1016 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
1017 }
1018
1019 static void nicvf_dump_intr_status(struct nicvf *nic)
1020 {
1021 netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
1022 nicvf_reg_read(nic, NIC_VF_INT));
1023 }
1024
1025 static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
1026 {
1027 struct nicvf *nic = (struct nicvf *)nicvf_irq;
1028 u64 intr;
1029
1030 nicvf_dump_intr_status(nic);
1031
1032 intr = nicvf_reg_read(nic, NIC_VF_INT);
1033 /* Check for spurious interrupt */
1034 if (!(intr & NICVF_INTR_MBOX_MASK))
1035 return IRQ_HANDLED;
1036
1037 nicvf_handle_mbx_intr(nic);
1038
1039 return IRQ_HANDLED;
1040 }
1041
1042 static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
1043 {
1044 struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
1045 struct nicvf *nic = cq_poll->nicvf;
1046 int qidx = cq_poll->cq_idx;
1047
1048 nicvf_dump_intr_status(nic);
1049
1050 /* Disable interrupts */
1051 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
1052
1053 /* Schedule NAPI */
1054 napi_schedule_irqoff(&cq_poll->napi);
1055
1056 /* Clear interrupt */
1057 nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
1058
1059 return IRQ_HANDLED;
1060 }
1061
1062 static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
1063 {
1064 struct nicvf *nic = (struct nicvf *)nicvf_irq;
1065 u8 qidx;
1066
1067
1068 nicvf_dump_intr_status(nic);
1069
1070 /* Disable RBDR interrupt and schedule softirq */
1071 for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
1072 if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
1073 continue;
1074 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1075 tasklet_hi_schedule(&nic->rbdr_task);
1076 /* Clear interrupt */
1077 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
1078 }
1079
1080 return IRQ_HANDLED;
1081 }
1082
1083 static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
1084 {
1085 struct nicvf *nic = (struct nicvf *)nicvf_irq;
1086
1087 nicvf_dump_intr_status(nic);
1088
1089 /* Disable Qset err interrupt and schedule softirq */
1090 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1091 tasklet_hi_schedule(&nic->qs_err_task);
1092 nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
1093
1094 return IRQ_HANDLED;
1095 }
1096
1097 static void nicvf_set_irq_affinity(struct nicvf *nic)
1098 {
1099 int vec, cpu;
1100
1101 for (vec = 0; vec < nic->num_vec; vec++) {
1102 if (!nic->irq_allocated[vec])
1103 continue;
1104
1105 if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
1106 return;
1107 /* CQ interrupts */
1108 if (vec < NICVF_INTR_ID_SQ)
1109 /* Leave CPU0 for RBDR and other interrupts */
1110 cpu = nicvf_netdev_qidx(nic, vec) + 1;
1111 else
1112 cpu = 0;
1113
1114 cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
1115 nic->affinity_mask[vec]);
1116 irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
1117 nic->affinity_mask[vec]);
1118 }
1119 }
1120
1121 static int nicvf_register_interrupts(struct nicvf *nic)
1122 {
1123 int irq, ret = 0;
1124
1125 for_each_cq_irq(irq)
1126 sprintf(nic->irq_name[irq], "%s-rxtx-%d",
1127 nic->pnicvf->netdev->name,
1128 nicvf_netdev_qidx(nic, irq));
1129
1130 for_each_sq_irq(irq)
1131 sprintf(nic->irq_name[irq], "%s-sq-%d",
1132 nic->pnicvf->netdev->name,
1133 nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));
1134
1135 for_each_rbdr_irq(irq)
1136 sprintf(nic->irq_name[irq], "%s-rbdr-%d",
1137 nic->pnicvf->netdev->name,
1138 nic->sqs_mode ? (nic->sqs_id + 1) : 0);
1139
1140 /* Register CQ interrupts */
1141 for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
1142 ret = request_irq(pci_irq_vector(nic->pdev, irq),
1143 nicvf_intr_handler,
1144 0, nic->irq_name[irq], nic->napi[irq]);
1145 if (ret)
1146 goto err;
1147 nic->irq_allocated[irq] = true;
1148 }
1149
1150 /* Register RBDR interrupt */
1151 for (irq = NICVF_INTR_ID_RBDR;
1152 irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
1153 ret = request_irq(pci_irq_vector(nic->pdev, irq),
1154 nicvf_rbdr_intr_handler,
1155 0, nic->irq_name[irq], nic);
1156 if (ret)
1157 goto err;
1158 nic->irq_allocated[irq] = true;
1159 }
1160
1161 /* Register QS error interrupt */
1162 sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
1163 nic->pnicvf->netdev->name,
1164 nic->sqs_mode ? (nic->sqs_id + 1) : 0);
1165 irq = NICVF_INTR_ID_QS_ERR;
1166 ret = request_irq(pci_irq_vector(nic->pdev, irq),
1167 nicvf_qs_err_intr_handler,
1168 0, nic->irq_name[irq], nic);
1169 if (ret)
1170 goto err;
1171
1172 nic->irq_allocated[irq] = true;
1173
1174 /* Set IRQ affinities */
1175 nicvf_set_irq_affinity(nic);
1176
1177 err:
1178 if (ret)
1179 netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
1180
1181 return ret;
1182 }
1183
1184 static void nicvf_unregister_interrupts(struct nicvf *nic)
1185 {
1186 struct pci_dev *pdev = nic->pdev;
1187 int irq;
1188
1189 /* Free registered interrupts */
1190 for (irq = 0; irq < nic->num_vec; irq++) {
1191 if (!nic->irq_allocated[irq])
1192 continue;
1193
1194 irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
1195 free_cpumask_var(nic->affinity_mask[irq]);
1196
1197 if (irq < NICVF_INTR_ID_SQ)
1198 free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
1199 else
1200 free_irq(pci_irq_vector(pdev, irq), nic);
1201
1202 nic->irq_allocated[irq] = false;
1203 }
1204
1205 /* Disable MSI-X */
1206 pci_free_irq_vectors(pdev);
1207 nic->num_vec = 0;
1208 }
1209
1210 /* Initialize MSIX vectors and register MISC interrupt.
1211  * Send READY message to PF to check if it's alive
1212 */
1213 static int nicvf_register_misc_interrupt(struct nicvf *nic)
1214 {
1215 int ret = 0;
1216 int irq = NICVF_INTR_ID_MISC;
1217
1218 /* Return if mailbox interrupt is already registered */
1219 if (nic->pdev->msix_enabled)
1220 return 0;
1221
1222 /* Enable MSI-X */
1223 nic->num_vec = pci_msix_vec_count(nic->pdev);
1224 ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
1225 PCI_IRQ_MSIX);
1226 if (ret < 0) {
1227 netdev_err(nic->netdev,
1228 "Req for #%d msix vectors failed\n", nic->num_vec);
1229 return ret;
1230 }
1231
1232 sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
1233 /* Register Misc interrupt */
1234 ret = request_irq(pci_irq_vector(nic->pdev, irq),
1235 nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
1236
1237 if (ret)
1238 return ret;
1239 nic->irq_allocated[irq] = true;
1240
1241 /* Enable mailbox interrupt */
1242 nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
1243
1244 /* Check if VF is able to communicate with PF */
1245 if (!nicvf_check_pf_ready(nic)) {
1246 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1247 nicvf_unregister_interrupts(nic);
1248 return -EIO;
1249 }
1250
1251 return 0;
1252 }
1253
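/* ndo_start_xmit handler. Note the queue-index mapping (sketch):
 *
 *	qid = skb_get_queue_mapping(skb);
 *	if (nic->xdp_prog)
 *		qid += nic->xdp_tx_queues;   // stack SQs sit after XDP SQs
 *	if (qid >= MAX_SND_QUEUES_PER_QS)
 *		... packet goes to the matching secondary Qset's SQ ...
 *
 * When the ring is full the queue is stopped and NETDEV_TX_BUSY is
 * returned so the stack requeues the skb; the CQ handler wakes the
 * queue again later.
 */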
1254 static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
1255 {
1256 struct nicvf *nic = netdev_priv(netdev);
1257 int qid = skb_get_queue_mapping(skb);
1258 struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
1259 struct nicvf *snic;
1260 struct snd_queue *sq;
1261 int tmp;
1262
1263 /* Check for minimum packet length */
1264 if (skb->len <= ETH_HLEN) {
1265 dev_kfree_skb(skb);
1266 return NETDEV_TX_OK;
1267 }
1268
1269 /* In XDP case, initial HW tx queues are used for XDP,
1270 * but stack's queue mapping starts at '0', so skip the
1271 * Tx queues attached to Rx queues for XDP.
1272 */
1273 if (nic->xdp_prog)
1274 qid += nic->xdp_tx_queues;
1275
1276 snic = nic;
1277 /* Get secondary Qset's SQ structure */
1278 if (qid >= MAX_SND_QUEUES_PER_QS) {
1279 tmp = qid / MAX_SND_QUEUES_PER_QS;
1280 snic = (struct nicvf *)nic->snicvf[tmp - 1];
1281 if (!snic) {
1282 netdev_warn(nic->netdev,
1283 "Secondary Qset#%d's ptr not initialized\n",
1284 tmp - 1);
1285 dev_kfree_skb(skb);
1286 return NETDEV_TX_OK;
1287 }
1288 qid = qid % MAX_SND_QUEUES_PER_QS;
1289 }
1290
1291 sq = &snic->qs->sq[qid];
1292 if (!netif_tx_queue_stopped(txq) &&
1293 !nicvf_sq_append_skb(snic, sq, skb, qid)) {
1294 netif_tx_stop_queue(txq);
1295
1296 		/* Barrier, so that stop_queue is visible to other CPUs */
1297 smp_mb();
1298
1299 		/* Check again, in case another CPU freed descriptors */
1300 if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
1301 netif_tx_wake_queue(txq);
1302 } else {
1303 this_cpu_inc(nic->drv_stats->txq_stop);
1304 netif_warn(nic, tx_err, netdev,
1305 "Transmit ring full, stopping SQ%d\n", qid);
1306 }
1307 return NETDEV_TX_BUSY;
1308 }
1309
1310 return NETDEV_TX_OK;
1311 }
1312
1313 static inline void nicvf_free_cq_poll(struct nicvf *nic)
1314 {
1315 struct nicvf_cq_poll *cq_poll;
1316 int qidx;
1317
1318 for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
1319 cq_poll = nic->napi[qidx];
1320 if (!cq_poll)
1321 continue;
1322 nic->napi[qidx] = NULL;
1323 kfree(cq_poll);
1324 }
1325 }
1326
1327 int nicvf_stop(struct net_device *netdev)
1328 {
1329 int irq, qidx;
1330 struct nicvf *nic = netdev_priv(netdev);
1331 struct queue_set *qs = nic->qs;
1332 struct nicvf_cq_poll *cq_poll = NULL;
1333 union nic_mbx mbx = {};
1334
1335 	/* Wait till all queued set_rx_mode tasks complete */
1336 if (nic->nicvf_rx_mode_wq) {
1337 cancel_delayed_work_sync(&nic->link_change_work);
1338 drain_workqueue(nic->nicvf_rx_mode_wq);
1339 }
1340
1341 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1342 nicvf_send_msg_to_pf(nic, &mbx);
1343
1344 netif_carrier_off(netdev);
1345 netif_tx_stop_all_queues(nic->netdev);
1346 nic->link_up = false;
1347
1348 /* Teardown secondary qsets first */
1349 if (!nic->sqs_mode) {
1350 for (qidx = 0; qidx < nic->sqs_count; qidx++) {
1351 if (!nic->snicvf[qidx])
1352 continue;
1353 nicvf_stop(nic->snicvf[qidx]->netdev);
1354 nic->snicvf[qidx] = NULL;
1355 }
1356 }
1357
1358 /* Disable RBDR & QS error interrupts */
1359 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
1360 nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
1361 nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
1362 }
1363 nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
1364 nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
1365
1366 /* Wait for pending IRQ handlers to finish */
1367 for (irq = 0; irq < nic->num_vec; irq++)
1368 synchronize_irq(pci_irq_vector(nic->pdev, irq));
1369
1370 tasklet_kill(&nic->rbdr_task);
1371 tasklet_kill(&nic->qs_err_task);
1372 if (nic->rb_work_scheduled)
1373 cancel_delayed_work_sync(&nic->rbdr_work);
1374
1375 for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
1376 cq_poll = nic->napi[qidx];
1377 if (!cq_poll)
1378 continue;
1379 napi_synchronize(&cq_poll->napi);
1380 		/* CQ intr is re-enabled on the napi_complete path,
1381 		 * so disable it now
1382 */
1383 nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
1384 nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
1385 napi_disable(&cq_poll->napi);
1386 netif_napi_del(&cq_poll->napi);
1387 }
1388
1389 netif_tx_disable(netdev);
1390
1391 for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
1392 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
1393
1394 /* Free resources */
1395 nicvf_config_data_transfer(nic, false);
1396
1397 /* Disable HW Qset */
1398 nicvf_qset_config(nic, false);
1399
1400 /* disable mailbox interrupt */
1401 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1402
1403 nicvf_unregister_interrupts(nic);
1404
1405 nicvf_free_cq_poll(nic);
1406
1407 /* Free any pending SKB saved to receive timestamp */
1408 if (nic->ptp_skb) {
1409 dev_kfree_skb_any(nic->ptp_skb);
1410 nic->ptp_skb = NULL;
1411 }
1412
1413 /* Clear multiqset info */
1414 nic->pnicvf = nic;
1415
1416 return 0;
1417 }
1418
1419 static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
1420 {
1421 union nic_mbx mbx = {};
1422
1423 mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
1424 mbx.ptp.enable = enable;
1425
1426 return nicvf_send_msg_to_pf(nic, &mbx);
1427 }
1428
1429 static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
1430 {
1431 union nic_mbx mbx = {};
1432
1433 mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
1434 mbx.frs.max_frs = mtu;
1435 mbx.frs.vf_id = nic->vf_id;
1436
1437 return nicvf_send_msg_to_pf(nic, &mbx);
1438 }
1439
1440 static void nicvf_link_status_check_task(struct work_struct *work_arg)
1441 {
1442 struct nicvf *nic = container_of(work_arg,
1443 struct nicvf,
1444 link_change_work.work);
1445 union nic_mbx mbx = {};
1446 mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
1447 nicvf_send_msg_to_pf(nic, &mbx);
1448 queue_delayed_work(nic->nicvf_rx_mode_wq,
1449 &nic->link_change_work, 2 * HZ);
1450 }
1451
1452 int nicvf_open(struct net_device *netdev)
1453 {
1454 int cpu, err, qidx;
1455 struct nicvf *nic = netdev_priv(netdev);
1456 struct queue_set *qs = nic->qs;
1457 struct nicvf_cq_poll *cq_poll = NULL;
1458
1459 	/* Wait till all queued set_rx_mode tasks, if any, complete */
1460 if (nic->nicvf_rx_mode_wq)
1461 drain_workqueue(nic->nicvf_rx_mode_wq);
1462
1463 netif_carrier_off(netdev);
1464
1465 err = nicvf_register_misc_interrupt(nic);
1466 if (err)
1467 return err;
1468
1469 /* Register NAPI handler for processing CQEs */
1470 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1471 cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
1472 if (!cq_poll) {
1473 err = -ENOMEM;
1474 goto napi_del;
1475 }
1476 cq_poll->cq_idx = qidx;
1477 cq_poll->nicvf = nic;
1478 netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
1479 NAPI_POLL_WEIGHT);
1480 napi_enable(&cq_poll->napi);
1481 nic->napi[qidx] = cq_poll;
1482 }
1483
1484 	/* Check if we got MAC address from PF or else generate a random MAC */
1485 if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
1486 eth_hw_addr_random(netdev);
1487 nicvf_hw_set_mac_addr(nic, netdev);
1488 }
1489
1490 if (nic->set_mac_pending) {
1491 nic->set_mac_pending = false;
1492 nicvf_hw_set_mac_addr(nic, netdev);
1493 }
1494
1495 /* Init tasklet for handling Qset err interrupt */
1496 tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);
1497
1498 /* Init RBDR tasklet which will refill RBDR */
1499 tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
1500 INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
1501
1502 	/* Configure CPI algorithm */
1503 nic->cpi_alg = cpi_alg;
1504 if (!nic->sqs_mode)
1505 nicvf_config_cpi(nic);
1506
1507 nicvf_request_sqs(nic);
1508 if (nic->sqs_mode)
1509 nicvf_get_primary_vf_struct(nic);
1510
1511 /* Configure PTP timestamp */
1512 if (nic->ptp_clock)
1513 nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
1514 atomic_set(&nic->tx_ptp_skbs, 0);
1515 nic->ptp_skb = NULL;
1516
1517 /* Configure receive side scaling and MTU */
1518 if (!nic->sqs_mode) {
1519 nicvf_rss_init(nic);
1520 err = nicvf_update_hw_max_frs(nic, netdev->mtu);
1521 if (err)
1522 goto cleanup;
1523
1524 /* Clear percpu stats */
1525 for_each_possible_cpu(cpu)
1526 memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
1527 sizeof(struct nicvf_drv_stats));
1528 }
1529
1530 err = nicvf_register_interrupts(nic);
1531 if (err)
1532 goto cleanup;
1533
1534 /* Initialize the queues */
1535 err = nicvf_init_resources(nic);
1536 if (err)
1537 goto cleanup;
1538
1539 /* Make sure queue initialization is written */
1540 wmb();
1541
1542 nicvf_reg_write(nic, NIC_VF_INT, -1);
1543 /* Enable Qset err interrupt */
1544 nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
1545
1546 /* Enable completion queue interrupt */
1547 for (qidx = 0; qidx < qs->cq_cnt; qidx++)
1548 nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
1549
1550 /* Enable RBDR threshold interrupt */
1551 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
1552 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
1553
1554 /* Send VF config done msg to PF */
1555 nicvf_send_cfg_done(nic);
1556
1557 if (nic->nicvf_rx_mode_wq) {
1558 INIT_DELAYED_WORK(&nic->link_change_work,
1559 nicvf_link_status_check_task);
1560 queue_delayed_work(nic->nicvf_rx_mode_wq,
1561 &nic->link_change_work, 0);
1562 }
1563
1564 return 0;
1565 cleanup:
1566 nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
1567 nicvf_unregister_interrupts(nic);
1568 tasklet_kill(&nic->qs_err_task);
1569 tasklet_kill(&nic->rbdr_task);
1570 napi_del:
1571 for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
1572 cq_poll = nic->napi[qidx];
1573 if (!cq_poll)
1574 continue;
1575 napi_disable(&cq_poll->napi);
1576 netif_napi_del(&cq_poll->napi);
1577 }
1578 nicvf_free_cq_poll(nic);
1579 return err;
1580 }
1581
1582 static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1583 {
1584 struct nicvf *nic = netdev_priv(netdev);
1585 int orig_mtu = netdev->mtu;
1586
1587 /* For now just support only the usual MTU sized frames,
1588 * plus some headroom for VLAN, QinQ.
1589 */
1590 if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
1591 netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1592 netdev->mtu);
1593 return -EINVAL;
1594 }
1595
1596 netdev->mtu = new_mtu;
1597
1598 if (!netif_running(netdev))
1599 return 0;
1600
1601 if (nicvf_update_hw_max_frs(nic, new_mtu)) {
1602 netdev->mtu = orig_mtu;
1603 return -EINVAL;
1604 }
1605
1606 return 0;
1607 }
1608
1609 static int nicvf_set_mac_address(struct net_device *netdev, void *p)
1610 {
1611 struct sockaddr *addr = p;
1612 struct nicvf *nic = netdev_priv(netdev);
1613
1614 if (!is_valid_ether_addr(addr->sa_data))
1615 return -EADDRNOTAVAIL;
1616
1617 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1618
1619 if (nic->pdev->msix_enabled) {
1620 if (nicvf_hw_set_mac_addr(nic, netdev))
1621 return -EBUSY;
1622 } else {
1623 nic->set_mac_pending = true;
1624 }
1625
1626 return 0;
1627 }
1628
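/* Refresh BGX (LMAC) statistics. Each statistic index is fetched with its
 * own NIC_MBOX_MSG_BGX_STATS request; the PF's reply is delivered through
 * the mailbox interrupt and stored by nicvf_read_bgx_stats().
 */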
1629 void nicvf_update_lmac_stats(struct nicvf *nic)
1630 {
1631 int stat = 0;
1632 union nic_mbx mbx = {};
1633
1634 if (!netif_running(nic->netdev))
1635 return;
1636
1637 mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
1638 mbx.bgx_stats.vf_id = nic->vf_id;
1639 /* Rx stats */
1640 mbx.bgx_stats.rx = 1;
1641 while (stat < BGX_RX_STATS_COUNT) {
1642 mbx.bgx_stats.idx = stat;
1643 if (nicvf_send_msg_to_pf(nic, &mbx))
1644 return;
1645 stat++;
1646 }
1647
1648 stat = 0;
1649
1650 /* Tx stats */
1651 mbx.bgx_stats.rx = 0;
1652 while (stat < BGX_TX_STATS_COUNT) {
1653 mbx.bgx_stats.idx = stat;
1654 if (nicvf_send_msg_to_pf(nic, &mbx))
1655 return;
1656 stat++;
1657 }
1658 }
1659
1660 void nicvf_update_stats(struct nicvf *nic)
1661 {
1662 int qidx, cpu;
1663 u64 tmp_stats = 0;
1664 struct nicvf_hw_stats *stats = &nic->hw_stats;
1665 struct nicvf_drv_stats *drv_stats;
1666 struct queue_set *qs = nic->qs;
1667
1668 #define GET_RX_STATS(reg) \
1669 nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
1670 #define GET_TX_STATS(reg) \
1671 nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
1672
1673 stats->rx_bytes = GET_RX_STATS(RX_OCTS);
1674 stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
1675 stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
1676 stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
1677 stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
1678 stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
1679 stats->rx_drop_red = GET_RX_STATS(RX_RED);
1680 stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
1681 stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
1682 stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
1683 stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
1684 stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
1685 stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
1686 stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
1687
1688 stats->tx_bytes = GET_TX_STATS(TX_OCTS);
1689 stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
1690 stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
1691 stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
1692 stats->tx_drops = GET_TX_STATS(TX_DROP);
1693
1694 /* On T88 pass 2.0, the dummy SQE added for TSO notification
1695 	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed to
1696 	 * by the dummy SQE, which results in the tx_drops counter being
1697 	 * incremented. Subtracting it from the tx_tso counter will give
1698 	 * the exact tx_drops count.
1699 */
1700 if (nic->t88 && nic->hw_tso) {
1701 for_each_possible_cpu(cpu) {
1702 drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
1703 tmp_stats += drv_stats->tx_tso;
1704 }
1705 stats->tx_drops = tmp_stats - stats->tx_drops;
1706 }
1707 stats->tx_frames = stats->tx_ucast_frames +
1708 stats->tx_bcast_frames +
1709 stats->tx_mcast_frames;
1710 stats->rx_frames = stats->rx_ucast_frames +
1711 stats->rx_bcast_frames +
1712 stats->rx_mcast_frames;
1713 stats->rx_drops = stats->rx_drop_red +
1714 stats->rx_drop_overrun;
1715
1716 /* Update RQ and SQ stats */
1717 for (qidx = 0; qidx < qs->rq_cnt; qidx++)
1718 nicvf_update_rq_stats(nic, qidx);
1719 for (qidx = 0; qidx < qs->sq_cnt; qidx++)
1720 nicvf_update_sq_stats(nic, qidx);
1721 }
1722
1723 static void nicvf_get_stats64(struct net_device *netdev,
1724 struct rtnl_link_stats64 *stats)
1725 {
1726 struct nicvf *nic = netdev_priv(netdev);
1727 struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
1728
1729 nicvf_update_stats(nic);
1730
1731 stats->rx_bytes = hw_stats->rx_bytes;
1732 stats->rx_packets = hw_stats->rx_frames;
1733 stats->rx_dropped = hw_stats->rx_drops;
1734 stats->multicast = hw_stats->rx_mcast_frames;
1735
1736 stats->tx_bytes = hw_stats->tx_bytes;
1737 stats->tx_packets = hw_stats->tx_frames;
1738 stats->tx_dropped = hw_stats->tx_drops;
1739
1740 }
1741
1742 static void nicvf_tx_timeout(struct net_device *dev, unsigned int txqueue)
1743 {
1744 struct nicvf *nic = netdev_priv(dev);
1745
1746 netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");
1747
1748 this_cpu_inc(nic->drv_stats->tx_timeout);
1749 schedule_work(&nic->reset_task);
1750 }
1751
1752 static void nicvf_reset_task(struct work_struct *work)
1753 {
1754 struct nicvf *nic;
1755
1756 nic = container_of(work, struct nicvf, reset_task);
1757
1758 if (!netif_running(nic->netdev))
1759 return;
1760
1761 nicvf_stop(nic->netdev);
1762 nicvf_open(nic->netdev);
1763 netif_trans_update(nic->netdev);
1764 }
1765
1766 static int nicvf_config_loopback(struct nicvf *nic,
1767 netdev_features_t features)
1768 {
1769 union nic_mbx mbx = {};
1770
1771 mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
1772 mbx.lbk.vf_id = nic->vf_id;
1773 mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;
1774
1775 return nicvf_send_msg_to_pf(nic, &mbx);
1776 }
1777
1778 static netdev_features_t nicvf_fix_features(struct net_device *netdev,
1779 netdev_features_t features)
1780 {
1781 struct nicvf *nic = netdev_priv(netdev);
1782
1783 if ((features & NETIF_F_LOOPBACK) &&
1784 netif_running(netdev) && !nic->loopback_supported)
1785 features &= ~NETIF_F_LOOPBACK;
1786
1787 return features;
1788 }
1789
1790 static int nicvf_set_features(struct net_device *netdev,
1791 netdev_features_t features)
1792 {
1793 struct nicvf *nic = netdev_priv(netdev);
1794 netdev_features_t changed = features ^ netdev->features;
1795
1796 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1797 nicvf_config_vlan_stripping(nic, features);
1798
1799 if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1800 return nicvf_config_loopback(nic, features);
1801
1802 return 0;
1803 }
1804
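/* Recompute queue counts when an XDP program is attached or detached.
 * Worked example (assuming 8 queues max per Qset): with 8 Rx and 8 stack
 * Tx queues and XDP attached, xdp_tx_queues = 8, so txq_count = 16 and
 * cq_count = 16 > MAX_CMP_QUEUES_PER_QS, giving
 * sqs_count = roundup(16, 8) / 8 - 1 = 1 extra (secondary) Qset.
 */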
1805 static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
1806 {
1807 u8 cq_count, txq_count;
1808
1809 /* Set XDP Tx queue count same as Rx queue count */
1810 if (!bpf_attached)
1811 nic->xdp_tx_queues = 0;
1812 else
1813 nic->xdp_tx_queues = nic->rx_queues;
1814
1815 /* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
1816 	 * need to be allocated; check how many.
1817 */
1818 txq_count = nic->xdp_tx_queues + nic->tx_queues;
1819 cq_count = max(nic->rx_queues, txq_count);
1820 if (cq_count > MAX_CMP_QUEUES_PER_QS) {
1821 nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
1822 nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
1823 } else {
1824 nic->sqs_count = 0;
1825 }
1826
1827 /* Set primary Qset's resources */
1828 nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
1829 nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
1830 nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);
1831
1832 /* Update stack */
1833 nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
1834 }
1835
1836 static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1837 {
1838 struct net_device *dev = nic->netdev;
1839 bool if_up = netif_running(nic->netdev);
1840 struct bpf_prog *old_prog;
1841 bool bpf_attached = false;
1842 int ret = 0;
1843
1844 /* For now just support only the usual MTU sized frames,
1845 * plus some headroom for VLAN, QinQ.
1846 */
1847 if (prog && dev->mtu > MAX_XDP_MTU) {
1848 netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1849 dev->mtu);
1850 return -EOPNOTSUPP;
1851 }
1852
1853 	/* All SQs attached to CQs (i.e. the same count as RQs) are treated as
1854 	 * XDP Tx queues, and additional Tx queues are allocated for the
1855 	 * network stack to send pkts out.
1856 	 *
1857 	 * The number of stack Tx queues is either the same as the Rx queue
1858 	 * count or whatever is left within the max number of queues possible.
1859 */
1860 if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
1861 netdev_warn(dev,
1862 "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
1863 nic->max_queues);
1864 return -ENOMEM;
1865 }
1866
1867 if (if_up)
1868 nicvf_stop(nic->netdev);
1869
1870 old_prog = xchg(&nic->xdp_prog, prog);
1871 /* Detach old prog, if any */
1872 if (old_prog)
1873 bpf_prog_put(old_prog);
1874
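	/* A new program arrives with a single reference from the caller; take
	 * rx_queues - 1 additional references so that, together with the
	 * caller's reference, there is one per Rx queue.
	 */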
1875 if (nic->xdp_prog) {
1876 /* Attach BPF program */
1877 bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
1878 bpf_attached = true;
1879 }
1880
1881 /* Calculate Tx queues needed for XDP and network stack */
1882 nicvf_set_xdp_queues(nic, bpf_attached);
1883
1884 if (if_up) {
1885 /* Reinitialize interface, clean slate */
1886 nicvf_open(nic->netdev);
1887 netif_trans_update(nic->netdev);
1888 }
1889
1890 return ret;
1891 }
1892
1893 static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
1894 {
1895 struct nicvf *nic = netdev_priv(netdev);
1896
1897 /* To avoid checks while retrieving the buffer address from CQE_RX,
1898 * do not support XDP on T88 pass1.x silicon, which is in any case
1899 * not widely in use.
1900 */
1901 if (pass1_silicon(nic->pdev))
1902 return -EOPNOTSUPP;
1903
1904 switch (xdp->command) {
1905 case XDP_SETUP_PROG:
1906 return nicvf_xdp_setup(nic, xdp->prog);
1907 default:
1908 return -EINVAL;
1909 }
1910 }
1911
1912 static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
1913 {
1914 struct hwtstamp_config config;
1915 struct nicvf *nic = netdev_priv(netdev);
1916
1917 if (!nic->ptp_clock)
1918 return -ENODEV;
1919
1920 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1921 return -EFAULT;
1922
1923 /* reserved for future extensions */
1924 if (config.flags)
1925 return -EINVAL;
1926
1927 switch (config.tx_type) {
1928 case HWTSTAMP_TX_OFF:
1929 case HWTSTAMP_TX_ON:
1930 break;
1931 default:
1932 return -ERANGE;
1933 }
1934
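	/* The hardware timestamps either no received packets or all of them,
	 * so any PTP-specific filter request is upgraded to
	 * HWTSTAMP_FILTER_ALL below.
	 */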
1935 switch (config.rx_filter) {
1936 case HWTSTAMP_FILTER_NONE:
1937 nic->hw_rx_tstamp = false;
1938 break;
1939 case HWTSTAMP_FILTER_ALL:
1940 case HWTSTAMP_FILTER_SOME:
1941 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1942 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1943 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1944 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1945 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1946 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1947 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1948 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1949 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1950 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1951 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1952 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1953 nic->hw_rx_tstamp = true;
1954 config.rx_filter = HWTSTAMP_FILTER_ALL;
1955 break;
1956 default:
1957 return -ERANGE;
1958 }
1959
1960 if (netif_running(netdev))
1961 nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
1962
1963 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1964 return -EFAULT;
1965
1966 return 0;
1967 }
1968
1969 static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
1970 {
1971 switch (cmd) {
1972 case SIOCSHWTSTAMP:
1973 return nicvf_config_hwtstamp(netdev, req);
1974 default:
1975 return -EOPNOTSUPP;
1976 }
1977 }
1978
1979 static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
1980 struct nicvf *nic)
1981 {
1982 union nic_mbx mbx = {};
1983 int idx;
1984
1985 /* From inside the VM only 128 bits of mailbox data are available
1986 * per message to the host's PF, so send all multicast addresses
1987 * one by one, starting with a flush command in case the kernel
1988 * requests specific MAC filtering.
1989 */
1990
1991 /* flush DMAC filters and reset RX mode */
1992 mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
1993 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
1994 goto free_mc;
1995
1996 if (mode & BGX_XCAST_MCAST_FILTER) {
1997 /* Once filtering is enabled, signal the PF to add its own LMAC
1998 * to the filter so that packets destined for it are still accepted.
1999 */
2000 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
2001 mbx.xcast.mac = 0;
2002 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
2003 goto free_mc;
2004 }
2005
2006 /* check if we have any specific MACs to be added to PF DMAC filter */
2007 if (mc_addrs) {
2008 /* now go through kernel list of MACs and add them one by one */
2009 for (idx = 0; idx < mc_addrs->count; idx++) {
2010 mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
2011 mbx.xcast.mac = mc_addrs->mc[idx];
2012 if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
2013 goto free_mc;
2014 }
2015 }
2016
2017 /* and finally set rx mode for PF accordingly */
2018 mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
2019 mbx.xcast.mode = mode;
2020
2021 nicvf_send_msg_to_pf(nic, &mbx);
2022 free_mc:
2023 kfree(mc_addrs);
2024 }
2025
2026 static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
2027 {
2028 struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
2029 work);
2030 struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
2031 u8 mode;
2032 struct xcast_addr_list *mc;
2033
2034 if (!vf_work)
2035 return;
2036
2037 /* Save the message data locally to prevent it from being
2038 * overwritten by the next ndo_set_rx_mode() call.
2039 */
2040 spin_lock_bh(&nic->rx_mode_wq_lock);
2041 mode = vf_work->mode;
2042 mc = vf_work->mc;
2043 vf_work->mc = NULL;
2044 spin_unlock_bh(&nic->rx_mode_wq_lock);
2045
2046 __nicvf_set_rx_mode_task(mode, mc, nic);
2047 }
2048
2049 static void nicvf_set_rx_mode(struct net_device *netdev)
2050 {
2051 struct nicvf *nic = netdev_priv(netdev);
2052 struct netdev_hw_addr *ha;
2053 struct xcast_addr_list *mc_list = NULL;
2054 u8 mode = 0;
2055
2056 if (netdev->flags & IFF_PROMISC) {
2057 mode = BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT;
2058 } else {
2059 if (netdev->flags & IFF_BROADCAST)
2060 mode |= BGX_XCAST_BCAST_ACCEPT;
2061
2062 if (netdev->flags & IFF_ALLMULTI) {
2063 mode |= BGX_XCAST_MCAST_ACCEPT;
2064 } else if (netdev->flags & IFF_MULTICAST) {
2065 mode |= BGX_XCAST_MCAST_FILTER;
2066 /* copy the multicast addresses for the deferred rx_mode work */
2067 if (netdev_mc_count(netdev)) {
2068 mc_list = kmalloc(struct_size(mc_list, mc,
2069 netdev_mc_count(netdev)),
2070 GFP_ATOMIC);
2071 if (unlikely(!mc_list))
2072 return;
2073 mc_list->count = 0;
2074 netdev_hw_addr_list_for_each(ha, &netdev->mc) {
2075 mc_list->mc[mc_list->count] =
2076 ether_addr_to_u64(ha->addr);
2077 mc_list->count++;
2078 }
2079 }
2080 }
2081 }
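	/* ndo_set_rx_mode runs in atomic context while mailbox messaging to
	 * the PF can sleep, so stash the new mode and address list and let
	 * the ordered workqueue apply them.
	 */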
2082 spin_lock(&nic->rx_mode_wq_lock);
2083 kfree(nic->rx_mode_work.mc);
2084 nic->rx_mode_work.mc = mc_list;
2085 nic->rx_mode_work.mode = mode;
2086 queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
2087 spin_unlock(&nic->rx_mode_wq_lock);
2088 }
2089
2090 static const struct net_device_ops nicvf_netdev_ops = {
2091 .ndo_open = nicvf_open,
2092 .ndo_stop = nicvf_stop,
2093 .ndo_start_xmit = nicvf_xmit,
2094 .ndo_change_mtu = nicvf_change_mtu,
2095 .ndo_set_mac_address = nicvf_set_mac_address,
2096 .ndo_get_stats64 = nicvf_get_stats64,
2097 .ndo_tx_timeout = nicvf_tx_timeout,
2098 .ndo_fix_features = nicvf_fix_features,
2099 .ndo_set_features = nicvf_set_features,
2100 .ndo_bpf = nicvf_xdp,
2101 .ndo_do_ioctl = nicvf_ioctl,
2102 .ndo_set_rx_mode = nicvf_set_rx_mode,
2103 };
2104
2105 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2106 {
2107 struct device *dev = &pdev->dev;
2108 struct net_device *netdev;
2109 struct nicvf *nic;
2110 int err, qcount;
2111 u16 sdevid;
2112 struct cavium_ptp *ptp_clock;
2113
2114 ptp_clock = cavium_ptp_get();
2115 if (IS_ERR(ptp_clock)) {
2116 if (PTR_ERR(ptp_clock) == -ENODEV)
2117 /* In a virtualized environment, proceed without PTP */
2118 ptp_clock = NULL;
2119 else
2120 return PTR_ERR(ptp_clock);
2121 }
2122
2123 err = pci_enable_device(pdev);
2124 if (err) {
2125 dev_err(dev, "Failed to enable PCI device\n");
2126 return err;
2127 }
2128
2129 err = pci_request_regions(pdev, DRV_NAME);
2130 if (err) {
2131 dev_err(dev, "PCI request regions failed 0x%x\n", err);
2132 goto err_disable_device;
2133 }
2134
2135 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
2136 if (err) {
2137 dev_err(dev, "Unable to get usable DMA configuration\n");
2138 goto err_release_regions;
2139 }
2140
2141 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
2142 if (err) {
2143 dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
2144 goto err_release_regions;
2145 }
2146
2147 qcount = netif_get_num_default_rss_queues();
2148
2149 /* Restrict multiqset support to host-bound VFs only */
2150 if (pdev->is_virtfn) {
2151 /* Set max number of queues per VF */
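		/* Illustrative: if MAX_SQS_PER_VF is 11 and
		 * MAX_CMP_QUEUES_PER_QS is 8, this caps qcount at
		 * min(num_online_cpus(), 96).
		 */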
2152 qcount = min_t(int, num_online_cpus(),
2153 (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
2154 }
2155
2156 netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
2157 if (!netdev) {
2158 err = -ENOMEM;
2159 goto err_release_regions;
2160 }
2161
2162 pci_set_drvdata(pdev, netdev);
2163
2164 SET_NETDEV_DEV(netdev, &pdev->dev);
2165
2166 nic = netdev_priv(netdev);
2167 nic->netdev = netdev;
2168 nic->pdev = pdev;
2169 nic->pnicvf = nic;
2170 nic->max_queues = qcount;
2171 /* If the number of CPUs is too low, there won't be any queues left
2172 * for XDP_TX, hence double the queue count.
2173 */
2174 if (!nic->t88)
2175 nic->max_queues *= 2;
2176 nic->ptp_clock = ptp_clock;
2177
2178 /* Initialize mutex that serializes usage of VF's mailbox */
2179 mutex_init(&nic->rx_mode_mtx);
2180
2181 /* MAP VF's configuration registers */
2182 nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
2183 if (!nic->reg_base) {
2184 dev_err(dev, "Cannot map config register space, aborting\n");
2185 err = -ENOMEM;
2186 goto err_free_netdev;
2187 }
2188
2189 nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
2190 if (!nic->drv_stats) {
2191 err = -ENOMEM;
2192 goto err_free_netdev;
2193 }
2194
2195 err = nicvf_set_qset_resources(nic);
2196 if (err)
2197 goto err_free_netdev;
2198
2199 /* Check if PF is alive and get MAC address for this VF */
2200 err = nicvf_register_misc_interrupt(nic);
2201 if (err)
2202 goto err_free_netdev;
2203
2204 nicvf_send_vf_struct(nic);
2205
2206 if (!pass1_silicon(nic->pdev))
2207 nic->hw_tso = true;
2208
2209 /* Get iommu domain for iova to physical addr conversion */
2210 nic->iommu_domain = iommu_get_domain_for_dev(dev);
2211
2212 pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
2213 if (sdevid == 0xA134)
2214 nic->t88 = true;
2215
2216 /* Check if this VF is in QS only mode */
2217 if (nic->sqs_mode)
2218 return 0;
2219
2220 err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
2221 if (err)
2222 goto err_unregister_interrupts;
2223
2224 netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
2225 NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
2226 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2227 NETIF_F_HW_VLAN_CTAG_RX);
2228
2229 netdev->hw_features |= NETIF_F_RXHASH;
2230
2231 netdev->features |= netdev->hw_features;
2232 netdev->hw_features |= NETIF_F_LOOPBACK;
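	/* Loopback is added to hw_features only after the features copy above,
	 * so it can be toggled with ethtool but stays off by default.
	 */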
2233
2234 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
2235 NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
2236
2237 netdev->netdev_ops = &nicvf_netdev_ops;
2238 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
2239
2240 /* MTU range: 64 - 9200 */
2241 netdev->min_mtu = NIC_HW_MIN_FRS;
2242 netdev->max_mtu = NIC_HW_MAX_FRS;
2243
2244 INIT_WORK(&nic->reset_task, nicvf_reset_task);
2245
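	/* An ordered workqueue guarantees that deferred rx_mode updates are
	 * applied in the order they were queued.
	 */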
2246 nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
2247 WQ_MEM_RECLAIM,
2248 nic->vf_id);
2249 if (!nic->nicvf_rx_mode_wq) {
2250 err = -ENOMEM;
2251 dev_err(dev, "Failed to allocate work queue\n");
2252 goto err_unregister_interrupts;
2253 }
2254
2255 INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
2256 spin_lock_init(&nic->rx_mode_wq_lock);
2257
2258 err = register_netdev(netdev);
2259 if (err) {
2260 dev_err(dev, "Failed to register netdevice\n");
2261 goto err_destroy_workqueue;
2262 }
2263
2264 nic->msg_enable = debug;
2265
2266 nicvf_set_ethtool_ops(netdev);
2267
2268 return 0;
2269
2270 err_destroy_workqueue:
2271 destroy_workqueue(nic->nicvf_rx_mode_wq);
2272 err_unregister_interrupts:
2273 nicvf_unregister_interrupts(nic);
2274 err_free_netdev:
2275 pci_set_drvdata(pdev, NULL);
2276 if (nic->drv_stats)
2277 free_percpu(nic->drv_stats);
2278 free_netdev(netdev);
2279 err_release_regions:
2280 pci_release_regions(pdev);
2281 err_disable_device:
2282 pci_disable_device(pdev);
2283 return err;
2284 }
2285
2286 static void nicvf_remove(struct pci_dev *pdev)
2287 {
2288 struct net_device *netdev = pci_get_drvdata(pdev);
2289 struct nicvf *nic;
2290 struct net_device *pnetdev;
2291
2292 if (!netdev)
2293 return;
2294
2295 nic = netdev_priv(netdev);
2296 pnetdev = nic->pnicvf->netdev;
2297
2298 /* Check if this Qset is assigned to a different VF.
2299 * If yes, clean up the primary and all secondary Qsets.
2300 */
2301 if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
2302 unregister_netdev(pnetdev);
2303 if (nic->nicvf_rx_mode_wq) {
2304 destroy_workqueue(nic->nicvf_rx_mode_wq);
2305 nic->nicvf_rx_mode_wq = NULL;
2306 }
2307 nicvf_unregister_interrupts(nic);
2308 pci_set_drvdata(pdev, NULL);
2309 if (nic->drv_stats)
2310 free_percpu(nic->drv_stats);
2311 cavium_ptp_put(nic->ptp_clock);
2312 free_netdev(netdev);
2313 pci_release_regions(pdev);
2314 pci_disable_device(pdev);
2315 }
2316
2317 static void nicvf_shutdown(struct pci_dev *pdev)
2318 {
2319 nicvf_remove(pdev);
2320 }
2321
2322 static struct pci_driver nicvf_driver = {
2323 .name = DRV_NAME,
2324 .id_table = nicvf_id_table,
2325 .probe = nicvf_probe,
2326 .remove = nicvf_remove,
2327 .shutdown = nicvf_shutdown,
2328 };
2329
2330 static int __init nicvf_init_module(void)
2331 {
2332 pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
2333 return pci_register_driver(&nicvf_driver);
2334 }
2335
2336 static void __exit nicvf_cleanup_module(void)
2337 {
2338 pci_unregister_driver(&nicvf_driver);
2339 }
2340
2341 module_init(nicvf_init_module);
2342 module_exit(nicvf_cleanup_module);
2343