/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
                 "Bitmask indicating which consoles have debug output redirected to syslog.");
/**
 * octeon_console_debug_enabled - determines if a given console has debug enabled.
 * @console: console to check
 * Return: 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
        return (console_bitmask >> (console)) & 0x1;
}
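
/* Example (a hypothetical invocation, shown for illustration only): loading
 * the module with console_bitmask=0x3 redirects debug output from consoles
 * 0 and 1 to syslog:
 *
 *        modprobe liquidio console_bitmask=0x3
 */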

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
/* Push the host's local time to the octeon firmware every 60 seconds so the
 * firmware uses the same time reference; this makes it easy to correlate
 * firmware-logged events/errors with host events while debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)

struct lio_trusted_vf_ctx {
        struct completion complete;
        int status;
};

struct oct_link_status_resp {
        u64 rh;
        struct oct_link_info link_info;
        u64 status;
};

struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};
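
/* tx_info packs the GSO metadata for one TX command into a single 64-bit
 * word; the endian-specific field order keeps gso_size/gso_segs at the same
 * bit positions on big- and little-endian hosts. A sketch of typical use
 * when building a TX descriptor (assumed context, for illustration only):
 *
 *        union tx_info tx;
 *
 *        tx.u64 = 0;
 *        tx.s.gso_size = skb_shinfo(skb)->gso_size;
 *        tx.s.gso_segs = skb_shinfo(skb)->gso_segs;
 */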

/* Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

struct handshake {
        struct completion init;
        struct completion started;
        struct pci_dev *pci_dev;
        int init_ok;
        int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
                                    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
                                      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
static void octeon_droq_bh(struct tasklet_struct *t)
{
        int q_no;
        int reschedule = 0;
        struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
                                                           droq_tasklet);
        struct octeon_device *oct = oct_priv->dev;

        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
                if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
                        continue;
                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                                                          MAX_PACKET_BUDGET);
                lio_enable_irq(oct->droq[q_no], NULL);

                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                        /* set time and cnt interrupt thresholds for this DROQ
                         * for NAPI
                         */
                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
                            0x5700000040ULL);
                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
                }
        }

        if (reschedule)
                tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * force_io_queues_off - Forces all IO queues off on a given device
 * @oct: Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
        if ((oct->chip_id == OCTEON_CN66XX) ||
            (oct->chip_id == OCTEON_CN68XX)) {
                /* Reset the Enable bits for Input Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

                /* Reset the Enable bits for Output Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
        }
}

/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 * @oct: Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */
        force_io_queues_off(oct);

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                                atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}

/**
 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 * @dev: Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos = 0x100;
        u32 status, mask;

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask; /* Clear corresponding nonfatal bits */
        else
                status &= mask; /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
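
/* Note: the hard-coded pos of 0x100 above assumes the AER block is the first
 * PCIe extended capability, which is common but not guaranteed. A more
 * robust lookup would use the standard PCI core helper, e.g.:
 *
 *        int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
 */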

/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        pci_disable_device(oct->pci_dev);

        /* Disable interrupts */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);

        /* Release the interrupt line */
        free_irq(oct->pci_dev->irq, oct);

        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                pci_disable_msi(oct->pci_dev);

        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * liquidio_pcie_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        /* Always return DISCONNECT. There is no support for recovery,
         * only for a clean shutdown.
         */
        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * liquidio_pcie_mmio_enabled - mmio handler
 * @pdev: Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play it safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play it safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
        /* Nothing to be done here. */
}

#define liquidio_suspend NULL
#define liquidio_resume NULL

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
        .mmio_enabled = liquidio_pcie_mmio_enabled,
        .slot_reset = liquidio_pcie_slot_reset,
        .resume = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
        {       /* 68xx */
                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 66xx */
                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 23xx pf */
                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);

static struct pci_driver liquidio_pci_driver = {
        .name = "LiquidIO",
        .id_table = liquidio_pci_tbl,
        .probe = liquidio_probe,
        .remove = liquidio_remove,
        .err_handler = &liquidio_err_handler,    /* For AER */
        .driver.pm = &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
        .sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * liquidio_init_pci - register PCI driver
 */
static int liquidio_init_pci(void)
{
        return pci_register_driver(&liquidio_pci_driver);
}

/**
 * liquidio_deinit_pci - unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
        pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * check_txq_status - Check Tx queue status, and take appropriate action
 * @lio: per-network private data
 * Return: number of sub-queues woken up (0 if none)
 */
static inline int check_txq_status(struct lio *lio)
{
        int numqs = lio->netdev->real_num_tx_queues;
        int ret_val = 0;
        int q, iq;

        /* check each sub-queue state */
        for (q = 0; q < numqs; q++) {
                iq = lio->linfo.txpciq[q %
                        lio->oct_dev->num_iqs].s.q_no;
                if (octnet_iq_is_full(lio->oct_dev, iq))
                        continue;
                if (__netif_subqueue_stopped(lio->netdev, q)) {
                        netif_wake_subqueue(lio->netdev, q);
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
                                                  tx_restart, 1);
                        ret_val++;
                }
        }

        return ret_val;
}

/**
 * print_link_info - Print link information
 * @netdev: network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * octnet_link_status_change - Routine to handle a max-MTU change
 * @work: work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        /* lio->linfo.link.s.mtu always contains the max MTU of the interface.
         * This routine is invoked only when the new max MTU reported by the
         * firmware is less than the interface's current MTU.
         */
        rtnl_lock();
        dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
        rtnl_unlock();
}

/**
 * setup_link_status_change_wq - Sets up the mtu status change work
 * @netdev: network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
                                      union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int changed = (lio->linfo.link.u64 != ls->u64);
        int current_max_mtu = lio->linfo.link.s.mtu;
        struct octeon_device *oct = lio->oct_dev;

        dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
                __func__, lio->linfo.link.u64, ls->u64);
        lio->linfo.link.u64 = ls->u64;

        if ((lio->intf_open) && (changed)) {
                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
                        netif_carrier_on(netdev);
                        wake_txqs(netdev);
                } else {
                        dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
                        netif_carrier_off(netdev);
                        stop_txqs(netdev);
                }
                if (lio->linfo.link.s.mtu != current_max_mtu) {
                        netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
                                   current_max_mtu, lio->linfo.link.s.mtu);
                        netdev->max_mtu = lio->linfo.link.s.mtu;
                }
                if (lio->linfo.link.s.mtu < netdev->mtu) {
                        dev_warn(&oct->pci_dev->dev,
                                 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
                                 netdev->mtu, lio->linfo.link.s.mtu);
                        queue_delayed_work(lio->link_status_wq.wq,
                                           &lio->link_status_wq.wk.work, 0);
                }
        }
}

/**
 * lio_sync_octeon_time - send the host's latest local time to the octeon
 * firmware so that the firmware can correct its time, in case of clock skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_soft_command *sc;
        struct timespec64 ts;
        struct lio_time *lt;
        int ret;

        sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
        if (!sc) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: soft command allocation failed\n");
                return;
        }

        lt = (struct lio_time *)sc->virtdptr;

        /* Get time of the day */
        ktime_get_real_ts64(&ts);
        lt->sec = ts.tv_sec;
        lt->nsec = ts.tv_nsec;
        octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        ret = octeon_send_soft_command(oct, sc);
        if (ret == IQ_SEND_FAILED) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: failed to send soft command\n");
                octeon_free_soft_command(oct, sc);
        } else {
                WRITE_ONCE(sc->caller_is_done, true);
        }

        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
 *
 * @netdev: network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->sync_octeon_time_wq.wq =
                alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
        if (!lio->sync_octeon_time_wq.wq) {
                dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
                          lio_sync_octeon_time);
        lio->sync_octeon_time_wq.wk.ctxptr = lio;
        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

        return 0;
}

/**
 * cleanup_sync_octeon_time_wq - destroy wq
 *
 * @netdev: network device which should send time update to firmware
 *
 * Stop scheduling and destroy the work created to periodically update local
 * time to octeon firmware.
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

        if (time_wq->wq) {
                cancel_delayed_work_sync(&time_wq->wk.work);
                destroy_workqueue(time_wq->wq);
        }
}

static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
        struct octeon_device *other_oct;

        other_oct = lio_get_device(oct->octeon_id + 1);

        if (other_oct && other_oct->pci_dev) {
                int oct_busnum, other_oct_busnum;

                oct_busnum = oct->pci_dev->bus->number;
                other_oct_busnum = other_oct->pci_dev->bus->number;

                if (oct_busnum == other_oct_busnum) {
                        int oct_slot, other_oct_slot;

                        oct_slot = PCI_SLOT(oct->pci_dev->devfn);
                        other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

                        if (oct_slot == other_oct_slot)
                                return other_oct;
                }
        }

        return NULL;
}

static void disable_all_vf_links(struct octeon_device *oct)
{
        struct net_device *netdev;
        int max_vfs, vf, i;

        if (!oct)
                return;

        max_vfs = oct->sriov_info.max_vfs;

        for (i = 0; i < oct->ifcount; i++) {
                netdev = oct->props[i].netdev;
                if (!netdev)
                        continue;

                for (vf = 0; vf < max_vfs; vf++)
                        liquidio_set_vf_link_state(netdev, vf,
                                                   IFLA_VF_LINK_STATE_DISABLE);
        }
}

static int liquidio_watchdog(void *param)
{
        bool err_msg_was_printed[LIO_MAX_CORES];
        u16 mask_of_crashed_or_stuck_cores = 0;
        bool all_vf_links_are_disabled = false;
        struct octeon_device *oct = param;
        struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
        long refcount, vfs_referencing_pf;
        u64 vfs_mask1, vfs_mask2;
#endif
        int core;

        memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

        while (!kthread_should_stop()) {
                /* sleep for a couple of seconds so that we don't hog the CPU */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(2000));

                mask_of_crashed_or_stuck_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

                if (!mask_of_crashed_or_stuck_cores)
                        continue;

                WRITE_ONCE(oct->cores_crashed, true);
                other_oct = get_other_octeon_device(oct);
                if (other_oct)
                        WRITE_ONCE(other_oct->cores_crashed, true);

                for (core = 0; core < LIO_MAX_CORES; core++) {
                        bool core_crashed_or_got_stuck;

                        core_crashed_or_got_stuck =
                            (mask_of_crashed_or_stuck_cores
                             >> core) & 1;

                        if (core_crashed_or_got_stuck &&
                            !err_msg_was_printed[core]) {
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
                                        core);
                                err_msg_was_printed[core] = true;
                        }
                }

                if (all_vf_links_are_disabled)
                        continue;

                disable_all_vf_links(oct);
                disable_all_vf_links(other_oct);
                all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
                vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
                vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

                vfs_referencing_pf = hweight64(vfs_mask1);
                vfs_referencing_pf += hweight64(vfs_mask2);

                refcount = module_refcount(THIS_MODULE);
                if (refcount >= vfs_referencing_pf) {
                        while (vfs_referencing_pf) {
                                module_put(THIS_MODULE);
                                vfs_referencing_pf--;
                        }
                }
#endif
        }

        return 0;
}

/**
 * liquidio_probe - PCI probe handler
 * @pdev: PCI device structure
 * @ent: unused
 */
static int
liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
        struct octeon_device *oct_dev = NULL;
        struct handshake *hs;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));
        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }

        if (pdev->device == OCTEON_CN23XX_PF_VID)
                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        /* Enable PTP for 6XXX Device */
        if (((pdev->device == OCTEON_CN66XX) ||
             (pdev->device == OCTEON_CN68XX)))
                oct_dev->ptp_enable = true;
        else
                oct_dev->ptp_enable = false;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = (void *)pdev;

        oct_dev->subsystem_id = pdev->subsystem_vendor |
                (pdev->subsystem_device << 16);

        hs = &handshake[oct_dev->octeon_id];
        init_completion(&hs->init);
        init_completion(&hs->started);
        hs->pci_dev = pdev;

        if (oct_dev->octeon_id == 0)
                /* first LiquidIO NIC is detected */
                complete(&first_stage);

        if (octeon_device_init(oct_dev)) {
                complete(&hs->init);
                liquidio_remove(pdev);
                return -ENOMEM;
        }

        if (OCTEON_CN23XX_PF(oct_dev)) {
                u8 bus, device, function;

                if (atomic_read(oct_dev->adapter_refcount) == 1) {
                        /* Each NIC gets one watchdog kernel thread. The first
                         * PF (of each NIC) that gets pci_driver->probe()'d
                         * creates that thread.
                         */
                        bus = pdev->bus->number;
                        device = PCI_SLOT(pdev->devfn);
                        function = PCI_FUNC(pdev->devfn);
                        oct_dev->watchdog_task = kthread_create(
                            liquidio_watchdog, oct_dev,
                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
                        if (!IS_ERR(oct_dev->watchdog_task)) {
                                wake_up_process(oct_dev->watchdog_task);
                        } else {
                                oct_dev->watchdog_task = NULL;
                                dev_err(&oct_dev->pci_dev->dev,
                                        "failed to create kernel_thread\n");
                                liquidio_remove(pdev);
                                return -1;
                        }
                }
        }

        oct_dev->rx_pause = 1;
        oct_dev->tx_pause = 1;

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

static bool fw_type_is_auto(void)
{
        return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
                       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * octeon_pci_flr - PCI FLR for each Octeon device.
 * @oct: octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
        int rc;

        pci_save_state(oct->pci_dev);

        pci_cfg_access_lock(oct->pci_dev);

        /* Quiesce the device completely */
        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
                              PCI_COMMAND_INTX_DISABLE);

        rc = __pci_reset_function_locked(oct->pci_dev);

        if (rc != 0)
                dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
                        rc, oct->pf_num);

        pci_cfg_access_unlock(oct->pci_dev);

        pci_restore_state(oct->pci_dev);
}

/**
 * octeon_destroy_resources - Destroy resources associated with octeon device
 * @oct: octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        int i, refcount;
        struct msix_entry *msix_entries;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        struct handshake *hs;

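        /* Teardown intentionally mirrors init: the cases below fall through,
         * so dispatching on the device's current state unwinds every earlier
         * initialization stage in reverse order.
         */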
        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:

                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                fallthrough;
        case OCT_DEV_HOST_OK:

        case OCT_DEV_CONSOLE_INIT_DONE:
                /* Remove any consoles */
                octeon_remove_consoles(oct);

                fallthrough;
        case OCT_DEV_IO_QUEUES_DONE:
                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

                /* Force all requests waiting to be fetched by OCTEON to
                 * complete.
                 */
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        struct octeon_instr_queue *iq;

                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        iq = oct->instr_queue[i];

                        if (atomic_read(&iq->instr_pending)) {
                                spin_lock_bh(&iq->lock);
                                iq->fill_cnt = 0;
                                iq->octeon_read_index = iq->host_write_index;
                                iq->stats.instr_processed +=
                                        atomic_read(&iq->instr_pending);
                                lio_process_iq_request_list(oct, iq, 0);
                                spin_unlock_bh(&iq->lock);
                        }
                }

                lio_process_ordered_list(oct, 1);
                octeon_free_sc_done_list(oct);
                octeon_free_sc_zombie_list(oct);

                fallthrough;
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
                                if (oct->ioq_vector[i].vector) {
                                        /* clear the affinity_cpumask */
                                        irq_set_affinity_hint(
                                            msix_entries[i].vector,
                                            NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                        oct->ioq_vector[i].vector = 0;
                                }
                        }
                        /* non-iov vector's argument is oct struct */
                        free_irq(msix_entries[i].vector, oct);

                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                } else {
                        /* Release the interrupt line */
                        free_irq(oct->pci_dev->irq, oct);

                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                }

                kfree(oct->irq_name_storage);
                oct->irq_name_storage = NULL;

                fallthrough;
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        octeon_free_ioq_vector(oct);

                fallthrough;
        case OCT_DEV_MBOX_SETUP_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        oct->fn_list.free_mbox(oct);

                fallthrough;
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                /* Wait for any pending operations */
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* Force any pending handshakes to complete */
                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
                        hs = &handshake[i];

                        if (hs->pci_dev) {
                                handshake[oct->octeon_id].init_ok = 0;
                                complete(&handshake[oct->octeon_id].init);
                                handshake[oct->octeon_id].started_ok = 0;
                                complete(&handshake[oct->octeon_id].started);
                        }
                }

                fallthrough;
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                fallthrough;
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }
#ifdef CONFIG_PCI_IOV
                if (oct->sriov_info.sriov_enabled)
                        pci_disable_sriov(oct->pci_dev);
#endif
                fallthrough;
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                fallthrough;
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                fallthrough;
        case OCT_DEV_PCI_MAP_DONE:
                refcount = octeon_deregister_device(oct);

                /* Soft reset the octeon device before exiting.
                 * However, if fw was loaded from card (i.e. autoboot),
                 * perform an FLR instead.
                 * Implementation note: only soft-reset the device
                 * if it is a CN6XXX OR the LAST CN23XX device.
                 */
                if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
                        octeon_pci_flr(oct);
                else if (OCTEON_CN6XXX(oct) || !refcount)
                        oct->fn_list.soft_reset(oct);

                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                fallthrough;
        case OCT_DEV_PCI_ENABLE_DONE:
                pci_clear_master(oct->pci_dev);
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                fallthrough;
        case OCT_DEV_BEGIN_STATE:
                /* Nothing to be done here either */
                break;
        } /* end switch (oct->status) */

        tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop
 */
static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return 0;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, 0);
        if (!sc) {
                netif_info(lio, rx_err, lio->netdev,
                           "Failed to allocate octeon_soft_command struct\n");
                return -ENOMEM;
        }

        ncmd = (union octnet_cmd *)sc->virtdptr;

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
                octeon_free_soft_command(oct, sc);
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                retval = wait_for_sc_completion_timeout(oct, sc, 0);
                if (retval)
                        return retval;

                oct->props[lio->ifidx].rx_on = start_stop;
                WRITE_ONCE(sc->caller_is_done, true);
        }

        return retval;
}

/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
        struct net_device *netdev = oct->props[ifidx].netdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
        struct lio *lio;

        if (!netdev) {
                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
                        __func__, ifidx);
                return;
        }

        lio = GET_LIO(netdev);

        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                if (OCTEON_CN23XX_PF(oct))
                        oct->droq[0]->ops.poll_mode = 0;
        }

        /* Delete NAPI */
        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                netif_napi_del(napi);

        tasklet_enable(&oct_priv->droq_tasklet);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);

        cleanup_sync_octeon_time_wq(netdev);
        cleanup_link_status_change_wq(netdev);

        cleanup_rx_oom_poll_fn(netdev);

        lio_delete_glists(lio);

        free_netdev(netdev);

        oct->props[ifidx].gmxport = -1;

        oct->props[ifidx].netdev = NULL;
}

/**
 * liquidio_stop_nic_module - Stop complete NIC functionality
 * @oct: octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
        int i, j;
        struct lio *lio;

        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
        if (!oct->ifcount) {
                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
                return 1;
        }

        spin_lock_bh(&oct->cmd_resp_wqlock);
        oct->cmd_resp_state = OCT_DRV_OFFLINE;
        spin_unlock_bh(&oct->cmd_resp_wqlock);

        lio_vf_rep_destroy(oct);

        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < oct->num_oqs; j++)
                        octeon_unregister_droq_ops(oct,
                                                   lio->linfo.rxpciq[j].s.q_no);
        }

        for (i = 0; i < oct->ifcount; i++)
                liquidio_destroy_nic_device(oct, i);

        if (oct->devlink) {
                devlink_unregister(oct->devlink);
                devlink_free(oct->devlink);
                oct->devlink = NULL;
        }

        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
        return 0;
}

/**
 * liquidio_remove - Cleans up resources at unload time
 * @pdev: PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
        struct octeon_device *oct_dev = pci_get_drvdata(pdev);

        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

        if (oct_dev->watchdog_task)
                kthread_stop(oct_dev->watchdog_task);

        if (!oct_dev->octeon_id &&
            oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
                lio_vf_rep_modexit();

        if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
                liquidio_stop_nic_module(oct_dev);

        /* Reset the octeon device and cleanup all memory allocated for
         * the octeon device by driver.
         */
        octeon_destroy_resources(oct_dev);

        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

        /* This octeon device has been removed. Update the global
         * data structure to reflect this. Free the device structure.
         */
        octeon_free_device_mem(oct_dev);
}

/**
 * octeon_chip_specific_setup - Identify the Octeon device and map the BAR address space
 * @oct: octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
        u32 dev_id, rev_id;
        int ret = 1;

        pci_read_config_dword(oct->pci_dev, 0, &dev_id);
        pci_read_config_dword(oct->pci_dev, 8, &rev_id);
        oct->rev_id = rev_id & 0xff;

        switch (dev_id) {
        case OCTEON_CN68XX_PCIID:
                oct->chip_id = OCTEON_CN68XX;
                ret = lio_setup_cn68xx_octeon_device(oct);
                break;

        case OCTEON_CN66XX_PCIID:
                oct->chip_id = OCTEON_CN66XX;
                ret = lio_setup_cn66xx_octeon_device(oct);
                break;

        case OCTEON_CN23XX_PCIID_PF:
                oct->chip_id = OCTEON_CN23XX_PF_VID;
                ret = setup_cn23xx_octeon_pf_device(oct);
                if (ret)
                        break;
#ifdef CONFIG_PCI_IOV
                if (!ret)
                        pci_sriov_set_totalvfs(oct->pci_dev,
                                               oct->sriov_info.max_vfs);
#endif
                break;

        default:
                dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
                        dev_id);
        }

        return ret;
}

/**
 * octeon_pci_os_setup - PCI initialization for each Octeon device.
 * @oct: octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
        /* setup PCI stuff first */
        if (pci_enable_device(oct->pci_dev)) {
                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
                return 1;
        }

        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
                pci_disable_device(oct->pci_dev);
                return 1;
        }

        /* Enable PCI DMA Master. */
        pci_set_master(oct->pci_dev);

        return 0;
}

/**
 * free_netbuf - Unmap and free network buffer
 * @buf: buffer
 */
static void free_netbuf(void *buf)
{
        struct sk_buff *skb;
        struct octnet_buf_free_info *finfo;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
                         DMA_TO_DEVICE);

        tx_buffer_free(skb);
}

/**
 * free_netsgbuf - Unmap and free gather buffer
 * @buf: buffer
 */
static void free_netsgbuf(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct lio *lio;
        struct octnic_gather *g;
        int i, frags, iq;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }

        iq = skb_iq(lio->oct_dev, skb);
        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        tx_buffer_free(skb);
}

/**
 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 * @buf: buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
        struct octeon_soft_command *sc;
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct lio *lio;
        struct octnic_gather *g;
        int i, frags, iq;

        sc = (struct octeon_soft_command *)buf;
        skb = (struct sk_buff *)sc->callback_arg;
        finfo = (struct octnet_buf_free_info *)&skb->cb;

        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }

        iq = skb_iq(lio->oct_dev, skb);

        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        /* Don't free the skb yet */
}

/**
 * liquidio_ptp_adjfreq - Adjust ptp frequency
 * @ptp: PTP clock info
 * @ppb: how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        u64 comp, delta;
        unsigned long flags;
        bool neg_adj = false;

        if (ppb < 0) {
                neg_adj = true;
                ppb = -ppb;
        }

        /* The hardware adds the clock compensation value to the
         * PTP clock on every coprocessor clock cycle, so we
         * compute the delta in terms of coprocessor clocks.
         */
        delta = (u64)ppb << 32;
        do_div(delta, oct->coproc_clock_rate);

        spin_lock_irqsave(&lio->ptp_lock, flags);
        comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
        if (neg_adj)
                comp -= delta;
        else
                comp += delta;
        lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
        spin_unlock_irqrestore(&lio->ptp_lock, flags);

        return 0;
}
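
/* Worked example with illustrative numbers: for ppb = 1000 and a 1 GHz
 * coprocessor clock, delta = (1000 << 32) / 1000000000 ~= 4295, i.e. the
 * per-cycle compensation grows by about 1000 ns per second of coprocessor
 * time. Reading CN6XXX_MIO_PTP_CLOCK_COMP as a 32.32 fixed-point
 * ns-per-cycle value is an interpretation inferred from the << 32 shift
 * above, not something this file states explicitly.
 */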

/**
 * liquidio_ptp_adjtime - Adjust ptp time
 * @ptp: PTP clock info
 * @delta: how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        unsigned long flags;
        struct lio *lio = container_of(ptp, struct lio, ptp_info);

        spin_lock_irqsave(&lio->ptp_lock, flags);
        lio->ptp_adjust += delta;
        spin_unlock_irqrestore(&lio->ptp_lock, flags);

        return 0;
}

/**
 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
                                struct timespec64 *ts)
{
        u64 ns;
        unsigned long flags;
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

        spin_lock_irqsave(&lio->ptp_lock, flags);
        ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
        ns += lio->ptp_adjust;
        spin_unlock_irqrestore(&lio->ptp_lock, flags);

        *ts = ns_to_timespec64(ns);

        return 0;
}

/**
 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
                                const struct timespec64 *ts)
{
        u64 ns;
        unsigned long flags;
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

        ns = timespec64_to_ns(ts);

        spin_lock_irqsave(&lio->ptp_lock, flags);
        lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
        lio->ptp_adjust = 0;
        spin_unlock_irqrestore(&lio->ptp_lock, flags);

        return 0;
}

/**
 * liquidio_ptp_enable - Enable or disable an ancillary PTP feature
 * @ptp: PTP clock info
 * @rq: the requested feature to change
 * @on: whether to enable or disable the feature
 *
 * Not supported by this driver; always returns -EOPNOTSUPP.
 */
static int
liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
                    struct ptp_clock_request __maybe_unused *rq,
                    int __maybe_unused on)
{
        return -EOPNOTSUPP;
}

/**
 * oct_ptp_open - Open PTP clock source
 * @netdev: network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

        spin_lock_init(&lio->ptp_lock);

        snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
        lio->ptp_info.owner = THIS_MODULE;
        lio->ptp_info.max_adj = 250000000;
        lio->ptp_info.n_alarm = 0;
        lio->ptp_info.n_ext_ts = 0;
        lio->ptp_info.n_per_out = 0;
        lio->ptp_info.pps = 0;
        lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
        lio->ptp_info.adjtime = liquidio_ptp_adjtime;
        lio->ptp_info.gettime64 = liquidio_ptp_gettime;
        lio->ptp_info.settime64 = liquidio_ptp_settime;
        lio->ptp_info.enable = liquidio_ptp_enable;

        lio->ptp_adjust = 0;

        lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
                                            &oct->pci_dev->dev);

        if (IS_ERR(lio->ptp_clock))
                lio->ptp_clock = NULL;
}

/**
 * liquidio_ptp_init - Init PTP clock
 * @oct: octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
        u64 clock_comp, cfg;

        clock_comp = (u64)NSEC_PER_SEC << 32;
        do_div(clock_comp, oct->coproc_clock_rate);
        lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

        /* Enable */
        cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
        lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
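
/* The compensation value programmed above is the nominal nanoseconds per
 * coprocessor cycle:
 *
 *        clock_comp = (NSEC_PER_SEC << 32) / coproc_clock_rate
 *
 * e.g. a 1 GHz coprocessor clock yields 0x100000000, i.e. exactly 1.0 ns per
 * cycle if the register is read as 32.32 fixed point (an assumption
 * consistent with liquidio_ptp_adjfreq() above).
 */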

/**
 * load_firmware - Load firmware to device
 * @oct: octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
        int ret = 0;
        const struct firmware *fw;
        char fw_name[LIO_MAX_FW_FILENAME_LEN];
        char *tmp_fw_type;

        if (fw_type_is_auto()) {
                tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
                strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
        } else {
                tmp_fw_type = fw_type;
        }

        sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
                octeon_get_conf(oct)->card_name, tmp_fw_type,
                LIO_FW_NAME_SUFFIX);

        ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
        if (ret) {
                dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
                        fw_name);
                release_firmware(fw);
                return ret;
        }

        ret = octeon_download_firmware(oct, fw->data, fw->size);

        release_firmware(fw);

        return ret;
}
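
/* Assuming the usual values of the name macros (LIO_FW_DIR "liquidio/",
 * LIO_FW_BASE_NAME "lio_", LIO_FW_NAME_SUFFIX ".bin"), a 23xx card with
 * fw_type "nic" resolves to a request for "liquidio/lio_23xx_nic.bin",
 * matching the names declared in the MODULE_FIRMWARE() lines at the top of
 * this file.
 */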

/**
 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
 * @work: work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
                return;

        check_txq_status(lio);
        queue_delayed_work(lio->txq_status_wq.wq,
                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * setup_tx_poll_fn - Sets up the txq poll check
 * @netdev: network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->txq_status_wq.wq = alloc_workqueue("txq-status",
                                                WQ_MEM_RECLAIM, 0);
        if (!lio->txq_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
                          octnet_poll_check_txq_status);
        lio->txq_status_wq.wk.ctxptr = lio;
        queue_delayed_work(lio->txq_status_wq.wq,
                           &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
        return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->txq_status_wq.wq) {
                cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
                destroy_workqueue(lio->txq_status_wq.wq);
        }
}

/**
 * liquidio_open - Net device open for LiquidIO
 * @netdev: network device
 */
static int liquidio_open(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
        int ret = 0;

        if (oct->props[lio->ifidx].napi_enabled == 0) {
                tasklet_disable(&oct_priv->droq_tasklet);

                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_enable(napi);

                oct->props[lio->ifidx].napi_enabled = 1;

                if (OCTEON_CN23XX_PF(oct))
                        oct->droq[0]->ops.poll_mode = 1;
        }

        if (oct->ptp_enable)
                oct_ptp_open(netdev);

        ifstate_set(lio, LIO_IFSTATE_RUNNING);

        if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
                ret = setup_tx_poll_fn(netdev);
                if (ret)
                        goto err_poll;
        }

        netif_tx_start_all_queues(netdev);

        /* Ready for link status updates */
        lio->intf_open = 1;

        netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

        /* tell Octeon to start forwarding packets to host */
        ret = send_rx_ctrl_cmd(lio, 1);
        if (ret)
                goto err_rx_ctrl;

        /* start periodical statistics fetch */
        INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
        lio->stats_wk.ctxptr = lio;
        schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
                                        (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

        dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
                 netdev->name);

        return 0;

err_rx_ctrl:
        if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
                cleanup_tx_poll_fn(netdev);
err_poll:
        if (lio->ptp_clock) {
                ptp_clock_unregister(lio->ptp_clock);
                lio->ptp_clock = NULL;
        }

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                if (OCTEON_CN23XX_PF(oct))
                        oct->droq[0]->ops.poll_mode = 0;
        }

        return ret;
}
1851
1852 /**
1853 * liquidio_stop - Net device stop for LiquidIO
1854 * @netdev: network device
1855 */
1856 static int liquidio_stop(struct net_device *netdev)
1857 {
1858 struct lio *lio = GET_LIO(netdev);
1859 struct octeon_device *oct = lio->oct_dev;
1860 struct octeon_device_priv *oct_priv =
1861 (struct octeon_device_priv *)oct->priv;
1862 struct napi_struct *napi, *n;
1863 int ret = 0;
1864
1865 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
1866
1867 /* Stop any link updates */
1868 lio->intf_open = 0;
1869
1870 stop_txqs(netdev);
1871
1872 /* Inform that netif carrier is down */
1873 netif_carrier_off(netdev);
1874 netif_tx_disable(netdev);
1875
1876 lio->linfo.link.s.link_up = 0;
1877 lio->link_changes++;
1878
1879 /* Tell Octeon that nic interface is down. */
1880 ret = send_rx_ctrl_cmd(lio, 0);
1881 if (ret)
1882 return ret;
1883
1884 if (OCTEON_CN23XX_PF(oct)) {
1885 if (!oct->msix_on)
1886 cleanup_tx_poll_fn(netdev);
1887 } else {
1888 cleanup_tx_poll_fn(netdev);
1889 }
1890
1891 cancel_delayed_work_sync(&lio->stats_wk.work);
1892
1893 if (lio->ptp_clock) {
1894 ptp_clock_unregister(lio->ptp_clock);
1895 lio->ptp_clock = NULL;
1896 }
1897
1898 /* Wait for any pending Rx descriptors */
1899 if (lio_wait_for_clean_oq(oct))
1900 netif_info(lio, rx_err, lio->netdev,
1901 "Proceeding with stop interface after partial RX desc processing\n");
1902
1903 if (oct->props[lio->ifidx].napi_enabled == 1) {
1904 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1905 napi_disable(napi);
1906
1907 oct->props[lio->ifidx].napi_enabled = 0;
1908
1909 if (OCTEON_CN23XX_PF(oct))
1910 oct->droq[0]->ops.poll_mode = 0;
1911
1912 tasklet_enable(&oct_priv->droq_tasklet);
1913 }
1914
1915 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1916
1917 return ret;
1918 }
1919
1920 /**
1921 * get_new_flags - Converts a mask based on net device flags
1922 * @netdev: network device
1923 *
1924 * This routine generates an octnet_ifflags mask from the net device flags
1925 * received from the OS.
1926 */
1927 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
1928 {
1929 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1930
1931 if (netdev->flags & IFF_PROMISC)
1932 f |= OCTNET_IFFLAG_PROMISC;
1933
1934 if (netdev->flags & IFF_ALLMULTI)
1935 f |= OCTNET_IFFLAG_ALLMULTI;
1936
1937 if (netdev->flags & IFF_MULTICAST) {
1938 f |= OCTNET_IFFLAG_MULTICAST;
1939
1940 /* Accept all multicast addresses if there are more than we
1941 * can handle
1942 */
1943 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1944 f |= OCTNET_IFFLAG_ALLMULTI;
1945 }
1946
1947 if (netdev->flags & IFF_BROADCAST)
1948 f |= OCTNET_IFFLAG_BROADCAST;
1949
1950 return f;
1951 }
1952
1953 /**
1954 * liquidio_set_mcast_list - Net device set_multicast_list
1955 * @netdev: network device
1956 */
1957 static void liquidio_set_mcast_list(struct net_device *netdev)
1958 {
1959 struct lio *lio = GET_LIO(netdev);
1960 struct octeon_device *oct = lio->oct_dev;
1961 struct octnic_ctrl_pkt nctrl;
1962 struct netdev_hw_addr *ha;
1963 u64 *mc;
1964 int ret;
1965 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1966
1967 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1968
1969 /* Create a ctrl pkt command to be sent to core app. */
1970 nctrl.ncmd.u64 = 0;
1971 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1972 nctrl.ncmd.s.param1 = get_new_flags(netdev);
1973 nctrl.ncmd.s.param2 = mc_count;
1974 nctrl.ncmd.s.more = mc_count;
1975 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1976 nctrl.netpndev = (u64)netdev;
1977 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1978
1979 /* copy all the addresses into the udd */
1980 mc = &nctrl.udd[0];
1981 netdev_for_each_mc_addr(ha, netdev) {
1982 *mc = 0;
1983 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
1984 /* no need to swap bytes */
1985
1986 if (++mc > &nctrl.udd[mc_count])
1987 break;
1988 }
1989
1990 /* This callback is invoked in atomic context (with the address-list
1991 * lock held), so we cannot sleep waiting for the response.
1992 */
1993
1994 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1995 if (ret) {
1996 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1997 ret);
1998 }
1999 }
2000
2001 /**
2002 * liquidio_set_mac - Net device set_mac_address
2003 * @netdev: network device
2004 * @p: pointer to sockaddr
2005 */
2006 static int liquidio_set_mac(struct net_device *netdev, void *p)
2007 {
2008 int ret = 0;
2009 struct lio *lio = GET_LIO(netdev);
2010 struct octeon_device *oct = lio->oct_dev;
2011 struct sockaddr *addr = (struct sockaddr *)p;
2012 struct octnic_ctrl_pkt nctrl;
2013
2014 if (!is_valid_ether_addr(addr->sa_data))
2015 return -EADDRNOTAVAIL;
2016
2017 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2018
2019 nctrl.ncmd.u64 = 0;
2020 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2021 nctrl.ncmd.s.param1 = 0;
2022 nctrl.ncmd.s.more = 1;
2023 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2024 nctrl.netpndev = (u64)netdev;
2025
2026 nctrl.udd[0] = 0;
2027 /* The MAC Address is presented in network byte order. */
2028 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2029
2030 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2031 if (ret < 0) {
2032 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2033 return -ENOMEM;
2034 }
2035
2036 if (nctrl.sc_status) {
2037 dev_err(&oct->pci_dev->dev,
2038 "%s: MAC Address change failed. sc return=%x\n",
2039 __func__, nctrl.sc_status);
2040 return -EIO;
2041 }
2042
2043 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2044 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2045
2046 return 0;
2047 }
2048
2049 static void
2050 liquidio_get_stats64(struct net_device *netdev,
2051 struct rtnl_link_stats64 *lstats)
2052 {
2053 struct lio *lio = GET_LIO(netdev);
2054 struct octeon_device *oct;
2055 u64 pkts = 0, drop = 0, bytes = 0;
2056 struct oct_droq_stats *oq_stats;
2057 struct oct_iq_stats *iq_stats;
2058 int i, iq_no, oq_no;
2059
2060 oct = lio->oct_dev;
2061
2062 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2063 return;
2064
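/* Aggregate Tx counters across every instruction queue owned by this
 * interface; Rx counters are summed the same way further below.
 */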
2065 for (i = 0; i < oct->num_iqs; i++) {
2066 iq_no = lio->linfo.txpciq[i].s.q_no;
2067 iq_stats = &oct->instr_queue[iq_no]->stats;
2068 pkts += iq_stats->tx_done;
2069 drop += iq_stats->tx_dropped;
2070 bytes += iq_stats->tx_tot_bytes;
2071 }
2072
2073 lstats->tx_packets = pkts;
2074 lstats->tx_bytes = bytes;
2075 lstats->tx_dropped = drop;
2076
2077 pkts = 0;
2078 drop = 0;
2079 bytes = 0;
2080
2081 for (i = 0; i < oct->num_oqs; i++) {
2082 oq_no = lio->linfo.rxpciq[i].s.q_no;
2083 oq_stats = &oct->droq[oq_no]->stats;
2084 pkts += oq_stats->rx_pkts_received;
2085 drop += (oq_stats->rx_dropped +
2086 oq_stats->dropped_nodispatch +
2087 oq_stats->dropped_toomany +
2088 oq_stats->dropped_nomem);
2089 bytes += oq_stats->rx_bytes_received;
2090 }
2091
2092 lstats->rx_bytes = bytes;
2093 lstats->rx_packets = pkts;
2094 lstats->rx_dropped = drop;
2095
2096 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
2097 lstats->collisions = oct->link_stats.fromhost.total_collisions;
2098
2099 /* detailed rx_errors: */
2100 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
2101 /* received packets with CRC error */
2102 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
2103 /* received frame alignment errors */
2104 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
2105 /* receiver FIFO overruns */
2106 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
2107
2108 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
2109 lstats->rx_frame_errors + lstats->rx_fifo_errors;
2110
2111 /* detailed tx_errors */
2112 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
2113 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
2114 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
2115
2116 lstats->tx_errors = lstats->tx_aborted_errors +
2117 lstats->tx_carrier_errors +
2118 lstats->tx_fifo_errors;
2119 }
2120
2121 /**
2122 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
2123 * @netdev: network device
2124 * @ifr: interface request
2125 */
2126 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2127 {
2128 struct hwtstamp_config conf;
2129 struct lio *lio = GET_LIO(netdev);
2130
2131 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2132 return -EFAULT;
2133
2134 if (conf.flags)
2135 return -EINVAL;
2136
2137 switch (conf.tx_type) {
2138 case HWTSTAMP_TX_ON:
2139 case HWTSTAMP_TX_OFF:
2140 break;
2141 default:
2142 return -ERANGE;
2143 }
2144
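/* Rx timestamping is all-or-nothing here: every supported PTP filter
 * request below is coerced to HWTSTAMP_FILTER_ALL.
 */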
2145 switch (conf.rx_filter) {
2146 case HWTSTAMP_FILTER_NONE:
2147 break;
2148 case HWTSTAMP_FILTER_ALL:
2149 case HWTSTAMP_FILTER_SOME:
2150 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2151 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2152 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2153 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2154 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2155 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2156 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2157 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2158 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2159 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2160 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2161 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2162 case HWTSTAMP_FILTER_NTP_ALL:
2163 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2164 break;
2165 default:
2166 return -ERANGE;
2167 }
2168
2169 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2170 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2171
2172 else
2173 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2174
2175 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2176 }
2177
2178 /**
2179 * liquidio_ioctl - ioctl handler
2180 * @netdev: network device
2181 * @ifr: interface request
2182 * @cmd: command
2183 */
2184 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2185 {
2186 struct lio *lio = GET_LIO(netdev);
2187
2188 switch (cmd) {
2189 case SIOCSHWTSTAMP:
2190 if (lio->oct_dev->ptp_enable)
2191 return hwtstamp_ioctl(netdev, ifr);
2192 fallthrough;
2193 default:
2194 return -EOPNOTSUPP;
2195 }
2196 }
2197
2198 /**
2199 * handle_timestamp - handle a Tx timestamp response
2200 * @oct: octeon device
2201 * @status: response status
2202 * @buf: pointer to skb
2203 */
2204 static void handle_timestamp(struct octeon_device *oct,
2205 u32 status,
2206 void *buf)
2207 {
2208 struct octnet_buf_free_info *finfo;
2209 struct octeon_soft_command *sc;
2210 struct oct_timestamp_resp *resp;
2211 struct lio *lio;
2212 struct sk_buff *skb = (struct sk_buff *)buf;
2213
2214 finfo = (struct octnet_buf_free_info *)skb->cb;
2215 lio = finfo->lio;
2216 sc = finfo->sc;
2217 oct = lio->oct_dev;
2218 resp = (struct oct_timestamp_resp *)sc->virtrptr;
2219
2220 if (status != OCTEON_REQUEST_DONE) {
2221 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2222 CVM_CAST64(status));
2223 resp->timestamp = 0;
2224 }
2225
2226 octeon_swap_8B_data(&resp->timestamp, 1);
2227
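/* Deliver the Tx timestamp to the stack only if this skb was flagged
 * as awaiting one (SKBTX_IN_PROGRESS is set in liquidio_xmit()).
 */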
2228 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2229 struct skb_shared_hwtstamps ts;
2230 u64 ns = resp->timestamp;
2231
2232 netif_info(lio, tx_done, lio->netdev,
2233 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2234 skb, (unsigned long long)ns);
2235 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2236 skb_tstamp_tx(skb, &ts);
2237 }
2238
2239 octeon_free_soft_command(oct, sc);
2240 tx_buffer_free(skb);
2241 }
2242
2243 /**
2244 * send_nic_timestamp_pkt - Send a data packet that will be timestamped
2245 * @oct: octeon device
2246 * @ndata: pointer to network data
2247 * @finfo: pointer to private network data
2248 * @xmit_more: more is coming
2249 */
2250 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2251 struct octnic_data_pkt *ndata,
2252 struct octnet_buf_free_info *finfo,
2253 int xmit_more)
2254 {
2255 int retval;
2256 struct octeon_soft_command *sc;
2257 struct lio *lio;
2258 int ring_doorbell;
2259 u32 len;
2260
2261 lio = finfo->lio;
2262
2263 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2264 sizeof(struct oct_timestamp_resp));
2265 finfo->sc = sc;
2266
2267 if (!sc) {
2268 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2269 return IQ_SEND_FAILED;
2270 }
2271
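/* A timestamped packet expects a firmware response carrying the
 * timestamp, so promote the no-response request type to its
 * response-carrying variant.
 */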
2272 if (ndata->reqtype == REQTYPE_NORESP_NET)
2273 ndata->reqtype = REQTYPE_RESP_NET;
2274 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2275 ndata->reqtype = REQTYPE_RESP_NET_SG;
2276
2277 sc->callback = handle_timestamp;
2278 sc->callback_arg = finfo->skb;
2279 sc->iq_no = ndata->q_no;
2280
2281 if (OCTEON_CN23XX_PF(oct))
2282 len = (u32)((struct octeon_instr_ih3 *)
2283 (&sc->cmd.cmd3.ih3))->dlengsz;
2284 else
2285 len = (u32)((struct octeon_instr_ih2 *)
2286 (&sc->cmd.cmd2.ih2))->dlengsz;
2287
2288 ring_doorbell = !xmit_more;
2289
2290 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2291 sc, len, ndata->reqtype);
2292
2293 if (retval == IQ_SEND_FAILED) {
2294 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2295 retval);
2296 octeon_free_soft_command(oct, sc);
2297 } else {
2298 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2299 }
2300
2301 return retval;
2302 }
2303
2304 /**
2305 * liquidio_xmit - Transmit network packets to the Octeon interface
2306 * @skb: skbuff struct to be passed to network layer.
2307 * @netdev: pointer to network device
2308 *
2309 * Return: whether the packet was transmitted to the device okay or not
2310 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2311 */
2312 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2313 {
2314 struct lio *lio;
2315 struct octnet_buf_free_info *finfo;
2316 union octnic_cmd_setup cmdsetup;
2317 struct octnic_data_pkt ndata;
2318 struct octeon_device *oct;
2319 struct oct_iq_stats *stats;
2320 struct octeon_instr_irh *irh;
2321 union tx_info *tx_info;
2322 int status = 0;
2323 int q_idx = 0, iq_no = 0;
2324 int j, xmit_more = 0;
2325 u64 dptr = 0;
2326 u32 tag = 0;
2327
2328 lio = GET_LIO(netdev);
2329 oct = lio->oct_dev;
2330
2331 q_idx = skb_iq(oct, skb);
2332 tag = q_idx;
2333 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2334
2335 stats = &oct->instr_queue[iq_no]->stats;
2336
2337 /* Check for all conditions in which the current packet cannot be
2338 * transmitted.
2339 */
2340 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2341 (!lio->linfo.link.s.link_up) ||
2342 (skb->len <= 0)) {
2343 netif_info(lio, tx_err, lio->netdev,
2344 "Transmit failed link_status : %d\n",
2345 lio->linfo.link.s.link_up);
2346 goto lio_xmit_failed;
2347 }
2348
2349 /* Use space in skb->cb to store info used to unmap and
2350 * free the buffers.
2351 */
2352 finfo = (struct octnet_buf_free_info *)skb->cb;
2353 finfo->lio = lio;
2354 finfo->skb = skb;
2355 finfo->sc = NULL;
2356
2357 /* Prepare the attributes for the data to be passed to OSI. */
2358 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2359
2360 ndata.buf = (void *)finfo;
2361
2362 ndata.q_no = iq_no;
2363
2364 if (octnet_iq_is_full(oct, ndata.q_no)) {
2365 /* defer sending if queue is full */
2366 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2367 ndata.q_no);
2368 stats->tx_iq_busy++;
2369 return NETDEV_TX_BUSY;
2370 }
2371
2372 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2373 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2374 */
2375
2376 ndata.datasize = skb->len;
2377
2378 cmdsetup.u64 = 0;
2379 cmdsetup.s.iq_no = iq_no;
2380
2381 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2382 if (skb->encapsulation) {
2383 cmdsetup.s.tnl_csum = 1;
2384 stats->tx_vxlan++;
2385 } else {
2386 cmdsetup.s.transport_csum = 1;
2387 }
2388 }
2389 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2390 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2391 cmdsetup.s.timestamp = 1;
2392 }
2393
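/* Linear skbs are mapped with a single DMA mapping; fragmented skbs
 * take the scatter-gather path in the else branch below.
 */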
2394 if (skb_shinfo(skb)->nr_frags == 0) {
2395 cmdsetup.s.u.datasize = skb->len;
2396 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2397
2398 /* Map the linear skb data for transmit DMA */
2399 dptr = dma_map_single(&oct->pci_dev->dev,
2400 skb->data,
2401 skb->len,
2402 DMA_TO_DEVICE);
2403 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2404 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2405 __func__);
2406 stats->tx_dmamap_fail++;
2407 return NETDEV_TX_BUSY;
2408 }
2409
2410 if (OCTEON_CN23XX_PF(oct))
2411 ndata.cmd.cmd3.dptr = dptr;
2412 else
2413 ndata.cmd.cmd2.dptr = dptr;
2414 finfo->dptr = dptr;
2415 ndata.reqtype = REQTYPE_NORESP_NET;
2416
2417 } else {
2418 int i, frags;
2419 skb_frag_t *frag;
2420 struct octnic_gather *g;
2421
2422 spin_lock(&lio->glist_lock[q_idx]);
2423 g = (struct octnic_gather *)
2424 lio_list_delete_head(&lio->glist[q_idx]);
2425 spin_unlock(&lio->glist_lock[q_idx]);
2426
2427 if (!g) {
2428 netif_info(lio, tx_err, lio->netdev,
2429 "Transmit scatter gather: glist null!\n");
2430 goto lio_xmit_failed;
2431 }
2432
2433 cmdsetup.s.gather = 1;
2434 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2435 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2436
2437 memset(g->sg, 0, g->sg_size);
2438
2439 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2440 skb->data,
2441 (skb->len - skb->data_len),
2442 DMA_TO_DEVICE);
2443 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2444 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2445 __func__);
2446 stats->tx_dmamap_fail++;
2447 return NETDEV_TX_BUSY;
2448 }
2449 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2450
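/* Each gather-list entry holds four buffer pointers, so the i-th
 * pointer overall lives at entry (i >> 2), slot (i & 3).
 */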
2451 frags = skb_shinfo(skb)->nr_frags;
2452 i = 1;
2453 while (frags--) {
2454 frag = &skb_shinfo(skb)->frags[i - 1];
2455
2456 g->sg[(i >> 2)].ptr[(i & 3)] =
2457 skb_frag_dma_map(&oct->pci_dev->dev,
2458 frag, 0, skb_frag_size(frag),
2459 DMA_TO_DEVICE);
2460
2461 if (dma_mapping_error(&oct->pci_dev->dev,
2462 g->sg[i >> 2].ptr[i & 3])) {
2463 dma_unmap_single(&oct->pci_dev->dev,
2464 g->sg[0].ptr[0],
2465 skb->len - skb->data_len,
2466 DMA_TO_DEVICE);
2467 for (j = 1; j < i; j++) {
2468 frag = &skb_shinfo(skb)->frags[j - 1];
2469 dma_unmap_page(&oct->pci_dev->dev,
2470 g->sg[j >> 2].ptr[j & 3],
2471 skb_frag_size(frag),
2472 DMA_TO_DEVICE);
2473 }
2474 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2475 __func__);
2476 return NETDEV_TX_BUSY;
2477 }
2478
2479 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2480 (i & 3));
2481 i++;
2482 }
2483
2484 dptr = g->sg_dma_ptr;
2485
2486 if (OCTEON_CN23XX_PF(oct))
2487 ndata.cmd.cmd3.dptr = dptr;
2488 else
2489 ndata.cmd.cmd2.dptr = dptr;
2490 finfo->dptr = dptr;
2491 finfo->g = g;
2492
2493 ndata.reqtype = REQTYPE_NORESP_NET_SG;
2494 }
2495
2496 if (OCTEON_CN23XX_PF(oct)) {
2497 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2498 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2499 } else {
2500 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2501 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2502 }
2503
2504 if (skb_shinfo(skb)->gso_size) {
2505 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2506 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2507 stats->tx_gso++;
2508 }
2509
2510 /* HW insert VLAN tag */
2511 if (skb_vlan_tag_present(skb)) {
2512 irh->priority = skb_vlan_tag_get(skb) >> 13;
2513 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2514 }
2515
2516 xmit_more = netdev_xmit_more();
2517
2518 if (unlikely(cmdsetup.s.timestamp))
2519 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2520 else
2521 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2522 if (status == IQ_SEND_FAILED)
2523 goto lio_xmit_failed;
2524
2525 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2526
2527 if (status == IQ_SEND_STOP)
2528 netif_stop_subqueue(netdev, q_idx);
2529
2530 netif_trans_update(netdev);
2531
2532 if (tx_info->s.gso_segs)
2533 stats->tx_done += tx_info->s.gso_segs;
2534 else
2535 stats->tx_done++;
2536 stats->tx_tot_bytes += ndata.datasize;
2537
2538 return NETDEV_TX_OK;
2539
2540 lio_xmit_failed:
2541 stats->tx_dropped++;
2542 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2543 iq_no, stats->tx_dropped);
2544 if (dptr)
2545 dma_unmap_single(&oct->pci_dev->dev, dptr,
2546 ndata.datasize, DMA_TO_DEVICE);
2547
2548 octeon_ring_doorbell_locked(oct, iq_no);
2549
2550 tx_buffer_free(skb);
2551 return NETDEV_TX_OK;
2552 }
2553
2554 /**
2555 * liquidio_tx_timeout - Network device Tx timeout
2556 * @netdev: pointer to network device
2557 * @txqueue: index of the hung transmit queue
2558 */
2559 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2560 {
2561 struct lio *lio;
2562
2563 lio = GET_LIO(netdev);
2564
2565 netif_info(lio, tx_err, lio->netdev,
2566 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2567 netdev->stats.tx_dropped);
2568 netif_trans_update(netdev);
2569 wake_txqs(netdev);
2570 }
2571
2572 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2573 __be16 proto __attribute__((unused)),
2574 u16 vid)
2575 {
2576 struct lio *lio = GET_LIO(netdev);
2577 struct octeon_device *oct = lio->oct_dev;
2578 struct octnic_ctrl_pkt nctrl;
2579 int ret = 0;
2580
2581 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2582
2583 nctrl.ncmd.u64 = 0;
2584 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2585 nctrl.ncmd.s.param1 = vid;
2586 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2587 nctrl.netpndev = (u64)netdev;
2588 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2589
2590 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2591 if (ret) {
2592 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2593 ret);
2594 if (ret > 0)
2595 ret = -EIO;
2596 }
2597
2598 return ret;
2599 }
2600
2601 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2602 __be16 proto __attribute__((unused)),
2603 u16 vid)
2604 {
2605 struct lio *lio = GET_LIO(netdev);
2606 struct octeon_device *oct = lio->oct_dev;
2607 struct octnic_ctrl_pkt nctrl;
2608 int ret = 0;
2609
2610 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2611
2612 nctrl.ncmd.u64 = 0;
2613 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2614 nctrl.ncmd.s.param1 = vid;
2615 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2616 nctrl.netpndev = (u64)netdev;
2617 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2618
2619 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2620 if (ret) {
2621 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2622 ret);
2623 if (ret > 0)
2624 ret = -EIO;
2625 }
2626 return ret;
2627 }
2628
2629 /**
2630 * liquidio_set_rxcsum_command - Send command to enable/disable RX checksum offload
2631 * @netdev: pointer to network device
2632 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
2633 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
2634 * Return: SUCCESS or FAILURE
2635 */
2636 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2637 u8 rx_cmd)
2638 {
2639 struct lio *lio = GET_LIO(netdev);
2640 struct octeon_device *oct = lio->oct_dev;
2641 struct octnic_ctrl_pkt nctrl;
2642 int ret = 0;
2643
2644 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2645
2646 nctrl.ncmd.u64 = 0;
2647 nctrl.ncmd.s.cmd = command;
2648 nctrl.ncmd.s.param1 = rx_cmd;
2649 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2650 nctrl.netpndev = (u64)netdev;
2651 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2652
2653 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2654 if (ret) {
2655 dev_err(&oct->pci_dev->dev,
2656 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2657 ret);
2658 if (ret > 0)
2659 ret = -EIO;
2660 }
2661 return ret;
2662 }
2663
2664 /**
2665 * liquidio_vxlan_port_command - Send command to add/delete VxLAN UDP port to firmware
2666 * @netdev: pointer to network device
2667 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
2668 * @vxlan_port: VxLAN port to be added or deleted
2669 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD,
2670 * OCTNET_CMD_VXLAN_PORT_DEL
2671 * Return: SUCCESS or FAILURE
2672 */
2673 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2674 u16 vxlan_port, u8 vxlan_cmd_bit)
2675 {
2676 struct lio *lio = GET_LIO(netdev);
2677 struct octeon_device *oct = lio->oct_dev;
2678 struct octnic_ctrl_pkt nctrl;
2679 int ret = 0;
2680
2681 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2682
2683 nctrl.ncmd.u64 = 0;
2684 nctrl.ncmd.s.cmd = command;
2685 nctrl.ncmd.s.more = vxlan_cmd_bit;
2686 nctrl.ncmd.s.param1 = vxlan_port;
2687 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2688 nctrl.netpndev = (u64)netdev;
2689 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2690
2691 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2692 if (ret) {
2693 dev_err(&oct->pci_dev->dev,
2694 "VxLAN port add/delete failed in core (ret:0x%x)\n",
2695 ret);
2696 if (ret > 0)
2697 ret = -EIO;
2698 }
2699 return ret;
2700 }
2701
2702 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
2703 unsigned int table, unsigned int entry,
2704 struct udp_tunnel_info *ti)
2705 {
2706 return liquidio_vxlan_port_command(netdev,
2707 OCTNET_CMD_VXLAN_PORT_CONFIG,
2708 htons(ti->port),
2709 OCTNET_CMD_VXLAN_PORT_ADD);
2710 }
2711
2712 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
2713 unsigned int table,
2714 unsigned int entry,
2715 struct udp_tunnel_info *ti)
2716 {
2717 return liquidio_vxlan_port_command(netdev,
2718 OCTNET_CMD_VXLAN_PORT_CONFIG,
2719 htons(ti->port),
2720 OCTNET_CMD_VXLAN_PORT_DEL);
2721 }
2722
2723 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
2724 .set_port = liquidio_udp_tunnel_set_port,
2725 .unset_port = liquidio_udp_tunnel_unset_port,
2726 .tables = {
2727 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
2728 },
2729 };
2730
2731 /**
2732 * liquidio_fix_features - Net device fix features
2733 * @netdev: pointer to network device
2734 * @request: features requested
2735 * Return: updated features list
2736 */
2737 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2738 netdev_features_t request)
2739 {
2740 struct lio *lio = netdev_priv(netdev);
2741
2742 if ((request & NETIF_F_RXCSUM) &&
2743 !(lio->dev_capability & NETIF_F_RXCSUM))
2744 request &= ~NETIF_F_RXCSUM;
2745
2746 if ((request & NETIF_F_HW_CSUM) &&
2747 !(lio->dev_capability & NETIF_F_HW_CSUM))
2748 request &= ~NETIF_F_HW_CSUM;
2749
2750 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2751 request &= ~NETIF_F_TSO;
2752
2753 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2754 request &= ~NETIF_F_TSO6;
2755
2756 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2757 request &= ~NETIF_F_LRO;
2758
2759 /* Disable LRO if RXCSUM is off */
2760 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2761 (lio->dev_capability & NETIF_F_LRO))
2762 request &= ~NETIF_F_LRO;
2763
2764 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2765 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2766 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2767
2768 return request;
2769 }
2770
2771 /**
2772 * liquidio_set_features - Net device set features
2773 * @netdev: pointer to network device
2774 * @features: features to enable/disable
2775 */
2776 static int liquidio_set_features(struct net_device *netdev,
2777 netdev_features_t features)
2778 {
2779 struct lio *lio = netdev_priv(netdev);
2780
2781 if ((features & NETIF_F_LRO) &&
2782 (lio->dev_capability & NETIF_F_LRO) &&
2783 !(netdev->features & NETIF_F_LRO))
2784 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2785 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2786 else if (!(features & NETIF_F_LRO) &&
2787 (lio->dev_capability & NETIF_F_LRO) &&
2788 (netdev->features & NETIF_F_LRO))
2789 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2790 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2791
2792 /* Send a command to firmware to enable/disable RX checksum
2793 * offload as requested via ethtool
2794 */
2795 if (!(netdev->features & NETIF_F_RXCSUM) &&
2796 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2797 (features & NETIF_F_RXCSUM))
2798 liquidio_set_rxcsum_command(netdev,
2799 OCTNET_CMD_TNL_RX_CSUM_CTL,
2800 OCTNET_CMD_RXCSUM_ENABLE);
2801 else if ((netdev->features & NETIF_F_RXCSUM) &&
2802 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2803 !(features & NETIF_F_RXCSUM))
2804 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2805 OCTNET_CMD_RXCSUM_DISABLE);
2806
2807 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2808 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2809 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2810 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2811 OCTNET_CMD_VLAN_FILTER_ENABLE);
2812 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2813 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2814 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2815 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2816 OCTNET_CMD_VLAN_FILTER_DISABLE);
2817
2818 return 0;
2819 }
2820
2821 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2822 u8 *mac, bool is_admin_assigned)
2823 {
2824 struct lio *lio = GET_LIO(netdev);
2825 struct octeon_device *oct = lio->oct_dev;
2826 struct octnic_ctrl_pkt nctrl;
2827 int ret = 0;
2828
2829 if (!is_valid_ether_addr(mac))
2830 return -EINVAL;
2831
2832 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2833 return -EINVAL;
2834
2835 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2836
2837 nctrl.ncmd.u64 = 0;
2838 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2839 /* vfidx is 0 based, but vf_num (param1) is 1 based */
2840 nctrl.ncmd.s.param1 = vfidx + 1;
2841 nctrl.ncmd.s.more = 1;
2842 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2843 nctrl.netpndev = (u64)netdev;
2844 if (is_admin_assigned) {
2845 nctrl.ncmd.s.param2 = true;
2846 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2847 }
2848
2849 nctrl.udd[0] = 0;
2850 /* The MAC Address is presented in network byte order. */
2851 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2852
2853 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2854
2855 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2856 if (ret > 0)
2857 ret = -EIO;
2858
2859 return ret;
2860 }
2861
2862 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2863 {
2864 struct lio *lio = GET_LIO(netdev);
2865 struct octeon_device *oct = lio->oct_dev;
2866 int retval;
2867
2868 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2869 return -EINVAL;
2870
2871 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2872 if (!retval)
2873 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2874
2875 return retval;
2876 }
2877
2878 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2879 bool enable)
2880 {
2881 struct lio *lio = GET_LIO(netdev);
2882 struct octeon_device *oct = lio->oct_dev;
2883 struct octnic_ctrl_pkt nctrl;
2884 int retval;
2885
2886 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2887 netif_info(lio, drv, lio->netdev,
2888 "firmware does not support spoofchk\n");
2889 return -EOPNOTSUPP;
2890 }
2891
2892 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2893 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2894 return -EINVAL;
2895 }
2896
2897 if (enable) {
2898 if (oct->sriov_info.vf_spoofchk[vfidx])
2899 return 0;
2900 } else {
2901 /* Clear */
2902 if (!oct->sriov_info.vf_spoofchk[vfidx])
2903 return 0;
2904 }
2905
2906 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2907 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2908 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2909 nctrl.ncmd.s.param1 =
2910 vfidx + 1; /* vfidx is 0 based,
2911 * but vf_num (param1) is 1 based
2912 */
2913 nctrl.ncmd.s.param2 = enable;
2914 nctrl.ncmd.s.more = 0;
2915 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2916 nctrl.cb_fn = NULL;
2917
2918 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2919
2920 if (retval) {
2921 netif_info(lio, drv, lio->netdev,
2922 "Failed to set VF %d spoofchk %s\n", vfidx,
2923 enable ? "on" : "off");
2924 return -1;
2925 }
2926
2927 oct->sriov_info.vf_spoofchk[vfidx] = enable;
2928 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2929 enable ? "on" : "off");
2930
2931 return 0;
2932 }
2933
2934 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2935 u16 vlan, u8 qos, __be16 vlan_proto)
2936 {
2937 struct lio *lio = GET_LIO(netdev);
2938 struct octeon_device *oct = lio->oct_dev;
2939 struct octnic_ctrl_pkt nctrl;
2940 u16 vlantci;
2941 int ret = 0;
2942
2943 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2944 return -EINVAL;
2945
2946 if (vlan_proto != htons(ETH_P_8021Q))
2947 return -EPROTONOSUPPORT;
2948
2949 if (vlan >= VLAN_N_VID || qos > 7)
2950 return -EINVAL;
2951
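/* Build the VLAN TCI: VID in the low 12 bits, QoS priority in the
 * PCP bits (VLAN_PRIO_SHIFT); a TCI of zero clears the VF VLAN.
 */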
2952 if (vlan)
2953 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2954 else
2955 vlantci = 0;
2956
2957 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2958 return 0;
2959
2960 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2961
2962 if (vlan)
2963 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2964 else
2965 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2966
2967 nctrl.ncmd.s.param1 = vlantci;
2968 nctrl.ncmd.s.param2 =
2969 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2970 nctrl.ncmd.s.more = 0;
2971 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2972 nctrl.cb_fn = NULL;
2973
2974 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2975 if (ret) {
2976 if (ret > 0)
2977 ret = -EIO;
2978 return ret;
2979 }
2980
2981 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2982
2983 return ret;
2984 }
2985
2986 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2987 struct ifla_vf_info *ivi)
2988 {
2989 struct lio *lio = GET_LIO(netdev);
2990 struct octeon_device *oct = lio->oct_dev;
2991 u8 *macaddr;
2992
2993 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2994 return -EINVAL;
2995
2996 memset(ivi, 0, sizeof(struct ifla_vf_info));
2997
2998 ivi->vf = vfidx;
2999 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
3000 ether_addr_copy(&ivi->mac[0], macaddr);
3001 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3002 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3003 if (oct->sriov_info.trusted_vf.active &&
3004 oct->sriov_info.trusted_vf.id == vfidx)
3005 ivi->trusted = true;
3006 else
3007 ivi->trusted = false;
3008 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3009 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
3010 ivi->max_tx_rate = lio->linfo.link.s.speed;
3011 ivi->min_tx_rate = 0;
3012
3013 return 0;
3014 }
3015
3016 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3017 {
3018 struct octeon_device *oct = lio->oct_dev;
3019 struct octeon_soft_command *sc;
3020 int retval;
3021
3022 sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3023 if (!sc)
3024 return -ENOMEM;
3025
3026 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3027
3028 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3029 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3030 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3031 trusted);
3032
3033 init_completion(&sc->complete);
3034 sc->sc_status = OCTEON_REQUEST_PENDING;
3035
3036 retval = octeon_send_soft_command(oct, sc);
3037 if (retval == IQ_SEND_FAILED) {
3038 octeon_free_soft_command(oct, sc);
3039 retval = -1;
3040 } else {
3041 /* Wait for response or timeout */
3042 retval = wait_for_sc_completion_timeout(oct, sc, 0);
3043 if (retval)
3044 return retval;
3045
3046 WRITE_ONCE(sc->caller_is_done, true);
3047 }
3048
3049 return retval;
3050 }
3051
3052 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3053 bool setting)
3054 {
3055 struct lio *lio = GET_LIO(netdev);
3056 struct octeon_device *oct = lio->oct_dev;
3057
3058 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3059 /* trusted vf is not supported by firmware older than 1.7.1 */
3060 return -EOPNOTSUPP;
3061 }
3062
3063 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3064 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3065 return -EINVAL;
3066 }
3067
3068 if (setting) {
3069 /* Set */
3070
3071 if (oct->sriov_info.trusted_vf.active &&
3072 oct->sriov_info.trusted_vf.id == vfidx)
3073 return 0;
3074
3075 if (oct->sriov_info.trusted_vf.active) {
3076 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3077 return -EPERM;
3078 }
3079 } else {
3080 /* Clear */
3081
3082 if (!oct->sriov_info.trusted_vf.active)
3083 return 0;
3084 }
3085
3086 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3087 if (setting) {
3088 oct->sriov_info.trusted_vf.id = vfidx;
3089 oct->sriov_info.trusted_vf.active = true;
3090 } else {
3091 oct->sriov_info.trusted_vf.active = false;
3092 }
3093
3094 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3095 setting ? "" : "not ");
3096 } else {
3097 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3098 return -1;
3099 }
3100
3101 return 0;
3102 }
3103
3104 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3105 int linkstate)
3106 {
3107 struct lio *lio = GET_LIO(netdev);
3108 struct octeon_device *oct = lio->oct_dev;
3109 struct octnic_ctrl_pkt nctrl;
3110 int ret = 0;
3111
3112 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3113 return -EINVAL;
3114
3115 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3116 return 0;
3117
3118 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3119 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3120 nctrl.ncmd.s.param1 =
3121 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3122 nctrl.ncmd.s.param2 = linkstate;
3123 nctrl.ncmd.s.more = 0;
3124 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3125 nctrl.cb_fn = NULL;
3126
3127 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3128
3129 if (!ret)
3130 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3131 else if (ret > 0)
3132 ret = -EIO;
3133
3134 return ret;
3135 }
3136
3137 static int
3138 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3139 {
3140 struct lio_devlink_priv *priv;
3141 struct octeon_device *oct;
3142
3143 priv = devlink_priv(devlink);
3144 oct = priv->oct;
3145
3146 *mode = oct->eswitch_mode;
3147
3148 return 0;
3149 }
3150
3151 static int
3152 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3153 struct netlink_ext_ack *extack)
3154 {
3155 struct lio_devlink_priv *priv;
3156 struct octeon_device *oct;
3157 int ret = 0;
3158
3159 priv = devlink_priv(devlink);
3160 oct = priv->oct;
3161
3162 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3163 return -EINVAL;
3164
3165 if (oct->eswitch_mode == mode)
3166 return 0;
3167
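/* Note the ordering: switchdev mode is recorded before the VF
 * representors are created, and the representors are destroyed
 * before reverting to legacy mode.
 */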
3168 switch (mode) {
3169 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3170 oct->eswitch_mode = mode;
3171 ret = lio_vf_rep_create(oct);
3172 break;
3173
3174 case DEVLINK_ESWITCH_MODE_LEGACY:
3175 lio_vf_rep_destroy(oct);
3176 oct->eswitch_mode = mode;
3177 break;
3178
3179 default:
3180 ret = -EINVAL;
3181 }
3182
3183 return ret;
3184 }
3185
3186 static const struct devlink_ops liquidio_devlink_ops = {
3187 .eswitch_mode_get = liquidio_eswitch_mode_get,
3188 .eswitch_mode_set = liquidio_eswitch_mode_set,
3189 };
3190
3191 static int
3192 liquidio_get_port_parent_id(struct net_device *dev,
3193 struct netdev_phys_item_id *ppid)
3194 {
3195 struct lio *lio = GET_LIO(dev);
3196 struct octeon_device *oct = lio->oct_dev;
3197
3198 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3199 return -EOPNOTSUPP;
3200
3201 ppid->id_len = ETH_ALEN;
3202 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3203
3204 return 0;
3205 }
3206
3207 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3208 struct ifla_vf_stats *vf_stats)
3209 {
3210 struct lio *lio = GET_LIO(netdev);
3211 struct octeon_device *oct = lio->oct_dev;
3212 struct oct_vf_stats stats;
3213 int ret;
3214
3215 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3216 return -EINVAL;
3217
3218 memset(&stats, 0, sizeof(struct oct_vf_stats));
3219 ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3220 if (!ret) {
3221 vf_stats->rx_packets = stats.rx_packets;
3222 vf_stats->tx_packets = stats.tx_packets;
3223 vf_stats->rx_bytes = stats.rx_bytes;
3224 vf_stats->tx_bytes = stats.tx_bytes;
3225 vf_stats->broadcast = stats.broadcast;
3226 vf_stats->multicast = stats.multicast;
3227 }
3228
3229 return ret;
3230 }
3231
3232 static const struct net_device_ops lionetdevops = {
3233 .ndo_open = liquidio_open,
3234 .ndo_stop = liquidio_stop,
3235 .ndo_start_xmit = liquidio_xmit,
3236 .ndo_get_stats64 = liquidio_get_stats64,
3237 .ndo_set_mac_address = liquidio_set_mac,
3238 .ndo_set_rx_mode = liquidio_set_mcast_list,
3239 .ndo_tx_timeout = liquidio_tx_timeout,
3240
3241 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3242 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
3243 .ndo_change_mtu = liquidio_change_mtu,
3244 .ndo_eth_ioctl = liquidio_ioctl,
3245 .ndo_fix_features = liquidio_fix_features,
3246 .ndo_set_features = liquidio_set_features,
3247 .ndo_set_vf_mac = liquidio_set_vf_mac,
3248 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3249 .ndo_get_vf_config = liquidio_get_vf_config,
3250 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk,
3251 .ndo_set_vf_trust = liquidio_set_vf_trust,
3252 .ndo_set_vf_link_state = liquidio_set_vf_link_state,
3253 .ndo_get_vf_stats = liquidio_get_vf_stats,
3254 .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3255 };
3256
3257 /**
3258 * liquidio_init - Entry point for the liquidio module
3259 */
3260 static int __init liquidio_init(void)
3261 {
3262 int i;
3263 struct handshake *hs;
3264
3265 init_completion(&first_stage);
3266
3267 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3268
3269 if (liquidio_init_pci())
3270 return -EINVAL;
3271
3272 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3273
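/* First wait (indefinitely) for each probed device to complete its
 * init handshake, then give each firmware up to 30 seconds to report
 * that it has started.
 */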
3274 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3275 hs = &handshake[i];
3276 if (hs->pci_dev) {
3277 wait_for_completion(&hs->init);
3278 if (!hs->init_ok) {
3279 /* init handshake failed */
3280 dev_err(&hs->pci_dev->dev,
3281 "Failed to init device\n");
3282 liquidio_deinit_pci();
3283 return -EIO;
3284 }
3285 }
3286 }
3287
3288 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3289 hs = &handshake[i];
3290 if (hs->pci_dev) {
3291 wait_for_completion_timeout(&hs->started,
3292 msecs_to_jiffies(30000));
3293 if (!hs->started_ok) {
3294 /* starter handshake failed */
3295 dev_err(&hs->pci_dev->dev,
3296 "Firmware failed to start\n");
3297 liquidio_deinit_pci();
3298 return -EIO;
3299 }
3300 }
3301 }
3302
3303 return 0;
3304 }
3305
3306 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3307 {
3308 struct octeon_device *oct = (struct octeon_device *)buf;
3309 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3310 int gmxport = 0;
3311 union oct_link_status *ls;
3312 int i;
3313
3314 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3315 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3316 recv_pkt->buffer_size[0],
3317 recv_pkt->rh.r_nic_info.gmxport);
3318 goto nic_info_err;
3319 }
3320
3321 gmxport = recv_pkt->rh.r_nic_info.gmxport;
3322 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3323 OCT_DROQ_INFO_SIZE);
3324
3325 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3326 for (i = 0; i < oct->ifcount; i++) {
3327 if (oct->props[i].gmxport == gmxport) {
3328 update_link_status(oct->props[i].netdev, ls);
3329 break;
3330 }
3331 }
3332
3333 nic_info_err:
3334 for (i = 0; i < recv_pkt->buffer_count; i++)
3335 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3336 octeon_free_recv_info(recv_info);
3337 return 0;
3338 }
3339
3340 /**
3341 * setup_nic_devices - Setup network interfaces
3342 * @octeon_dev: octeon device
3343 *
3344 * Called during init time for each device. It assumes the NIC
3345 * is already up and running. The link information for each
3346 * interface is passed in link_info.
3347 */
3348 static int setup_nic_devices(struct octeon_device *octeon_dev)
3349 {
3350 struct lio *lio = NULL;
3351 struct net_device *netdev;
3352 u8 mac[6], i, j, *fw_ver, *micro_ver;
3353 unsigned long micro;
3354 u32 cur_ver;
3355 struct octeon_soft_command *sc;
3356 struct liquidio_if_cfg_resp *resp;
3357 struct octdev_props *props;
3358 int retval, num_iqueues, num_oqueues;
3359 int max_num_queues = 0;
3360 union oct_nic_if_cfg if_cfg;
3361 unsigned int base_queue;
3362 unsigned int gmx_port_id;
3363 u32 resp_size, data_size;
3364 u32 ifidx_or_pfnum;
3365 struct lio_version *vdata;
3366 struct devlink *devlink;
3367 struct lio_devlink_priv *lio_devlink;
3368
3369 /* This is to handle link status changes */
3370 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3371 OPCODE_NIC_INFO,
3372 lio_nic_info, octeon_dev);
3373
3374 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3375 * They are handled directly.
3376 */
3377 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3378 free_netbuf);
3379
3380 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3381 free_netsgbuf);
3382
3383 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3384 free_netsgbuf_with_resp);
3385
3386 for (i = 0; i < octeon_dev->ifcount; i++) {
3387 resp_size = sizeof(struct liquidio_if_cfg_resp);
3388 data_size = sizeof(struct lio_version);
3389 sc = (struct octeon_soft_command *)
3390 octeon_alloc_soft_command(octeon_dev, data_size,
3391 resp_size, 0);
3392 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3393 vdata = (struct lio_version *)sc->virtdptr;
3394
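/* The request data carries the driver's base version triple,
 * byte-swapped to big-endian, presumably so the firmware can check
 * compatibility while handling IF_CFG.
 */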
3395 *((u64 *)vdata) = 0;
3396 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3397 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3398 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3399
3400 if (OCTEON_CN23XX_PF(octeon_dev)) {
3401 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3402 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3403 base_queue = octeon_dev->sriov_info.pf_srn;
3404
3405 gmx_port_id = octeon_dev->pf_num;
3406 ifidx_or_pfnum = octeon_dev->pf_num;
3407 } else {
3408 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3409 octeon_get_conf(octeon_dev), i);
3410 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3411 octeon_get_conf(octeon_dev), i);
3412 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3413 octeon_get_conf(octeon_dev), i);
3414 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3415 octeon_get_conf(octeon_dev), i);
3416 ifidx_or_pfnum = i;
3417 }
3418
3419 dev_dbg(&octeon_dev->pci_dev->dev,
3420 "requesting config for interface %d, iqs %d, oqs %d\n",
3421 ifidx_or_pfnum, num_iqueues, num_oqueues);
3422
3423 if_cfg.u64 = 0;
3424 if_cfg.s.num_iqueues = num_iqueues;
3425 if_cfg.s.num_oqueues = num_oqueues;
3426 if_cfg.s.base_queue = base_queue;
3427 if_cfg.s.gmx_port_id = gmx_port_id;
3428
3429 sc->iq_no = 0;
3430
3431 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3432 OPCODE_NIC_IF_CFG, 0,
3433 if_cfg.u64, 0);
3434
3435 init_completion(&sc->complete);
3436 sc->sc_status = OCTEON_REQUEST_PENDING;
3437
3438 retval = octeon_send_soft_command(octeon_dev, sc);
3439 if (retval == IQ_SEND_FAILED) {
3440 dev_err(&octeon_dev->pci_dev->dev,
3441 "iq/oq config failed status: %x\n",
3442 retval);
3443 /* Soft instr is freed by driver in case of failure. */
3444 octeon_free_soft_command(octeon_dev, sc);
3445 return -EIO;
3446 }
3447
3448 /* Sleep on a wait queue until the condition flag indicates that
3449 * the response arrived or the request timed out.
3450 */
3451 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3452 if (retval)
3453 return retval;
3454
3455 retval = resp->status;
3456 if (retval) {
3457 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3458 WRITE_ONCE(sc->caller_is_done, true);
3459 goto setup_nic_dev_done;
3460 }
3461 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3462 32, "%s",
3463 resp->cfg_info.liquidio_firmware_version);
3464
3465 /* Verify f/w version (in case of 'auto' loading from flash) */
3466 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3467 if (memcmp(LIQUIDIO_BASE_VERSION,
3468 fw_ver,
3469 strlen(LIQUIDIO_BASE_VERSION))) {
3470 dev_err(&octeon_dev->pci_dev->dev,
3471 "Unmatched firmware version. Expected %s.x, got %s.\n",
3472 LIQUIDIO_BASE_VERSION, fw_ver);
3473 WRITE_ONCE(sc->caller_is_done, true);
3474 goto setup_nic_dev_done;
3475 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3476 FW_IS_PRELOADED) {
3477 dev_info(&octeon_dev->pci_dev->dev,
3478 "Using auto-loaded firmware version %s.\n",
3479 fw_ver);
3480 }
3481
3482 /* extract micro version field; point past '<maj>.<min>.' */
3483 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3484 if (kstrtoul(micro_ver, 10, &micro) != 0)
3485 micro = 0;
3486 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3487 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3488 octeon_dev->fw_info.ver.rev = micro;
3489
3490 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3491 (sizeof(struct liquidio_if_cfg_info)) >> 3);
3492
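/* Firmware reports the queues actually assigned as bitmasks; the
 * popcounts give the usable iq/oq totals.
 */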
3493 num_iqueues = hweight64(resp->cfg_info.iqmask);
3494 num_oqueues = hweight64(resp->cfg_info.oqmask);
3495
3496 if (!(num_iqueues) || !(num_oqueues)) {
3497 dev_err(&octeon_dev->pci_dev->dev,
3498 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3499 resp->cfg_info.iqmask,
3500 resp->cfg_info.oqmask);
3501 WRITE_ONCE(sc->caller_is_done, true);
3502 goto setup_nic_dev_done;
3503 }
3504
3505 if (OCTEON_CN6XXX(octeon_dev)) {
3506 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3507 cn6xxx));
3508 } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3509 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3510 cn23xx_pf));
3511 }
3512
3513 dev_dbg(&octeon_dev->pci_dev->dev,
3514 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3515 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3516 num_iqueues, num_oqueues, max_num_queues);
3517 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3518
3519 if (!netdev) {
3520 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3521 WRITE_ONCE(sc->caller_is_done, true);
3522 goto setup_nic_dev_done;
3523 }
3524
3525 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3526
3527 /* Associate the routines that will handle different
3528 * netdev tasks.
3529 */
3530 netdev->netdev_ops = &lionetdevops;
3531
3532 retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3533 if (retval) {
3534 dev_err(&octeon_dev->pci_dev->dev,
3535 "setting real number rx failed\n");
3536 WRITE_ONCE(sc->caller_is_done, true);
3537 goto setup_nic_dev_free;
3538 }
3539
3540 retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3541 if (retval) {
3542 dev_err(&octeon_dev->pci_dev->dev,
3543 "setting real number tx failed\n");
3544 WRITE_ONCE(sc->caller_is_done, true);
3545 goto setup_nic_dev_free;
3546 }
3547
3548 lio = GET_LIO(netdev);
3549
3550 memset(lio, 0, sizeof(struct lio));
3551
3552 lio->ifidx = ifidx_or_pfnum;
3553
3554 props = &octeon_dev->props[i];
3555 props->gmxport = resp->cfg_info.linfo.gmxport;
3556 props->netdev = netdev;
3557
3558 lio->linfo.num_rxpciq = num_oqueues;
3559 lio->linfo.num_txpciq = num_iqueues;
3560 for (j = 0; j < num_oqueues; j++) {
3561 lio->linfo.rxpciq[j].u64 =
3562 resp->cfg_info.linfo.rxpciq[j].u64;
3563 }
3564 for (j = 0; j < num_iqueues; j++) {
3565 lio->linfo.txpciq[j].u64 =
3566 resp->cfg_info.linfo.txpciq[j].u64;
3567 }
3568 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3569 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3570 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3571
3572 WRITE_ONCE(sc->caller_is_done, true);
3573
3574 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3575
3576 if (OCTEON_CN23XX_PF(octeon_dev) ||
3577 OCTEON_CN6XXX(octeon_dev)) {
3578 lio->dev_capability = NETIF_F_HIGHDMA
3579 | NETIF_F_IP_CSUM
3580 | NETIF_F_IPV6_CSUM
3581 | NETIF_F_SG | NETIF_F_RXCSUM
3582 | NETIF_F_GRO
3583 | NETIF_F_TSO | NETIF_F_TSO6
3584 | NETIF_F_LRO;
3585 }
3586 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3587
3588 /* Copy of transmit encapsulation capabilities:
3589 * TSO, TSO6, Checksums for this device
3590 */
3591 lio->enc_dev_capability = NETIF_F_IP_CSUM
3592 | NETIF_F_IPV6_CSUM
3593 | NETIF_F_GSO_UDP_TUNNEL
3594 | NETIF_F_HW_CSUM | NETIF_F_SG
3595 | NETIF_F_RXCSUM
3596 | NETIF_F_TSO | NETIF_F_TSO6
3597 | NETIF_F_LRO;
3598
3599 netdev->hw_enc_features = (lio->enc_dev_capability &
3600 ~NETIF_F_LRO);
3601
3602 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3603
3604 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3605
3606 netdev->vlan_features = lio->dev_capability;
3607 /* Add any unchangeable hw features */
3608 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3609 NETIF_F_HW_VLAN_CTAG_RX |
3610 NETIF_F_HW_VLAN_CTAG_TX;
3611
3612 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3613
3614 netdev->hw_features = lio->dev_capability;
3615 /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3616 netdev->hw_features = netdev->hw_features &
3617 ~NETIF_F_HW_VLAN_CTAG_RX;
3618
3619 /* MTU range: 68 - 16000 */
3620 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3621 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3622
3623 /* Point to the properties for octeon device to which this
3624 * interface belongs.
3625 */
3626 lio->oct_dev = octeon_dev;
3627 lio->octprops = props;
3628 lio->netdev = netdev;
3629
3630 dev_dbg(&octeon_dev->pci_dev->dev,
3631 "if%d gmx: %d hw_addr: 0x%llx\n", i,
3632 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3633
3634 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3635 u8 vfmac[ETH_ALEN];
3636
3637 eth_random_addr(vfmac);
3638 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3639 dev_err(&octeon_dev->pci_dev->dev,
3640 "Error setting VF%d MAC address\n",
3641 j);
3642 goto setup_nic_dev_free;
3643 }
3644 }
3645
3646 /* 64-bit swap required on LE machines */
3647 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3648 for (j = 0; j < 6; j++)
3649 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3650
3651 /* Copy MAC Address to OS network device structure */
3652
3653 eth_hw_addr_set(netdev, mac);
3654
3655 /* By default all interfaces on a single Octeon use the same
3656  * tx and rx queues
3657  */
3658 lio->txq = lio->linfo.txpciq[0].s.q_no;
3659 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3660 if (liquidio_setup_io_queues(octeon_dev, i,
3661 lio->linfo.num_txpciq,
3662 lio->linfo.num_rxpciq)) {
3663 dev_err(&octeon_dev->pci_dev->dev, "I/O queue creation failed\n");
3664 goto setup_nic_dev_free;
3665 }
3666
3667 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3668
3669 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3670 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3671
3672 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3673 dev_err(&octeon_dev->pci_dev->dev,
3674 "Gather list allocation failed\n");
3675 goto setup_nic_dev_free;
3676 }
3677
3678 /* Register ethtool support */
3679 liquidio_set_ethtool_ops(netdev);
3680 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3681 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3682 else
3683 octeon_dev->priv_flags = 0x0;
3684
3685 if (netdev->features & NETIF_F_LRO)
3686 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3687 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3688
3689 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3690 OCTNET_CMD_VLAN_FILTER_ENABLE);
3691
3692 if ((debug != -1) && (debug & NETIF_MSG_HW))
3693 liquidio_set_feature(netdev,
3694 OCTNET_CMD_VERBOSE_ENABLE, 0);
3695
3696 if (setup_link_status_change_wq(netdev))
3697 goto setup_nic_dev_free;
3698
3699 if ((octeon_dev->fw_info.app_cap_flags &
3700 LIQUIDIO_TIME_SYNC_CAP) &&
3701 setup_sync_octeon_time_wq(netdev))
3702 goto setup_nic_dev_free;
3703
3704 if (setup_rx_oom_poll_fn(netdev))
3705 goto setup_nic_dev_free;
3706
3707 /* Register the network device with the OS */
3708 if (register_netdev(netdev)) {
3709 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3710 goto setup_nic_dev_free;
3711 }
3712
3713 dev_dbg(&octeon_dev->pci_dev->dev,
3714 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3715 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3716 netif_carrier_off(netdev);
3717 lio->link_changes++;
3718
3719 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3720
3721 /* Send a command to the firmware to enable Rx checksum offload
3722  * by default at the time this device is set up by the LiquidIO
3723  * driver.
3724  */
3725 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3726 OCTNET_CMD_RXCSUM_ENABLE);
3727 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3728 OCTNET_CMD_TXCSUM_ENABLE);
3729
3730 dev_dbg(&octeon_dev->pci_dev->dev,
3731 "NIC ifidx:%d Setup successful\n", i);
3732
3733 if (octeon_dev->subsystem_id ==
3734 OCTEON_CN2350_25GB_SUBSYS_ID ||
3735 octeon_dev->subsystem_id ==
3736 OCTEON_CN2360_25GB_SUBSYS_ID) {
3737 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3738 octeon_dev->fw_info.ver.min,
3739 octeon_dev->fw_info.ver.rev);
3740
3741 /* speed control unsupported in f/w older than 1.7.2 */
3742 if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3743 dev_info(&octeon_dev->pci_dev->dev,
3744 "speed setting not supported by f/w.");
3745 octeon_dev->speed_setting = 25;
3746 octeon_dev->no_speed_setting = 1;
3747 } else {
3748 liquidio_get_speed(lio);
3749 }
3750
3751 if (octeon_dev->speed_setting == 0) {
3752 octeon_dev->speed_setting = 25;
3753 octeon_dev->no_speed_setting = 1;
3754 }
3755 } else {
3756 octeon_dev->no_speed_setting = 1;
3757 octeon_dev->speed_setting = 10;
3758 }
3759 octeon_dev->speed_boot = octeon_dev->speed_setting;
3760
3761 /* don't read FEC setting if unsupported by f/w (see above) */
3762 if (octeon_dev->speed_boot == 25 &&
3763 !octeon_dev->no_speed_setting) {
3764 liquidio_get_fec(lio);
3765 octeon_dev->props[lio->ifidx].fec_boot =
3766 octeon_dev->props[lio->ifidx].fec;
3767 }
3768 }
3769
3770 devlink = devlink_alloc(&liquidio_devlink_ops,
3771 sizeof(struct lio_devlink_priv),
3772 &octeon_dev->pci_dev->dev);
3773 if (!devlink) {
3774 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3775 goto setup_nic_dev_free;
3776 }
3777
3778 lio_devlink = devlink_priv(devlink);
3779 lio_devlink->oct = octeon_dev;
3780
3781 if (devlink_register(devlink)) {
3782 devlink_free(devlink);
3783 dev_err(&octeon_dev->pci_dev->dev,
3784 "devlink registration failed\n");
3785 goto setup_nic_dev_free;
3786 }
3787
3788 octeon_dev->devlink = devlink;
3789 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3790
3791 return 0;
3792
3793 setup_nic_dev_free:
3794
3795 while (i--) {
3796 dev_err(&octeon_dev->pci_dev->dev,
3797 "NIC ifidx:%d Setup failed\n", i);
3798 liquidio_destroy_nic_device(octeon_dev, i);
3799 }
3800
3801 setup_nic_dev_done:
3802
3803 return -ENODEV;
3804 }
3805
3806 #ifdef CONFIG_PCI_IOV
3807 static int octeon_enable_sriov(struct octeon_device *oct)
3808 {
3809 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3810 struct pci_dev *vfdev;
3811 int err;
3812 u32 u;
3813
3814 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3815 err = pci_enable_sriov(oct->pci_dev,
3816 oct->sriov_info.num_vfs_alloced);
3817 if (err) {
3818 dev_err(&oct->pci_dev->dev,
3819 "OCTEON: Failed to enable PCI sriov: %d\n",
3820 err);
3821 oct->sriov_info.num_vfs_alloced = 0;
3822 return err;
3823 }
3824 oct->sriov_info.sriov_enabled = 1;
3825
3826 /* init lookup table that maps DPI ring number to VF pci_dev
3827 * struct pointer
3828 */
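/* Each VF is backed by 'rings_per_vf' consecutive DPI rings, so only
 * the first ring of each VF gets a LUT entry here.
 */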
3829 u = 0;
3830 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3831 OCTEON_CN23XX_VF_VID, NULL);
3832 while (vfdev) {
3833 if (vfdev->is_virtfn &&
3834 (vfdev->physfn == oct->pci_dev)) {
3835 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3836 vfdev;
3837 u += oct->sriov_info.rings_per_vf;
3838 }
3839 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3840 OCTEON_CN23XX_VF_VID, vfdev);
3841 }
3842 }
3843
3844 return num_vfs_alloced;
3845 }
3846
3847 static int lio_pci_sriov_disable(struct octeon_device *oct)
3848 {
3849 int u;
3850
3851 if (pci_vfs_assigned(oct->pci_dev)) {
3852 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3853 return -EPERM;
3854 }
3855
3856 pci_disable_sriov(oct->pci_dev);
3857
3858 u = 0;
3859 while (u < MAX_POSSIBLE_VFS) {
3860 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3861 u += oct->sriov_info.rings_per_vf;
3862 }
3863
3864 oct->sriov_info.num_vfs_alloced = 0;
3865 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3866 oct->pf_num);
3867
3868 return 0;
3869 }
3870
3871 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3872 {
3873 struct octeon_device *oct = pci_get_drvdata(dev);
3874 int ret = 0;
3875
3876 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3877 (oct->sriov_info.sriov_enabled)) {
3878 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3879 oct->pf_num, num_vfs);
3880 return 0;
3881 }
3882
3883 if (!num_vfs) {
3884 lio_vf_rep_destroy(oct);
3885 ret = lio_pci_sriov_disable(oct);
3886 } else if (num_vfs > oct->sriov_info.max_vfs) {
3887 dev_err(&oct->pci_dev->dev,
3888 "OCTEON: Max allowed VFs:%d user requested:%d",
3889 oct->sriov_info.max_vfs, num_vfs);
3890 ret = -EPERM;
3891 } else {
3892 oct->sriov_info.num_vfs_alloced = num_vfs;
3893 ret = octeon_enable_sriov(oct);
3894 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3895 oct->pf_num, num_vfs);
3896 ret = lio_vf_rep_create(oct);
3897 if (ret)
3898 dev_info(&oct->pci_dev->dev,
3899 "vf representor create failed");
3900 }
3901
3902 return ret;
3903 }
3904 #endif
3905
3906 /**
3907 * liquidio_init_nic_module - initialize the NIC
3908 * @oct: octeon device
3909 *
3910 * This initialization routine is called once the Octeon device application is
3911 * up and running
3912 */
3913 static int liquidio_init_nic_module(struct octeon_device *oct)
3914 {
3915 int i, retval = 0;
3916 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3917
3918 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3919
3920 /* Only the default IQ and OQ were initialized earlier;
3921  * initialize the rest as well.
3922  */
3923 /* Run the port_config command for each port. */
3924 oct->ifcount = num_nic_ports;
3925
3926 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3927
3928 for (i = 0; i < MAX_OCTEON_LINKS; i++)
3929 oct->props[i].gmxport = -1;
3930
3931 retval = setup_nic_devices(oct);
3932 if (retval) {
3933 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3934 goto octnet_init_failure;
3935 }
3936
3937 /* Call vf_rep_modinit if the firmware is switchdev capable
3938 * and do it from the first liquidio function probed.
3939 */
3940 if (!oct->octeon_id &&
3941 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3942 retval = lio_vf_rep_modinit();
3943 if (retval) {
3944 liquidio_stop_nic_module(oct);
3945 goto octnet_init_failure;
3946 }
3947 }
3948
3949 liquidio_ptp_init(oct);
3950
3951 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3952
3953 return retval;
3954
3955 octnet_init_failure:
3956
3957 oct->ifcount = 0;
3958
3959 return retval;
3960 }
3961
3962 /**
3963 * nic_starter - finish init
3964  * @work: work struct
3965  *
3966  * Starter callback that invokes the remaining initialization once the NIC application is up and running.
3967 */
3968 static void nic_starter(struct work_struct *work)
3969 {
3970 struct octeon_device *oct;
3971 struct cavium_wk *wk = (struct cavium_wk *)work;
3972
3973 oct = (struct octeon_device *)wk->ctxptr;
3974
3975 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3976 return;
3977
3978 /* If the status of the device is CORE_OK, the core
3979 * application has reported its application type. Call
3980 * any registered handlers now and move to the RUNNING
3981 * state.
3982 */
3983 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3984 schedule_delayed_work(&oct->nic_poll_work.work,
3985 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3986 return;
3987 }
3988
3989 atomic_set(&oct->status, OCT_DEV_RUNNING);
3990
3991 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3992 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3993
3994 if (liquidio_init_nic_module(oct))
3995 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3996 else
3997 handshake[oct->octeon_id].started_ok = 1;
3998 } else {
3999 dev_err(&oct->pci_dev->dev,
4000 "Unexpected application running on NIC (%d). Check firmware.\n",
4001 oct->app_mode);
4002 }
4003
4004 complete(&handshake[oct->octeon_id].started);
4005 }
4006
4007 static int
4008 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4009 {
4010 struct octeon_device *oct = (struct octeon_device *)buf;
4011 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4012 int i, notice, vf_idx;
4013 bool cores_crashed;
4014 u64 *data, vf_num;
4015
4016 notice = recv_pkt->rh.r.ossp;
4017 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4018
4019 /* the first 64-bit word of data is the vf_num */
4020 vf_num = data[0];
4021 octeon_swap_8B_data(&vf_num, 1);
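/* vf_num reported by the firmware is 1-based; convert it to the
 * 0-based index used for vf_drv_loaded_mask and vf_macaddr.
 */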
4022 vf_idx = (int)vf_num - 1;
4023
4024 cores_crashed = READ_ONCE(oct->cores_crashed);
4025
4026 if (notice == VF_DRV_LOADED) {
4027 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4028 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4029 dev_info(&oct->pci_dev->dev,
4030 "driver for VF%d was loaded\n", vf_idx);
4031 if (!cores_crashed)
4032 try_module_get(THIS_MODULE);
4033 }
4034 } else if (notice == VF_DRV_REMOVED) {
4035 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4036 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4037 dev_info(&oct->pci_dev->dev,
4038 "driver for VF%d was removed\n", vf_idx);
4039 if (!cores_crashed)
4040 module_put(THIS_MODULE);
4041 }
4042 } else if (notice == VF_DRV_MACADDR_CHANGED) {
4043 u8 *b = (u8 *)&data[1];
4044
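/* As with the interface MAC earlier, the 6-byte VF MAC occupies the
 * low 48 bits of the 64-bit word, hence 'b + 2' when printing below.
 */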
4045 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4046 dev_info(&oct->pci_dev->dev,
4047 "VF driver changed VF%d's MAC address to %pM\n",
4048 vf_idx, b + 2);
4049 }
4050
4051 for (i = 0; i < recv_pkt->buffer_count; i++)
4052 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4053 octeon_free_recv_info(recv_info);
4054
4055 return 0;
4056 }
4057
4058 /**
4059 * octeon_device_init - Device initialization for each Octeon device that is probed
4060 * @octeon_dev: octeon device
4061 */
4062 static int octeon_device_init(struct octeon_device *octeon_dev)
4063 {
4064 int j, ret;
4065 char bootcmd[] = "\n";
4066 char *dbg_enb = NULL;
4067 enum lio_fw_state fw_state;
4068 struct octeon_device_priv *oct_priv =
4069 (struct octeon_device_priv *)octeon_dev->priv;
4070 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4071
4072 /* Enable access to the octeon device and make its DMA capability
4073 * known to the OS.
4074 */
4075 if (octeon_pci_os_setup(octeon_dev))
4076 return 1;
4077
4078 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4079
4080 /* Identify the Octeon type and map the BAR address space. */
4081 if (octeon_chip_specific_setup(octeon_dev)) {
4082 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4083 return 1;
4084 }
4085
4086 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4087
4088 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4089 * since that is what is required for the reference to be removed
4090 * during de-initialization (see 'octeon_destroy_resources').
4091 */
4092 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4093 PCI_SLOT(octeon_dev->pci_dev->devfn),
4094 PCI_FUNC(octeon_dev->pci_dev->devfn),
4095 true);
4096
4097 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4098
4099 /* CN23XX supports preloaded firmware if the following is true:
4100 *
4101 * The adapter indicates that firmware is currently running AND
4102 * 'fw_type' is 'auto'.
4103 *
4104 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4105 */
4106 if (OCTEON_CN23XX_PF(octeon_dev) &&
4107 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4108 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4109 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4110 }
4111
4112 /* If loading firmware, only first device of adapter needs to do so. */
4113 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4114 FW_NEEDS_TO_BE_LOADED,
4115 FW_IS_BEING_LOADED);
4116
4117 /* Here, [local variable] 'fw_state' is set to one of:
4118 *
4119 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
4120 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4121 * firmware to the adapter.
4122 * FW_IS_BEING_LOADED: The driver's second instance will not load
4123 * firmware to the adapter.
4124 */
4125
4126 /* Prior to f/w load, perform a soft reset of the Octeon device;
4127 * if error resetting, return w/error.
4128 */
4129 if (fw_state == FW_NEEDS_TO_BE_LOADED)
4130 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4131 return 1;
4132
4133 /* Initialize the dispatch mechanism used to push packets arriving on
4134 * Octeon Output queues.
4135 */
4136 if (octeon_init_dispatch_list(octeon_dev))
4137 return 1;
4138
4139 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4140 OPCODE_NIC_CORE_DRV_ACTIVE,
4141 octeon_core_drv_init,
4142 octeon_dev);
4143
4144 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4145 OPCODE_NIC_VF_DRV_NOTICE,
4146 octeon_recv_vf_drv_notice, octeon_dev);
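/* Poll periodically (via nic_starter) until the core application on
 * the Octeon reports in and the device can move to the RUNNING state.
 */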
4147 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4148 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4149 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4150 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4151
4152 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4153
4154 if (octeon_set_io_queues_off(octeon_dev)) {
4155 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4156 return 1;
4157 }
4158
4159 if (OCTEON_CN23XX_PF(octeon_dev)) {
4160 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4161 if (ret) {
4162 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4163 return ret;
4164 }
4165 }
4166
4167 /* Initialize the soft command buffer pool. */
4169 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4170 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4171 return 1;
4172 }
4173 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4174
4175 /* Setup the data structures that manage this Octeon's Input queues. */
4176 if (octeon_setup_instr_queues(octeon_dev)) {
4177 dev_err(&octeon_dev->pci_dev->dev,
4178 "instruction queue initialization failed\n");
4179 return 1;
4180 }
4181 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4182
4183 /* Initialize lists to manage the requests of different types that
4184 * arrive from user & kernel applications for this octeon device.
4185 */
4186 if (octeon_setup_response_list(octeon_dev)) {
4187 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4188 return 1;
4189 }
4190 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4191
4192 if (octeon_setup_output_queues(octeon_dev)) {
4193 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4194 return 1;
4195 }
4196
4197 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4198
4199 if (OCTEON_CN23XX_PF(octeon_dev)) {
4200 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4201 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4202 return 1;
4203 }
4204 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4205
4206 if (octeon_allocate_ioq_vector
4207 (octeon_dev,
4208 octeon_dev->sriov_info.num_pf_rings)) {
4209 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4210 return 1;
4211 }
4212 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4213
4214 } else {
4215 /* The input and output queue registers were setup earlier (the
4216 * queues were not enabled). Any additional registers
4217 * that need to be programmed should be done now.
4218 */
4219 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4220 if (ret) {
4221 dev_err(&octeon_dev->pci_dev->dev,
4222 "Failed to configure device registers\n");
4223 return ret;
4224 }
4225 }
4226
4227 /* Initialize the tasklet that handles output queue packet processing. */
4228 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4229 tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
4230
4231 /* Setup the interrupt handler and record the INT SUM register address. */
4233 if (octeon_setup_interrupt(octeon_dev,
4234 octeon_dev->sriov_info.num_pf_rings))
4235 return 1;
4236
4237 /* Enable Octeon device interrupts */
4238 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4239
4240 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4241
4242 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4243 * the output queue is enabled.
4244 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4245 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4246 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4247 * before any credits have been issued, causing the ring to be reset
4248 * (and the f/w appear to never have started).
4249 */
4250 for (j = 0; j < octeon_dev->num_oqs; j++)
4251 writel(octeon_dev->droq[j]->max_count,
4252 octeon_dev->droq[j]->pkts_credit_reg);
4253
4254 /* Enable the input and output queues for this Octeon device */
4255 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4256 if (ret) {
4257 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4258 return ret;
4259 }
4260
4261 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4262
4263 if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4264 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4265 if (!ddr_timeout) {
4266 dev_info(&octeon_dev->pci_dev->dev,
4267 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4268 }
4269
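/* Allow the chip time to settle after the earlier soft reset before
 * polling for DDR initialization.
 */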
4270 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4271
4272 /* Wait for the octeon to initialize DDR after the soft-reset.*/
4273 while (!ddr_timeout) {
4274 set_current_state(TASK_INTERRUPTIBLE);
4275 if (schedule_timeout(HZ / 10)) {
4276 /* user probably pressed Control-C */
4277 return 1;
4278 }
4279 }
4280 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4281 if (ret) {
4282 dev_err(&octeon_dev->pci_dev->dev,
4283 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4284 ret);
4285 return 1;
4286 }
4287
4288 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4289 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4290 return 1;
4291 }
4292
4293 /* Divert uboot to take commands from host instead. */
4294 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4295
4296 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4297 ret = octeon_init_consoles(octeon_dev);
4298 if (ret) {
4299 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4300 return 1;
4301 }
4302 /* If console debug is enabled, pass an empty string to select the
4303  * default enablement; otherwise pass NULL to leave it disabled.
4304  */
4305 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4306 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4307 if (ret) {
4308 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4309 return 1;
4310 } else if (octeon_console_debug_enabled(0)) {
4311 /* If console was added AND we're logging console output
4312 * then set our console print function.
4313 */
4314 octeon_dev->console[0].print = octeon_dbg_console_print;
4315 }
4316
4317 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4318
4319 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4320 ret = load_firmware(octeon_dev);
4321 if (ret) {
4322 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4323 return 1;
4324 }
4325
4326 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4327 }
4328
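/* Signal the probe-time handshake that this device finished its
 * per-device initialization successfully.
 */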
4329 handshake[octeon_dev->octeon_id].init_ok = 1;
4330 complete(&handshake[octeon_dev->octeon_id].init);
4331
4332 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4333 oct_priv->dev = octeon_dev;
4334
4335 return 0;
4336 }
4337
4338 /**
4339 * octeon_dbg_console_print - Debug console print function
4340 * @oct: octeon device
4341 * @console_num: console number
4342 * @prefix: first portion of line to display
4343 * @suffix: second portion of line to display
4344 *
4345 * The OCTEON debug console outputs entire lines (excluding '\n').
4346 * Normally, the line will be passed in the 'prefix' parameter.
4347 * However, due to buffering, it is possible for a line to be split into two
4348 * parts, in which case they will be passed as the 'prefix' parameter and
4349 * 'suffix' parameter.
4350 */
4351 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4352 char *prefix, char *suffix)
4353 {
4354 if (prefix && suffix)
4355 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4356 suffix);
4357 else if (prefix)
4358 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4359 else if (suffix)
4360 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4361
4362 return 0;
4363 }
4364
4365 /**
4366 * liquidio_exit - Exits the module
4367 */
4368 static void __exit liquidio_exit(void)
4369 {
4370 liquidio_deinit_pci();
4371
4372 pr_info("LiquidIO network module is now unloaded\n");
4373 }
4374
4375 module_init(liquidio_init);
4376 module_exit(liquidio_exit);
4377