/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
static int octnet_get_link_stats(struct net_device *netdev);

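/* The context structures below track one outstanding soft command each:
 * the issuing octeon device id, a wait queue the caller sleeps on, and a
 * condition flag that the completion callback sets (1 on success, -1 on
 * failure) before waking the waiter.
 */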
struct oct_intrmod_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
	int status;
};

struct oct_intrmod_resp {
	u64 rh;
	struct oct_intrmod_cfg intrmod;
	u64 status;
};

struct oct_mdio_cmd_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct oct_mdio_cmd_resp {
	u64 rh;
	struct oct_mdio_cmd resp;
	u64 status;
};

#define OCT_MDIO45_RESP_SIZE	(sizeof(struct oct_mdio_cmd_resp))

/* Octeon's interface mode of operation */
enum {
	INTERFACE_MODE_DISABLED,
	INTERFACE_MODE_RGMII,
	INTERFACE_MODE_GMII,
	INTERFACE_MODE_SPI,
	INTERFACE_MODE_PCIE,
	INTERFACE_MODE_XAUI,
	INTERFACE_MODE_SGMII,
	INTERFACE_MODE_PICMG,
	INTERFACE_MODE_NPI,
	INTERFACE_MODE_LOOP,
	INTERFACE_MODE_SRIO,
	INTERFACE_MODE_ILK,
	INTERFACE_MODE_RXAUI,
	INTERFACE_MODE_QSGMII,
	INTERFACE_MODE_AGL,
	INTERFACE_MODE_XLAUI,
	INTERFACE_MODE_XFI,
	INTERFACE_MODE_10G_KR,
	INTERFACE_MODE_40G_KR4,
	INTERFACE_MODE_MIXED,
};

#define OCT_ETHTOOL_REGDUMP_LEN		4096
#define OCT_ETHTOOL_REGDUMP_LEN_23XX	(4096 * 11)
#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF	(4096 * 2)
#define OCT_ETHTOOL_REGSVER		1

/* statistics of PF */
static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/* jabber_err + l2_err + frame_err */
	"tx_errors",	/* fw_err_pko + fw_err_link + fw_err_drop */
	"rx_dropped",	/* st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
			 * st->fromwire.dmac_drop + st->fromwire.fw_err_drop
			 */
	"tx_dropped",

	"tx_total_sent",
	"tx_total_fwd",
	"tx_err_pko",
	"tx_err_pki",
	"tx_err_link",
	"tx_err_drop",

	"tx_tso",
	"tx_tso_packets",
	"tx_tso_err",
	"tx_vxlan",

	"mac_tx_total_pkts",
	"mac_tx_total_bytes",
	"mac_tx_mcast_pkts",
	"mac_tx_bcast_pkts",
	"mac_tx_ctl_packets",	/* oct->link_stats.fromhost.ctl_sent */
	"mac_tx_total_collisions",
	"mac_tx_one_collision",
	"mac_tx_multi_collision",
	"mac_tx_max_collision_fail",
	"mac_tx_max_deferral_fail",
	"mac_tx_fifo_err",
	"mac_tx_runts",

	"rx_total_rcvd",
	"rx_total_fwd",
	"rx_jabber_err",
	"rx_l2_err",
	"rx_frame_err",
	"rx_err_pko",
	"rx_err_link",
	"rx_err_drop",

	"rx_vxlan",
	"rx_vxlan_err",

	"rx_lro_pkts",
	"rx_lro_bytes",
	"rx_total_lro",

	"rx_lro_aborts",
	"rx_lro_aborts_port",
	"rx_lro_aborts_seq",
	"rx_lro_aborts_tsval",
	"rx_lro_aborts_timer",
	"rx_fwd_rate",

	"mac_rx_total_rcvd",
	"mac_rx_bytes",
	"mac_rx_total_bcst",
	"mac_rx_total_mcst",
	"mac_rx_runts",
	"mac_rx_ctl_packets",
	"mac_rx_fifo_err",
	"mac_rx_dma_drop",
	"mac_rx_fcs_err",

	"link_state_changes",
};

/* statistics of VF */
static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",	/* jabber_err + l2_err + frame_err */
	"tx_errors",	/* fw_err_pko + fw_err_link + fw_err_drop */
	"rx_dropped",	/* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
	"tx_dropped",
	"link_state_changes",
};

/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",	/* oct->instr_queue[iq_no]->stats.tx_done */
	"bytes",	/* oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	"dropped",
	"iq_busy",
	"sgentry_sent",

	"fw_instr_posted",
	"fw_instr_processed",
	"fw_instr_dropped",
	"fw_bytes_sent",

	"tso",
	"vxlan",
	"txq_restart",
};

/* statistics of host rx queue */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
	"packets",	/* oct->droq[oq_no]->stats.rx_pkts_received */
	"bytes",	/* oct->droq[oq_no]->stats.rx_bytes_received */
	"dropped",	/* oct->droq[oq_no]->stats.rx_dropped +
			 * oct->droq[oq_no]->stats.dropped_nodispatch +
			 * oct->droq[oq_no]->stats.dropped_toomany +
			 * oct->droq[oq_no]->stats.dropped_nomem
			 */
	"dropped_nomem",
	"dropped_toomany",
	"fw_dropped",
	"fw_pkts_received",
	"fw_bytes_received",
	"fw_dropped_nodispatch",

	"vxlan",
	"buffer_alloc_failure",
};

/* LiquidIO driver private flags */
static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
};

#define OCTNIC_NCMD_AUTONEG_ON	0x1
#define OCTNIC_NCMD_PHY_ON	0x2
static int lio_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *ecmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_link_info *linfo;
	u32 supported = 0, advertising = 0;

	linfo = &lio->linfo;

	if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
	    linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
		ecmd->base.port = PORT_FIBRE;

		if (linfo->link.s.speed == SPEED_10000) {
			supported = SUPPORTED_10000baseT_Full;
			advertising = ADVERTISED_10000baseT_Full;
		}

		supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
		advertising |= ADVERTISED_Pause;
		ethtool_convert_legacy_u32_to_link_mode(
			ecmd->link_modes.supported, supported);
		ethtool_convert_legacy_u32_to_link_mode(
			ecmd->link_modes.advertising, advertising);
		ecmd->base.autoneg = AUTONEG_DISABLE;

	} else {
		dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
			linfo->link.s.if_mode);
	}

	if (linfo->link.s.link_up) {
		ecmd->base.speed = linfo->link.s.speed;
		ecmd->base.duplex = linfo->link.s.duplex;
	} else {
		ecmd->base.speed = SPEED_UNKNOWN;
		ecmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

static void
lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct lio *lio;
	struct octeon_device *oct;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio");
	strcpy(drvinfo->version, LIQUIDIO_VERSION);
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}

static void
lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct octeon_device *oct;
	struct lio *lio;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
	strcpy(drvinfo->driver, "liquidio_vf");
	strcpy(drvinfo->version, LIQUIDIO_VERSION);
	strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
		ETHTOOL_FWVERS_LEN);
	strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
}

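/* Ask the firmware, via an OCTNET_CMD_QUEUE_COUNT_CTL control packet, to
 * switch to a new queue count; param1/param2 carry the requested tx/rx
 * queue counts and completion is reported asynchronously through
 * liquidio_link_ctrl_cmd_completion.
 */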
static int
lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
	nctrl.ncmd.s.param1 = num_queues;
	nctrl.ncmd.s.param2 = num_queues;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
			ret);
		return -1;
	}

	return 0;
}

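/* "ethtool -l" handler.  CN6XXX parts report separate rx/tx limits taken
 * from the chip configuration; CN23XX PF/VF parts report combined
 * channels, with the VF limit read from the RPVF field of its
 * SLI_IQ_PKT_CONTROL CSR.
 */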
static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
	u32 combined_count = 0, max_combined = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct)) {
		max_combined = lio->linfo.num_txpciq;
		combined_count = oct->num_iqs;
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
		combined_count = oct->num_iqs;
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->max_combined = max_combined;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
	channel->combined_count = combined_count;
}

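/* Tear down and re-request MSI-X vectors for a new IOQ count.  On the PF
 * the last vector is a non-queue vector (registered with the oct struct
 * as its argument), so the loop frees one vector fewer and that vector is
 * released separately below.
 */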
static int
lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
{
	struct msix_entry *msix_entries;
	int num_msix_irqs = 0;
	int i;

	if (!oct->msix_on)
		return 0;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	if (oct->msix_on) {
		if (OCTEON_CN23XX_PF(oct))
			num_msix_irqs = oct->num_msix_irqs - 1;
		else if (OCTEON_CN23XX_VF(oct))
			num_msix_irqs = oct->num_msix_irqs;

		msix_entries = (struct msix_entry *)oct->msix_entries;
		for (i = 0; i < num_msix_irqs; i++) {
			if (oct->ioq_vector[i].vector) {
				/* clear the affinity_cpumask */
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
				oct->ioq_vector[i].vector = 0;
			}
		}

		/* non-iov vector's argument is oct struct */
		if (OCTEON_CN23XX_PF(oct))
			free_irq(msix_entries[i].vector, oct);

		pci_disable_msix(oct->pci_dev);
		kfree(oct->msix_entries);
		oct->msix_entries = NULL;
	}

	kfree(oct->irq_name_storage);
	oct->irq_name_storage = NULL;
	if (octeon_setup_interrupt(oct, num_ioqs)) {
		dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
		return 1;
	}

	/* Enable Octeon device interrupts */
	oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return 0;
}

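/* "ethtool -L" handler: validate the requested combined channel count
 * against the chip limit, then stop the interface if needed, rebuild the
 * queues and IRQs with the new count, and restart.  Needs firmware 1.6.1
 * or later.
 */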
static int
lio_ethtool_set_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	u32 combined_count, max_combined;
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
		dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
		return -EINVAL;
	}

	if (!channel->combined_count || channel->other_count ||
	    channel->rx_count || channel->tx_count)
		return -EINVAL;

	combined_count = channel->combined_count;

	if (OCTEON_CN23XX_PF(oct)) {
		max_combined = channel->max_combined;
	} else if (OCTEON_CN23XX_VF(oct)) {
		u64 reg_val = 0ULL;
		u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);

		reg_val = octeon_read_csr64(oct, ctrl);
		reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
		max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
	} else {
		return -EINVAL;
	}

	if (combined_count > max_combined || combined_count < 1)
		return -EINVAL;

	if (combined_count == oct->num_iqs)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(dev)) {
		dev->netdev_ops->ndo_stop(dev);
		stopped = 1;
	}

	if (lio_reset_queues(dev, combined_count))
		return -EINVAL;

	lio_irq_reallocate_irqs(oct, combined_count);
	if (stopped)
		dev->netdev_ops->ndo_open(dev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;
}

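/* The "EEPROM" exposed through ethtool is just a text rendering of the
 * board info (name, serial number, major/minor revision), so its length
 * is computed by formatting that string into a scratch buffer.
 */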
static int lio_get_eeprom_len(struct net_device *netdev)
{
	u8 buf[192];
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;
	int len;

	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
		      board_info->name, board_info->serial_number,
		      board_info->major, board_info->minor);

	return len;
}

static int
lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_board_info *board_info;

	if (eeprom->offset)
		return -EINVAL;

	eeprom->magic = oct_dev->pci_dev->vendor;
	board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
	sprintf((char *)bytes,
		"boardname:%s serialnum:%s maj:%lld min:%lld\n",
		board_info->name, board_info->serial_number,
		board_info->major, board_info->minor);

	return 0;
}

static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
	nctrl.ncmd.s.param1 = addr;
	nctrl.ncmd.s.param2 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
		return -EINVAL;
	}

	return 0;
}

static int octnet_id_active(struct net_device *netdev, int val)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
	nctrl.ncmd.s.param1 = val;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to configure ID LED\n");
		return -EINVAL;
	}

	return 0;
}

/* Callback invoked when an MDIO command response arrives */
static void octnet_mdio_resp_callback(struct octeon_device *oct,
				      u32 status,
				      void *buf)
{
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;

	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
	if (status) {
		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		WRITE_ONCE(mdio_cmd_ctx->cond, -1);
	} else {
		WRITE_ONCE(mdio_cmd_ctx->cond, 1);
	}
	wake_up_interruptible(&mdio_cmd_ctx->wc);
}

/* This routine provides PHY access for MDIO Clause 45 devices. */
static int
octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
	struct oct_mdio_cmd_context *mdio_cmd_ctx;
	struct oct_mdio_cmd *mdio_cmd;
	int retval = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_mdio_cmd),
					  sizeof(struct oct_mdio_cmd_resp),
					  sizeof(struct oct_mdio_cmd_context));

	if (!sc)
		return -ENOMEM;

	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;

	WRITE_ONCE(mdio_cmd_ctx->cond, 0);
	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
	mdio_cmd->op = op;
	mdio_cmd->mdio_addr = loc;
	if (op)
		mdio_cmd->value1 = *value;
	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
				    0, 0, 0);

	sc->wait_time = 1000;
	sc->callback = octnet_mdio_resp_callback;
	sc->callback_arg = sc;

	init_waitqueue_head(&mdio_cmd_ctx->wc);

	retval = octeon_send_soft_command(oct_dev, sc);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct_dev->pci_dev->dev,
			"octnet_mdio45_access instruction failed status: %x\n",
			retval);
		retval = -EBUSY;
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived
		 */
		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
		retval = mdio_cmd_rsp->status;
		if (retval) {
			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
			retval = -EBUSY;
		} else {
			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
					    sizeof(struct oct_mdio_cmd) / 8);

			if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
				if (!op)
					*value = mdio_cmd_rsp->resp.value1;
			} else {
				retval = -EINVAL;
			}
		}
	}

	octeon_free_soft_command(oct_dev, sc);

	return retval;
}

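/* "ethtool -p" (port identification) handler.  CN66XX blinks the LED by
 * toggling a PHY GPIO, CN68XX saves and reprograms the PHY beacon/LED
 * registers over MDIO, and CN23XX PF hands the job to firmware through
 * the OCTNET_CMD_ID_ACTIVE control command.
 */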
static int lio_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int value, ret;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEON);
			return 2;

		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Save the current LED settings */
			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 0,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			/* Configure Beacon values */
			value = LIO68XX_LED_BEACON_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &value);
			if (ret)
				return ret;

			value = LIO68XX_LED_CTRL_CFGON;
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &value);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_ON);

			/* returns 0 since updates are asynchronous */
			return 0;
		} else {
			return -EINVAL;
		}
		break;

	case ETHTOOL_ID_ON:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_HIGH);
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_OFF:
		if (oct->chip_id == OCTEON_CN66XX)
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_LOW);
		else
			return -EINVAL;

		break;

	case ETHTOOL_ID_INACTIVE:
		if (oct->chip_id == OCTEON_CN66XX) {
			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
					   VITESSE_PHY_GPIO_DRIVEOFF);
		} else if (oct->chip_id == OCTEON_CN68XX) {
			/* Restore LED settings */
			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_CTRL_ADDR,
						   &lio->led_ctrl_val);
			if (ret)
				return ret;

			ret = octnet_mdio45_access(lio, 1,
						   LIO68XX_LED_BEACON_ADDR,
						   &lio->phy_beacon_val);
			if (ret)
				return ret;
		} else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
			octnet_id_active(netdev, LED_IDENTIFICATION_OFF);

			return 0;
		} else {
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	} else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
		tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
		rx_pending = oct->droq[0]->max_count;
		tx_pending = oct->instr_queue[0]->max_count;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
	ering->rx_pending = rx_pending;
	ering->rx_max_pending = rx_max_pending;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
}

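/* Quiesce the device and rebuild its I/O queues with a new queue count:
 * drain pending requests, disable the queues, delete the DROQs, IQs and
 * NAPI contexts, then reprogram the device registers and recreate the
 * queues.  If the count changed, the firmware is notified as well.
 */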
static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;
	int i, update = 0;

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	if (lio_wait_for_instr_fetch(oct))
		dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

	if (octeon_set_io_queues_off(oct)) {
		dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
		return -1;
	}

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon.
	 */
	oct->fn_list.disable_io_queues(oct);
	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (num_qs != oct->num_iqs) {
		netif_set_real_num_rx_queues(netdev, num_qs);
		netif_set_real_num_tx_queues(netdev, num_qs);
		update = 1;
	}

	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_delete_droq(oct, i);
	}

	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_delete_instr_queue(oct, i);
	}

	if (oct->fn_list.setup_device_regs(oct)) {
		dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
		return -1;
	}

	if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
		dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n");
		return -1;
	}

	/* Enable the input and output queues for this Octeon device */
	if (oct->fn_list.enable_io_queues(oct)) {
		dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues");
		return -1;
	}

	if (update && lio_send_queue_count_update(netdev, num_qs))
		return -1;

	return 0;
}

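/* "ethtool -G" handler (CN23XX PF/VF only): clamp the requested
 * descriptor counts to the chip limits, update the per-interface config,
 * and reset the queues; on failure the old descriptor counts are
 * restored before returning.
 */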
static int lio_ethtool_set_ringparam(struct net_device *netdev,
				     struct ethtool_ringparam *ering)
{
	u32 rx_count, tx_count, rx_count_old, tx_count_old;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int stopped = 0;

	if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
		return -EINVAL;

	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
		return -EINVAL;

	rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
			   CN23XX_MAX_OQ_DESCRIPTORS);
	tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
			   CN23XX_MAX_IQ_DESCRIPTORS);

	rx_count_old = oct->droq[0]->max_count;
	tx_count_old = oct->instr_queue[0]->max_count;

	if (rx_count == rx_count_old && tx_count == tx_count_old)
		return 0;

	ifstate_set(lio, LIO_IFSTATE_RESETTING);

	if (netif_running(netdev)) {
		netdev->netdev_ops->ndo_stop(netdev);
		stopped = 1;
	}

	/* Change RX/TX DESCS count */
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count);

	if (lio_reset_queues(netdev, lio->linfo.num_txpciq))
		goto err_lio_reset_queues;

	if (stopped)
		netdev->netdev_ops->ndo_open(netdev);

	ifstate_reset(lio, LIO_IFSTATE_RESETTING);

	return 0;

err_lio_reset_queues:
	if (tx_count != tx_count_old)
		CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    tx_count_old);
	if (rx_count != rx_count_old)
		CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
					    rx_count_old);
	return -EINVAL;
}

static u32 lio_get_msglevel(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	return lio->msg_enable;
}

static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
		if (msglvl & NETIF_MSG_HW)
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);
		else
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_DISABLE, 0);
	}

	lio->msg_enable = msglvl;
}

static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
{
	struct lio *lio = GET_LIO(netdev);

	lio->msg_enable = msglvl;
}

static void
lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers. Just report pause frame support.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	pause->autoneg = 0;

	pause->tx_pause = oct->tx_pause;
	pause->rx_pause = oct->rx_pause;
}

static int
lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
	/* Notes: Not supporting any auto negotiation in these
	 * drivers.
	 */
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct oct_link_info *linfo = &lio->linfo;

	int ret = 0;

	if (oct->chip_id != OCTEON_CN23XX_PF_VID)
		return -EINVAL;

	if (linfo->link.s.duplex == 0) {
		/* no flow control for half duplex */
		if (pause->rx_pause || pause->tx_pause)
			return -EINVAL;
	}

	/* do not support autoneg of link flow control */
	if (pause->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	if (pause->rx_pause) {
		/* enable rx pause */
		nctrl.ncmd.s.param1 = 1;
	} else {
		/* disable rx pause */
		nctrl.ncmd.s.param1 = 0;
	}

	if (pause->tx_pause) {
		/* enable tx pause */
		nctrl.ncmd.s.param2 = 1;
	} else {
		/* disable tx pause */
		nctrl.ncmd.s.param2 = 0;
	}

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
		return -EINVAL;
	}

	oct->rx_pause = pause->rx_pause;
	oct->tx_pause = pause->tx_pause;

	return 0;
}

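/* Fill the u64 array for "ethtool -S".  The ordering must match
 * oct_stats_strings above: netdev counters first, then firmware and MAC
 * tx/rx counters, then the per-IQ and per-DROQ counters of every active
 * queue.
 */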
static void
lio_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats __attribute__((unused)),
		      u64 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	struct net_device_stats *netstats = &netdev->stats;
	int i = 0, j;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats(netdev);
	octnet_get_link_stats(netdev);

	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);

	/* firmware tx stats */
	/* per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
	 * fromhost.fw_total_sent
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
	/* per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
	/* per_core_stats[j].link_stats[i].fromhost.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);

	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_tso_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_err_tso
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
	/* per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
	 * fw_tx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);

	/* mac tx statistics */
	/* CVMX_BGXX_CMRX_TX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
	/* CVMX_BGXX_CMRX_TX_STAT15 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT14 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
	/* CVMX_BGXX_CMRX_TX_STAT17 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
	/* CVMX_BGXX_CMRX_TX_STAT3 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT2 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
	/* CVMX_BGXX_CMRX_TX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
	/* CVMX_BGXX_CMRX_TX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
	/* CVMX_BGXX_CMRX_TX_STAT16 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
	/* CVMX_BGXX_CMRX_TX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);

	/* RX firmware stats */
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_total_rcvd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_total_fwd
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
	/* per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_err_pko
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
	/* per_core_stats[j].link_stats[i].fromwire.fw_err_link */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_err_drop
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);

	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_rx_vxlan
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
	/* per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
	 * fromwire.fw_rx_vxlan_err
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);

	/* LRO */
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_pkts
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_octs
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
	/* per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
	/* per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_port
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_seq
	 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_tsval
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
	/* per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
	 * fw_lro_aborts_timer
	 */
	data[i++] =
		CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
	/* intrmod: packet forward rate */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);

	/* mac: link-level stats */
	/* CVMX_BGXX_CMRX_RX_STAT0 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT1 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
	/* CVMX_PKI_STATX_STAT5 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
	/* CVMX_BGXX_CMRX_RX_STAT2 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
	/* CVMX_BGXX_CMRX_RX_STAT6 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
	/* CVMX_BGXX_CMRX_RX_STAT4 */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
	/* wqe->word2.err_code or wqe->word2.err_level */
	data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
			continue;
		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions that could not be processed */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);

		/* tso requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restarts */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
		if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
			continue;

		/* packets sent to the TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

static void lio_vf_get_ethtool_stats(struct net_device *netdev,
				     struct ethtool_stats *stats
				     __attribute__((unused)),
				     u64 *data)
{
	struct net_device_stats *netstats = &netdev->stats;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int i = 0, j, vj;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	netdev->netdev_ops->ndo_get_stats(netdev);
	/* sum of oct->droq[oq_no]->stats->rx_pkts_received */
	data[i++] = CVM_CAST64(netstats->rx_packets);
	/* sum of oct->instr_queue[iq_no]->stats.tx_done */
	data[i++] = CVM_CAST64(netstats->tx_packets);
	/* sum of oct->droq[oq_no]->stats->rx_bytes_received */
	data[i++] = CVM_CAST64(netstats->rx_bytes);
	/* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
	data[i++] = CVM_CAST64(netstats->tx_bytes);
	data[i++] = CVM_CAST64(netstats->rx_errors);
	data[i++] = CVM_CAST64(netstats->tx_errors);
	/* sum of oct->droq[oq_no]->stats->rx_dropped +
	 * oct->droq[oq_no]->stats->dropped_nodispatch +
	 * oct->droq[oq_no]->stats->dropped_toomany +
	 * oct->droq[oq_no]->stats->dropped_nomem
	 */
	data[i++] = CVM_CAST64(netstats->rx_dropped);
	/* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
	data[i++] = CVM_CAST64(netstats->tx_dropped);
	/* lio->link_changes */
	data[i++] = CVM_CAST64(lio->link_changes);

	for (vj = 0; vj < oct_dev->num_iqs; vj++) {
		j = lio->linfo.txpciq[vj].s.q_no;

		/* packets to network port */
		/* # of packets tx to network */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
		/* # of bytes tx to network */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_tot_bytes);
		/* # of packets dropped */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_dropped);
		/* # of tx fails due to queue full */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_iq_busy);
		/* XXX gather entries sent */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.sgentry_sent);

		/* instruction to firmware: data and control */
		/* # of instructions to the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.instr_posted);
		/* # of instructions processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
		/* # of instructions that could not be processed */
		data[i++] =
			CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
		/* bytes sent through the queue */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.bytes_sent);
		/* tso requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
		/* vxlan requests */
		data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
		/* txq restarts */
		data[i++] = CVM_CAST64(
				oct_dev->instr_queue[j]->stats.tx_restart);
	}

	/* RX */
	for (vj = 0; vj < oct_dev->num_oqs; vj++) {
		j = lio->linfo.rxpciq[vj].s.q_no;

		/* packets sent to the TCP/IP network stack */
		/* # of packets to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_pkts_received);
		/* # of bytes to network stack */
		data[i++] = CVM_CAST64(
				oct_dev->droq[j]->stats.rx_bytes_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
				       oct_dev->droq[j]->stats.dropped_toomany +
				       oct_dev->droq[j]->stats.rx_dropped);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);

		/* control and data path */
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);

		data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
		data[i++] =
			CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
	}
}

static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	int i;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
			sprintf(data, "%s", oct_priv_flags_strings[i]);
			data += ETH_GSTRING_LEN;
		}
		break;
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		break;
	}
}

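/* Emit the stat name table for ETH_SS_STATS: the global strings first,
 * then "tx-<q>-<stat>" for each active instruction queue and
 * "rx-<q>-<stat>" for each active output queue, in the same order the
 * values are written by lio_get_ethtool_stats.
 */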
static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_iq_stats, num_oq_stats, i, j;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
			       u8 *data)
{
	int num_iq_stats, num_oq_stats, i, j;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;
	int num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		num_stats = ARRAY_SIZE(oct_vf_stats_strings);
		for (j = 0; j < num_stats; j++) {
			sprintf(data, "%s", oct_vf_stats_strings[j]);
			data += ETH_GSTRING_LEN;
		}

		num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_iq_stats; j++) {
				sprintf(data, "tx-%d-%s", i,
					oct_iq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}

		num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
			if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
				continue;
			for (j = 0; j < num_oq_stats; j++) {
				sprintf(data, "rx-%d-%s", i,
					oct_droq_stats_strings[j]);
				data += ETH_GSTRING_LEN;
			}
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		lio_get_priv_flags_strings(lio, data);
		break;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
		break;
	}
}

static int lio_get_priv_flags_ss_count(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (oct_dev->chip_id) {
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		return ARRAY_SIZE(oct_priv_flags_strings);
	case OCTEON_CN68XX:
	case OCTEON_CN66XX:
		return -EOPNOTSUPP;
	default:
		netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
		return -EOPNOTSUPP;
	}
}

static int lio_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct_dev = lio->oct_dev;

	switch (sset) {
	case ETH_SS_STATS:
		return (ARRAY_SIZE(oct_vf_stats_strings) +
			ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
			ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
	case ETH_SS_PRIV_FLAGS:
		return lio_get_priv_flags_ss_count(lio);
	default:
		return -EOPNOTSUPP;
	}
}

/* Callback function for intrmod */
static void octnet_intrmod_callback(struct octeon_device *oct_dev,
				    u32 status,
				    void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_intrmod_context *ctx;

	ctx = (struct oct_intrmod_context *)sc->ctxptr;

	ctx->status = status;

	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/* get interrupt moderation parameters */
static int octnet_get_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_context *ctx;
	struct oct_intrmod_resp *resp;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_intrmod_resp),
					  sizeof(struct oct_intrmod_context));

	if (!sc)
		return -ENOMEM;

	resp = (struct oct_intrmod_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_intrmod_resp));

	ctx = (struct oct_intrmod_context *)sc->ctxptr;
	memset(ctx, 0, sizeof(struct oct_intrmod_context));
	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct_dev);
	init_waitqueue_head(&ctx->wc);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed out.
	 */
	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
		dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
		goto intrmod_info_wait_intr;
	}

	retval = ctx->status || resp->status;
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"Get interrupt moderation parameters failed\n");
		goto intrmod_info_wait_fail;
	}

	octeon_swap_8B_data((u64 *)&resp->intrmod,
			    (sizeof(struct oct_intrmod_cfg)) / 8);
	memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
	octeon_free_soft_command(oct_dev, sc);

	return 0;

intrmod_info_wait_fail:

	octeon_free_soft_command(oct_dev, sc);

intrmod_info_wait_intr:

	return -ENODEV;
}

/* Configure interrupt moderation parameters */
static int octnet_set_intrmod_cfg(struct lio *lio,
				  struct oct_intrmod_cfg *intr_cfg)
{
	struct octeon_soft_command *sc;
	struct oct_intrmod_context *ctx;
	struct oct_intrmod_cfg *cfg;
	int retval;
	struct octeon_device *oct_dev = lio->oct_dev;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  sizeof(struct oct_intrmod_cfg),
					  0,
					  sizeof(struct oct_intrmod_context));

	if (!sc)
		return -ENOMEM;

	ctx = (struct oct_intrmod_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct_dev);
	init_waitqueue_head(&ctx->wc);

	cfg = (struct oct_intrmod_cfg *)sc->virtdptr;

	memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
	octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);

	sc->callback = octnet_intrmod_callback;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		return -EINVAL;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed out.
	 */
	if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
		retval = ctx->status;
		if (retval)
			dev_err(&oct_dev->pci_dev->dev,
				"intrmod config failed. Status: %llx\n",
				CVM_CAST64(retval));
		else
			dev_info(&oct_dev->pci_dev->dev,
				 "Rx-Adaptive Interrupt moderation %s\n",
				 (intr_cfg->rx_enable) ?
				 "enabled" : "disabled");

		octeon_free_soft_command(oct_dev, sc);

		return ((retval) ? -ENODEV : 0);
	}

	dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");

	return -EINTR;
}

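/* Completion handler for the link-stats soft command: unless the request
 * timed out or the firmware flagged an error, byte-swap the response and
 * copy the MAC- and firmware-level rx/tx counters into the octeon
 * device's cached link_stats.
 */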
1756 static void
octnet_nic_stats_callback(struct octeon_device * oct_dev,u32 status,void * ptr)1757 octnet_nic_stats_callback(struct octeon_device *oct_dev,
1758 u32 status, void *ptr)
1759 {
1760 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1761 struct oct_nic_stats_resp *resp =
1762 (struct oct_nic_stats_resp *)sc->virtrptr;
1763 struct oct_nic_stats_ctrl *ctrl =
1764 (struct oct_nic_stats_ctrl *)sc->ctxptr;
1765 struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1766 struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1767
1768 struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1769 struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1770
1771 if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1772 octeon_swap_8B_data((u64 *)&resp->stats,
1773 (sizeof(struct oct_link_stats)) >> 3);
1774
1775 /* RX link-level stats */
1776 rstats->total_rcvd = rsp_rstats->total_rcvd;
1777 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1778 rstats->total_bcst = rsp_rstats->total_bcst;
1779 rstats->total_mcst = rsp_rstats->total_mcst;
1780 rstats->runts = rsp_rstats->runts;
1781 rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
1782 /* Accounts for over/under-run of buffers */
1783 rstats->fifo_err = rsp_rstats->fifo_err;
1784 rstats->dmac_drop = rsp_rstats->dmac_drop;
1785 rstats->fcs_err = rsp_rstats->fcs_err;
1786 rstats->jabber_err = rsp_rstats->jabber_err;
1787 rstats->l2_err = rsp_rstats->l2_err;
1788 rstats->frame_err = rsp_rstats->frame_err;
1789
1790 /* RX firmware stats */
1791 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1792 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1793 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1794 rstats->fw_err_link = rsp_rstats->fw_err_link;
1795 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1796 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1797 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1798
1799 /* Number of packets that are LROed */
1800 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1801 /* Number of octets that are LROed */
1802 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1803 /* Number of LRO packets formed */
1804 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1805 		/* Number of times LRO of a packet was aborted */
1806 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1807 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1808 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1809 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1810 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1811 /* intrmod: packet forward rate */
1812 rstats->fwd_rate = rsp_rstats->fwd_rate;
1813
1814 /* TX link-level stats */
1815 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1816 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1817 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1818 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1819 tstats->ctl_sent = rsp_tstats->ctl_sent;
1820 		/* Packets sent after one collision */
1821 		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1822 		/* Packets sent after multiple collisions */
1823 		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1824 /* Packets not sent due to max collisions */
1825 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1826 /* Packets not sent due to max deferrals */
1827 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1828 /* Accounts for over/under-run of buffers */
1829 tstats->fifo_err = rsp_tstats->fifo_err;
1830 tstats->runts = rsp_tstats->runts;
1831 /* Total number of collisions detected */
1832 tstats->total_collisions = rsp_tstats->total_collisions;
1833
1834 /* firmware stats */
1835 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1836 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1837 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1838 tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1839 tstats->fw_err_link = rsp_tstats->fw_err_link;
1840 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1841 tstats->fw_tso = rsp_tstats->fw_tso;
1842 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1843 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1844 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1845
1846 resp->status = 1;
1847 } else {
1848 resp->status = -1;
1849 }
1850 complete(&ctrl->complete);
1851 }
1852
1853 /* Retrieve link-level and firmware port statistics from the NIC */
1854 static int octnet_get_link_stats(struct net_device *netdev)
1855 {
1856 struct lio *lio = GET_LIO(netdev);
1857 struct octeon_device *oct_dev = lio->oct_dev;
1858
1859 struct octeon_soft_command *sc;
1860 struct oct_nic_stats_ctrl *ctrl;
1861 struct oct_nic_stats_resp *resp;
1862
1863 int retval;
1864
1865 /* Alloc soft command */
1866 sc = (struct octeon_soft_command *)
1867 octeon_alloc_soft_command(oct_dev,
1868 0,
1869 sizeof(struct oct_nic_stats_resp),
1870 sizeof(struct octnic_ctrl_pkt));
1871
1872 if (!sc)
1873 return -ENOMEM;
1874
1875 resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1876 memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1877
1878 ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1879 memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1880 ctrl->netdev = netdev;
1881 init_completion(&ctrl->complete);
1882
1883 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1884
1885 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1886 OPCODE_NIC_PORT_STATS, 0, 0, 0);
1887
1888 sc->callback = octnet_nic_stats_callback;
1889 sc->callback_arg = sc;
1890 	sc->wait_time = 500; /* in milliseconds */
1891
1892 retval = octeon_send_soft_command(oct_dev, sc);
1893 if (retval == IQ_SEND_FAILED) {
1894 octeon_free_soft_command(oct_dev, sc);
1895 return -EINVAL;
1896 }
1897
1898 wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1899
1900 if (resp->status != 1) {
1901 octeon_free_soft_command(oct_dev, sc);
1902
1903 return -EINVAL;
1904 }
1905
1906 octeon_free_soft_command(oct_dev, sc);
1907
1908 return 0;
1909 }
1910
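/* ethtool get_coalesce handler (ethtool -c). Reads the interrupt moderation
 * config back from firmware and translates it into generic ethtool_coalesce
 * fields; static usecs/frames values are reported only for directions with
 * adaptive moderation disabled.
 */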
1911 static int lio_get_intr_coalesce(struct net_device *netdev,
1912 struct ethtool_coalesce *intr_coal)
1913 {
1914 struct lio *lio = GET_LIO(netdev);
1915 struct octeon_device *oct = lio->oct_dev;
1916 struct octeon_instr_queue *iq;
1917 struct oct_intrmod_cfg intrmod_cfg;
1918
1919 if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
1920 return -ENODEV;
1921
1922 switch (oct->chip_id) {
1923 case OCTEON_CN23XX_PF_VID:
1924 case OCTEON_CN23XX_VF_VID: {
1925 if (!intrmod_cfg.rx_enable) {
1926 intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
1927 intr_coal->rx_max_coalesced_frames =
1928 oct->rx_max_coalesced_frames;
1929 }
1930 if (!intrmod_cfg.tx_enable)
1931 intr_coal->tx_max_coalesced_frames =
1932 oct->tx_max_coalesced_frames;
1933 break;
1934 }
1935 case OCTEON_CN68XX:
1936 case OCTEON_CN66XX: {
1937 struct octeon_cn6xxx *cn6xxx =
1938 (struct octeon_cn6xxx *)oct->chip;
1939
1940 if (!intrmod_cfg.rx_enable) {
1941 intr_coal->rx_coalesce_usecs =
1942 CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
1943 intr_coal->rx_max_coalesced_frames =
1944 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
1945 }
1946 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
1947 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
1948 break;
1949 }
1950 default:
1951 		netif_info(lio, drv, lio->netdev, "Unknown chip!\n");
1952 return -EINVAL;
1953 }
1954 if (intrmod_cfg.rx_enable) {
1955 intr_coal->use_adaptive_rx_coalesce =
1956 intrmod_cfg.rx_enable;
1957 intr_coal->rate_sample_interval =
1958 intrmod_cfg.check_intrvl;
1959 intr_coal->pkt_rate_high =
1960 intrmod_cfg.maxpkt_ratethr;
1961 intr_coal->pkt_rate_low =
1962 intrmod_cfg.minpkt_ratethr;
1963 intr_coal->rx_max_coalesced_frames_high =
1964 intrmod_cfg.rx_maxcnt_trigger;
1965 intr_coal->rx_coalesce_usecs_high =
1966 intrmod_cfg.rx_maxtmr_trigger;
1967 intr_coal->rx_coalesce_usecs_low =
1968 intrmod_cfg.rx_mintmr_trigger;
1969 intr_coal->rx_max_coalesced_frames_low =
1970 intrmod_cfg.rx_mincnt_trigger;
1971 }
1972 if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
1973 (intrmod_cfg.tx_enable)) {
1974 intr_coal->use_adaptive_tx_coalesce =
1975 intrmod_cfg.tx_enable;
1976 intr_coal->tx_max_coalesced_frames_high =
1977 intrmod_cfg.tx_maxcnt_trigger;
1978 intr_coal->tx_max_coalesced_frames_low =
1979 intrmod_cfg.tx_mincnt_trigger;
1980 }
1981 return 0;
1982 }
1983
1984 /* Enable/disable adaptive (auto) interrupt moderation */
1985 static int oct_cfg_adaptive_intr(struct lio *lio,
1986 struct oct_intrmod_cfg *intrmod_cfg,
1987 struct ethtool_coalesce *intr_coal)
1988 {
1989 int ret = 0;
1990
1991 if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
1992 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
1993 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
1994 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
1995 }
1996 if (intrmod_cfg->rx_enable) {
1997 intrmod_cfg->rx_maxcnt_trigger =
1998 intr_coal->rx_max_coalesced_frames_high;
1999 intrmod_cfg->rx_maxtmr_trigger =
2000 intr_coal->rx_coalesce_usecs_high;
2001 intrmod_cfg->rx_mintmr_trigger =
2002 intr_coal->rx_coalesce_usecs_low;
2003 intrmod_cfg->rx_mincnt_trigger =
2004 intr_coal->rx_max_coalesced_frames_low;
2005 }
2006 if (intrmod_cfg->tx_enable) {
2007 intrmod_cfg->tx_maxcnt_trigger =
2008 intr_coal->tx_max_coalesced_frames_high;
2009 intrmod_cfg->tx_mincnt_trigger =
2010 intr_coal->tx_max_coalesced_frames_low;
2011 }
2012
2013 ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
2014
2015 return ret;
2016 }
2017
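/* Program the per-output-queue RX packet-count interrupt threshold
 * (rx-frames). A caller value of 0 selects the chip default (6xxx) or
 * the currently configured value (23xx).
 */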
2018 static int
2019 oct_cfg_rx_intrcnt(struct lio *lio,
2020 struct oct_intrmod_cfg *intrmod,
2021 struct ethtool_coalesce *intr_coal)
2022 {
2023 struct octeon_device *oct = lio->oct_dev;
2024 u32 rx_max_coalesced_frames;
2025
2026 	/* Configure packet-count-based interrupt thresholds */
2027 switch (oct->chip_id) {
2028 case OCTEON_CN68XX:
2029 case OCTEON_CN66XX: {
2030 struct octeon_cn6xxx *cn6xxx =
2031 (struct octeon_cn6xxx *)oct->chip;
2032
2033 if (!intr_coal->rx_max_coalesced_frames)
2034 rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
2035 else
2036 rx_max_coalesced_frames =
2037 intr_coal->rx_max_coalesced_frames;
2038 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
2039 rx_max_coalesced_frames);
2040 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
2041 break;
2042 }
2043 case OCTEON_CN23XX_PF_VID: {
2044 int q_no;
2045
2046 if (!intr_coal->rx_max_coalesced_frames)
2047 rx_max_coalesced_frames = intrmod->rx_frames;
2048 else
2049 rx_max_coalesced_frames =
2050 intr_coal->rx_max_coalesced_frames;
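		/* CN23XX_SLI_OQ_PKT_INT_LEVELS, as used here, keeps the
		 * packet-count threshold in its low 32 bits and the time
		 * threshold in bits 53:32; the 0x3fffff00000000 mask below
		 * preserves the time field while the count is rewritten.
		 * E.g. for rx-frames=64 the low word becomes 64 - 1 = 63.
		 */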
2051 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2052 			u32 hw_q = q_no + oct->sriov_info.pf_srn;
2053 			octeon_write_csr64(
2054 			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(hw_q),
2055 			    (octeon_read_csr64(
2056 			     oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(hw_q)) &
2057 			     (0x3fffff00000000UL)) |
2058 			    (rx_max_coalesced_frames - 1));
2059 			/* consider setting the resend bit */
2060 		}
2061 intrmod->rx_frames = rx_max_coalesced_frames;
2062 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2063 break;
2064 }
2065 case OCTEON_CN23XX_VF_VID: {
2066 int q_no;
2067
2068 if (!intr_coal->rx_max_coalesced_frames)
2069 rx_max_coalesced_frames = intrmod->rx_frames;
2070 else
2071 rx_max_coalesced_frames =
2072 intr_coal->rx_max_coalesced_frames;
2073 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2074 octeon_write_csr64(
2075 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2076 (octeon_read_csr64(
2077 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
2078 (0x3fffff00000000UL)) |
2079 (rx_max_coalesced_frames - 1));
2080 			/* consider writing to the resend bit here */
2081 }
2082 intrmod->rx_frames = rx_max_coalesced_frames;
2083 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
2084 break;
2085 }
2086 default:
2087 return -EINVAL;
2088 }
2089 return 0;
2090 }
2091
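/* Program the per-output-queue RX time-based interrupt threshold (rx-usecs),
 * converting microseconds to chip ticks via the chip-specific
 * *_get_oq_ticks() helper before writing the INT_LEVELS registers.
 */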
2092 static int oct_cfg_rx_intrtime(struct lio *lio,
2093 struct oct_intrmod_cfg *intrmod,
2094 struct ethtool_coalesce *intr_coal)
2095 {
2096 struct octeon_device *oct = lio->oct_dev;
2097 u32 time_threshold, rx_coalesce_usecs;
2098
2099 	/* Configure time-based interrupt thresholds */
2100 switch (oct->chip_id) {
2101 case OCTEON_CN68XX:
2102 case OCTEON_CN66XX: {
2103 struct octeon_cn6xxx *cn6xxx =
2104 (struct octeon_cn6xxx *)oct->chip;
2105 if (!intr_coal->rx_coalesce_usecs)
2106 rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
2107 else
2108 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2109
2110 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
2111 rx_coalesce_usecs);
2112 octeon_write_csr(oct,
2113 CN6XXX_SLI_OQ_INT_LEVEL_TIME,
2114 time_threshold);
2115
2116 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
2117 break;
2118 }
2119 case OCTEON_CN23XX_PF_VID: {
2120 u64 time_threshold;
2121 int q_no;
2122
2123 if (!intr_coal->rx_coalesce_usecs)
2124 rx_coalesce_usecs = intrmod->rx_usecs;
2125 else
2126 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2127 time_threshold =
2128 cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2129 		for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2130 			u32 hw_q = q_no + oct->sriov_info.pf_srn;
2131 			octeon_write_csr64(oct,
2132 					   CN23XX_SLI_OQ_PKT_INT_LEVELS(hw_q),
2133 					   (intrmod->rx_frames |
2134 					   ((u64)time_threshold << 32)));
2135 			/* consider writing to the resend bit here */
2136 		}
2137 intrmod->rx_usecs = rx_coalesce_usecs;
2138 oct->rx_coalesce_usecs = rx_coalesce_usecs;
2139 break;
2140 }
2141 case OCTEON_CN23XX_VF_VID: {
2142 u64 time_threshold;
2143 int q_no;
2144
2145 if (!intr_coal->rx_coalesce_usecs)
2146 rx_coalesce_usecs = intrmod->rx_usecs;
2147 else
2148 rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
2149
2150 time_threshold =
2151 cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
2152 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
2153 octeon_write_csr64(
2154 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
2155 (intrmod->rx_frames |
2156 ((u64)time_threshold << 32)));
2157 			/* consider setting the resend bit */
2158 }
2159 intrmod->rx_usecs = rx_coalesce_usecs;
2160 oct->rx_coalesce_usecs = rx_coalesce_usecs;
2161 break;
2162 }
2163 default:
2164 return -EINVAL;
2165 }
2166
2167 return 0;
2168 }
2169
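/* Program the per-input-queue TX completion watermark (tx-frames) by
 * rewriting the WMARK field of each queue's instruction-count register.
 * The 6xxx chips are a no-op here; their tx-frames setting is applied via
 * iq->fill_threshold in lio_set_intr_coalesce() instead.
 */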
2170 static int
2171 oct_cfg_tx_intrcnt(struct lio *lio,
2172 struct oct_intrmod_cfg *intrmod,
2173 struct ethtool_coalesce *intr_coal)
2174 {
2175 struct octeon_device *oct = lio->oct_dev;
2176 u32 iq_intr_pkt;
2177 void __iomem *inst_cnt_reg;
2178 u64 val;
2179
2180 	/* Configure packet-count-based interrupt thresholds */
2181 switch (oct->chip_id) {
2182 case OCTEON_CN68XX:
2183 case OCTEON_CN66XX:
2184 break;
2185 case OCTEON_CN23XX_VF_VID:
2186 case OCTEON_CN23XX_PF_VID: {
2187 int q_no;
2188
2189 if (!intr_coal->tx_max_coalesced_frames)
2190 iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
2191 CN23XX_PKT_IN_DONE_WMARK_MASK;
2192 else
2193 iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
2194 CN23XX_PKT_IN_DONE_WMARK_MASK;
2195 for (q_no = 0; q_no < oct->num_iqs; q_no++) {
2196 inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
2197 val = readq(inst_cnt_reg);
2198 			/* Clear wmark and count; don't write the count back */
2199 val = (val & 0xFFFF000000000000ULL) |
2200 ((u64)(iq_intr_pkt - 1)
2201 << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
2202 writeq(val, inst_cnt_reg);
2203 			/* consider setting the resend bit */
2204 }
2205 intrmod->tx_frames = iq_intr_pkt;
2206 oct->tx_max_coalesced_frames = iq_intr_pkt;
2207 break;
2208 }
2209 default:
2210 return -EINVAL;
2211 }
2212 return 0;
2213 }
2214
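/* ethtool set_coalesce handler (ethtool -C). Adaptive (intrmod) settings are
 * pushed to firmware first; static thresholds are then written only for the
 * direction(s) with adaptive moderation disabled. An illustrative invocation
 * (device name and values are examples only):
 *
 *   ethtool -C eth0 adaptive-rx off rx-usecs 64 rx-frames 64 tx-frames 64
 */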
2215 static int lio_set_intr_coalesce(struct net_device *netdev,
2216 struct ethtool_coalesce *intr_coal)
2217 {
2218 struct lio *lio = GET_LIO(netdev);
2219 int ret;
2220 struct octeon_device *oct = lio->oct_dev;
2221 struct oct_intrmod_cfg intrmod = {0};
2222 u32 j, q_no;
2223 int db_max, db_min;
2224
2225 switch (oct->chip_id) {
2226 case OCTEON_CN68XX:
2227 case OCTEON_CN66XX:
2228 db_min = CN6XXX_DB_MIN;
2229 db_max = CN6XXX_DB_MAX;
2230 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
2231 (intr_coal->tx_max_coalesced_frames <= db_max)) {
2232 for (j = 0; j < lio->linfo.num_txpciq; j++) {
2233 q_no = lio->linfo.txpciq[j].s.q_no;
2234 oct->instr_queue[q_no]->fill_threshold =
2235 intr_coal->tx_max_coalesced_frames;
2236 }
2237 } else {
2238 dev_err(&oct->pci_dev->dev,
2239 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
2240 intr_coal->tx_max_coalesced_frames,
2241 db_min, db_max);
2242 return -EINVAL;
2243 }
2244 break;
2245 case OCTEON_CN23XX_PF_VID:
2246 case OCTEON_CN23XX_VF_VID:
2247 break;
2248 default:
2249 return -EINVAL;
2250 }
2251
2252 intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
2253 intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
2254 intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2255 intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2256 intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2257
2258 ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
2259
2260 if (!intr_coal->use_adaptive_rx_coalesce) {
2261 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
2262 if (ret)
2263 goto ret_intrmod;
2264
2265 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
2266 if (ret)
2267 goto ret_intrmod;
2268 } else {
2269 oct->rx_coalesce_usecs =
2270 CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
2271 oct->rx_max_coalesced_frames =
2272 CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
2273 }
2274
2275 if (!intr_coal->use_adaptive_tx_coalesce) {
2276 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2277 if (ret)
2278 goto ret_intrmod;
2279 } else {
2280 oct->tx_max_coalesced_frames =
2281 CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2282 }
2283
2284 return 0;
2285 ret_intrmod:
2286 return ret;
2287 }
2288
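/* ethtool get_ts_info handler (ethtool -T). Software timestamping is always
 * advertised; hardware timestamping modes are advertised only when
 * PTP_HARDWARE_TIMESTAMPING is compiled in, and phc_index stays -1 unless a
 * PTP clock was registered for this interface.
 */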
2289 static int lio_get_ts_info(struct net_device *netdev,
2290 struct ethtool_ts_info *info)
2291 {
2292 struct lio *lio = GET_LIO(netdev);
2293
2294 info->so_timestamping =
2295 #ifdef PTP_HARDWARE_TIMESTAMPING
2296 SOF_TIMESTAMPING_TX_HARDWARE |
2297 SOF_TIMESTAMPING_RX_HARDWARE |
2298 SOF_TIMESTAMPING_RAW_HARDWARE |
2299 SOF_TIMESTAMPING_TX_SOFTWARE |
2300 #endif
2301 SOF_TIMESTAMPING_RX_SOFTWARE |
2302 SOF_TIMESTAMPING_SOFTWARE;
2303
2304 if (lio->ptp_clock)
2305 info->phc_index = ptp_clock_index(lio->ptp_clock);
2306 else
2307 info->phc_index = -1;
2308
2309 #ifdef PTP_HARDWARE_TIMESTAMPING
2310 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2311
2312 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2313 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2314 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2315 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2316 #endif
2317
2318 return 0;
2319 }
2320
2321 /* Return register dump len. */
2322 static int lio_get_regs_len(struct net_device *dev)
2323 {
2324 struct lio *lio = GET_LIO(dev);
2325 struct octeon_device *oct = lio->oct_dev;
2326
2327 switch (oct->chip_id) {
2328 case OCTEON_CN23XX_PF_VID:
2329 return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2330 case OCTEON_CN23XX_VF_VID:
2331 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2332 default:
2333 return OCT_ETHTOOL_REGDUMP_LEN;
2334 }
2335 }
2336
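/* Dump the CN23XX PF CSRs for ethtool -d into the buffer sized by
 * lio_get_regs_len(); each entry is formatted as "[offset] (NAME): value".
 */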
2337 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2338 {
2339 u32 reg;
2340 u8 pf_num = oct->pf_num;
2341 int len = 0;
2342 int i;
2343
2344 /* PCI Window Registers */
2345
2346 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2347
2348 /*0x29030 or 0x29040*/
2349 reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2350 len += sprintf(s + len,
2351 "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2352 reg, oct->pcie_port, oct->pf_num,
2353 (u64)octeon_read_csr64(oct, reg));
2354
2355 /*0x27080 or 0x27090*/
2356 reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2357 len +=
2358 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2359 reg, oct->pcie_port, oct->pf_num,
2360 (u64)octeon_read_csr64(oct, reg));
2361
2362 /*0x27000 or 0x27010*/
2363 reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2364 len +=
2365 sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2366 reg, oct->pcie_port, oct->pf_num,
2367 (u64)octeon_read_csr64(oct, reg));
2368
2369 /*0x29120*/
2370 reg = 0x29120;
2371 len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2372 (u64)octeon_read_csr64(oct, reg));
2373
2374 /*0x27300*/
2375 reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2376 (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2377 len += sprintf(
2378 s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2379 oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2380
2381 /*0x27200*/
2382 reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2383 (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2384 len += sprintf(s + len,
2385 "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2386 reg, oct->pcie_port, oct->pf_num,
2387 (u64)octeon_read_csr64(oct, reg));
2388
2389 /*29130*/
2390 reg = CN23XX_SLI_PKT_CNT_INT;
2391 len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2392 (u64)octeon_read_csr64(oct, reg));
2393
2394 /*0x29140*/
2395 reg = CN23XX_SLI_PKT_TIME_INT;
2396 len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2397 (u64)octeon_read_csr64(oct, reg));
2398
2399 /*0x29160*/
2400 reg = 0x29160;
2401 len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2402 (u64)octeon_read_csr64(oct, reg));
2403
2404 /*0x29180*/
2405 reg = CN23XX_SLI_OQ_WMARK;
2406 len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2407 reg, (u64)octeon_read_csr64(oct, reg));
2408
2409 /*0x291E0*/
2410 reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2411 len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2412 (u64)octeon_read_csr64(oct, reg));
2413
2414 /*0x29210*/
2415 reg = CN23XX_SLI_GBL_CONTROL;
2416 len += sprintf(s + len,
2417 "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2418 (u64)octeon_read_csr64(oct, reg));
2419
2420 /*0x29220*/
2421 reg = 0x29220;
2422 len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2423 reg, (u64)octeon_read_csr64(oct, reg));
2424
2425 /*PF only*/
2426 if (pf_num == 0) {
2427 /*0x29260*/
2428 reg = CN23XX_SLI_OUT_BP_EN_W1S;
2429 len += sprintf(s + len,
2430 "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
2431 reg, (u64)octeon_read_csr64(oct, reg));
2432 } else if (pf_num == 1) {
2433 /*0x29270*/
2434 reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2435 len += sprintf(s + len,
2436 "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2437 reg, (u64)octeon_read_csr64(oct, reg));
2438 }
2439
2440 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2441 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2442 len +=
2443 sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2444 reg, i, (u64)octeon_read_csr64(oct, reg));
2445 }
2446
2447 /*0x10040*/
2448 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2449 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2450 len += sprintf(s + len,
2451 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2452 reg, i, (u64)octeon_read_csr64(oct, reg));
2453 }
2454
2455 /*0x10080*/
2456 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2457 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2458 len += sprintf(s + len,
2459 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2460 reg, i, (u64)octeon_read_csr64(oct, reg));
2461 }
2462
2463 /*0x10090*/
2464 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2465 reg = CN23XX_SLI_OQ_SIZE(i);
2466 len += sprintf(
2467 s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2468 reg, i, (u64)octeon_read_csr64(oct, reg));
2469 }
2470
2471 /*0x10050*/
2472 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2473 reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2474 len += sprintf(
2475 s + len,
2476 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2477 reg, i, (u64)octeon_read_csr64(oct, reg));
2478 }
2479
2480 /*0x10070*/
2481 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2482 reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2483 len += sprintf(s + len,
2484 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2485 reg, i, (u64)octeon_read_csr64(oct, reg));
2486 }
2487
2488 /*0x100a0*/
2489 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2490 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2491 len += sprintf(s + len,
2492 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2493 reg, i, (u64)octeon_read_csr64(oct, reg));
2494 }
2495
2496 /*0x100b0*/
2497 for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2498 reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2499 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2500 reg, i, (u64)octeon_read_csr64(oct, reg));
2501 }
2502
2503 /*0x100c0*/
2504 	for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2505 		reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
2506 		len += sprintf(s + len,
2507 			       "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2508 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2509 	}

2510 	/*0x10000*/
2511 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2512 		reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
2513 		len += sprintf(
2514 			s + len,
2515 			"\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2516 			reg, i, (u64)octeon_read_csr64(oct, reg));
2517 	}
2518 
2519 	/*0x10010*/
2520 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2521 		reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
2522 		len += sprintf(
2523 			s + len,
2524 			"\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
2525 			i, (u64)octeon_read_csr64(oct, reg));
2526 	}
2527 
2528 	/*0x10020*/
2529 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2530 		reg = CN23XX_SLI_IQ_DOORBELL(i);
2531 		len += sprintf(
2532 			s + len,
2533 			"\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2534 			reg, i, (u64)octeon_read_csr64(oct, reg));
2535 	}
2536 
2537 	/*0x10030*/
2538 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2539 		reg = CN23XX_SLI_IQ_SIZE(i);
2540 		len += sprintf(
2541 			s + len,
2542 			"\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2543 			reg, i, (u64)octeon_read_csr64(oct, reg));
2544 	}
2545 
2546 	/*0x10040*/
2547 	for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2548 		reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2549 		len += sprintf(s + len,
2550 			       "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2551 			       reg, i, (u64)octeon_read_csr64(oct, reg));
2552 	}
2553
2554 return len;
2555 }
2556
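/* Dump the CN23XX VF CSRs for ethtool -d. A VF only owns
 * sriov_info.rings_per_vf rings, so every loop below is bounded by that
 * count rather than the chip-wide queue maximums.
 */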
2557 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2558 {
2559 int len = 0;
2560 u32 reg;
2561 int i;
2562
2563 /* PCI Window Registers */
2564
2565 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2566
2567 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2568 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2569 len += sprintf(s + len,
2570 "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2571 reg, i, (u64)octeon_read_csr64(oct, reg));
2572 }
2573
2574 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2575 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2576 len += sprintf(s + len,
2577 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2578 reg, i, (u64)octeon_read_csr64(oct, reg));
2579 }
2580
2581 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2582 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2583 len += sprintf(s + len,
2584 "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2585 reg, i, (u64)octeon_read_csr64(oct, reg));
2586 }
2587
2588 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2589 reg = CN23XX_VF_SLI_OQ_SIZE(i);
2590 len += sprintf(s + len,
2591 "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2592 reg, i, (u64)octeon_read_csr64(oct, reg));
2593 }
2594
2595 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2596 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2597 len += sprintf(s + len,
2598 "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2599 reg, i, (u64)octeon_read_csr64(oct, reg));
2600 }
2601
2602 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2603 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2604 len += sprintf(s + len,
2605 "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2606 reg, i, (u64)octeon_read_csr64(oct, reg));
2607 }
2608
2609 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2610 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2611 len += sprintf(s + len,
2612 "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2613 reg, i, (u64)octeon_read_csr64(oct, reg));
2614 }
2615
2616 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2617 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2618 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2619 reg, i, (u64)octeon_read_csr64(oct, reg));
2620 }
2621
2622 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2623 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2624 len += sprintf(s + len,
2625 "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2626 reg, i, (u64)octeon_read_csr64(oct, reg));
2627 }
2628
2629 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2630 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2631 len += sprintf(s + len,
2632 "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2633 reg, i, (u64)octeon_read_csr64(oct, reg));
2634 }
2635
2636 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2637 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2638 len += sprintf(s + len,
2639 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2640 reg, i, (u64)octeon_read_csr64(oct, reg));
2641 }
2642
2643 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2644 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2645 len += sprintf(s + len,
2646 "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2647 reg, i, (u64)octeon_read_csr64(oct, reg));
2648 }
2649
2650 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2651 reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2652 len += sprintf(s + len,
2653 "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2654 reg, i, (u64)octeon_read_csr64(oct, reg));
2655 }
2656
2657 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2658 reg = CN23XX_VF_SLI_IQ_SIZE(i);
2659 len += sprintf(s + len,
2660 "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2661 reg, i, (u64)octeon_read_csr64(oct, reg));
2662 }
2663
2664 for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2665 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2666 len += sprintf(s + len,
2667 "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2668 reg, i, (u64)octeon_read_csr64(oct, reg));
2669 }
2670
2671 return len;
2672 }
2673
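/* Dump the CN66XX/CN68XX CSRs for ethtool -d: PCI window registers,
 * interrupt enable/summary, per-queue doorbells and counters, DMA engine
 * registers and the BAR1 index registers.
 */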
2674 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2675 {
2676 u32 reg;
2677 int i, len = 0;
2678
2679 /* PCI Window Registers */
2680
2681 len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2682 reg = CN6XXX_WIN_WR_ADDR_LO;
2683 len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2684 CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2685 reg = CN6XXX_WIN_WR_ADDR_HI;
2686 len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2687 CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2688 reg = CN6XXX_WIN_RD_ADDR_LO;
2689 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2690 CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2691 reg = CN6XXX_WIN_RD_ADDR_HI;
2692 len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2693 CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2694 reg = CN6XXX_WIN_WR_DATA_LO;
2695 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2696 CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2697 reg = CN6XXX_WIN_WR_DATA_HI;
2698 len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2699 CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2700 len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2701 CN6XXX_WIN_WR_MASK_REG,
2702 octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2703
2704 /* PCI Interrupt Register */
2705 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2706 CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2707 CN6XXX_SLI_INT_ENB64_PORT0));
2708 len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2709 CN6XXX_SLI_INT_ENB64_PORT1,
2710 octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2711 len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2712 octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2713
2714 /* PCI Output queue registers */
2715 for (i = 0; i < oct->num_oqs; i++) {
2716 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2717 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2718 reg, i, octeon_read_csr(oct, reg));
2719 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2720 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2721 reg, i, octeon_read_csr(oct, reg));
2722 }
2723 reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2724 len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2725 reg, octeon_read_csr(oct, reg));
2726 reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2727 len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2728 reg, octeon_read_csr(oct, reg));
2729
2730 /* PCI Input queue registers */
2731 	for (i = 0; i <= 3; i++) {
2732 		reg = CN6XXX_SLI_IQ_DOORBELL(i);
2735 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2736 reg, i, octeon_read_csr(oct, reg));
2737 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2738 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2739 reg, i, octeon_read_csr(oct, reg));
2740 }
2741
2742 /* PCI DMA registers */
2743
2744 len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2745 CN6XXX_DMA_CNT(0),
2746 octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2747 reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2748 len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2749 CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2750 reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2751 len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2752 CN6XXX_DMA_TIME_INT_LEVEL(0),
2753 octeon_read_csr(oct, reg));
2754
2755 len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2756 CN6XXX_DMA_CNT(1),
2757 octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2758 reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2759 len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2760 CN6XXX_DMA_PKT_INT_LEVEL(1),
2761 octeon_read_csr(oct, reg));
2762 	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
2763 len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2764 CN6XXX_DMA_TIME_INT_LEVEL(1),
2765 octeon_read_csr(oct, reg));
2766
2767 /* PCI Index registers */
2768
2769 len += sprintf(s + len, "\n");
2770
2771 for (i = 0; i < 16; i++) {
2772 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2773 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2774 CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2775 }
2776
2777 return len;
2778 }
2779
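/* Dump selected PCI config-space dwords of the 6xxx device (registers
 * 0-13 and 30-34), read via pci_read_config_dword().
 */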
2780 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2781 {
2782 u32 val;
2783 int i, len = 0;
2784
2785 /* PCI CONFIG Registers */
2786
2787 len += sprintf(s + len,
2788 "\n\t Octeon Config space Registers\n\n");
2789
2790 for (i = 0; i <= 13; i++) {
2791 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2792 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2793 (i * 4), i, val);
2794 }
2795
2796 for (i = 30; i <= 34; i++) {
2797 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2798 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2799 (i * 4), i, val);
2800 }
2801
2802 return len;
2803 }
2804
2805 /* Return the register dump to the user app. */
2806 static void lio_get_regs(struct net_device *dev,
2807 struct ethtool_regs *regs, void *regbuf)
2808 {
2809 struct lio *lio = GET_LIO(dev);
2810 int len = 0;
2811 struct octeon_device *oct = lio->oct_dev;
2812
2813 regs->version = OCT_ETHTOOL_REGSVER;
2814
2815 switch (oct->chip_id) {
2816 case OCTEON_CN23XX_PF_VID:
2817 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
2818 len += cn23xx_read_csr_reg(regbuf + len, oct);
2819 break;
2820 case OCTEON_CN23XX_VF_VID:
2821 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
2822 len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
2823 break;
2824 case OCTEON_CN68XX:
2825 case OCTEON_CN66XX:
2826 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
2827 len += cn6xxx_read_csr_reg(regbuf + len, oct);
2828 len += cn6xxx_read_config_reg(regbuf + len, oct);
2829 break;
2830 default:
2831 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
2832 __func__, oct->chip_id);
2833 }
2834 }
2835
2836 static u32 lio_get_priv_flags(struct net_device *netdev)
2837 {
2838 struct lio *lio = GET_LIO(netdev);
2839
2840 return lio->oct_dev->priv_flags;
2841 }
2842
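/* ethtool --set-priv-flags handler; OCT_PRIV_FLAG_TX_BYTES is currently the
 * only private flag handled here.
 */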
2843 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
2844 {
2845 struct lio *lio = GET_LIO(netdev);
2846 bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
2847
2848 lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
2849 intr_by_tx_bytes);
2850 return 0;
2851 }
2852
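/* PF and VF ethtool operation tables. The VF table reuses most PF handlers
 * but swaps in VF-specific drvinfo/strings/stats handlers and omits PF-only
 * operations such as EEPROM access, pause parameters and LED control.
 */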
2853 static const struct ethtool_ops lio_ethtool_ops = {
2854 .get_link_ksettings = lio_get_link_ksettings,
2855 .get_link = ethtool_op_get_link,
2856 .get_drvinfo = lio_get_drvinfo,
2857 .get_ringparam = lio_ethtool_get_ringparam,
2858 .set_ringparam = lio_ethtool_set_ringparam,
2859 .get_channels = lio_ethtool_get_channels,
2860 .set_channels = lio_ethtool_set_channels,
2861 .set_phys_id = lio_set_phys_id,
2862 .get_eeprom_len = lio_get_eeprom_len,
2863 .get_eeprom = lio_get_eeprom,
2864 .get_strings = lio_get_strings,
2865 .get_ethtool_stats = lio_get_ethtool_stats,
2866 .get_pauseparam = lio_get_pauseparam,
2867 .set_pauseparam = lio_set_pauseparam,
2868 .get_regs_len = lio_get_regs_len,
2869 .get_regs = lio_get_regs,
2870 .get_msglevel = lio_get_msglevel,
2871 .set_msglevel = lio_set_msglevel,
2872 .get_sset_count = lio_get_sset_count,
2873 .get_coalesce = lio_get_intr_coalesce,
2874 .set_coalesce = lio_set_intr_coalesce,
2875 .get_priv_flags = lio_get_priv_flags,
2876 .set_priv_flags = lio_set_priv_flags,
2877 .get_ts_info = lio_get_ts_info,
2878 };
2879
2880 static const struct ethtool_ops lio_vf_ethtool_ops = {
2881 .get_link_ksettings = lio_get_link_ksettings,
2882 .get_link = ethtool_op_get_link,
2883 .get_drvinfo = lio_get_vf_drvinfo,
2884 .get_ringparam = lio_ethtool_get_ringparam,
2885 .set_ringparam = lio_ethtool_set_ringparam,
2886 .get_channels = lio_ethtool_get_channels,
2887 .set_channels = lio_ethtool_set_channels,
2888 .get_strings = lio_vf_get_strings,
2889 .get_ethtool_stats = lio_vf_get_ethtool_stats,
2890 .get_regs_len = lio_get_regs_len,
2891 .get_regs = lio_get_regs,
2892 .get_msglevel = lio_get_msglevel,
2893 .set_msglevel = lio_vf_set_msglevel,
2894 .get_sset_count = lio_vf_get_sset_count,
2895 .get_coalesce = lio_get_intr_coalesce,
2896 .set_coalesce = lio_set_intr_coalesce,
2897 .get_priv_flags = lio_get_priv_flags,
2898 .set_priv_flags = lio_set_priv_flags,
2899 .get_ts_info = lio_get_ts_info,
2900 };
2901
2902 void liquidio_set_ethtool_ops(struct net_device *netdev)
2903 {
2904 struct lio *lio = GET_LIO(netdev);
2905 struct octeon_device *oct = lio->oct_dev;
2906
2907 if (OCTEON_CN23XX_VF(oct))
2908 netdev->ethtool_ops = &lio_vf_ethtool_ops;
2909 else
2910 netdev->ethtool_ops = &lio_ethtool_ops;
2911 }
2912