1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3
4 /*
5 * nfp_net_ethtool.c
6 * Netronome network device driver: ethtool support
7 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
8 * Jason McMullan <jason.mcmullan@netronome.com>
9 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
10 * Brad Petrus <brad.petrus@netronome.com>
11 */
12
13 #include <linux/bitfield.h>
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/interrupt.h>
18 #include <linux/pci.h>
19 #include <linux/ethtool.h>
20 #include <linux/firmware.h>
21 #include <linux/sfp.h>
22
23 #include "nfpcore/nfp.h"
24 #include "nfpcore/nfp_nsp.h"
25 #include "nfp_app.h"
26 #include "nfp_main.h"
27 #include "nfp_net_ctrl.h"
28 #include "nfp_net.h"
29 #include "nfp_port.h"
30
31 struct nfp_et_stat {
32 char name[ETH_GSTRING_LEN];
33 int off;
34 };
35
36 static const struct nfp_et_stat nfp_net_et_stats[] = {
37 /* Stats from the device */
38 { "dev_rx_discards", NFP_NET_CFG_STATS_RX_DISCARDS },
39 { "dev_rx_errors", NFP_NET_CFG_STATS_RX_ERRORS },
40 { "dev_rx_bytes", NFP_NET_CFG_STATS_RX_OCTETS },
41 { "dev_rx_uc_bytes", NFP_NET_CFG_STATS_RX_UC_OCTETS },
42 { "dev_rx_mc_bytes", NFP_NET_CFG_STATS_RX_MC_OCTETS },
43 { "dev_rx_bc_bytes", NFP_NET_CFG_STATS_RX_BC_OCTETS },
44 { "dev_rx_pkts", NFP_NET_CFG_STATS_RX_FRAMES },
45 { "dev_rx_mc_pkts", NFP_NET_CFG_STATS_RX_MC_FRAMES },
46 { "dev_rx_bc_pkts", NFP_NET_CFG_STATS_RX_BC_FRAMES },
47
48 { "dev_tx_discards", NFP_NET_CFG_STATS_TX_DISCARDS },
49 { "dev_tx_errors", NFP_NET_CFG_STATS_TX_ERRORS },
50 { "dev_tx_bytes", NFP_NET_CFG_STATS_TX_OCTETS },
51 { "dev_tx_uc_bytes", NFP_NET_CFG_STATS_TX_UC_OCTETS },
52 { "dev_tx_mc_bytes", NFP_NET_CFG_STATS_TX_MC_OCTETS },
53 { "dev_tx_bc_bytes", NFP_NET_CFG_STATS_TX_BC_OCTETS },
54 { "dev_tx_pkts", NFP_NET_CFG_STATS_TX_FRAMES },
55 { "dev_tx_mc_pkts", NFP_NET_CFG_STATS_TX_MC_FRAMES },
56 { "dev_tx_bc_pkts", NFP_NET_CFG_STATS_TX_BC_FRAMES },
57
58 { "bpf_pass_pkts", NFP_NET_CFG_STATS_APP0_FRAMES },
59 { "bpf_pass_bytes", NFP_NET_CFG_STATS_APP0_BYTES },
60 /* see comments in outro functions in nfp_bpf_jit.c to find out
61 * how different BPF modes use app-specific counters
62 */
63 { "bpf_app1_pkts", NFP_NET_CFG_STATS_APP1_FRAMES },
64 { "bpf_app1_bytes", NFP_NET_CFG_STATS_APP1_BYTES },
65 { "bpf_app2_pkts", NFP_NET_CFG_STATS_APP2_FRAMES },
66 { "bpf_app2_bytes", NFP_NET_CFG_STATS_APP2_BYTES },
67 { "bpf_app3_pkts", NFP_NET_CFG_STATS_APP3_FRAMES },
68 { "bpf_app3_bytes", NFP_NET_CFG_STATS_APP3_BYTES },
69 };
70
71 static const struct nfp_et_stat nfp_mac_et_stats[] = {
72 { "rx_octets", NFP_MAC_STATS_RX_IN_OCTETS, },
73 { "rx_frame_too_long_errors",
74 NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
75 { "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
76 { "rx_vlan_received_ok", NFP_MAC_STATS_RX_VLAN_RECEIVED_OK, },
77 { "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, },
78 { "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
79 { "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, },
80 { "rx_alignment_errors", NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
81 { "rx_pause_mac_ctrl_frames",
82 NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
83 { "rx_frames_received_ok", NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
84 { "rx_frame_check_sequence_errors",
85 NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
86 { "rx_unicast_pkts", NFP_MAC_STATS_RX_UNICAST_PKTS, },
87 { "rx_multicast_pkts", NFP_MAC_STATS_RX_MULTICAST_PKTS, },
88 { "rx_pkts", NFP_MAC_STATS_RX_PKTS, },
89 { "rx_undersize_pkts", NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
90 { "rx_pkts_64_octets", NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
91 { "rx_pkts_65_to_127_octets",
92 NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
93 { "rx_pkts_128_to_255_octets",
94 NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
95 { "rx_pkts_256_to_511_octets",
96 NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
97 { "rx_pkts_512_to_1023_octets",
98 NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
99 { "rx_pkts_1024_to_1518_octets",
100 NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
101 { "rx_pkts_1519_to_max_octets",
102 NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
103 { "rx_jabbers", NFP_MAC_STATS_RX_JABBERS, },
104 { "rx_fragments", NFP_MAC_STATS_RX_FRAGMENTS, },
105 { "rx_oversize_pkts", NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
106 { "rx_pause_frames_class0", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
107 { "rx_pause_frames_class1", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
108 { "rx_pause_frames_class2", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
109 { "rx_pause_frames_class3", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
110 { "rx_pause_frames_class4", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
111 { "rx_pause_frames_class5", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
112 { "rx_pause_frames_class6", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
113 { "rx_pause_frames_class7", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
114 { "rx_mac_ctrl_frames_received",
115 NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
116 { "rx_mac_head_drop", NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
117 { "tx_queue_drop", NFP_MAC_STATS_TX_QUEUE_DROP, },
118 { "tx_octets", NFP_MAC_STATS_TX_OUT_OCTETS, },
119 { "tx_vlan_transmitted_ok", NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
120 { "tx_errors", NFP_MAC_STATS_TX_OUT_ERRORS, },
121 { "tx_broadcast_pkts", NFP_MAC_STATS_TX_BROADCAST_PKTS, },
122 { "tx_pause_mac_ctrl_frames",
123 NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
124 { "tx_frames_transmitted_ok",
125 NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
126 { "tx_unicast_pkts", NFP_MAC_STATS_TX_UNICAST_PKTS, },
127 { "tx_multicast_pkts", NFP_MAC_STATS_TX_MULTICAST_PKTS, },
128 { "tx_pkts_64_octets", NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
129 { "tx_pkts_65_to_127_octets",
130 NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
131 { "tx_pkts_128_to_255_octets",
132 NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
133 { "tx_pkts_256_to_511_octets",
134 NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
135 { "tx_pkts_512_to_1023_octets",
136 NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
137 { "tx_pkts_1024_to_1518_octets",
138 NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
139 { "tx_pkts_1519_to_max_octets",
140 NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
141 { "tx_pause_frames_class0", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
142 { "tx_pause_frames_class1", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
143 { "tx_pause_frames_class2", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
144 { "tx_pause_frames_class3", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
145 { "tx_pause_frames_class4", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
146 { "tx_pause_frames_class5", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
147 { "tx_pause_frames_class6", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
148 { "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
149 };
150
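/* Names for stats reported through the vNIC stats TLV.  The array index is
 * the 16-bit stat ID read from the TLV area; IDs without an entry here
 * (index 0, or anything past the end of the array) are reported as
 * "dev_unknown_stat%u" by nfp_vnic_get_tlv_stats_strings() below.
 */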
151 static const char nfp_tlv_stat_names[][ETH_GSTRING_LEN] = {
152 [1] = "dev_rx_discards",
153 [2] = "dev_rx_errors",
154 [3] = "dev_rx_bytes",
155 [4] = "dev_rx_uc_bytes",
156 [5] = "dev_rx_mc_bytes",
157 [6] = "dev_rx_bc_bytes",
158 [7] = "dev_rx_pkts",
159 [8] = "dev_rx_mc_pkts",
160 [9] = "dev_rx_bc_pkts",
161
162 [10] = "dev_tx_discards",
163 [11] = "dev_tx_errors",
164 [12] = "dev_tx_bytes",
165 [13] = "dev_tx_uc_bytes",
166 [14] = "dev_tx_mc_bytes",
167 [15] = "dev_tx_bc_bytes",
168 [16] = "dev_tx_pkts",
169 [17] = "dev_tx_mc_pkts",
170 [18] = "dev_tx_bc_pkts",
171 };
172
173 #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
174 #define NN_ET_SWITCH_STATS_LEN 9
175 #define NN_RVEC_GATHER_STATS 13
176 #define NN_RVEC_PER_Q_STATS 3
177 #define NN_CTRL_PATH_STATS 4
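/* Note: NN_RVEC_PER_Q_STATS, NN_RVEC_GATHER_STATS and NN_CTRL_PATH_STATS must
 * stay in sync with the strings emitted by nfp_vnic_get_sw_stats_strings()
 * and the values filled in by nfp_vnic_get_sw_stats() below.
 */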
178
179 #define SFP_SFF_REV_COMPLIANCE 1
180
181 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
182 {
183 struct nfp_nsp *nsp;
184
185 if (!app)
186 return;
187
188 nsp = nfp_nsp_open(app->cpp);
189 if (IS_ERR(nsp))
190 return;
191
192 snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
193 nfp_nsp_get_abi_ver_major(nsp),
194 nfp_nsp_get_abi_ver_minor(nsp));
195
196 nfp_nsp_close(nsp);
197 }
198
199 static void
200 nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
201 const char *vnic_version, struct ethtool_drvinfo *drvinfo)
202 {
203 char nsp_version[ETHTOOL_FWVERS_LEN] = {};
204
205 strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
206 nfp_net_get_nspinfo(app, nsp_version);
207 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
208 "%s %s %s %s", vnic_version, nsp_version,
209 nfp_app_mip_name(app), nfp_app_name(app));
210 }
211
212 static void
213 nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
214 {
215 char vnic_version[ETHTOOL_FWVERS_LEN] = {};
216 struct nfp_net *nn = netdev_priv(netdev);
217
218 snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
219 nn->fw_ver.resv, nn->fw_ver.class,
220 nn->fw_ver.major, nn->fw_ver.minor);
221 strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
222 sizeof(drvinfo->bus_info));
223
224 nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
225 }
226
227 static void
228 nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
229 {
230 struct nfp_app *app = nfp_app_from_netdev(netdev);
231
232 strlcpy(drvinfo->bus_info, pci_name(app->pdev),
233 sizeof(drvinfo->bus_info));
234 nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
235 }
236
237 static void
238 nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
239 struct ethtool_link_ksettings *c)
240 {
241 unsigned int modes;
242
243 ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE);
244 if (!nfp_eth_can_support_fec(eth_port)) {
245 ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE);
246 return;
247 }
248
249 modes = nfp_eth_supported_fec_modes(eth_port);
250 if (modes & NFP_FEC_BASER) {
251 ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER);
252 ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER);
253 }
254
255 if (modes & NFP_FEC_REED_SOLOMON) {
256 ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS);
257 ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS);
258 }
259 }
260
261 /**
262 * nfp_net_get_link_ksettings - Get Link Speed settings
263 * @netdev: network interface device structure
264 * @cmd: ethtool command
265 *
266 * Reports speed settings based on info in the BAR provided by the fw.
267 */
268 static int
269 nfp_net_get_link_ksettings(struct net_device *netdev,
270 struct ethtool_link_ksettings *cmd)
271 {
272 static const u32 ls_to_ethtool[] = {
273 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = 0,
274 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = SPEED_UNKNOWN,
275 [NFP_NET_CFG_STS_LINK_RATE_1G] = SPEED_1000,
276 [NFP_NET_CFG_STS_LINK_RATE_10G] = SPEED_10000,
277 [NFP_NET_CFG_STS_LINK_RATE_25G] = SPEED_25000,
278 [NFP_NET_CFG_STS_LINK_RATE_40G] = SPEED_40000,
279 [NFP_NET_CFG_STS_LINK_RATE_50G] = SPEED_50000,
280 [NFP_NET_CFG_STS_LINK_RATE_100G] = SPEED_100000,
281 };
282 struct nfp_eth_table_port *eth_port;
283 struct nfp_port *port;
284 struct nfp_net *nn;
285 u32 sts, ls;
286
287 /* Init to unknowns */
288 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
289 cmd->base.port = PORT_OTHER;
290 cmd->base.speed = SPEED_UNKNOWN;
291 cmd->base.duplex = DUPLEX_UNKNOWN;
292
293 port = nfp_port_from_netdev(netdev);
294 eth_port = nfp_port_get_eth_port(port);
295 if (eth_port) {
296 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
297 ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
298 cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
299 AUTONEG_ENABLE : AUTONEG_DISABLE;
300 nfp_net_set_fec_link_mode(eth_port, cmd);
301 }
302
303 if (!netif_carrier_ok(netdev))
304 return 0;
305
306 /* Use link speed from ETH table if available, otherwise try the BAR */
307 if (eth_port) {
308 cmd->base.port = eth_port->port_type;
309 cmd->base.speed = eth_port->speed;
310 cmd->base.duplex = DUPLEX_FULL;
311 return 0;
312 }
313
314 if (!nfp_netdev_is_nfp_net(netdev))
315 return -EOPNOTSUPP;
316 nn = netdev_priv(netdev);
317
318 sts = nn_readl(nn, NFP_NET_CFG_STS);
319
320 ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
321 if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
322 return -EOPNOTSUPP;
323
324 if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
325 ls >= ARRAY_SIZE(ls_to_ethtool))
326 return 0;
327
328 cmd->base.speed = ls_to_ethtool[ls];
329 cmd->base.duplex = DUPLEX_FULL;
330
331 return 0;
332 }
333
334 static int
335 nfp_net_set_link_ksettings(struct net_device *netdev,
336 const struct ethtool_link_ksettings *cmd)
337 {
338 struct nfp_eth_table_port *eth_port;
339 struct nfp_port *port;
340 struct nfp_nsp *nsp;
341 int err;
342
343 port = nfp_port_from_netdev(netdev);
344 eth_port = __nfp_port_get_eth_port(port);
345 if (!eth_port)
346 return -EOPNOTSUPP;
347
348 if (netif_running(netdev)) {
349 netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until driver reload.\n");
350 return -EBUSY;
351 }
352
353 nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
354 if (IS_ERR(nsp))
355 return PTR_ERR(nsp);
356
357 err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
358 NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
359 if (err)
360 goto err_bad_set;
361 if (cmd->base.speed != SPEED_UNKNOWN) {
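		/* Speed is configured per lane, so divide the requested
		 * total link speed by the number of port lanes.
		 */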
362 u32 speed = cmd->base.speed / eth_port->lanes;
363
364 err = __nfp_eth_set_speed(nsp, speed);
365 if (err)
366 goto err_bad_set;
367 }
368
369 err = nfp_eth_config_commit_end(nsp);
370 if (err > 0)
371 return 0; /* no change */
372
373 nfp_net_refresh_port_table(port);
374
375 return err;
376
377 err_bad_set:
378 nfp_eth_config_cleanup_end(nsp);
379 return err;
380 }
381
382 static void nfp_net_get_ringparam(struct net_device *netdev,
383 struct ethtool_ringparam *ring)
384 {
385 struct nfp_net *nn = netdev_priv(netdev);
386
387 ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
388 ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
389 ring->rx_pending = nn->dp.rxd_cnt;
390 ring->tx_pending = nn->dp.txd_cnt;
391 }
392
393 static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
394 {
395 struct nfp_net_dp *dp;
396
397 dp = nfp_net_clone_dp(nn);
398 if (!dp)
399 return -ENOMEM;
400
401 dp->rxd_cnt = rxd_cnt;
402 dp->txd_cnt = txd_cnt;
403
404 return nfp_net_ring_reconfig(nn, dp, NULL);
405 }
406
407 static int nfp_net_set_ringparam(struct net_device *netdev,
408 struct ethtool_ringparam *ring)
409 {
410 struct nfp_net *nn = netdev_priv(netdev);
411 u32 rxd_cnt, txd_cnt;
412
413 /* We don't have separate queues/rings for small/large frames. */
414 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
415 return -EINVAL;
416
417 /* Round up to supported values */
418 rxd_cnt = roundup_pow_of_two(ring->rx_pending);
419 txd_cnt = roundup_pow_of_two(ring->tx_pending);
420
421 if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
422 txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
423 return -EINVAL;
424
425 if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
426 return 0;
427
428 nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
429 nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
430
431 return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
432 }
433
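/* Format one ethtool string into @data and return a pointer advanced past
 * the fixed ETH_GSTRING_LEN slot, ready for the next string.
 */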
434 __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
435 {
436 va_list args;
437
438 va_start(args, fmt);
439 vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
440 va_end(args);
441
442 return data + ETH_GSTRING_LEN;
443 }
444
445 static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
446 {
447 struct nfp_net *nn = netdev_priv(netdev);
448
449 return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS +
450 NN_CTRL_PATH_STATS;
451 }
452
453 static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
454 {
455 struct nfp_net *nn = netdev_priv(netdev);
456 int i;
457
458 for (i = 0; i < nn->max_r_vecs; i++) {
459 data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
460 data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
461 data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
462 }
463
464 data = nfp_pr_et(data, "hw_rx_csum_ok");
465 data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
466 data = nfp_pr_et(data, "hw_rx_csum_complete");
467 data = nfp_pr_et(data, "hw_rx_csum_err");
468 data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
469 data = nfp_pr_et(data, "rx_tls_decrypted_packets");
470 data = nfp_pr_et(data, "hw_tx_csum");
471 data = nfp_pr_et(data, "hw_tx_inner_csum");
472 data = nfp_pr_et(data, "tx_gather");
473 data = nfp_pr_et(data, "tx_lso");
474 data = nfp_pr_et(data, "tx_tls_encrypted_packets");
475 data = nfp_pr_et(data, "tx_tls_ooo");
476 data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
477
478 data = nfp_pr_et(data, "hw_tls_no_space");
479 data = nfp_pr_et(data, "rx_tls_resync_req_ok");
480 data = nfp_pr_et(data, "rx_tls_resync_req_ign");
481 data = nfp_pr_et(data, "rx_tls_resync_sent");
482
483 return data;
484 }
485
486 static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
487 {
488 u64 gathered_stats[NN_RVEC_GATHER_STATS] = {};
489 struct nfp_net *nn = netdev_priv(netdev);
490 u64 tmp[NN_RVEC_GATHER_STATS];
491 unsigned int i, j;
492
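	/* Per-ring counters land directly in data[]; the gathered tmp[]
	 * values are summed across rings and must stay in the same order as
	 * the strings in nfp_vnic_get_sw_stats_strings().
	 */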
493 for (i = 0; i < nn->max_r_vecs; i++) {
494 unsigned int start;
495
496 do {
497 start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
498 data[0] = nn->r_vecs[i].rx_pkts;
499 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
500 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
501 tmp[2] = nn->r_vecs[i].hw_csum_rx_complete;
502 tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
503 tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
504 tmp[5] = nn->r_vecs[i].hw_tls_rx;
505 } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
506
507 do {
508 start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
509 data[1] = nn->r_vecs[i].tx_pkts;
510 data[2] = nn->r_vecs[i].tx_busy;
511 tmp[6] = nn->r_vecs[i].hw_csum_tx;
512 tmp[7] = nn->r_vecs[i].hw_csum_tx_inner;
513 tmp[8] = nn->r_vecs[i].tx_gather;
514 tmp[9] = nn->r_vecs[i].tx_lso;
515 tmp[10] = nn->r_vecs[i].hw_tls_tx;
516 tmp[11] = nn->r_vecs[i].tls_tx_fallback;
517 tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
518 } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
519
520 data += NN_RVEC_PER_Q_STATS;
521
522 for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
523 gathered_stats[j] += tmp[j];
524 }
525
526 for (j = 0; j < NN_RVEC_GATHER_STATS; j++)
527 *data++ = gathered_stats[j];
528
529 *data++ = atomic_read(&nn->ktls_no_space);
530 *data++ = atomic_read(&nn->ktls_rx_resync_req);
531 *data++ = atomic_read(&nn->ktls_rx_resync_ign);
532 *data++ = atomic_read(&nn->ktls_rx_resync_sent);
533
534 return data;
535 }
536
537 static unsigned int nfp_vnic_get_hw_stats_count(unsigned int num_vecs)
538 {
539 return NN_ET_GLOBAL_STATS_LEN + num_vecs * 4;
540 }
541
542 static u8 *
543 nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int num_vecs, bool repr)
544 {
545 int swap_off, i;
546
547 BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
548 /* If repr is true, first add SWITCH_STATS_LEN and then subtract it,
549 * effectively swapping the RX and TX statistics (giving us the RX
550 * and TX from the perspective of the switch).
551 */
552 swap_off = repr * NN_ET_SWITCH_STATS_LEN;
553
554 for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
555 data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name);
556
557 for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
558 data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name);
559
560 for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
561 data = nfp_pr_et(data, nfp_net_et_stats[i].name);
562
563 for (i = 0; i < num_vecs; i++) {
564 data = nfp_pr_et(data, "rxq_%u_pkts", i);
565 data = nfp_pr_et(data, "rxq_%u_bytes", i);
566 data = nfp_pr_et(data, "txq_%u_pkts", i);
567 data = nfp_pr_et(data, "txq_%u_bytes", i);
568 }
569
570 return data;
571 }
572
573 static u64 *
574 nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, unsigned int num_vecs)
575 {
576 unsigned int i;
577
578 for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
579 *data++ = readq(mem + nfp_net_et_stats[i].off);
580
581 for (i = 0; i < num_vecs; i++) {
582 *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
583 *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
584 *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
585 *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
586 }
587
588 return data;
589 }
590
591 static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn)
592 {
593 return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4;
594 }
595
596 static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data)
597 {
598 unsigned int i, id;
599 u8 __iomem *mem;
600 u64 id_word = 0;
601
602 mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
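	/* Stat IDs are packed as 16-bit values, four per 64-bit word, so a
	 * new word is read every fourth iteration.
	 */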
603 for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) {
604 if (!(i % 4))
605 id_word = readq(mem + i * 2);
606
607 id = (u16)id_word;
608 id_word >>= 16;
609
610 if (id < ARRAY_SIZE(nfp_tlv_stat_names) &&
611 nfp_tlv_stat_names[id][0]) {
612 memcpy(data, nfp_tlv_stat_names[id], ETH_GSTRING_LEN);
613 data += ETH_GSTRING_LEN;
614 } else {
615 data = nfp_pr_et(data, "dev_unknown_stat%u", id);
616 }
617 }
618
619 for (i = 0; i < nn->max_r_vecs; i++) {
620 data = nfp_pr_et(data, "rxq_%u_pkts", i);
621 data = nfp_pr_et(data, "rxq_%u_bytes", i);
622 data = nfp_pr_et(data, "txq_%u_pkts", i);
623 data = nfp_pr_et(data, "txq_%u_bytes", i);
624 }
625
626 return data;
627 }
628
629 static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data)
630 {
631 u8 __iomem *mem;
632 unsigned int i;
633
634 mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off;
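	/* Skip over the table of 16-bit stat IDs (padded to 8 bytes) to reach
	 * the 64-bit counter values that follow it.
	 */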
635 mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8);
636 for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++)
637 *data++ = readq(mem + i * 8);
638
639 mem = nn->dp.ctrl_bar;
640 for (i = 0; i < nn->max_r_vecs; i++) {
641 *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
642 *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
643 *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
644 *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
645 }
646
647 return data;
648 }
649
650 static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
651 {
652 struct nfp_port *port;
653
654 port = nfp_port_from_netdev(netdev);
655 if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
656 return 0;
657
658 return ARRAY_SIZE(nfp_mac_et_stats);
659 }
660
661 static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
662 {
663 struct nfp_port *port;
664 unsigned int i;
665
666 port = nfp_port_from_netdev(netdev);
667 if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
668 return data;
669
670 for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
671 data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name);
672
673 return data;
674 }
675
676 static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
677 {
678 struct nfp_port *port;
679 unsigned int i;
680
681 port = nfp_port_from_netdev(netdev);
682 if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
683 return data;
684
685 for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
686 *data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);
687
688 return data;
689 }
690
691 static void nfp_net_get_strings(struct net_device *netdev,
692 u32 stringset, u8 *data)
693 {
694 struct nfp_net *nn = netdev_priv(netdev);
695
696 switch (stringset) {
697 case ETH_SS_STATS:
698 data = nfp_vnic_get_sw_stats_strings(netdev, data);
699 if (!nn->tlv_caps.vnic_stats_off)
700 data = nfp_vnic_get_hw_stats_strings(data,
701 nn->max_r_vecs,
702 false);
703 else
704 data = nfp_vnic_get_tlv_stats_strings(nn, data);
705 data = nfp_mac_get_stats_strings(netdev, data);
706 data = nfp_app_port_get_stats_strings(nn->port, data);
707 break;
708 }
709 }
710
711 static void
712 nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
713 u64 *data)
714 {
715 struct nfp_net *nn = netdev_priv(netdev);
716
717 data = nfp_vnic_get_sw_stats(netdev, data);
718 if (!nn->tlv_caps.vnic_stats_off)
719 data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
720 nn->max_r_vecs);
721 else
722 data = nfp_vnic_get_tlv_stats(nn, data);
723 data = nfp_mac_get_stats(netdev, data);
724 data = nfp_app_port_get_stats(nn->port, data);
725 }
726
727 static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
728 {
729 struct nfp_net *nn = netdev_priv(netdev);
730 unsigned int cnt;
731
732 switch (sset) {
733 case ETH_SS_STATS:
734 cnt = nfp_vnic_get_sw_stats_count(netdev);
735 if (!nn->tlv_caps.vnic_stats_off)
736 cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs);
737 else
738 cnt += nfp_vnic_get_tlv_stats_count(nn);
739 cnt += nfp_mac_get_stats_count(netdev);
740 cnt += nfp_app_port_get_stats_count(nn->port);
741 return cnt;
742 default:
743 return -EOPNOTSUPP;
744 }
745 }
746
747 static void nfp_port_get_strings(struct net_device *netdev,
748 u32 stringset, u8 *data)
749 {
750 struct nfp_port *port = nfp_port_from_netdev(netdev);
751
752 switch (stringset) {
753 case ETH_SS_STATS:
754 if (nfp_port_is_vnic(port))
755 data = nfp_vnic_get_hw_stats_strings(data, 0, true);
756 else
757 data = nfp_mac_get_stats_strings(netdev, data);
758 data = nfp_app_port_get_stats_strings(port, data);
759 break;
760 }
761 }
762
763 static void
764 nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
765 u64 *data)
766 {
767 struct nfp_port *port = nfp_port_from_netdev(netdev);
768
769 if (nfp_port_is_vnic(port))
770 data = nfp_vnic_get_hw_stats(data, port->vnic, 0);
771 else
772 data = nfp_mac_get_stats(netdev, data);
773 data = nfp_app_port_get_stats(port, data);
774 }
775
776 static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
777 {
778 struct nfp_port *port = nfp_port_from_netdev(netdev);
779 unsigned int count;
780
781 switch (sset) {
782 case ETH_SS_STATS:
783 if (nfp_port_is_vnic(port))
784 count = nfp_vnic_get_hw_stats_count(0);
785 else
786 count = nfp_mac_get_stats_count(netdev);
787 count += nfp_app_port_get_stats_count(port);
788 return count;
789 default:
790 return -EOPNOTSUPP;
791 }
792 }
793
794 static int nfp_port_fec_ethtool_to_nsp(u32 fec)
795 {
796 switch (fec) {
797 case ETHTOOL_FEC_AUTO:
798 return NFP_FEC_AUTO_BIT;
799 case ETHTOOL_FEC_OFF:
800 return NFP_FEC_DISABLED_BIT;
801 case ETHTOOL_FEC_RS:
802 return NFP_FEC_REED_SOLOMON_BIT;
803 case ETHTOOL_FEC_BASER:
804 return NFP_FEC_BASER_BIT;
805 default:
806 /* NSP only supports a single mode at a time */
807 return -EOPNOTSUPP;
808 }
809 }
810
811 static u32 nfp_port_fec_nsp_to_ethtool(u32 fec)
812 {
813 u32 result = 0;
814
815 if (fec & NFP_FEC_AUTO)
816 result |= ETHTOOL_FEC_AUTO;
817 if (fec & NFP_FEC_BASER)
818 result |= ETHTOOL_FEC_BASER;
819 if (fec & NFP_FEC_REED_SOLOMON)
820 result |= ETHTOOL_FEC_RS;
821 if (fec & NFP_FEC_DISABLED)
822 result |= ETHTOOL_FEC_OFF;
823
824 return result ?: ETHTOOL_FEC_NONE;
825 }
826
827 static int
828 nfp_port_get_fecparam(struct net_device *netdev,
829 struct ethtool_fecparam *param)
830 {
831 struct nfp_eth_table_port *eth_port;
832 struct nfp_port *port;
833
834 param->active_fec = ETHTOOL_FEC_NONE;
835 param->fec = ETHTOOL_FEC_NONE;
836
837 port = nfp_port_from_netdev(netdev);
838 eth_port = nfp_port_get_eth_port(port);
839 if (!eth_port)
840 return -EOPNOTSUPP;
841
842 if (!nfp_eth_can_support_fec(eth_port))
843 return 0;
844
845 param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported);
846 param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec);
847
848 return 0;
849 }
850
851 static int
852 nfp_port_set_fecparam(struct net_device *netdev,
853 struct ethtool_fecparam *param)
854 {
855 struct nfp_eth_table_port *eth_port;
856 struct nfp_port *port;
857 int err, fec;
858
859 port = nfp_port_from_netdev(netdev);
860 eth_port = nfp_port_get_eth_port(port);
861 if (!eth_port)
862 return -EOPNOTSUPP;
863
864 if (!nfp_eth_can_support_fec(eth_port))
865 return -EOPNOTSUPP;
866
867 fec = nfp_port_fec_ethtool_to_nsp(param->fec);
868 if (fec < 0)
869 return fec;
870
871 err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec);
872 if (!err)
873 /* Only refresh if we did something */
874 nfp_net_refresh_port_table(port);
875
876 return err < 0 ? err : 0;
877 }
878
879 /* RX network flow classification (RSS, filters, etc)
880 */
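/* For example, from userspace the RSS hash fields for TCP/IPv4 flows can
 * typically be queried and set with something like:
 *   ethtool -n <dev> rx-flow-hash tcp4
 *   ethtool -N <dev> rx-flow-hash tcp4 sdfn
 */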
881 static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
882 {
883 static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
884 [TCP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_TCP,
885 [TCP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_TCP,
886 [UDP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_UDP,
887 [UDP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_UDP,
888 [IPV4_FLOW] = NFP_NET_CFG_RSS_IPV4,
889 [IPV6_FLOW] = NFP_NET_CFG_RSS_IPV6,
890 };
891
892 if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
893 return 0;
894
895 return xlate_ethtool_to_nfp[flow_type];
896 }
897
898 static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
899 struct ethtool_rxnfc *cmd)
900 {
901 u32 nfp_rss_flag;
902
903 cmd->data = 0;
904
905 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
906 return -EOPNOTSUPP;
907
908 nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
909 if (!nfp_rss_flag)
910 return -EINVAL;
911
912 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
913 if (nn->rss_cfg & nfp_rss_flag)
914 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
915
916 return 0;
917 }
918
919 static int nfp_net_get_rxnfc(struct net_device *netdev,
920 struct ethtool_rxnfc *cmd, u32 *rule_locs)
921 {
922 struct nfp_net *nn = netdev_priv(netdev);
923
924 switch (cmd->cmd) {
925 case ETHTOOL_GRXRINGS:
926 cmd->data = nn->dp.num_rx_rings;
927 return 0;
928 case ETHTOOL_GRXFH:
929 return nfp_net_get_rss_hash_opts(nn, cmd);
930 default:
931 return -EOPNOTSUPP;
932 }
933 }
934
935 static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
936 struct ethtool_rxnfc *nfc)
937 {
938 u32 new_rss_cfg = nn->rss_cfg;
939 u32 nfp_rss_flag;
940 int err;
941
942 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
943 return -EOPNOTSUPP;
944
945 /* RSS only supports IP SA/DA and L4 src/dst ports */
946 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
947 RXH_L4_B_0_1 | RXH_L4_B_2_3))
948 return -EINVAL;
949
950 /* We need at least the IP SA/DA fields for hashing */
951 if (!(nfc->data & RXH_IP_SRC) ||
952 !(nfc->data & RXH_IP_DST))
953 return -EINVAL;
954
955 nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
956 if (!nfp_rss_flag)
957 return -EINVAL;
958
959 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
960 case 0:
961 new_rss_cfg &= ~nfp_rss_flag;
962 break;
963 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
964 new_rss_cfg |= nfp_rss_flag;
965 break;
966 default:
967 return -EINVAL;
968 }
969
970 new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
971 new_rss_cfg |= NFP_NET_CFG_RSS_MASK;
972
973 if (new_rss_cfg == nn->rss_cfg)
974 return 0;
975
976 writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
977 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
978 if (err)
979 return err;
980
981 nn->rss_cfg = new_rss_cfg;
982
983 nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
984 return 0;
985 }
986
987 static int nfp_net_set_rxnfc(struct net_device *netdev,
988 struct ethtool_rxnfc *cmd)
989 {
990 struct nfp_net *nn = netdev_priv(netdev);
991
992 switch (cmd->cmd) {
993 case ETHTOOL_SRXFH:
994 return nfp_net_set_rss_hash_opt(nn, cmd);
995 default:
996 return -EOPNOTSUPP;
997 }
998 }
999
1000 static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
1001 {
1002 struct nfp_net *nn = netdev_priv(netdev);
1003
1004 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
1005 return 0;
1006
1007 return ARRAY_SIZE(nn->rss_itbl);
1008 }
1009
1010 static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
1011 {
1012 struct nfp_net *nn = netdev_priv(netdev);
1013
1014 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
1015 return -EOPNOTSUPP;
1016
1017 return nfp_net_rss_key_sz(nn);
1018 }
1019
1020 static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
1021 u8 *hfunc)
1022 {
1023 struct nfp_net *nn = netdev_priv(netdev);
1024 int i;
1025
1026 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
1027 return -EOPNOTSUPP;
1028
1029 if (indir)
1030 for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
1031 indir[i] = nn->rss_itbl[i];
1032 if (key)
1033 memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
1034 if (hfunc) {
1035 *hfunc = nn->rss_hfunc;
1036 if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
1037 *hfunc = ETH_RSS_HASH_UNKNOWN;
1038 }
1039
1040 return 0;
1041 }
1042
1043 static int nfp_net_set_rxfh(struct net_device *netdev,
1044 const u32 *indir, const u8 *key,
1045 const u8 hfunc)
1046 {
1047 struct nfp_net *nn = netdev_priv(netdev);
1048 int i;
1049
1050 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
1051 !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
1052 return -EOPNOTSUPP;
1053
1054 if (!key && !indir)
1055 return 0;
1056
1057 if (key) {
1058 memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
1059 nfp_net_rss_write_key(nn);
1060 }
1061 if (indir) {
1062 for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
1063 nn->rss_itbl[i] = indir[i];
1064
1065 nfp_net_rss_write_itbl(nn);
1066 }
1067
1068 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
1069 }
1070
1071 /* Dump BAR registers
1072 */
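/* These ops back register dumps (e.g. "ethtool -d <dev>") and return the raw
 * contents of the control BAR.
 */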
1073 static int nfp_net_get_regs_len(struct net_device *netdev)
1074 {
1075 return NFP_NET_CFG_BAR_SZ;
1076 }
1077
1078 static void nfp_net_get_regs(struct net_device *netdev,
1079 struct ethtool_regs *regs, void *p)
1080 {
1081 struct nfp_net *nn = netdev_priv(netdev);
1082 u32 *regs_buf = p;
1083 int i;
1084
1085 regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);
1086
1087 for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
1088 regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
1089 }
1090
1091 static int nfp_net_get_coalesce(struct net_device *netdev,
1092 struct ethtool_coalesce *ec)
1093 {
1094 struct nfp_net *nn = netdev_priv(netdev);
1095
1096 if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
1097 return -EINVAL;
1098
1099 ec->rx_coalesce_usecs = nn->rx_coalesce_usecs;
1100 ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
1101 ec->tx_coalesce_usecs = nn->tx_coalesce_usecs;
1102 ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;
1103
1104 return 0;
1105 }
1106
1107 /* Other debug dumps
1108 */
1109 static int
1110 nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
1111 {
1112 struct nfp_resource *res;
1113 int ret;
1114
1115 if (!app)
1116 return -EOPNOTSUPP;
1117
1118 dump->version = 1;
1119 dump->flag = NFP_DUMP_NSP_DIAG;
1120
1121 res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
1122 if (IS_ERR(res))
1123 return PTR_ERR(res);
1124
1125 if (buffer) {
1126 if (dump->len != nfp_resource_size(res)) {
1127 ret = -EINVAL;
1128 goto exit_release;
1129 }
1130
1131 ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
1132 nfp_resource_address(res),
1133 buffer, dump->len);
1134 if (ret != dump->len)
1135 ret = ret < 0 ? ret : -EIO;
1136 else
1137 ret = 0;
1138 } else {
1139 dump->len = nfp_resource_size(res);
1140 ret = 0;
1141 }
1142 exit_release:
1143 nfp_resource_release(res);
1144
1145 return ret;
1146 }
1147
1148 /* Set the dump flag/level. Calculate the dump length for flag > 0 only (new TLV
1149 * based dumps), since flag 0 (default) calculates the length in
1150 * nfp_app_get_dump_flag(), and we need to support triggering a level 0 dump
1151 * without setting the flag first, for backward compatibility.
1152 */
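/* For example, from userspace the dump level is usually selected and the
 * dump then retrieved with something like:
 *   ethtool -W <dev> <flag>
 *   ethtool -w <dev> data dump.bin
 */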
1153 static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1154 {
1155 struct nfp_app *app = nfp_app_from_netdev(netdev);
1156 s64 len;
1157
1158 if (!app)
1159 return -EOPNOTSUPP;
1160
1161 if (val->flag == NFP_DUMP_NSP_DIAG) {
1162 app->pf->dump_flag = val->flag;
1163 return 0;
1164 }
1165
1166 if (!app->pf->dumpspec)
1167 return -EOPNOTSUPP;
1168
1169 len = nfp_net_dump_calculate_size(app->pf, app->pf->dumpspec,
1170 val->flag);
1171 if (len < 0)
1172 return len;
1173
1174 app->pf->dump_flag = val->flag;
1175 app->pf->dump_len = len;
1176
1177 return 0;
1178 }
1179
1180 static int
1181 nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1182 {
1183 struct nfp_app *app = nfp_app_from_netdev(netdev);
1184
1185 if (!app)
1186 return -EOPNOTSUPP;
1187
1188 if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
1189 return nfp_dump_nsp_diag(app, dump, NULL);
1190
1191 dump->flag = app->pf->dump_flag;
1192 dump->len = app->pf->dump_len;
1193
1194 return 0;
1195 }
1196
1197 static int
1198 nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1199 void *buffer)
1200 {
1201 struct nfp_app *app = nfp_app_from_netdev(netdev);
1202
1203 if (!app)
1204 return -EOPNOTSUPP;
1205
1206 if (app->pf->dump_flag == NFP_DUMP_NSP_DIAG)
1207 return nfp_dump_nsp_diag(app, dump, buffer);
1208
1209 dump->flag = app->pf->dump_flag;
1210 dump->len = app->pf->dump_len;
1211
1212 return nfp_net_dump_populate_buffer(app->pf, app->pf->dumpspec, dump,
1213 buffer);
1214 }
1215
1216 static int
1217 nfp_port_get_module_info(struct net_device *netdev,
1218 struct ethtool_modinfo *modinfo)
1219 {
1220 struct nfp_eth_table_port *eth_port;
1221 struct nfp_port *port;
1222 unsigned int read_len;
1223 struct nfp_nsp *nsp;
1224 int err = 0;
1225 u8 data;
1226
1227 port = nfp_port_from_netdev(netdev);
1228 if (!port)
1229 return -EOPNOTSUPP;
1230
1231 /* update port state to get latest interface */
1232 set_bit(NFP_PORT_CHANGED, &port->flags);
1233 eth_port = nfp_port_get_eth_port(port);
1234 if (!eth_port)
1235 return -EOPNOTSUPP;
1236
1237 nsp = nfp_nsp_open(port->app->cpp);
1238 if (IS_ERR(nsp)) {
1239 err = PTR_ERR(nsp);
1240 netdev_err(netdev, "Failed to access the NSP: %d\n", err);
1241 return err;
1242 }
1243
1244 if (!nfp_nsp_has_read_module_eeprom(nsp)) {
1245 netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
1246 err = -EOPNOTSUPP;
1247 goto exit_close_nsp;
1248 }
1249
1250 switch (eth_port->interface) {
1251 case NFP_INTERFACE_SFP:
1252 case NFP_INTERFACE_SFP28:
1253 err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
1254 SFP_SFF8472_COMPLIANCE, &data,
1255 1, &read_len);
1256 if (err < 0)
1257 goto exit_close_nsp;
1258
1259 if (!data) {
1260 modinfo->type = ETH_MODULE_SFF_8079;
1261 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
1262 } else {
1263 modinfo->type = ETH_MODULE_SFF_8472;
1264 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1265 }
1266 break;
1267 case NFP_INTERFACE_QSFP:
1268 err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
1269 SFP_SFF_REV_COMPLIANCE, &data,
1270 1, &read_len);
1271 if (err < 0)
1272 goto exit_close_nsp;
1273
1274 if (data < 0x3) {
1275 modinfo->type = ETH_MODULE_SFF_8436;
1276 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
1277 } else {
1278 modinfo->type = ETH_MODULE_SFF_8636;
1279 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1280 }
1281 break;
1282 case NFP_INTERFACE_QSFP28:
1283 modinfo->type = ETH_MODULE_SFF_8636;
1284 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1285 break;
1286 default:
1287 netdev_err(netdev, "Unsupported module 0x%x detected\n",
1288 eth_port->interface);
1289 err = -EINVAL;
1290 }
1291
1292 exit_close_nsp:
1293 nfp_nsp_close(nsp);
1294 return err;
1295 }
1296
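/* Backs module EEPROM dumps, e.g. "ethtool -m <dev>" from userspace. */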
1297 static int
1298 nfp_port_get_module_eeprom(struct net_device *netdev,
1299 struct ethtool_eeprom *eeprom, u8 *data)
1300 {
1301 struct nfp_eth_table_port *eth_port;
1302 struct nfp_port *port;
1303 struct nfp_nsp *nsp;
1304 int err;
1305
1306 port = nfp_port_from_netdev(netdev);
1307 eth_port = __nfp_port_get_eth_port(port);
1308 if (!eth_port)
1309 return -EOPNOTSUPP;
1310
1311 nsp = nfp_nsp_open(port->app->cpp);
1312 if (IS_ERR(nsp)) {
1313 err = PTR_ERR(nsp);
1314 netdev_err(netdev, "Failed to access the NSP: %d\n", err);
1315 return err;
1316 }
1317
1318 if (!nfp_nsp_has_read_module_eeprom(nsp)) {
1319 netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
1320 err = -EOPNOTSUPP;
1321 goto exit_close_nsp;
1322 }
1323
1324 err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
1325 eeprom->offset, data, eeprom->len,
1326 &eeprom->len);
1327 if (err < 0) {
1328 if (eeprom->len) {
1329 netdev_warn(netdev,
1330 "Incomplete read from module EEPROM: %d\n",
1331 err);
1332 err = 0;
1333 } else {
1334 netdev_err(netdev,
1335 "Reading from module EEPROM failed: %d\n",
1336 err);
1337 }
1338 }
1339
1340 exit_close_nsp:
1341 nfp_nsp_close(nsp);
1342 return err;
1343 }
1344
1345 static int nfp_net_set_coalesce(struct net_device *netdev,
1346 struct ethtool_coalesce *ec)
1347 {
1348 struct nfp_net *nn = netdev_priv(netdev);
1349 unsigned int factor;
1350
1351 /* Compute factor used to convert coalesce '_usecs' parameters to
1352 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
1353 * count.
1354 */
1355 factor = nn->tlv_caps.me_freq_mhz / 16;
1356
1357 /* Each pair of (usecs, max_frames) fields specifies that interrupts
1358 * should be coalesced until
1359 * (usecs > 0 && time_since_first_completion >= usecs) ||
1360 * (max_frames > 0 && completed_frames >= max_frames)
1361 *
1362 * It is illegal to set both usecs and max_frames to zero as this would
1363 * cause interrupts to never be generated. To disable coalescing, set
1364 * usecs = 0 and max_frames = 1.
1365 *
1366 * Some implementations ignore the value of max_frames and use the
1367 * condition time_since_first_completion >= usecs
1368 */
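	/* For example, "ethtool -C <dev> rx-usecs 50 rx-frames 64" would
	 * request an interrupt after 50us or 64 completed frames, whichever
	 * comes first (subject to the checks below).
	 */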
1369
1370 if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
1371 return -EINVAL;
1372
1373 /* ensure valid configuration */
1374 if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
1375 return -EINVAL;
1376
1377 if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
1378 return -EINVAL;
1379
1380 if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
1381 return -EINVAL;
1382
1383 if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
1384 return -EINVAL;
1385
1386 if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
1387 return -EINVAL;
1388
1389 if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
1390 return -EINVAL;
1391
1392 /* configuration is valid */
1393 nn->rx_coalesce_usecs = ec->rx_coalesce_usecs;
1394 nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
1395 nn->tx_coalesce_usecs = ec->tx_coalesce_usecs;
1396 nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;
1397
1398 /* write configuration to device */
1399 nfp_net_coalesce_write_cfg(nn);
1400 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
1401 }
1402
1403 static void nfp_net_get_channels(struct net_device *netdev,
1404 struct ethtool_channels *channel)
1405 {
1406 struct nfp_net *nn = netdev_priv(netdev);
1407 unsigned int num_tx_rings;
1408
1409 num_tx_rings = nn->dp.num_tx_rings;
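	/* When an XDP program is loaded each RX ring gets its own TX ring for
	 * XDP_TX, so those are hidden from the counts reported to ethtool.
	 */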
1410 if (nn->dp.xdp_prog)
1411 num_tx_rings -= nn->dp.num_rx_rings;
1412
1413 channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
1414 channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
1415 channel->max_combined = min(channel->max_rx, channel->max_tx);
1416 channel->max_other = NFP_NET_NON_Q_VECTORS;
1417 channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
1418 channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
1419 channel->tx_count = num_tx_rings - channel->combined_count;
1420 channel->other_count = NFP_NET_NON_Q_VECTORS;
1421 }
1422
1423 static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
1424 unsigned int total_tx)
1425 {
1426 struct nfp_net_dp *dp;
1427
1428 dp = nfp_net_clone_dp(nn);
1429 if (!dp)
1430 return -ENOMEM;
1431
1432 dp->num_rx_rings = total_rx;
1433 dp->num_tx_rings = total_tx;
1434 /* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
1435 if (dp->xdp_prog)
1436 dp->num_tx_rings += total_rx;
1437
1438 return nfp_net_ring_reconfig(nn, dp, NULL);
1439 }
1440
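/* Backs "ethtool -L", e.g. "ethtool -L <dev> combined 8" requests 8 combined
 * RX/TX channels.  Requesting extra rx-only and tx-only channels at the same
 * time is rejected below.
 */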
1441 static int nfp_net_set_channels(struct net_device *netdev,
1442 struct ethtool_channels *channel)
1443 {
1444 struct nfp_net *nn = netdev_priv(netdev);
1445 unsigned int total_rx, total_tx;
1446
1447 /* Reject unsupported */
1448 if (channel->other_count != NFP_NET_NON_Q_VECTORS ||
1449 (channel->rx_count && channel->tx_count))
1450 return -EINVAL;
1451
1452 total_rx = channel->combined_count + channel->rx_count;
1453 total_tx = channel->combined_count + channel->tx_count;
1454
1455 if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
1456 total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
1457 return -EINVAL;
1458
1459 return nfp_net_set_num_rings(nn, total_rx, total_tx);
1460 }
1461
1462 static const struct ethtool_ops nfp_net_ethtool_ops = {
1463 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1464 ETHTOOL_COALESCE_MAX_FRAMES,
1465 .get_drvinfo = nfp_net_get_drvinfo,
1466 .get_link = ethtool_op_get_link,
1467 .get_ringparam = nfp_net_get_ringparam,
1468 .set_ringparam = nfp_net_set_ringparam,
1469 .get_strings = nfp_net_get_strings,
1470 .get_ethtool_stats = nfp_net_get_stats,
1471 .get_sset_count = nfp_net_get_sset_count,
1472 .get_rxnfc = nfp_net_get_rxnfc,
1473 .set_rxnfc = nfp_net_set_rxnfc,
1474 .get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
1475 .get_rxfh_key_size = nfp_net_get_rxfh_key_size,
1476 .get_rxfh = nfp_net_get_rxfh,
1477 .set_rxfh = nfp_net_set_rxfh,
1478 .get_regs_len = nfp_net_get_regs_len,
1479 .get_regs = nfp_net_get_regs,
1480 .set_dump = nfp_app_set_dump,
1481 .get_dump_flag = nfp_app_get_dump_flag,
1482 .get_dump_data = nfp_app_get_dump_data,
1483 .get_module_info = nfp_port_get_module_info,
1484 .get_module_eeprom = nfp_port_get_module_eeprom,
1485 .get_coalesce = nfp_net_get_coalesce,
1486 .set_coalesce = nfp_net_set_coalesce,
1487 .get_channels = nfp_net_get_channels,
1488 .set_channels = nfp_net_set_channels,
1489 .get_link_ksettings = nfp_net_get_link_ksettings,
1490 .set_link_ksettings = nfp_net_set_link_ksettings,
1491 .get_fecparam = nfp_port_get_fecparam,
1492 .set_fecparam = nfp_port_set_fecparam,
1493 };
1494
1495 const struct ethtool_ops nfp_port_ethtool_ops = {
1496 .get_drvinfo = nfp_app_get_drvinfo,
1497 .get_link = ethtool_op_get_link,
1498 .get_strings = nfp_port_get_strings,
1499 .get_ethtool_stats = nfp_port_get_stats,
1500 .get_sset_count = nfp_port_get_sset_count,
1501 .set_dump = nfp_app_set_dump,
1502 .get_dump_flag = nfp_app_get_dump_flag,
1503 .get_dump_data = nfp_app_get_dump_data,
1504 .get_module_info = nfp_port_get_module_info,
1505 .get_module_eeprom = nfp_port_get_module_eeprom,
1506 .get_link_ksettings = nfp_net_get_link_ksettings,
1507 .set_link_ksettings = nfp_net_set_link_ksettings,
1508 .get_fecparam = nfp_port_get_fecparam,
1509 .set_fecparam = nfp_port_set_fecparam,
1510 };
1511
1512 void nfp_net_set_ethtool_ops(struct net_device *netdev)
1513 {
1514 netdev->ethtool_ops = &nfp_net_ethtool_ops;
1515 }
1516