/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_port.h"

enum nfp_dump_diag {
	NFP_DUMP_NSP_DIAG = 0,
};

struct nfp_et_stat {
	char name[ETH_GSTRING_LEN];
	int off;
};

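/* Per-vNIC counters exposed via ethtool -S.  @off is the byte offset of the
 * 64-bit counter within the vNIC stats area of the control BAR.
 */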
static const struct nfp_et_stat nfp_net_et_stats[] = {
	/* Stats from the device */
	{ "dev_rx_discards",	NFP_NET_CFG_STATS_RX_DISCARDS },
	{ "dev_rx_errors",	NFP_NET_CFG_STATS_RX_ERRORS },
	{ "dev_rx_bytes",	NFP_NET_CFG_STATS_RX_OCTETS },
	{ "dev_rx_uc_bytes",	NFP_NET_CFG_STATS_RX_UC_OCTETS },
	{ "dev_rx_mc_bytes",	NFP_NET_CFG_STATS_RX_MC_OCTETS },
	{ "dev_rx_bc_bytes",	NFP_NET_CFG_STATS_RX_BC_OCTETS },
	{ "dev_rx_pkts",	NFP_NET_CFG_STATS_RX_FRAMES },
	{ "dev_rx_mc_pkts",	NFP_NET_CFG_STATS_RX_MC_FRAMES },
	{ "dev_rx_bc_pkts",	NFP_NET_CFG_STATS_RX_BC_FRAMES },

	{ "dev_tx_discards",	NFP_NET_CFG_STATS_TX_DISCARDS },
	{ "dev_tx_errors",	NFP_NET_CFG_STATS_TX_ERRORS },
	{ "dev_tx_bytes",	NFP_NET_CFG_STATS_TX_OCTETS },
	{ "dev_tx_uc_bytes",	NFP_NET_CFG_STATS_TX_UC_OCTETS },
	{ "dev_tx_mc_bytes",	NFP_NET_CFG_STATS_TX_MC_OCTETS },
	{ "dev_tx_bc_bytes",	NFP_NET_CFG_STATS_TX_BC_OCTETS },
	{ "dev_tx_pkts",	NFP_NET_CFG_STATS_TX_FRAMES },
	{ "dev_tx_mc_pkts",	NFP_NET_CFG_STATS_TX_MC_FRAMES },
	{ "dev_tx_bc_pkts",	NFP_NET_CFG_STATS_TX_BC_FRAMES },

	{ "bpf_pass_pkts",	NFP_NET_CFG_STATS_APP0_FRAMES },
	{ "bpf_pass_bytes",	NFP_NET_CFG_STATS_APP0_BYTES },
	/* see comments in outro functions in nfp_bpf_jit.c to find out
	 * how different BPF modes use app-specific counters
	 */
	{ "bpf_app1_pkts",	NFP_NET_CFG_STATS_APP1_FRAMES },
	{ "bpf_app1_bytes",	NFP_NET_CFG_STATS_APP1_BYTES },
	{ "bpf_app2_pkts",	NFP_NET_CFG_STATS_APP2_FRAMES },
	{ "bpf_app2_bytes",	NFP_NET_CFG_STATS_APP2_BYTES },
	{ "bpf_app3_pkts",	NFP_NET_CFG_STATS_APP3_FRAMES },
	{ "bpf_app3_bytes",	NFP_NET_CFG_STATS_APP3_BYTES },
};

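/* Per-port MAC counters, read from the MAC statistics memory pointed to by
 * port->eth_stats.
 */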
static const struct nfp_et_stat nfp_mac_et_stats[] = {
	{ "rx_octets",			NFP_MAC_STATS_RX_IN_OCTETS, },
	{ "rx_frame_too_long_errors",
			NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, },
	{ "rx_range_length_errors",	NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, },
	{ "rx_vlan_receive_ok",		NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, },
	{ "rx_errors",			NFP_MAC_STATS_RX_IN_ERRORS, },
	{ "rx_broadcast_pkts",		NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, },
	{ "rx_drop_events",		NFP_MAC_STATS_RX_DROP_EVENTS, },
	{ "rx_alignment_errors",	NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, },
	{ "rx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, },
	{ "rx_frames_received_ok",	NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, },
	{ "rx_frame_check_sequence_errors",
			NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, },
	{ "rx_unicast_pkts",		NFP_MAC_STATS_RX_UNICAST_PKTS, },
	{ "rx_multicast_pkts",		NFP_MAC_STATS_RX_MULTICAST_PKTS, },
	{ "rx_pkts",			NFP_MAC_STATS_RX_PKTS, },
	{ "rx_undersize_pkts",		NFP_MAC_STATS_RX_UNDERSIZE_PKTS, },
	{ "rx_pkts_64_octets",		NFP_MAC_STATS_RX_PKTS_64_OCTETS, },
	{ "rx_pkts_65_to_127_octets",
			NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, },
	{ "rx_pkts_128_to_255_octets",
			NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, },
	{ "rx_pkts_256_to_511_octets",
			NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, },
	{ "rx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, },
	{ "rx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, },
	{ "rx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, },
	{ "rx_jabbers",			NFP_MAC_STATS_RX_JABBERS, },
	{ "rx_fragments",		NFP_MAC_STATS_RX_FRAGMENTS, },
	{ "rx_oversize_pkts",		NFP_MAC_STATS_RX_OVERSIZE_PKTS, },
	{ "rx_pause_frames_class0",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, },
	{ "rx_pause_frames_class1",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, },
	{ "rx_pause_frames_class2",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, },
	{ "rx_pause_frames_class3",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, },
	{ "rx_pause_frames_class4",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, },
	{ "rx_pause_frames_class5",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, },
	{ "rx_pause_frames_class6",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, },
	{ "rx_pause_frames_class7",	NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, },
	{ "rx_mac_ctrl_frames_received",
			NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, },
	{ "rx_mac_head_drop",		NFP_MAC_STATS_RX_MAC_HEAD_DROP, },
	{ "tx_queue_drop",		NFP_MAC_STATS_TX_QUEUE_DROP, },
	{ "tx_octets",			NFP_MAC_STATS_TX_OUT_OCTETS, },
	{ "tx_vlan_transmitted_ok",	NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, },
	{ "tx_errors",			NFP_MAC_STATS_TX_OUT_ERRORS, },
	{ "tx_broadcast_pkts",		NFP_MAC_STATS_TX_BROADCAST_PKTS, },
	{ "tx_pause_mac_ctrl_frames",
			NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, },
	{ "tx_frames_transmitted_ok",
			NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, },
	{ "tx_unicast_pkts",		NFP_MAC_STATS_TX_UNICAST_PKTS, },
	{ "tx_multicast_pkts",		NFP_MAC_STATS_TX_MULTICAST_PKTS, },
	{ "tx_pkts_64_octets",		NFP_MAC_STATS_TX_PKTS_64_OCTETS, },
	{ "tx_pkts_65_to_127_octets",
			NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, },
	{ "tx_pkts_128_to_255_octets",
			NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, },
	{ "tx_pkts_256_to_511_octets",
			NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, },
	{ "tx_pkts_512_to_1023_octets",
			NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, },
	{ "tx_pkts_1024_to_1518_octets",
			NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, },
	{ "tx_pkts_1519_to_max_octets",
			NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, },
	{ "tx_pause_frames_class0",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, },
	{ "tx_pause_frames_class1",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, },
	{ "tx_pause_frames_class2",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, },
	{ "tx_pause_frames_class3",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, },
	{ "tx_pause_frames_class4",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, },
	{ "tx_pause_frames_class5",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, },
	{ "tx_pause_frames_class6",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, },
	{ "tx_pause_frames_class7",	NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, },
};

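/* NN_ET_SWITCH_STATS_LEN covers the per-direction dev_* counters above
 * (9 RX followed by 9 TX entries); NN_ET_RVEC_GATHER_STATS is the number of
 * software stats summed across all ring vectors in nfp_vnic_get_sw_stats().
 */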
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_SWITCH_STATS_LEN 9
#define NN_ET_RVEC_GATHER_STATS 7

static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
	struct nfp_nsp *nsp;

	if (!app)
		return;

	nsp = nfp_nsp_open(app->cpp);
	if (IS_ERR(nsp))
		return;

	snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu",
		 nfp_nsp_get_abi_ver_major(nsp),
		 nfp_nsp_get_abi_ver_minor(nsp));

	nfp_nsp_close(nsp);
}

static void
nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev,
		const char *vnic_version, struct ethtool_drvinfo *drvinfo)
{
	char nsp_version[ETHTOOL_FWVERS_LEN] = {};

	strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));

	nfp_net_get_nspinfo(app, nsp_version);
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%s %s %s %s", vnic_version, nsp_version,
		 nfp_app_mip_name(app), nfp_app_name(app));
}

static void
nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	char vnic_version[ETHTOOL_FWVERS_LEN] = {};
	struct nfp_net *nn = netdev_priv(netdev);

	snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d",
		 nn->fw_ver.resv, nn->fw_ver.class,
		 nn->fw_ver.major, nn->fw_ver.minor);
	strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
		sizeof(drvinfo->bus_info));

	nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo);
}

static void
nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct nfp_app *app;

	app = nfp_app_from_netdev(netdev);
	if (!app)
		return;

	nfp_get_drvinfo(app, app->pdev, "*", drvinfo);
}

/**
 * nfp_net_get_link_ksettings - Get Link Speed settings
 * @netdev:	network interface device structure
 * @cmd:	ethtool command
 *
 * Reports speed settings based on info in the BAR provided by the fw.
 */
static int
nfp_net_get_link_ksettings(struct net_device *netdev,
			   struct ethtool_link_ksettings *cmd)
{
	static const u32 ls_to_ethtool[] = {
		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED]	= 0,
		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]	= SPEED_UNKNOWN,
		[NFP_NET_CFG_STS_LINK_RATE_1G]		= SPEED_1000,
		[NFP_NET_CFG_STS_LINK_RATE_10G]		= SPEED_10000,
		[NFP_NET_CFG_STS_LINK_RATE_25G]		= SPEED_25000,
		[NFP_NET_CFG_STS_LINK_RATE_40G]		= SPEED_40000,
		[NFP_NET_CFG_STS_LINK_RATE_50G]		= SPEED_50000,
		[NFP_NET_CFG_STS_LINK_RATE_100G]	= SPEED_100000,
	};
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_net *nn;
	u32 sts, ls;

	/* Init to unknowns */
	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	cmd->base.port = PORT_OTHER;
	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_UNKNOWN;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (eth_port)
		cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
			AUTONEG_ENABLE : AUTONEG_DISABLE;

	if (!netif_carrier_ok(netdev))
		return 0;

	/* Use link speed from ETH table if available, otherwise try the BAR */
	if (eth_port) {
		cmd->base.port = eth_port->port_type;
		cmd->base.speed = eth_port->speed;
		cmd->base.duplex = DUPLEX_FULL;
		return 0;
	}

	if (!nfp_netdev_is_nfp_net(netdev))
		return -EOPNOTSUPP;
	nn = netdev_priv(netdev);

	sts = nn_readl(nn, NFP_NET_CFG_STS);

	ls = FIELD_GET(NFP_NET_CFG_STS_LINK_RATE, sts);
	if (ls == NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED)
		return -EOPNOTSUPP;

	if (ls == NFP_NET_CFG_STS_LINK_RATE_UNKNOWN ||
	    ls >= ARRAY_SIZE(ls_to_ethtool))
		return 0;

	cmd->base.speed = ls_to_ethtool[ls];
	cmd->base.duplex = DUPLEX_FULL;

	return 0;
}

static int
nfp_net_set_link_ksettings(struct net_device *netdev,
			   const struct ethtool_link_ksettings *cmd)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;
	struct nfp_nsp *nsp;
	int err;

	port = nfp_port_from_netdev(netdev);
	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;

	if (netif_running(netdev)) {
		netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n");
		return -EBUSY;
	}

	nsp = nfp_eth_config_start(port->app->cpp, eth_port->index);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	err = __nfp_eth_set_aneg(nsp, cmd->base.autoneg == AUTONEG_ENABLE ?
				 NFP_ANEG_AUTO : NFP_ANEG_DISABLED);
	if (err)
		goto err_bad_set;
	if (cmd->base.speed != SPEED_UNKNOWN) {
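		/* The NSP takes the rate per lane, so convert the requested
		 * total port speed to a per-lane value.
		 */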
		u32 speed = cmd->base.speed / eth_port->lanes;

		err = __nfp_eth_set_speed(nsp, speed);
		if (err)
			goto err_bad_set;
	}

	err = nfp_eth_config_commit_end(nsp);
	if (err > 0)
		return 0; /* no change */

	nfp_net_refresh_port_table(port);

	return err;

err_bad_set:
	nfp_eth_config_cleanup_end(nsp);
	return err;
}

static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);

	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
	ring->rx_pending = nn->dp.rxd_cnt;
	ring->tx_pending = nn->dp.txd_cnt;
}

static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->rxd_cnt = rxd_cnt;
	dp->txd_cnt = txd_cnt;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 rxd_cnt, txd_cnt;

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
		return -EINVAL;

	if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

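/* Format one ethtool string into @data and return the pointer advanced by
 * ETH_GSTRING_LEN, ready for the next string.
 */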
static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);

	return data + ETH_GSTRING_LEN;
}

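/* Software stats: three per-ring-vector counters plus the gathered totals. */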
static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3;
}

static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->dp.num_r_vecs; i++) {
		data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
		data = nfp_pr_et(data, "rvec_%u_tx_pkts", i);
		data = nfp_pr_et(data, "rvec_%u_tx_busy", i);
	}

	data = nfp_pr_et(data, "hw_rx_csum_ok");
	data = nfp_pr_et(data, "hw_rx_csum_inner_ok");
	data = nfp_pr_et(data, "hw_rx_csum_err");
	data = nfp_pr_et(data, "hw_tx_csum");
	data = nfp_pr_et(data, "hw_tx_inner_csum");
	data = nfp_pr_et(data, "tx_gather");
	data = nfp_pr_et(data, "tx_lso");

	return data;
}

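/* Snapshot the per-vector counters under the u64_stats seqcount so 64-bit
 * values are read consistently on 32-bit hosts, and accumulate the gathered
 * stats across all ring vectors.
 */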
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
	u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	u64 tmp[NN_ET_RVEC_GATHER_STATS];
	unsigned int i, j;

	for (i = 0; i < nn->dp.num_r_vecs; i++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
			data[0] = nn->r_vecs[i].rx_pkts;
			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
			data[1] = nn->r_vecs[i].tx_pkts;
			data[2] = nn->r_vecs[i].tx_busy;
			tmp[3] = nn->r_vecs[i].hw_csum_tx;
			tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
			tmp[5] = nn->r_vecs[i].tx_gather;
			tmp[6] = nn->r_vecs[i].tx_lso;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));

		data += 3;

		for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
			gathered_stats[j] += tmp[j];
	}

	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
		*data++ = gathered_stats[j];

	return data;
}

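/* Device stats: the global counters plus a packet/byte pair per ring. */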
static unsigned int
nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
{
	return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2;
}

static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
			      unsigned int tx_rings, bool repr)
{
	int swap_off, i;

	BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2);
	/* If repr is true first add SWITCH_STATS_LEN and then subtract it
	 * effectively swapping the RX and TX statistics (giving us the RX
	 * and TX from perspective of the switch).
	 */
	swap_off = repr * NN_ET_SWITCH_STATS_LEN;

	for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name);

	for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i].name);

	for (i = 0; i < tx_rings; i++) {
		data = nfp_pr_et(data, "txq_%u_pkts", i);
		data = nfp_pr_et(data, "txq_%u_bytes", i);
	}

	for (i = 0; i < rx_rings; i++) {
		data = nfp_pr_et(data, "rxq_%u_pkts", i);
		data = nfp_pr_et(data, "rxq_%u_bytes", i);
	}

	return data;
}

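/* Each ring exports two consecutive 64-bit counters in the control BAR:
 * packets at the ring's stats offset and bytes 8 bytes after it.
 */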
static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
		      unsigned int rx_rings, unsigned int tx_rings)
{
	unsigned int i;

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
		*data++ = readq(mem + nfp_net_et_stats[i].off);

	for (i = 0; i < tx_rings; i++) {
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	for (i = 0; i < rx_rings; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
	}

	return data;
}

static unsigned int nfp_mac_get_stats_count(struct net_device *netdev)
{
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return 0;

	return ARRAY_SIZE(nfp_mac_et_stats);
}

static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name);

	return data;
}

static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data)
{
	struct nfp_port *port;
	unsigned int i;

	port = nfp_port_from_netdev(netdev);
	if (!__nfp_port_get_eth_port(port) || !port->eth_stats)
		return data;

	for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++)
		*data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off);

	return data;
}

static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		data = nfp_vnic_get_sw_stats_strings(netdev, data);
		data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
						     nn->dp.num_tx_rings,
						     false);
		data = nfp_mac_get_stats_strings(netdev, data);
		break;
	}
}

static void
nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		  u64 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	data = nfp_vnic_get_sw_stats(netdev, data);
	data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
				     nn->dp.num_rx_rings, nn->dp.num_tx_rings);
	data = nfp_mac_get_stats(netdev, data);
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return nfp_vnic_get_sw_stats_count(netdev) +
		       nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
						   nn->dp.num_tx_rings) +
		       nfp_mac_get_stats_count(netdev);
	default:
		return -EOPNOTSUPP;
	}
}

static void nfp_port_get_strings(struct net_device *netdev,
				 u32 stringset, u8 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	switch (stringset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true);
		else
			data = nfp_mac_get_stats_strings(netdev, data);
		break;
	}
}

static void
nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
		   u64 *data)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (nfp_port_is_vnic(port))
		data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0);
	else
		data = nfp_mac_get_stats(netdev, data);
}

static int nfp_port_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	unsigned int count;

	switch (sset) {
	case ETH_SS_STATS:
		if (nfp_port_is_vnic(port))
			count = nfp_vnic_get_hw_stats_count(0, 0);
		else
			count = nfp_mac_get_stats_count(netdev);
		return count;
	default:
		return -EOPNOTSUPP;
	}
}

/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}

static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
{
	u32 nfp_rss_flag;

	cmd->data = 0;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	if (nn->rss_cfg & nfp_rss_flag)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

	return 0;
}

static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nn->dp.num_rx_rings;
		return 0;
	case ETHTOOL_GRXFH:
		return nfp_net_get_rss_hash_opts(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc);
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}

static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return nfp_net_set_rss_hash_opt(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return 0;

	return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	return nfp_net_rss_key_sz(nn);
}

static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY))
		return -EOPNOTSUPP;

	if (indir)
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			indir[i] = nn->rss_itbl[i];
	if (key)
		memcpy(key, nn->rss_key, nfp_net_rss_key_sz(nn));
	if (hfunc) {
		*hfunc = nn->rss_hfunc;
		if (*hfunc >= 1 << ETH_RSS_HASH_FUNCS_COUNT)
			*hfunc = ETH_RSS_HASH_UNKNOWN;
	}

	return 0;
}

static int nfp_net_set_rxfh(struct net_device *netdev,
			    const u32 *indir, const u8 *key,
			    const u8 hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) ||
	    !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == nn->rss_hfunc))
		return -EOPNOTSUPP;

	if (!key && !indir)
		return 0;

	if (key) {
		memcpy(nn->rss_key, key, nfp_net_rss_key_sz(nn));
		nfp_net_rss_write_key(nn);
	}
	if (indir) {
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			nn->rss_itbl[i] = indir[i];

		nfp_net_rss_write_itbl(nn);
	}

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}

/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
	return NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 *regs_buf = p;
	int i;

	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
		regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32)));
}

static int nfp_net_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	ec->rx_coalesce_usecs       = nn->rx_coalesce_usecs;
	ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
	ec->tx_coalesce_usecs       = nn->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

	return 0;
}

/* Other debug dumps
 */
static int
nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer)
{
	struct nfp_resource *res;
	int ret;

	if (!app)
		return -EOPNOTSUPP;

	dump->version = 1;
	dump->flag = NFP_DUMP_NSP_DIAG;

	res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG);
	if (IS_ERR(res))
		return PTR_ERR(res);

	if (buffer) {
		if (dump->len != nfp_resource_size(res)) {
			ret = -EINVAL;
			goto exit_release;
		}

		ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res),
				   nfp_resource_address(res),
				   buffer, dump->len);
		if (ret != dump->len)
			ret = ret < 0 ? ret : -EIO;
		else
			ret = 0;
	} else {
		dump->len = nfp_resource_size(res);
		ret = 0;
	}
exit_release:
	nfp_resource_release(res);

	return ret;
}

static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
	struct nfp_app *app = nfp_app_from_netdev(netdev);

	if (!app)
		return -EOPNOTSUPP;

	if (val->flag != NFP_DUMP_NSP_DIAG)
		return -EINVAL;

	return 0;
}

static int
nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
	return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL);
}

static int
nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
		      void *buffer)
{
	return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer);
}

static int nfp_net_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int factor;

	if (ec->rx_coalesce_usecs_irq ||
	    ec->rx_max_coalesced_frames_irq ||
	    ec->tx_coalesce_usecs_irq ||
	    ec->tx_max_coalesced_frames_irq ||
	    ec->stats_block_coalesce_usecs ||
	    ec->use_adaptive_rx_coalesce ||
	    ec->use_adaptive_tx_coalesce ||
	    ec->pkt_rate_low ||
	    ec->rx_coalesce_usecs_low ||
	    ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low ||
	    ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high ||
	    ec->rx_coalesce_usecs_high ||
	    ec->rx_max_coalesced_frames_high ||
	    ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high ||
	    ec->rate_sample_interval)
		return -EOPNOTSUPP;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->me_freq_mhz / 16;

	/* Each pair of (usecs, max_frames) fields specifies that interrupts
	 * should be coalesced until
	 *      (usecs > 0 && time_since_first_completion >= usecs) ||
	 *      (max_frames > 0 && completed_frames >= max_frames)
	 *
	 * It is illegal to set both usecs and max_frames to zero as this would
	 * cause interrupts to never be generated.  To disable coalescing, set
	 * usecs = 0 and max_frames = 1.
	 *
	 * Some implementations ignore the value of max_frames and use the
	 * condition time_since_first_completion >= usecs
	 */

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	/* ensure valid configuration */
	if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
		return -EINVAL;

	if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
		return -EINVAL;

	if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
		return -EINVAL;

	/* configuration is valid */
	nn->rx_coalesce_usecs      = ec->rx_coalesce_usecs;
	nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
	nn->tx_coalesce_usecs      = ec->tx_coalesce_usecs;
	nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;

	/* write configuration to device */
	nfp_net_coalesce_write_cfg(nn);
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}

static void nfp_net_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int num_tx_rings;

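	/* When an XDP program is attached an extra TX ring is allocated per
	 * RX ring; hide those from the channel counts reported to user space.
	 */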
	num_tx_rings = nn->dp.num_tx_rings;
	if (nn->dp.xdp_prog)
		num_tx_rings -= nn->dp.num_rx_rings;

	channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
	channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
	channel->max_combined = min(channel->max_rx, channel->max_tx);
	channel->max_other = NFP_NET_NON_Q_VECTORS;
	channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
	channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
	channel->tx_count = num_tx_rings - channel->combined_count;
	channel->other_count = NFP_NET_NON_Q_VECTORS;
}

static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
				 unsigned int total_tx)
{
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->num_rx_rings = total_rx;
	dp->num_tx_rings = total_tx;
	/* nfp_net_check_config() will catch num_tx_rings > nn->max_tx_rings */
	if (dp->xdp_prog)
		dp->num_tx_rings += total_rx;

	return nfp_net_ring_reconfig(nn, dp, NULL);
}

static int nfp_net_set_channels(struct net_device *netdev,
				struct ethtool_channels *channel)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int total_rx, total_tx;

	/* Reject unsupported */
	if (!channel->combined_count ||
	    channel->other_count != NFP_NET_NON_Q_VECTORS ||
	    (channel->rx_count && channel->tx_count))
		return -EINVAL;

	total_rx = channel->combined_count + channel->rx_count;
	total_tx = channel->combined_count + channel->tx_count;

	if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
	    total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
		return -EINVAL;

	return nfp_net_set_num_rings(nn, total_rx, total_tx);
}

static const struct ethtool_ops nfp_net_ethtool_ops = {
	.get_drvinfo		= nfp_net_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= nfp_net_get_ringparam,
	.set_ringparam		= nfp_net_set_ringparam,
	.get_strings		= nfp_net_get_strings,
	.get_ethtool_stats	= nfp_net_get_stats,
	.get_sset_count		= nfp_net_get_sset_count,
	.get_rxnfc		= nfp_net_get_rxnfc,
	.set_rxnfc		= nfp_net_set_rxnfc,
	.get_rxfh_indir_size	= nfp_net_get_rxfh_indir_size,
	.get_rxfh_key_size	= nfp_net_get_rxfh_key_size,
	.get_rxfh		= nfp_net_get_rxfh,
	.set_rxfh		= nfp_net_set_rxfh,
	.get_regs_len		= nfp_net_get_regs_len,
	.get_regs		= nfp_net_get_regs,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
	.get_coalesce		= nfp_net_get_coalesce,
	.set_coalesce		= nfp_net_set_coalesce,
	.get_channels		= nfp_net_get_channels,
	.set_channels		= nfp_net_set_channels,
	.get_link_ksettings	= nfp_net_get_link_ksettings,
	.set_link_ksettings	= nfp_net_set_link_ksettings,
};

const struct ethtool_ops nfp_port_ethtool_ops = {
	.get_drvinfo		= nfp_app_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= nfp_port_get_strings,
	.get_ethtool_stats	= nfp_port_get_stats,
	.get_sset_count		= nfp_port_get_sset_count,
	.set_dump		= nfp_app_set_dump,
	.get_dump_flag		= nfp_app_get_dump_flag,
	.get_dump_data		= nfp_app_get_dump_data,
};

void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nfp_net_ethtool_ops;
}
1161