// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include <linux/vmalloc.h>

#include "fm10k.h"

struct fm10k_stats {
	/* The stat_string is expected to be a format string formatted using
	 * vsnprintf by fm10k_add_stat_strings. Every member of a stats array
	 * should use the same format specifiers as they will be formatted
	 * using the same variadic arguments.
	 */
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define FM10K_STAT_FIELDS(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = sizeof_field(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}
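/* For example, FM10K_STAT_FIELDS(struct fm10k_intfc, "tx_busy", tx_busy)
 * expands to { .stat_string = "tx_busy",
 *		.sizeof_stat = sizeof_field(struct fm10k_intfc, tx_busy),
 *		.stat_offset = offsetof(struct fm10k_intfc, tx_busy) },
 * which is everything needed to locate and size the counter at dump time.
 */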

/* netdevice statistics */
#define FM10K_NETDEV_STAT(_net_stat) \
	FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \
			  _net_stat)

static const struct fm10k_stats fm10k_gstrings_net_stats[] = {
	FM10K_NETDEV_STAT(tx_packets),
	FM10K_NETDEV_STAT(tx_bytes),
	FM10K_NETDEV_STAT(tx_errors),
	FM10K_NETDEV_STAT(rx_packets),
	FM10K_NETDEV_STAT(rx_bytes),
	FM10K_NETDEV_STAT(rx_errors),
	FM10K_NETDEV_STAT(rx_dropped),

	/* detailed Rx errors */
	FM10K_NETDEV_STAT(rx_length_errors),
	FM10K_NETDEV_STAT(rx_crc_errors),
	FM10K_NETDEV_STAT(rx_fifo_errors),
};

#define FM10K_NETDEV_STATS_LEN	ARRAY_SIZE(fm10k_gstrings_net_stats)

/* General interface statistics */
#define FM10K_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
	FM10K_STAT("tx_restart_queue", restart_queue),
	FM10K_STAT("tx_busy", tx_busy),
	FM10K_STAT("tx_csum_errors", tx_csum_errors),
	FM10K_STAT("rx_alloc_failed", alloc_failed),
	FM10K_STAT("rx_csum_errors", rx_csum_errors),

	FM10K_STAT("tx_packets_nic", tx_packets_nic),
	FM10K_STAT("tx_bytes_nic", tx_bytes_nic),
	FM10K_STAT("rx_packets_nic", rx_packets_nic),
	FM10K_STAT("rx_bytes_nic", rx_bytes_nic),
	FM10K_STAT("rx_drops_nic", rx_drops_nic),
	FM10K_STAT("rx_overrun_pf", rx_overrun_pf),
	FM10K_STAT("rx_overrun_vf", rx_overrun_vf),

	FM10K_STAT("swapi_status", hw.swapi.status),
	FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
	FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),

	FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),

	FM10K_STAT("tx_hang_count", tx_timeout_count),
};

static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
	FM10K_STAT("timeout", stats.timeout.count),
	FM10K_STAT("ur", stats.ur.count),
	FM10K_STAT("ca", stats.ca.count),
	FM10K_STAT("um", stats.um.count),
	FM10K_STAT("xec", stats.xec.count),
	FM10K_STAT("vlan_drop", stats.vlan_drop.count),
	FM10K_STAT("loopback_drop", stats.loopback_drop.count),
	FM10K_STAT("nodesc_drop", stats.nodesc_drop.count),
};

/* mailbox statistics */
#define FM10K_MBX_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat)

static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
	FM10K_MBX_STAT("mbx_tx_busy", tx_busy),
	FM10K_MBX_STAT("mbx_tx_dropped", tx_dropped),
	FM10K_MBX_STAT("mbx_tx_messages", tx_messages),
	FM10K_MBX_STAT("mbx_tx_dwords", tx_dwords),
	FM10K_MBX_STAT("mbx_tx_mbmem_pulled", tx_mbmem_pulled),
	FM10K_MBX_STAT("mbx_rx_messages", rx_messages),
	FM10K_MBX_STAT("mbx_rx_dwords", rx_dwords),
	FM10K_MBX_STAT("mbx_rx_parse_err", rx_parse_err),
	FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};

/* per-queue ring statistics */
#define FM10K_QUEUE_STAT(_name, _stat) \
	FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat)

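/* At dump time the "%s" and "%u" specifiers below are filled in with the
 * ring direction ("tx" or "rx") and the queue index, producing names
 * such as "tx_queue_0_packets" or "rx_queue_3_bytes".
 */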
static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
	FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets),
	FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes),
};

#define FM10K_GLOBAL_STATS_LEN	ARRAY_SIZE(fm10k_gstrings_global_stats)
#define FM10K_PF_STATS_LEN	ARRAY_SIZE(fm10k_gstrings_pf_stats)
#define FM10K_MBX_STATS_LEN	ARRAY_SIZE(fm10k_gstrings_mbx_stats)
#define FM10K_QUEUE_STATS_LEN	ARRAY_SIZE(fm10k_gstrings_queue_stats)

#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
				FM10K_NETDEV_STATS_LEN + \
				FM10K_MBX_STATS_LEN)

static const char fm10k_gstrings_test[][ETH_GSTRING_LEN] = {
	"Mailbox test (on/offline)"
};

#define FM10K_TEST_LEN (sizeof(fm10k_gstrings_test) / ETH_GSTRING_LEN)

enum fm10k_self_test_types {
	FM10K_TEST_MBX,
	FM10K_TEST_MAX = FM10K_TEST_LEN
};

enum {
	FM10K_PRV_FLAG_LEN,
};

static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
};

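/* __fm10k_add_stat_strings - render one stats table into the strings buffer
 *
 * Each stat_string is treated as a format string and rendered with
 * vsnprintf() using the trailing variadic arguments; the output pointer is
 * advanced by ETH_GSTRING_LEN per entry so each string lands in the next
 * slot of the ethtool strings buffer.
 */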
static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[],
				     const unsigned int size, ...)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		va_list args;

		va_start(args, size);
		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
		*p += ETH_GSTRING_LEN;
		va_end(args);
	}
}

#define fm10k_add_stat_strings(p, stats, ...) \
	__fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)

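/* fm10k_get_stat_strings - build the ETH_SS_STATS string table
 *
 * The order here (netdev stats, global stats, mailbox stats, PF-only stats
 * when not a VF, then per-queue Tx/Rx stats) mirrors the value order
 * produced by fm10k_get_ethtool_stats() and the count returned by
 * fm10k_get_sset_count().
 */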
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int i;

	fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats);

	fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats);

	fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats);

	if (interface->hw.mac.type != fm10k_mac_vf)
		fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats);

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
				       "tx", i);

		fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats,
				       "rx", i);
	}
}

static void fm10k_get_strings(struct net_device *dev,
			      u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, fm10k_gstrings_test,
		       FM10K_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		fm10k_get_stat_strings(dev, data);
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, fm10k_prv_flags,
		       FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
		break;
	}
}

static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int stats_len = FM10K_STATIC_STATS_LEN;

	switch (sset) {
	case ETH_SS_TEST:
		return FM10K_TEST_LEN;
	case ETH_SS_STATS:
		stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;

		if (hw->mac.type != fm10k_mac_vf)
			stats_len += FM10K_PF_STATS_LEN;

		return stats_len;
	case ETH_SS_PRIV_FLAGS:
		return FM10K_PRV_FLAG_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

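/* __fm10k_add_ethtool_stats - copy one stats table into the u64 data buffer
 *
 * Each counter is widened to u64 based on its recorded size. If the backing
 * structure pointer is NULL, the corresponding slots are written as zero
 * since the output buffer is not pre-cleared for us.
 */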
static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
				      const struct fm10k_stats stats[],
				      const unsigned int size)
{
	unsigned int i;

	if (!pointer) {
		/* memory is not zero allocated so we have to clear it */
		for (i = 0; i < size; i++)
			*((*data)++) = 0;
		return;
	}

	for (i = 0; i < size; i++) {
		char *p = (char *)pointer + stats[i].stat_offset;

		switch (stats[i].sizeof_stat) {
		case sizeof(u64):
			*((*data)++) = *(u64 *)p;
			break;
		case sizeof(u32):
			*((*data)++) = *(u32 *)p;
			break;
		case sizeof(u16):
			*((*data)++) = *(u16 *)p;
			break;
		case sizeof(u8):
			*((*data)++) = *(u8 *)p;
			break;
		default:
			WARN_ONCE(1, "unexpected stat size for %s",
				  stats[i].stat_string);
			*((*data)++) = 0;
		}
	}
}

#define fm10k_add_ethtool_stats(data, pointer, stats) \
	__fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))

static void fm10k_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats __always_unused *stats,
				    u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct net_device_stats *net_stats = &netdev->stats;
	int i;

	fm10k_update_stats(interface);

	fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats);

	fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats);

	fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
				fm10k_gstrings_mbx_stats);

	if (interface->hw.mac.type != fm10k_mac_vf) {
		fm10k_add_ethtool_stats(&data, interface,
					fm10k_gstrings_pf_stats);
	}

	for (i = 0; i < interface->hw.mac.max_queues; i++) {
		struct fm10k_ring *ring;

		ring = interface->tx_ring[i];
		fm10k_add_ethtool_stats(&data, ring,
					fm10k_gstrings_queue_stats);

		ring = interface->rx_ring[i];
		fm10k_add_ethtool_stats(&data, ring,
					fm10k_gstrings_queue_stats);
	}
}

/* If function below adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_Q 29

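/* fm10k_get_reg_q - snapshot the per-queue registers for ring i
 *
 * Reads the Rx-side descriptor ring registers followed by the Tx-side
 * registers into buff[]. The BUG_ON() at the end keeps the register count
 * in sync with FM10K_REGS_LEN_Q, which sizes the ethtool register dump.
 */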
static void fm10k_get_reg_q(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0;

	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_RXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_RXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_SRRCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPRDC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBRC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDBAH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDLEN(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TPH_TXCTRL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDH(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TDT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXDCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXQCTL(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TXINT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QPTC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_L(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_QBTC_H(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TQDLOC(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_TX_SGLORT(i));
	buff[idx++] = fm10k_read_reg(hw, FM10K_PFVTCTL(i));

	BUG_ON(idx != FM10K_REGS_LEN_Q);
}

/* If function above adds more registers this define needs to be updated */
#define FM10K_REGS_LEN_VSI 43

static void fm10k_get_reg_vsi(struct fm10k_hw *hw, u32 *buff, int i)
{
	int idx = 0, j;

	buff[idx++] = fm10k_read_reg(hw, FM10K_MRQC(i));
	for (j = 0; j < 10; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RSSRK(i, j));
	for (j = 0; j < 32; j++)
		buff[idx++] = fm10k_read_reg(hw, FM10K_RETA(i, j));

	BUG_ON(idx != FM10K_REGS_LEN_VSI);
}

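/* fm10k_get_regs - fill the ethtool register dump
 *
 * regs->version is composed of BIT(24), the PCI revision ID and the device
 * ID. The dump buffer is sized by fm10k_get_regs_len() using the
 * FM10K_REGS_LEN_PF/FM10K_REGS_LEN_VF defines below, so those defines have
 * to be kept in sync whenever registers are added here.
 */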
static void fm10k_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	u32 *buff = p;
	u16 i;

	regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		/* General PF Registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_CTRL_EXT);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR);
		*(buff++) = fm10k_read_reg(hw, FM10K_GCR_EXT);

		for (i = 0; i < 8; i++) {
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTMAP(i));
			*(buff++) = fm10k_read_reg(hw, FM10K_DGLORTDEC(i));
		}

		for (i = 0; i < 65; i++) {
			fm10k_get_reg_vsi(hw, buff, i);
			buff += FM10K_REGS_LEN_VSI;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_DMA_CTRL2);

		for (i = 0; i < FM10K_MAX_QUEUES_PF; i++) {
			fm10k_get_reg_q(hw, buff, i);
			buff += FM10K_REGS_LEN_Q;
		}

		*(buff++) = fm10k_read_reg(hw, FM10K_TPH_CTRL);

		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_INT_MAP(i));

		/* Interrupt Throttling Registers */
		for (i = 0; i < 130; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_ITR(i));

		break;
	case fm10k_mac_vf:
		/* General VF registers */
		*(buff++) = fm10k_read_reg(hw, FM10K_VFCTRL);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFINT_MAP);
		*(buff++) = fm10k_read_reg(hw, FM10K_VFSYSTIME);

		/* Interrupt Throttling Registers */
		for (i = 0; i < 8; i++)
			*(buff++) = fm10k_read_reg(hw, FM10K_VFITR(i));

		fm10k_get_reg_vsi(hw, buff, 0);
		buff += FM10K_REGS_LEN_VSI;

		for (i = 0; i < FM10K_MAX_QUEUES_POOL; i++) {
			if (i < hw->mac.max_queues)
				fm10k_get_reg_q(hw, buff, i);
			else
				memset(buff, 0, sizeof(u32) * FM10K_REGS_LEN_Q);
			buff += FM10K_REGS_LEN_Q;
		}

		break;
	default:
		return;
	}
}

/* If the function above adds more registers these defines need to be updated */
#define FM10K_REGS_LEN_PF \
(162 + (65 * FM10K_REGS_LEN_VSI) + (FM10K_MAX_QUEUES_PF * FM10K_REGS_LEN_Q))
#define FM10K_REGS_LEN_VF \
(11 + FM10K_REGS_LEN_VSI + (FM10K_MAX_QUEUES_POOL * FM10K_REGS_LEN_Q))

static int fm10k_get_regs_len(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;

	switch (hw->mac.type) {
	case fm10k_mac_pf:
		return FM10K_REGS_LEN_PF * sizeof(u32);
	case fm10k_mac_vf:
		return FM10K_REGS_LEN_VF * sizeof(u32);
	default:
		return 0;
	}
}

static void fm10k_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	strncpy(info->driver, fm10k_driver_name,
		sizeof(info->driver) - 1);
	strncpy(info->bus_info, pci_name(interface->pdev),
		sizeof(info->bus_info) - 1);
}

static void fm10k_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* record fixed values for autoneg and tx pause */
	pause->autoneg = 0;
	pause->tx_pause = 1;

	pause->rx_pause = interface->rx_pause ? 1 : 0;
}

static int fm10k_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	if (pause->autoneg || !pause->tx_pause)
		return -EINVAL;

	/* we can only support pause on the PF to avoid head-of-line blocking */
	if (hw->mac.type == fm10k_mac_pf)
		interface->rx_pause = pause->rx_pause ? ~0 : 0;
	else if (pause->rx_pause)
		return -EINVAL;

	if (netif_running(dev))
		fm10k_update_rx_drop_en(interface);

	return 0;
}

static u32 fm10k_get_msglevel(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	return interface->msg_enable;
}

static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	interface->msg_enable = data;
}

static void fm10k_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	ring->rx_max_pending = FM10K_MAX_RXD;
	ring->tx_max_pending = FM10K_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = interface->rx_ring_count;
	ring->tx_pending = interface->tx_ring_count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static int fm10k_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *temp_ring;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       FM10K_MIN_TXD, FM10K_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, FM10K_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       FM10K_MIN_RXD, FM10K_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, FM10K_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == interface->tx_ring_count) &&
	    (new_rx_count == interface->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__FM10K_RESETTING, interface->state))
		usleep_range(1000, 2000);

	if (!netif_running(interface->netdev)) {
		for (i = 0; i < interface->num_tx_queues; i++)
			interface->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < interface->num_rx_queues; i++)
			interface->rx_ring[i]->count = new_rx_count;
		interface->tx_ring_count = new_tx_count;
		interface->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, interface->num_tx_queues, interface->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct fm10k_ring)));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	fm10k_down(interface);

	/* Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != interface->tx_ring_count) {
		for (i = 0; i < interface->num_tx_queues; i++) {
			memcpy(&temp_ring[i], interface->tx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_tx_count;
			err = fm10k_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_tx_queues; i++) {
			fm10k_free_tx_resources(interface->tx_ring[i]);

			memcpy(interface->tx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != interface->rx_ring_count) {
		for (i = 0; i < interface->num_rx_queues; i++) {
			memcpy(&temp_ring[i], interface->rx_ring[i],
			       sizeof(struct fm10k_ring));

			temp_ring[i].count = new_rx_count;
			err = fm10k_setup_rx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					fm10k_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < interface->num_rx_queues; i++) {
			fm10k_free_rx_resources(interface->rx_ring[i]);

			memcpy(interface->rx_ring[i], &temp_ring[i],
			       sizeof(struct fm10k_ring));
		}

		interface->rx_ring_count = new_rx_count;
	}

err_setup:
	fm10k_up(interface);
	vfree(temp_ring);
clear_reset:
	clear_bit(__FM10K_RESETTING, interface->state);
	return err;
}

static int fm10k_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	ec->use_adaptive_tx_coalesce = ITR_IS_ADAPTIVE(interface->tx_itr);
	ec->tx_coalesce_usecs = interface->tx_itr & ~FM10K_ITR_ADAPTIVE;

	ec->use_adaptive_rx_coalesce = ITR_IS_ADAPTIVE(interface->rx_itr);
	ec->rx_coalesce_usecs = interface->rx_itr & ~FM10K_ITR_ADAPTIVE;

	return 0;
}

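/* fm10k_set_coalesce - apply new interrupt moderation settings
 *
 * Values above FM10K_ITR_MAX are rejected. When adaptive moderation is
 * requested, the FM10K_ITR_ADAPTIVE flag is stored together with the
 * default ITR value, and the result is pushed to every q_vector.
 */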
static int fm10k_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *ec)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 tx_itr, rx_itr;
	int i;

	/* verify limits */
	if ((ec->rx_coalesce_usecs > FM10K_ITR_MAX) ||
	    (ec->tx_coalesce_usecs > FM10K_ITR_MAX))
		return -EINVAL;

	/* record settings */
	tx_itr = ec->tx_coalesce_usecs;
	rx_itr = ec->rx_coalesce_usecs;

	/* set initial values for adaptive ITR */
	if (ec->use_adaptive_tx_coalesce)
		tx_itr = FM10K_ITR_ADAPTIVE | FM10K_TX_ITR_DEFAULT;

	if (ec->use_adaptive_rx_coalesce)
		rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;

	/* update interface */
	interface->tx_itr = tx_itr;
	interface->rx_itr = rx_itr;

	/* update q_vectors */
	for (i = 0; i < interface->num_q_vectors; i++) {
		struct fm10k_q_vector *qv = interface->q_vector[i];

		qv->tx.itr = tx_itr;
		qv->rx.itr = rx_itr;
	}

	return 0;
}

static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on fm10k */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case UDP_V4_FLOW:
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
			     interface->flags))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
			     interface->flags))
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fm10k_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 __always_unused *rule_locs)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = interface->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		ret = fm10k_get_rss_hash_opts(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int fm10k_set_rss_hash_opt(struct fm10k_intfc *interface,
				  struct ethtool_rxnfc *nfc)
{
	int rss_ipv4_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				    interface->flags);
	int rss_ipv6_udp = test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				    interface->flags);

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			clear_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				  interface->flags);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			set_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				interface->flags);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			clear_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				  interface->flags);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			set_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				interface->flags);
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* If something changed we need to update the MRQC register. Note that
	 * test_bit() is guaranteed to return strictly 0 or 1, so testing for
	 * equality is safe.
	 */
	if ((rss_ipv4_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
				      interface->flags)) ||
	    (rss_ipv6_udp != test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
				      interface->flags))) {
		struct fm10k_hw *hw = &interface->hw;
		bool warn = false;
		u32 mrqc;

		/* Perform hash on these packet types */
		mrqc = FM10K_MRQC_IPV4 |
		       FM10K_MRQC_TCP_IPV4 |
		       FM10K_MRQC_IPV6 |
		       FM10K_MRQC_TCP_IPV6;

		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
			     interface->flags)) {
			mrqc |= FM10K_MRQC_UDP_IPV4;
			warn = true;
		}
		if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP,
			     interface->flags)) {
			mrqc |= FM10K_MRQC_UDP_IPV6;
			warn = true;
		}

		/* If we enable UDP RSS display a warning that this may cause
		 * fragmented UDP packets to arrive out of order.
		 */
		if (warn)
			netif_warn(interface, drv, interface->netdev,
				   "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		fm10k_write_reg(hw, FM10K_MRQC(0), mrqc);
	}

	return 0;
}

static int fm10k_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = fm10k_set_rss_hash_opt(interface, cmd);
		break;
	default:
		break;
	}

	return ret;
}

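/* fm10k_mbx_test - mailbox self-test, currently a VF-only feature
 *
 * A TLV test message is generated for each attribute flag (unnested and
 * nested), enqueued on the mailbox, and the mailbox is then polled for up
 * to one second for the returned test_result. *data reports the failing
 * attribute flag on a transmit error, 1 on a test failure, 0 on success.
 */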
static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 attr_flag, test_msg[6];
	unsigned long timeout;
	int err = -EINVAL;

	/* For now this is a VF only feature */
	if (hw->mac.type != fm10k_mac_vf)
		return 0;

	/* loop through both nested and unnested attribute types */
	for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
	     attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
	     attr_flag += attr_flag) {
		/* generate message to be tested */
		fm10k_tlv_msg_test_create(test_msg, attr_flag);

		fm10k_mbx_lock(interface);
		mbx->test_result = FM10K_NOT_IMPLEMENTED;
		err = mbx->ops.enqueue_tx(hw, mbx, test_msg);
		fm10k_mbx_unlock(interface);

		/* wait up to 1 second for response */
		timeout = jiffies + HZ;
		do {
			if (err < 0)
				goto err_out;

			usleep_range(500, 1000);

			fm10k_mbx_lock(interface);
			mbx->ops.process(hw, mbx);
			fm10k_mbx_unlock(interface);

			err = mbx->test_result;
			if (!err)
				break;
		} while (time_is_after_jiffies(timeout));

		/* reporting errors */
		if (err)
			goto err_out;
	}

err_out:
	*data = err < 0 ? (attr_flag) : (err > 0);
	return err;
}

static void fm10k_self_test(struct net_device *dev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;

	memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);

	if (FM10K_REMOVED(hw->hw_addr)) {
		netif_err(interface, drv, dev,
			  "Interface removed - test blocked\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if (fm10k_mbx_test(interface, &data[FM10K_TEST_MBX]))
		eth_test->flags |= ETH_TEST_FL_FAILED;
}

static u32 fm10k_get_priv_flags(struct net_device *netdev)
{
	return 0;
}

static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
		return -EINVAL;

	return 0;
}

static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
	return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}

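/* fm10k_write_reta - program the RSS redirection table
 *
 * Four 8-bit queue indices are packed into each 32-bit RETA register. When
 * no indirection table is supplied, a default spread is generated with
 * ethtool_rxfh_indir_default(). Registers whose cached value is unchanged
 * are skipped.
 */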
void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
{
	u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
	struct fm10k_hw *hw = &interface->hw;
	u32 table[4];
	int i, j;

	/* record entries to reta table */
	for (i = 0; i < FM10K_RETA_SIZE; i++) {
		u32 reta, n;

		/* generate a new table if we weren't given one */
		for (j = 0; j < 4; j++) {
			if (indir)
				n = indir[4 * i + j];
			else
				n = ethtool_rxfh_indir_default(4 * i + j,
							       rss_i);

			table[j] = n;
		}

		reta = table[0] |
		       (table[1] << 8) |
		       (table[2] << 16) |
		       (table[3] << 24);

		if (interface->reta[i] == reta)
			continue;

		interface->reta[i] = reta;
		fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
	}
}

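/* fm10k_get_reta - report the cached RSS redirection table
 *
 * Each cached 32-bit RETA word holds four 8-bit queue indices; the
 * shift-left-then-right pairs below isolate bytes 0 through 3 without
 * needing explicit masks.
 */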
static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;

	if (!indir)
		return 0;

	for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
		u32 reta = interface->reta[i];

		indir[0] = (reta << 24) >> 24;
		indir[1] = (reta << 16) >> 24;
		indir[2] = (reta << 8) >> 24;
		indir[3] = (reta) >> 24;
	}

	return 0;
}

static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i;
	u16 rss_i;

	if (!indir)
		return 0;

	/* Verify user input. */
	rss_i = interface->ring_feature[RING_F_RSS].indices;
	for (i = fm10k_get_reta_size(netdev); i--;) {
		if (indir[i] < rss_i)
			continue;
		return -EINVAL;
	}

	fm10k_write_reta(interface, indir);

	return 0;
}

static u32 fm10k_get_rssrk_size(struct net_device __always_unused *netdev)
{
	return FM10K_RSSRK_SIZE * FM10K_RSSRK_ENTRIES_PER_REG;
}

static int fm10k_get_rssh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int i, err;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	err = fm10k_get_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4)
		*(__le32 *)key = cpu_to_le32(interface->rssrk[i]);

	return 0;
}

static int fm10k_set_rssh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_hw *hw = &interface->hw;
	int i, err;

	/* We do not allow change in unsupported parameters */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	err = fm10k_set_reta(netdev, indir);
	if (err || !key)
		return err;

	for (i = 0; i < FM10K_RSSRK_SIZE; i++, key += 4) {
		u32 rssrk = le32_to_cpu(*(__le32 *)key);

		if (interface->rssrk[i] == rssrk)
			continue;

		interface->rssrk[i] = rssrk;
		fm10k_write_reg(hw, FM10K_RSSRK(0, i), rssrk);
	}

	return 0;
}

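/* fm10k_max_channels - maximum combined channels the device can expose
 *
 * With multiple traffic classes configured, the limit reported is the
 * largest power of two of queues available per traffic class; otherwise it
 * is simply the hardware queue count.
 */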
static unsigned int fm10k_max_channels(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int max_combined = interface->hw.mac.max_queues;
	u8 tcs = netdev_get_num_tc(dev);

	/* For QoS report channels per traffic class */
	if (tcs > 1)
		max_combined = BIT((fls(max_combined / tcs) - 1));

	return max_combined;
}

static void fm10k_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = fm10k_max_channels(dev);

	/* report info for other vector */
	ch->max_other = NON_Q_VECTORS;
	ch->other_count = ch->max_other;

	/* record RSS queues */
	ch->combined_count = interface->ring_feature[RING_F_RSS].indices;
}

static int fm10k_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	unsigned int count = ch->combined_count;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > fm10k_max_channels(dev))
		return -EINVAL;

	interface->ring_feature[RING_F_RSS].limit = count;

	/* use setup TC to update any traffic class queue mapping */
	return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}

static const struct ethtool_ops fm10k_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_strings = fm10k_get_strings,
	.get_sset_count = fm10k_get_sset_count,
	.get_ethtool_stats = fm10k_get_ethtool_stats,
	.get_drvinfo = fm10k_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_pauseparam = fm10k_get_pauseparam,
	.set_pauseparam = fm10k_set_pauseparam,
	.get_msglevel = fm10k_get_msglevel,
	.set_msglevel = fm10k_set_msglevel,
	.get_ringparam = fm10k_get_ringparam,
	.set_ringparam = fm10k_set_ringparam,
	.get_coalesce = fm10k_get_coalesce,
	.set_coalesce = fm10k_set_coalesce,
	.get_rxnfc = fm10k_get_rxnfc,
	.set_rxnfc = fm10k_set_rxnfc,
	.get_regs = fm10k_get_regs,
	.get_regs_len = fm10k_get_regs_len,
	.self_test = fm10k_self_test,
	.get_priv_flags = fm10k_get_priv_flags,
	.set_priv_flags = fm10k_set_priv_flags,
	.get_rxfh_indir_size = fm10k_get_reta_size,
	.get_rxfh_key_size = fm10k_get_rssrk_size,
	.get_rxfh = fm10k_get_rssh,
	.set_rxfh = fm10k_set_rssh,
	.get_channels = fm10k_get_channels,
	.set_channels = fm10k_set_channels,
	.get_ts_info = ethtool_op_get_ts_info,
};

void fm10k_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fm10k_ethtool_ops;
}