1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2005 - 2016 Broadcom
4 * All rights reserved.
5 *
6 * Contact Information:
7 * linux-drivers@emulex.com
8 *
9 * Emulex
10 * 3333 Susan Street
11 * Costa Mesa, CA 92626
12 */
13
14 #include "be.h"
15 #include "be_cmds.h"
16 #include <linux/ethtool.h>
17
18 struct be_ethtool_stat {
19 char desc[ETH_GSTRING_LEN];
20 int type;
21 int size;
22 int offset;
23 };
24
25 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
26 #define FIELDINFO(_struct, field) sizeof_field(_struct, field), \
27 offsetof(_struct, field)
28 #define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
29 FIELDINFO(struct be_tx_stats, field)
30 #define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
31 FIELDINFO(struct be_rx_stats, field)
32 #define DRVSTAT_INFO(field) #field, DRVSTAT,\
33 FIELDINFO(struct be_drv_stats, field)
34
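/* For reference: each stats entry below is built by these macros, e.g.
 * DRVSTAT_INFO(rx_crc_errors) expands to
 *   "rx_crc_errors", DRVSTAT,
 *   sizeof_field(struct be_drv_stats, rx_crc_errors),
 *   offsetof(struct be_drv_stats, rx_crc_errors)
 * which initializes one struct be_ethtool_stat.
 */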
35 static const struct be_ethtool_stat et_stats[] = {
36 {DRVSTAT_INFO(rx_crc_errors)},
37 {DRVSTAT_INFO(rx_alignment_symbol_errors)},
38 {DRVSTAT_INFO(rx_pause_frames)},
39 {DRVSTAT_INFO(rx_control_frames)},
40 /* Received packets dropped when the Ethernet length field
41 * is not equal to the actual Ethernet data length.
42 */
43 {DRVSTAT_INFO(rx_in_range_errors)},
44 /* Received packets dropped when their length field is >= 1501 bytes
45 * and <= 1535 bytes.
46 */
47 {DRVSTAT_INFO(rx_out_range_errors)},
48 /* Received packets dropped when they are longer than 9216 bytes */
49 {DRVSTAT_INFO(rx_frame_too_long)},
50 /* Received packets dropped when they don't pass the unicast or
51 * multicast address filtering.
52 */
53 {DRVSTAT_INFO(rx_address_filtered)},
54 /* Received packets dropped when IP packet length field is less than
55 * the IP header length field.
56 */
57 {DRVSTAT_INFO(rx_dropped_too_small)},
58 /* Received packets dropped when IP length field is greater than
59 * the actual packet length.
60 */
61 {DRVSTAT_INFO(rx_dropped_too_short)},
62 /* Received packets dropped when the IP header length field is less
63 * than 5.
64 */
65 {DRVSTAT_INFO(rx_dropped_header_too_small)},
66 /* Received packets dropped when the TCP header length field is less
67 * than 5 or the TCP header length + IP header length is more
68 * than IP packet length.
69 */
70 {DRVSTAT_INFO(rx_dropped_tcp_length)},
71 {DRVSTAT_INFO(rx_dropped_runt)},
72 /* Number of received packets dropped when a fifo for descriptors going
73 * into the packet demux block overflows. In normal operation, this
74 * fifo must never overflow.
75 */
76 {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
77 /* Received packets dropped when the RX block runs out of space in
78 * one of its input FIFOs. This could happen due to a long burst of
79 * minimum-sized (64-byte) frames in the receive path.
80 * This counter may also be erroneously incremented rarely.
81 */
82 {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
83 {DRVSTAT_INFO(rx_ip_checksum_errs)},
84 {DRVSTAT_INFO(rx_tcp_checksum_errs)},
85 {DRVSTAT_INFO(rx_udp_checksum_errs)},
86 {DRVSTAT_INFO(tx_pauseframes)},
87 {DRVSTAT_INFO(tx_controlframes)},
88 {DRVSTAT_INFO(rx_priority_pause_frames)},
89 {DRVSTAT_INFO(tx_priority_pauseframes)},
90 /* Received packets dropped when an internal fifo going into
91 * main packet buffer tank (PMEM) overflows.
92 */
93 {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
94 {DRVSTAT_INFO(jabber_events)},
95 /* Received packets dropped due to lack of available HW packet buffers
96 * used to temporarily hold the received packets.
97 */
98 {DRVSTAT_INFO(rx_drops_no_pbuf)},
99 /* Received packets dropped due to input receive buffer
100 * descriptor fifo overflowing.
101 */
102 {DRVSTAT_INFO(rx_drops_no_erx_descr)},
103 /* Packets dropped because the internal FIFO to the offloaded TCP
104 * receive processing block is full. This could happen only for
105 * offloaded iSCSI or FCoE traffic.
106 */
107 {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
108 /* Received packets dropped when they need more than 8
109 * receive buffers. This cannot happen as the driver configures
110 * 2048 byte receive buffers.
111 */
112 {DRVSTAT_INFO(rx_drops_too_many_frags)},
113 {DRVSTAT_INFO(forwarded_packets)},
114 /* Received packets dropped when the frame length
115 * is more than 9018 bytes
116 */
117 {DRVSTAT_INFO(rx_drops_mtu)},
118 /* Number of dma mapping errors */
119 {DRVSTAT_INFO(dma_map_errors)},
120 /* Number of packets dropped due to random early drop function */
121 {DRVSTAT_INFO(eth_red_drops)},
122 {DRVSTAT_INFO(rx_roce_bytes_lsd)},
123 {DRVSTAT_INFO(rx_roce_bytes_msd)},
124 {DRVSTAT_INFO(rx_roce_frames)},
125 {DRVSTAT_INFO(roce_drops_payload_len)},
126 {DRVSTAT_INFO(roce_drops_crc)}
127 };
128
129 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
130
131 /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
132 * are first and second members respectively.
133 */
134 static const struct be_ethtool_stat et_rx_stats[] = {
135 {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
136 {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
137 {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
138 {DRVSTAT_RX_INFO(rx_compl)},
139 {DRVSTAT_RX_INFO(rx_compl_err)},
140 {DRVSTAT_RX_INFO(rx_mcast_pkts)},
141 /* Number of page allocation failures while posting receive buffers
142 * to HW.
143 */
144 {DRVSTAT_RX_INFO(rx_post_fail)},
145 /* Received packets dropped due to skb allocation failure */
146 {DRVSTAT_RX_INFO(rx_drops_no_skbs)},
147 /* Received packets dropped due to lack of available fetched buffers
148 * posted by the driver.
149 */
150 {DRVSTAT_RX_INFO(rx_drops_no_frags)}
151 };
152
153 #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
154
155 /* Stats related to multi TX queues: get_stats routine assumes compl is the
156 * first member
157 */
158 static const struct be_ethtool_stat et_tx_stats[] = {
159 {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
160 /* This counter is incremented when the HW encounters an error while
161 * parsing the packet header of an outgoing TX request. This counter is
162 * applicable only for BE2, BE3 and Skyhawk based adapters.
163 */
164 {DRVSTAT_TX_INFO(tx_hdr_parse_err)},
165 /* This counter is incremented when an error occurs in the DMA
166 * operation associated with the TX request from the host to the device.
167 */
168 {DRVSTAT_TX_INFO(tx_dma_err)},
169 /* This counter is incremented when MAC or VLAN spoof checking is
170 * enabled on the interface and the TX request fails the spoof check
171 * in HW.
172 */
173 {DRVSTAT_TX_INFO(tx_spoof_check_err)},
174 /* This counter is incremented when the HW encounters an error while
175 * performing TSO offload. This counter is applicable only for Lancer
176 * adapters.
177 */
178 {DRVSTAT_TX_INFO(tx_tso_err)},
179 /* This counter is incremented when the HW detects Q-in-Q style VLAN
180 * tagging in a packet and such tagging is not expected on the outgoing
181 * interface. This counter is applicable only for Lancer adapters.
182 */
183 {DRVSTAT_TX_INFO(tx_qinq_err)},
184 /* This counter is incremented when the HW detects parity errors in the
185 * packet data. This counter is applicable only for Lancer adapters.
186 */
187 {DRVSTAT_TX_INFO(tx_internal_parity_err)},
188 {DRVSTAT_TX_INFO(tx_sge_err)},
189 {DRVSTAT_TX_INFO(tx_bytes)},
190 {DRVSTAT_TX_INFO(tx_pkts)},
191 {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
192 /* Number of skbs queued for transmission by the driver */
193 {DRVSTAT_TX_INFO(tx_reqs)},
194 /* Number of times the TX queue was stopped due to lack
195 * of space in the TXQ.
196 */
197 {DRVSTAT_TX_INFO(tx_stops)},
198 /* Pkts dropped in the driver's transmit path */
199 {DRVSTAT_TX_INFO(tx_drv_drops)}
200 };
201
202 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
203
204 static const char et_self_tests[][ETH_GSTRING_LEN] = {
205 "MAC Loopback test",
206 "PHY Loopback test",
207 "External Loopback test",
208 "DDR DMA test",
209 "Link test"
210 };
211
212 #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
213 #define BE_MAC_LOOPBACK 0x0
214 #define BE_PHY_LOOPBACK 0x1
215 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
216 #define BE_NO_LOOPBACK 0xff
217
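/* Reported via "ethtool -i <iface>". When the running FW version differs from
 * the flashed one, fw_version is reported as "running [flashed]".
 */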
218 static void be_get_drvinfo(struct net_device *netdev,
219 struct ethtool_drvinfo *drvinfo)
220 {
221 struct be_adapter *adapter = netdev_priv(netdev);
222
223 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
224 if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
225 strlcpy(drvinfo->fw_version, adapter->fw_ver,
226 sizeof(drvinfo->fw_version));
227 else
228 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
229 "%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
230
231 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
232 sizeof(drvinfo->bus_info));
233 }
234
235 static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
236 {
237 u32 data_read = 0, eof;
238 u8 addn_status;
239 struct be_dma_mem data_len_cmd;
240
241 memset(&data_len_cmd, 0, sizeof(data_len_cmd));
242 /* data_offset and data_size should be 0 to get the file length */
243 lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, file_name,
244 &data_read, &eof, &addn_status);
245
246 return data_read;
247 }
248
249 static int be_get_dump_len(struct be_adapter *adapter)
250 {
251 u32 dump_size = 0;
252
253 if (lancer_chip(adapter))
254 dump_size = lancer_cmd_get_file_len(adapter,
255 LANCER_FW_DUMP_FILE);
256 else
257 dump_size = adapter->fat_dump_len;
258
259 return dump_size;
260 }
261
262 static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
263 u32 buf_len, void *buf)
264 {
265 struct be_dma_mem read_cmd;
266 u32 read_len = 0, total_read_len = 0, chunk_size;
267 u32 eof = 0;
268 u8 addn_status;
269 int status = 0;
270
271 read_cmd.size = LANCER_READ_FILE_CHUNK;
272 read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
273 &read_cmd.dma, GFP_ATOMIC);
274
275 if (!read_cmd.va) {
276 dev_err(&adapter->pdev->dev,
277 "Memory allocation failure while reading dump\n");
278 return -ENOMEM;
279 }
280
281 while ((total_read_len < buf_len) && !eof) {
282 chunk_size = min_t(u32, (buf_len - total_read_len),
283 LANCER_READ_FILE_CHUNK);
284 chunk_size = ALIGN(chunk_size, 4);
285 status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
286 total_read_len, file_name,
287 &read_len, &eof, &addn_status);
288 if (!status) {
289 memcpy(buf + total_read_len, read_cmd.va, read_len);
290 total_read_len += read_len;
291 eof &= LANCER_READ_FILE_EOF_MASK;
292 } else {
293 status = -EIO;
294 break;
295 }
296 }
297 dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
298 read_cmd.dma);
299
300 return status;
301 }
302
303 static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len,
304 void *buf)
305 {
306 int status = 0;
307
308 if (lancer_chip(adapter))
309 status = lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
310 dump_len, buf);
311 else
312 status = be_cmd_get_fat_dump(adapter, dump_len, buf);
313
314 return status;
315 }
316
317 static int be_get_coalesce(struct net_device *netdev,
318 struct ethtool_coalesce *et)
319 {
320 struct be_adapter *adapter = netdev_priv(netdev);
321 struct be_aic_obj *aic = &adapter->aic_obj[0];
322
323 et->rx_coalesce_usecs = aic->prev_eqd;
324 et->rx_coalesce_usecs_high = aic->max_eqd;
325 et->rx_coalesce_usecs_low = aic->min_eqd;
326
327 et->tx_coalesce_usecs = aic->prev_eqd;
328 et->tx_coalesce_usecs_high = aic->max_eqd;
329 et->tx_coalesce_usecs_low = aic->min_eqd;
330
331 et->use_adaptive_rx_coalesce = adapter->aic_enabled;
332 et->use_adaptive_tx_coalesce = adapter->aic_enabled;
333
334 return 0;
335 }
336
337 /* TX attributes are ignored. Only RX attributes are considered.
338 * The eqd cmd is issued from the worker thread.
339 */
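/* Typically exercised with something like (illustrative values only):
 *   ethtool -C <iface> adaptive-rx on rx-usecs 96 rx-usecs-low 0 rx-usecs-high 120
 */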
340 static int be_set_coalesce(struct net_device *netdev,
341 struct ethtool_coalesce *et)
342 {
343 struct be_adapter *adapter = netdev_priv(netdev);
344 struct be_aic_obj *aic = &adapter->aic_obj[0];
345 struct be_eq_obj *eqo;
346 int i;
347
348 adapter->aic_enabled = et->use_adaptive_rx_coalesce;
349
350 for_all_evt_queues(adapter, eqo, i) {
351 aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
352 aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
353 aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
354 aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
355 aic++;
356 }
357
358 /* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
359 * When AIC is disabled, persistently force set EQD value via the
360 * FW cmd, so that we don't have to calculate the delay multiplier
361 * encoding each time EQ_DB is rung.
362 */
363 if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
364 be_eqd_update(adapter, true);
365
366 return 0;
367 }
368
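/* Fills the "ethtool -S" data[] array in the same order in which
 * be_get_stat_strings() emits the names: the global et_stats[] counters first,
 * then ETHTOOL_RXSTATS_NUM entries per RX queue, then ETHTOOL_TXSTATS_NUM
 * entries per TX queue (see be_get_sset_count()).
 */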
369 static void be_get_ethtool_stats(struct net_device *netdev,
370 struct ethtool_stats *stats, uint64_t *data)
371 {
372 struct be_adapter *adapter = netdev_priv(netdev);
373 struct be_rx_obj *rxo;
374 struct be_tx_obj *txo;
375 void *p;
376 unsigned int i, j, base = 0, start;
377
378 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
379 p = (u8 *)&adapter->drv_stats + et_stats[i].offset;
380 data[i] = *(u32 *)p;
381 }
382 base += ETHTOOL_STATS_NUM;
383
384 for_all_rx_queues(adapter, rxo, j) {
385 struct be_rx_stats *stats = rx_stats(rxo);
386
387 do {
388 start = u64_stats_fetch_begin_irq(&stats->sync);
389 data[base] = stats->rx_bytes;
390 data[base + 1] = stats->rx_pkts;
391 } while (u64_stats_fetch_retry_irq(&stats->sync, start));
392
393 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
394 p = (u8 *)stats + et_rx_stats[i].offset;
395 data[base + i] = *(u32 *)p;
396 }
397 base += ETHTOOL_RXSTATS_NUM;
398 }
399
400 for_all_tx_queues(adapter, txo, j) {
401 struct be_tx_stats *stats = tx_stats(txo);
402
403 do {
404 start = u64_stats_fetch_begin_irq(&stats->sync_compl);
405 data[base] = stats->tx_compl;
406 } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
407
408 do {
409 start = u64_stats_fetch_begin_irq(&stats->sync);
410 for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
411 p = (u8 *)stats + et_tx_stats[i].offset;
412 data[base + i] =
413 (et_tx_stats[i].size == sizeof(u64)) ?
414 *(u64 *)p : *(u32 *)p;
415 }
416 } while (u64_stats_fetch_retry_irq(&stats->sync, start));
417 base += ETHTOOL_TXSTATS_NUM;
418 }
419 }
420
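/* Private flags reported by "ethtool --show-priv-flags"; HW error recovery can
 * be disabled with e.g. "ethtool --set-priv-flags <iface> disable-tpe-recovery on".
 */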
421 static const char be_priv_flags[][ETH_GSTRING_LEN] = {
422 "disable-tpe-recovery"
423 };
424
425 static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
426 uint8_t *data)
427 {
428 struct be_adapter *adapter = netdev_priv(netdev);
429 int i, j;
430
431 switch (stringset) {
432 case ETH_SS_STATS:
433 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
434 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
435 data += ETH_GSTRING_LEN;
436 }
437 for (i = 0; i < adapter->num_rx_qs; i++) {
438 for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
439 sprintf(data, "rxq%d: %s", i,
440 et_rx_stats[j].desc);
441 data += ETH_GSTRING_LEN;
442 }
443 }
444 for (i = 0; i < adapter->num_tx_qs; i++) {
445 for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
446 sprintf(data, "txq%d: %s", i,
447 et_tx_stats[j].desc);
448 data += ETH_GSTRING_LEN;
449 }
450 }
451 break;
452 case ETH_SS_TEST:
453 for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
454 memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
455 data += ETH_GSTRING_LEN;
456 }
457 break;
458 case ETH_SS_PRIV_FLAGS:
459 for (i = 0; i < ARRAY_SIZE(be_priv_flags); i++)
460 strcpy(data + i * ETH_GSTRING_LEN, be_priv_flags[i]);
461 break;
462 }
463 }
464
465 static int be_get_sset_count(struct net_device *netdev, int stringset)
466 {
467 struct be_adapter *adapter = netdev_priv(netdev);
468
469 switch (stringset) {
470 case ETH_SS_TEST:
471 return ETHTOOL_TESTS_NUM;
472 case ETH_SS_STATS:
473 return ETHTOOL_STATS_NUM +
474 adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
475 adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
476 case ETH_SS_PRIV_FLAGS:
477 return ARRAY_SIZE(be_priv_flags);
478 default:
479 return -EINVAL;
480 }
481 }
482
483 static u32 be_get_port_type(struct be_adapter *adapter)
484 {
485 u32 port;
486
487 switch (adapter->phy.interface_type) {
488 case PHY_TYPE_BASET_1GB:
489 case PHY_TYPE_BASEX_1GB:
490 case PHY_TYPE_SGMII:
491 port = PORT_TP;
492 break;
493 case PHY_TYPE_SFP_PLUS_10GB:
494 if (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE)
495 port = PORT_DA;
496 else
497 port = PORT_FIBRE;
498 break;
499 case PHY_TYPE_QSFP:
500 if (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE)
501 port = PORT_DA;
502 else
503 port = PORT_FIBRE;
504 break;
505 case PHY_TYPE_XFP_10GB:
506 case PHY_TYPE_SFP_1GB:
507 port = PORT_FIBRE;
508 break;
509 case PHY_TYPE_BASET_10GB:
510 port = PORT_TP;
511 break;
512 default:
513 port = PORT_OTHER;
514 }
515
516 return port;
517 }
518
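/* Translates the PHY interface type and BE_SUPPORTED_SPEED_xxx bits into the
 * legacy ethtool link-mode bitmap (SUPPORTED_xxx values), which
 * be_get_link_ksettings() later converts via
 * ethtool_convert_legacy_u32_to_link_mode().
 */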
519 static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds)
520 {
521 u32 val = 0;
522
523 switch (adapter->phy.interface_type) {
524 case PHY_TYPE_BASET_1GB:
525 case PHY_TYPE_BASEX_1GB:
526 case PHY_TYPE_SGMII:
527 val |= SUPPORTED_TP;
528 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
529 val |= SUPPORTED_1000baseT_Full;
530 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
531 val |= SUPPORTED_100baseT_Full;
532 if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
533 val |= SUPPORTED_10baseT_Full;
534 break;
535 case PHY_TYPE_KX4_10GB:
536 val |= SUPPORTED_Backplane;
537 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
538 val |= SUPPORTED_1000baseKX_Full;
539 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
540 val |= SUPPORTED_10000baseKX4_Full;
541 break;
542 case PHY_TYPE_KR2_20GB:
543 val |= SUPPORTED_Backplane;
544 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
545 val |= SUPPORTED_10000baseKR_Full;
546 if (if_speeds & BE_SUPPORTED_SPEED_20GBPS)
547 val |= SUPPORTED_20000baseKR2_Full;
548 break;
549 case PHY_TYPE_KR_10GB:
550 val |= SUPPORTED_Backplane |
551 SUPPORTED_10000baseKR_Full;
552 break;
553 case PHY_TYPE_KR4_40GB:
554 val |= SUPPORTED_Backplane;
555 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
556 val |= SUPPORTED_10000baseKR_Full;
557 if (if_speeds & BE_SUPPORTED_SPEED_40GBPS)
558 val |= SUPPORTED_40000baseKR4_Full;
559 break;
560 case PHY_TYPE_QSFP:
561 if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) {
562 switch (adapter->phy.cable_type) {
563 case QSFP_PLUS_CR4_CABLE:
564 val |= SUPPORTED_40000baseCR4_Full;
565 break;
566 case QSFP_PLUS_LR4_CABLE:
567 val |= SUPPORTED_40000baseLR4_Full;
568 break;
569 default:
570 val |= SUPPORTED_40000baseSR4_Full;
571 break;
572 }
573 }
574 fallthrough;
575 case PHY_TYPE_SFP_PLUS_10GB:
576 case PHY_TYPE_XFP_10GB:
577 case PHY_TYPE_SFP_1GB:
578 val |= SUPPORTED_FIBRE;
579 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
580 val |= SUPPORTED_10000baseT_Full;
581 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
582 val |= SUPPORTED_1000baseT_Full;
583 break;
584 case PHY_TYPE_BASET_10GB:
585 val |= SUPPORTED_TP;
586 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
587 val |= SUPPORTED_10000baseT_Full;
588 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
589 val |= SUPPORTED_1000baseT_Full;
590 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
591 val |= SUPPORTED_100baseT_Full;
592 break;
593 default:
594 val |= SUPPORTED_TP;
595 }
596
597 return val;
598 }
599
600 bool be_pause_supported(struct be_adapter *adapter)
601 {
602 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
603 adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
604 false : true;
605 }
606
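/* Backs "ethtool <iface>". On the first call (phy.link_speed < 0) the link
 * status, PHY info and cable type are queried from the FW and cached in
 * adapter->phy; subsequent calls report the cached values.
 */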
607 static int be_get_link_ksettings(struct net_device *netdev,
608 struct ethtool_link_ksettings *cmd)
609 {
610 struct be_adapter *adapter = netdev_priv(netdev);
611 u8 link_status;
612 u16 link_speed = 0;
613 int status;
614 u32 auto_speeds;
615 u32 fixed_speeds;
616 u32 supported = 0, advertising = 0;
617
618 if (adapter->phy.link_speed < 0) {
619 status = be_cmd_link_status_query(adapter, &link_speed,
620 &link_status, 0);
621 if (!status)
622 be_link_status_update(adapter, link_status);
623 cmd->base.speed = link_speed;
624
625 status = be_cmd_get_phy_info(adapter);
626 if (!status) {
627 auto_speeds = adapter->phy.auto_speeds_supported;
628 fixed_speeds = adapter->phy.fixed_speeds_supported;
629
630 be_cmd_query_cable_type(adapter);
631
632 supported =
633 convert_to_et_setting(adapter,
634 auto_speeds |
635 fixed_speeds);
636 advertising =
637 convert_to_et_setting(adapter, auto_speeds);
638
639 cmd->base.port = be_get_port_type(adapter);
640
641 if (adapter->phy.auto_speeds_supported) {
642 supported |= SUPPORTED_Autoneg;
643 cmd->base.autoneg = AUTONEG_ENABLE;
644 advertising |= ADVERTISED_Autoneg;
645 }
646
647 supported |= SUPPORTED_Pause;
648 if (be_pause_supported(adapter))
649 advertising |= ADVERTISED_Pause;
650 } else {
651 cmd->base.port = PORT_OTHER;
652 cmd->base.autoneg = AUTONEG_DISABLE;
653 }
654
655 /* Save for future use */
656 adapter->phy.link_speed = cmd->base.speed;
657 adapter->phy.port_type = cmd->base.port;
658 adapter->phy.autoneg = cmd->base.autoneg;
659 adapter->phy.advertising = advertising;
660 adapter->phy.supported = supported;
661 } else {
662 cmd->base.speed = adapter->phy.link_speed;
663 cmd->base.port = adapter->phy.port_type;
664 cmd->base.autoneg = adapter->phy.autoneg;
665 advertising = adapter->phy.advertising;
666 supported = adapter->phy.supported;
667 }
668
669 cmd->base.duplex = netif_carrier_ok(netdev) ?
670 DUPLEX_FULL : DUPLEX_UNKNOWN;
671 cmd->base.phy_address = adapter->port_num;
672
673 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
674 supported);
675 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
676 advertising);
677
678 return 0;
679 }
680
681 static void be_get_ringparam(struct net_device *netdev,
682 struct ethtool_ringparam *ring)
683 {
684 struct be_adapter *adapter = netdev_priv(netdev);
685
686 ring->rx_max_pending = adapter->rx_obj[0].q.len;
687 ring->rx_pending = adapter->rx_obj[0].q.len;
688 ring->tx_max_pending = adapter->tx_obj[0].q.len;
689 ring->tx_pending = adapter->tx_obj[0].q.len;
690 }
691
692 static void
693 be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
694 {
695 struct be_adapter *adapter = netdev_priv(netdev);
696
697 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
698 ecmd->autoneg = adapter->phy.fc_autoneg;
699 }
700
701 static int
702 be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
703 {
704 struct be_adapter *adapter = netdev_priv(netdev);
705 int status;
706
707 if (ecmd->autoneg != adapter->phy.fc_autoneg)
708 return -EINVAL;
709
710 status = be_cmd_set_flow_control(adapter, ecmd->tx_pause,
711 ecmd->rx_pause);
712 if (status) {
713 dev_warn(&adapter->pdev->dev, "Pause param set failed\n");
714 return be_cmd_status(status);
715 }
716
717 adapter->tx_fc = ecmd->tx_pause;
718 adapter->rx_fc = ecmd->rx_pause;
719 return 0;
720 }
721
722 static int be_set_phys_id(struct net_device *netdev,
723 enum ethtool_phys_id_state state)
724 {
725 struct be_adapter *adapter = netdev_priv(netdev);
726 int status = 0;
727
728 switch (state) {
729 case ETHTOOL_ID_ACTIVE:
730 status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
731 &adapter->beacon_state);
732 if (status)
733 return be_cmd_status(status);
734 return 1; /* cycle on/off once per second */
735
736 case ETHTOOL_ID_ON:
737 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
738 0, 0, BEACON_STATE_ENABLED);
739 break;
740
741 case ETHTOOL_ID_OFF:
742 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
743 0, 0, BEACON_STATE_DISABLED);
744 break;
745
746 case ETHTOOL_ID_INACTIVE:
747 status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
748 0, 0, adapter->beacon_state);
749 }
750
751 return be_cmd_status(status);
752 }
753
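/* Backs "ethtool -W <iface> <flag>" on Lancer adapters; the flag selects
 * LANCER_INITIATE_FW_DUMP or LANCER_DELETE_FW_DUMP. The dump itself is read
 * back via be_get_dump_flag()/be_get_dump_data() ("ethtool -w").
 */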
754 static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
755 {
756 struct be_adapter *adapter = netdev_priv(netdev);
757 struct device *dev = &adapter->pdev->dev;
758 int status;
759
760 if (!lancer_chip(adapter) ||
761 !check_privilege(adapter, MAX_PRIVILEGES))
762 return -EOPNOTSUPP;
763
764 switch (dump->flag) {
765 case LANCER_INITIATE_FW_DUMP:
766 status = lancer_initiate_dump(adapter);
767 if (!status)
768 dev_info(dev, "FW dump initiated successfully\n");
769 break;
770 case LANCER_DELETE_FW_DUMP:
771 status = lancer_delete_dump(adapter);
772 if (!status)
773 dev_info(dev, "FW dump deleted successfully\n");
774 break;
775 default:
776 dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
777 return -EINVAL;
778 }
779 return status;
780 }
781
782 static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
783 {
784 struct be_adapter *adapter = netdev_priv(netdev);
785
786 if (adapter->wol_cap & BE_WOL_CAP) {
787 wol->supported |= WAKE_MAGIC;
788 if (adapter->wol_en)
789 wol->wolopts |= WAKE_MAGIC;
790 } else {
791 wol->wolopts = 0;
792 }
793 memset(&wol->sopass, 0, sizeof(wol->sopass));
794 }
795
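/* Only magic-packet WoL is supported; typically configured with something like
 * "ethtool -s <iface> wol g" (or "wol d" to disable).
 */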
796 static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
797 {
798 struct be_adapter *adapter = netdev_priv(netdev);
799 struct device *dev = &adapter->pdev->dev;
800 struct be_dma_mem cmd;
801 u8 mac[ETH_ALEN];
802 bool enable;
803 int status;
804
805 if (wol->wolopts & ~WAKE_MAGIC)
806 return -EOPNOTSUPP;
807
808 if (!(adapter->wol_cap & BE_WOL_CAP)) {
809 dev_warn(&adapter->pdev->dev, "WOL not supported\n");
810 return -EOPNOTSUPP;
811 }
812
813 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
814 cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
815 if (!cmd.va)
816 return -ENOMEM;
817
818 eth_zero_addr(mac);
819
820 enable = wol->wolopts & WAKE_MAGIC;
821 if (enable)
822 ether_addr_copy(mac, adapter->netdev->dev_addr);
823
824 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
825 if (status) {
826 dev_err(dev, "Could not set Wake-on-lan mac address\n");
827 status = be_cmd_status(status);
828 goto err;
829 }
830
831 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
832 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
833
834 adapter->wol_en = enable;
835
836 err:
837 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
838 return status;
839 }
840
841 static int be_test_ddr_dma(struct be_adapter *adapter)
842 {
843 int ret, i;
844 struct be_dma_mem ddrdma_cmd;
845 static const u64 pattern[2] = {
846 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
847 };
848
849 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
850 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
851 ddrdma_cmd.size, &ddrdma_cmd.dma,
852 GFP_KERNEL);
853 if (!ddrdma_cmd.va)
854 return -ENOMEM;
855
856 for (i = 0; i < 2; i++) {
857 ret = be_cmd_ddr_dma_test(adapter, pattern[i],
858 4096, &ddrdma_cmd);
859 if (ret != 0)
860 goto err;
861 }
862
863 err:
864 dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
865 ddrdma_cmd.dma);
866 return be_cmd_status(ret);
867 }
868
869 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
870 u64 *status)
871 {
872 int ret;
873
874 ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
875 loopback_type, 1);
876 if (ret)
877 return ret;
878
879 *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
880 loopback_type, 1500, 2, 0xabc);
881
882 ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
883 BE_NO_LOOPBACK, 1);
884 if (ret)
885 return ret;
886
887 return *status;
888 }
889
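/* Runs "ethtool -t <iface>". data[0..4] follow the et_self_tests[] order:
 * MAC loopback, PHY loopback, external loopback, DDR DMA and link test.
 * The loopback tests run only in offline mode, the external loopback only when
 * ETH_TEST_FL_EXTERNAL_LB is also set, and the DDR DMA test only on non-Lancer
 * chips.
 */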
890 static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
891 u64 *data)
892 {
893 struct be_adapter *adapter = netdev_priv(netdev);
894 int status, cnt;
895 u8 link_status = 0;
896
897 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
898 dev_err(&adapter->pdev->dev, "Self test not supported\n");
899 test->flags |= ETH_TEST_FL_FAILED;
900 return;
901 }
902
903 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
904
905 /* check link status before offline tests */
906 link_status = netif_carrier_ok(netdev);
907
908 if (test->flags & ETH_TEST_FL_OFFLINE) {
909 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
910 test->flags |= ETH_TEST_FL_FAILED;
911
912 if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
913 test->flags |= ETH_TEST_FL_FAILED;
914
915 if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
916 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
917 &data[2]) != 0)
918 test->flags |= ETH_TEST_FL_FAILED;
919 test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
920 }
921 }
922
923 if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
924 data[3] = 1;
925 test->flags |= ETH_TEST_FL_FAILED;
926 }
927
928 /* link status was down prior to test */
929 if (!link_status) {
930 test->flags |= ETH_TEST_FL_FAILED;
931 data[4] = 1;
932 return;
933 }
934
935 for (cnt = 10; cnt; cnt--) {
936 status = be_cmd_link_status_query(adapter, NULL, &link_status,
937 0);
938 if (status) {
939 test->flags |= ETH_TEST_FL_FAILED;
940 data[4] = -1;
941 break;
942 }
943
944 if (link_status)
945 break;
946
947 msleep_interruptible(500);
948 }
949 }
950
951 static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
952 {
953 struct be_adapter *adapter = netdev_priv(netdev);
954
955 return be_load_fw(adapter, efl->data);
956 }
957
958 static int
959 be_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
960 {
961 struct be_adapter *adapter = netdev_priv(netdev);
962
963 if (!check_privilege(adapter, MAX_PRIVILEGES))
964 return -EOPNOTSUPP;
965
966 dump->len = be_get_dump_len(adapter);
967 dump->version = 1;
968 dump->flag = 0x1; /* FW dump is enabled */
969 return 0;
970 }
971
972 static int
973 be_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
974 void *buf)
975 {
976 struct be_adapter *adapter = netdev_priv(netdev);
977 int status;
978
979 if (!check_privilege(adapter, MAX_PRIVILEGES))
980 return -EOPNOTSUPP;
981
982 status = be_read_dump_data(adapter, dump->len, buf);
983 return be_cmd_status(status);
984 }
985
986 static int be_get_eeprom_len(struct net_device *netdev)
987 {
988 struct be_adapter *adapter = netdev_priv(netdev);
989
990 if (!check_privilege(adapter, MAX_PRIVILEGES))
991 return 0;
992
993 if (lancer_chip(adapter)) {
994 if (be_physfn(adapter))
995 return lancer_cmd_get_file_len(adapter,
996 LANCER_VPD_PF_FILE);
997 else
998 return lancer_cmd_get_file_len(adapter,
999 LANCER_VPD_VF_FILE);
1000 } else {
1001 return BE_READ_SEEPROM_LEN;
1002 }
1003 }
1004
1005 static int be_read_eeprom(struct net_device *netdev,
1006 struct ethtool_eeprom *eeprom, uint8_t *data)
1007 {
1008 struct be_adapter *adapter = netdev_priv(netdev);
1009 struct be_dma_mem eeprom_cmd;
1010 struct be_cmd_resp_seeprom_read *resp;
1011 int status;
1012
1013 if (!eeprom->len)
1014 return -EINVAL;
1015
1016 if (lancer_chip(adapter)) {
1017 if (be_physfn(adapter))
1018 return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
1019 eeprom->len, data);
1020 else
1021 return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
1022 eeprom->len, data);
1023 }
1024
1025 eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
1026
1027 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
1028 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
1029 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
1030 eeprom_cmd.size, &eeprom_cmd.dma,
1031 GFP_KERNEL);
1032
1033 if (!eeprom_cmd.va)
1034 return -ENOMEM;
1035
1036 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
1037
1038 if (!status) {
1039 resp = eeprom_cmd.va;
1040 memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
1041 }
1042 dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
1043 eeprom_cmd.dma);
1044
1045 return be_cmd_status(status);
1046 }
1047
1048 static u32 be_get_msg_level(struct net_device *netdev)
1049 {
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051
1052 return adapter->msg_enable;
1053 }
1054
1055 static void be_set_msg_level(struct net_device *netdev, u32 level)
1056 {
1057 struct be_adapter *adapter = netdev_priv(netdev);
1058
1059 if (adapter->msg_enable == level)
1060 return;
1061
1062 if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW))
1063 if (BEx_chip(adapter))
1064 be_cmd_set_fw_log_level(adapter, level & NETIF_MSG_HW ?
1065 FW_LOG_LEVEL_DEFAULT :
1066 FW_LOG_LEVEL_FATAL);
1067 adapter->msg_enable = level;
1068 }
1069
1070 static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
1071 {
1072 u64 data = 0;
1073
1074 switch (flow_type) {
1075 case TCP_V4_FLOW:
1076 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
1077 data |= RXH_IP_DST | RXH_IP_SRC;
1078 if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
1079 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1080 break;
1081 case UDP_V4_FLOW:
1082 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
1083 data |= RXH_IP_DST | RXH_IP_SRC;
1084 if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
1085 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1086 break;
1087 case TCP_V6_FLOW:
1088 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
1089 data |= RXH_IP_DST | RXH_IP_SRC;
1090 if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
1091 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1092 break;
1093 case UDP_V6_FLOW:
1094 if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
1095 data |= RXH_IP_DST | RXH_IP_SRC;
1096 if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
1097 data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
1098 break;
1099 }
1100
1101 return data;
1102 }
1103
1104 static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1105 u32 *rule_locs)
1106 {
1107 struct be_adapter *adapter = netdev_priv(netdev);
1108
1109 if (!be_multi_rxq(adapter)) {
1110 dev_info(&adapter->pdev->dev,
1111 "ethtool::get_rxnfc: RX flow hashing is disabled\n");
1112 return -EINVAL;
1113 }
1114
1115 switch (cmd->cmd) {
1116 case ETHTOOL_GRXFH:
1117 cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
1118 break;
1119 case ETHTOOL_GRXRINGS:
1120 cmd->data = adapter->num_rx_qs;
1121 break;
1122 default:
1123 return -EINVAL;
1124 }
1125
1126 return 0;
1127 }
1128
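/* Backs "ethtool -N <iface> rx-flow-hash ...", e.g. something like
 *   ethtool -N <iface> rx-flow-hash udp4 sdfn
 * requests L3+L4 hashing for UDPv4 flows. Only the L3-only and L3+L4
 * combinations are accepted.
 */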
1129 static int be_set_rss_hash_opts(struct be_adapter *adapter,
1130 struct ethtool_rxnfc *cmd)
1131 {
1132 int status;
1133 u32 rss_flags = adapter->rss_info.rss_flags;
1134
1135 if (cmd->data != L3_RSS_FLAGS &&
1136 cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
1137 return -EINVAL;
1138
1139 switch (cmd->flow_type) {
1140 case TCP_V4_FLOW:
1141 if (cmd->data == L3_RSS_FLAGS)
1142 rss_flags &= ~RSS_ENABLE_TCP_IPV4;
1143 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1144 rss_flags |= RSS_ENABLE_IPV4 |
1145 RSS_ENABLE_TCP_IPV4;
1146 break;
1147 case TCP_V6_FLOW:
1148 if (cmd->data == L3_RSS_FLAGS)
1149 rss_flags &= ~RSS_ENABLE_TCP_IPV6;
1150 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1151 rss_flags |= RSS_ENABLE_IPV6 |
1152 RSS_ENABLE_TCP_IPV6;
1153 break;
1154 case UDP_V4_FLOW:
1155 if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1156 BEx_chip(adapter))
1157 return -EINVAL;
1158
1159 if (cmd->data == L3_RSS_FLAGS)
1160 rss_flags &= ~RSS_ENABLE_UDP_IPV4;
1161 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1162 rss_flags |= RSS_ENABLE_IPV4 |
1163 RSS_ENABLE_UDP_IPV4;
1164 break;
1165 case UDP_V6_FLOW:
1166 if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) &&
1167 BEx_chip(adapter))
1168 return -EINVAL;
1169
1170 if (cmd->data == L3_RSS_FLAGS)
1171 rss_flags &= ~RSS_ENABLE_UDP_IPV6;
1172 else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS))
1173 rss_flags |= RSS_ENABLE_IPV6 |
1174 RSS_ENABLE_UDP_IPV6;
1175 break;
1176 default:
1177 return -EINVAL;
1178 }
1179
1180 if (rss_flags == adapter->rss_info.rss_flags)
1181 return 0;
1182
1183 status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
1184 rss_flags, RSS_INDIR_TABLE_LEN,
1185 adapter->rss_info.rss_hkey);
1186 if (!status)
1187 adapter->rss_info.rss_flags = rss_flags;
1188
1189 return be_cmd_status(status);
1190 }
1191
1192 static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1193 {
1194 struct be_adapter *adapter = netdev_priv(netdev);
1195 int status = 0;
1196
1197 if (!be_multi_rxq(adapter)) {
1198 dev_err(&adapter->pdev->dev,
1199 "ethtool::set_rxnfc: RX flow hashing is disabled\n");
1200 return -EINVAL;
1201 }
1202
1203 switch (cmd->cmd) {
1204 case ETHTOOL_SRXFH:
1205 status = be_set_rss_hash_opts(adapter, cmd);
1206 break;
1207 default:
1208 return -EINVAL;
1209 }
1210
1211 return status;
1212 }
1213
1214 static void be_get_channels(struct net_device *netdev,
1215 struct ethtool_channels *ch)
1216 {
1217 struct be_adapter *adapter = netdev_priv(netdev);
1218 u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1);
1219
1220 /* num_tx_qs is always the same as the number of irqs used for TX */
1221 ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs);
1222 ch->rx_count = num_rx_irqs - ch->combined_count;
1223 ch->tx_count = adapter->num_tx_qs - ch->combined_count;
1224
1225 ch->max_combined = be_max_qp_irqs(adapter);
1226 /* The user must create at least one combined channel */
1227 ch->max_rx = be_max_rx_irqs(adapter) - 1;
1228 ch->max_tx = be_max_tx_irqs(adapter) - 1;
1229 }
1230
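/* Backs "ethtool -L", e.g. "ethtool -L <iface> combined 4"; the accepted
 * channel combinations are described in the comment below.
 */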
1231 static int be_set_channels(struct net_device *netdev,
1232 struct ethtool_channels *ch)
1233 {
1234 struct be_adapter *adapter = netdev_priv(netdev);
1235 int status;
1236
1237 /* we support either only combined channels or a combination of
1238 * combined and either RX-only or TX-only channels.
1239 */
1240 if (ch->other_count || !ch->combined_count ||
1241 (ch->rx_count && ch->tx_count))
1242 return -EINVAL;
1243
1244 if (ch->combined_count > be_max_qp_irqs(adapter) ||
1245 (ch->rx_count &&
1246 (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) ||
1247 (ch->tx_count &&
1248 (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter)))
1249 return -EINVAL;
1250
1251 adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count;
1252 adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count;
1253
1254 status = be_update_queues(adapter);
1255 return be_cmd_status(status);
1256 }
1257
1258 static u32 be_get_rxfh_indir_size(struct net_device *netdev)
1259 {
1260 return RSS_INDIR_TABLE_LEN;
1261 }
1262
1263 static u32 be_get_rxfh_key_size(struct net_device *netdev)
1264 {
1265 return RSS_HASH_KEY_LEN;
1266 }
1267
1268 static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
1269 u8 *hfunc)
1270 {
1271 struct be_adapter *adapter = netdev_priv(netdev);
1272 int i;
1273 struct rss_info *rss = &adapter->rss_info;
1274
1275 if (indir) {
1276 for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
1277 indir[i] = rss->rss_queue[i];
1278 }
1279
1280 if (hkey)
1281 memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
1282
1283 if (hfunc)
1284 *hfunc = ETH_RSS_HASH_TOP;
1285
1286 return 0;
1287 }
1288
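/* Backs "ethtool -X" (e.g. "ethtool -X <iface> equal 4" or "... hkey <key>").
 * The user-supplied indirection entries are RX queue indices; they are
 * translated to the per-queue rss_id values expected by the FW before calling
 * be_cmd_rss_config().
 */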
1289 static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
1290 const u8 *hkey, const u8 hfunc)
1291 {
1292 int rc = 0, i, j;
1293 struct be_adapter *adapter = netdev_priv(netdev);
1294 u8 rsstable[RSS_INDIR_TABLE_LEN];
1295
1296 /* We do not allow change in unsupported parameters */
1297 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1298 return -EOPNOTSUPP;
1299
1300 if (indir) {
1301 struct be_rx_obj *rxo;
1302
1303 for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
1304 j = indir[i];
1305 rxo = &adapter->rx_obj[j];
1306 rsstable[i] = rxo->rss_id;
1307 adapter->rss_info.rss_queue[i] = j;
1308 }
1309 } else {
1310 memcpy(rsstable, adapter->rss_info.rsstable,
1311 RSS_INDIR_TABLE_LEN);
1312 }
1313
1314 if (!hkey)
1315 hkey = adapter->rss_info.rss_hkey;
1316
1317 rc = be_cmd_rss_config(adapter, rsstable,
1318 adapter->rss_info.rss_flags,
1319 RSS_INDIR_TABLE_LEN, hkey);
1320 if (rc) {
1321 adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
1322 return -EIO;
1323 }
1324 memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
1325 memcpy(adapter->rss_info.rsstable, rsstable,
1326 RSS_INDIR_TABLE_LEN);
1327 return 0;
1328 }
1329
1330 static int be_get_module_info(struct net_device *netdev,
1331 struct ethtool_modinfo *modinfo)
1332 {
1333 struct be_adapter *adapter = netdev_priv(netdev);
1334 u8 page_data[PAGE_DATA_LEN];
1335 int status;
1336
1337 if (!check_privilege(adapter, MAX_PRIVILEGES))
1338 return -EOPNOTSUPP;
1339
1340 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
1341 0, PAGE_DATA_LEN, page_data);
1342 if (!status) {
1343 if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
1344 modinfo->type = ETH_MODULE_SFF_8079;
1345 modinfo->eeprom_len = PAGE_DATA_LEN;
1346 } else {
1347 modinfo->type = ETH_MODULE_SFF_8472;
1348 modinfo->eeprom_len = 2 * PAGE_DATA_LEN;
1349 }
1350 }
1351 return be_cmd_status(status);
1352 }
1353
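/* Backs "ethtool -m <iface>": reads the lower SFP page (A0) first and, for
 * SFF-8472 compliant modules, continues into page A2 for the rest of the
 * requested range.
 */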
1354 static int be_get_module_eeprom(struct net_device *netdev,
1355 struct ethtool_eeprom *eeprom, u8 *data)
1356 {
1357 struct be_adapter *adapter = netdev_priv(netdev);
1358 int status;
1359 u32 begin, end;
1360
1361 if (!check_privilege(adapter, MAX_PRIVILEGES))
1362 return -EOPNOTSUPP;
1363
1364 begin = eeprom->offset;
1365 end = eeprom->offset + eeprom->len;
1366
1367 if (begin < PAGE_DATA_LEN) {
1368 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
1369 min_t(u32, end, PAGE_DATA_LEN) - begin,
1370 data);
1371 if (status)
1372 goto err;
1373
1374 data += PAGE_DATA_LEN - begin;
1375 begin = PAGE_DATA_LEN;
1376 }
1377
1378 if (end > PAGE_DATA_LEN) {
1379 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
1380 begin - PAGE_DATA_LEN,
1381 end - begin, data);
1382 if (status)
1383 goto err;
1384 }
1385 err:
1386 return be_cmd_status(status);
1387 }
1388
1389 static u32 be_get_priv_flags(struct net_device *netdev)
1390 {
1391 struct be_adapter *adapter = netdev_priv(netdev);
1392
1393 return adapter->priv_flags;
1394 }
1395
1396 static int be_set_priv_flags(struct net_device *netdev, u32 flags)
1397 {
1398 struct be_adapter *adapter = netdev_priv(netdev);
1399 bool tpe_old = !!(adapter->priv_flags & BE_DISABLE_TPE_RECOVERY);
1400 bool tpe_new = !!(flags & BE_DISABLE_TPE_RECOVERY);
1401
1402 if (tpe_old != tpe_new) {
1403 if (tpe_new) {
1404 adapter->priv_flags |= BE_DISABLE_TPE_RECOVERY;
1405 dev_info(&adapter->pdev->dev,
1406 "HW error recovery is disabled\n");
1407 } else {
1408 adapter->priv_flags &= ~BE_DISABLE_TPE_RECOVERY;
1409 dev_info(&adapter->pdev->dev,
1410 "HW error recovery is enabled\n");
1411 }
1412 }
1413
1414 return 0;
1415 }
1416
1417 const struct ethtool_ops be_ethtool_ops = {
1418 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1419 ETHTOOL_COALESCE_USE_ADAPTIVE |
1420 ETHTOOL_COALESCE_USECS_LOW_HIGH,
1421 .get_drvinfo = be_get_drvinfo,
1422 .get_wol = be_get_wol,
1423 .set_wol = be_set_wol,
1424 .get_link = ethtool_op_get_link,
1425 .get_eeprom_len = be_get_eeprom_len,
1426 .get_eeprom = be_read_eeprom,
1427 .get_coalesce = be_get_coalesce,
1428 .set_coalesce = be_set_coalesce,
1429 .get_ringparam = be_get_ringparam,
1430 .get_pauseparam = be_get_pauseparam,
1431 .set_pauseparam = be_set_pauseparam,
1432 .set_priv_flags = be_set_priv_flags,
1433 .get_priv_flags = be_get_priv_flags,
1434 .get_strings = be_get_stat_strings,
1435 .set_phys_id = be_set_phys_id,
1436 .set_dump = be_set_dump,
1437 .get_msglevel = be_get_msg_level,
1438 .set_msglevel = be_set_msg_level,
1439 .get_sset_count = be_get_sset_count,
1440 .get_ethtool_stats = be_get_ethtool_stats,
1441 .flash_device = be_do_flash,
1442 .self_test = be_self_test,
1443 .get_rxnfc = be_get_rxnfc,
1444 .set_rxnfc = be_set_rxnfc,
1445 .get_rxfh_indir_size = be_get_rxfh_indir_size,
1446 .get_rxfh_key_size = be_get_rxfh_key_size,
1447 .get_rxfh = be_get_rxfh,
1448 .set_rxfh = be_set_rxfh,
1449 .get_dump_flag = be_get_dump_flag,
1450 .get_dump_data = be_get_dump_data,
1451 .get_channels = be_get_channels,
1452 .set_channels = be_set_channels,
1453 .get_module_info = be_get_module_info,
1454 .get_module_eeprom = be_get_module_eeprom,
1455 .get_link_ksettings = be_get_link_ksettings,
1456 };
1457