/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"

#define BNXT_NVM_ERR_MSG(dev, extack, msg)			\
	do {							\
		if (extack)					\
			NL_SET_ERR_MSG_MOD(extack, msg);	\
		netdev_err(dev, "%s\n", msg);			\
	} while (0)

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

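/* Interrupt coalescing: the driver keeps separate RX and TX parameter sets
 * (struct bnxt_coal) and converts between ethtool's frame counts and the
 * hardware's buffer units via bufs_per_record.  A minimal usage sketch,
 * with a hypothetical interface name:
 *
 *	ethtool -c eth0		# query the values reported below
 */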
static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_rx = true;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
	if (hw_coal->flags &
	    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
		kernel_coal->use_cqe_mode_tx = true;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}

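/* Apply new coalescing parameters.  Changing the statistics block interval
 * requires a full close/open cycle; other changes only re-send the
 * coalescing HWRM command.  Example commands (hypothetical interface name):
 *
 *	ethtool -C eth0 rx-usecs 50 rx-frames 32
 *	ethtool -C eth0 adaptive-rx on
 *	ethtool -C eth0 stats-block-usecs 1000000
 */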
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
	    !(bp->coal_cap.cmpl_params &
	      RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
		return -EOPNOTSUPP;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_rx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
	hw_coal->flags &=
		~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (kernel_coal->use_cqe_mode_tx)
		hw_coal->flags |=
			RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
		if (update_stats) {
			bnxt_close_nic(bp, true, false);
			rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

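/* Per-ring counter names as exposed by "ethtool -S".  Each name is emitted
 * once per ring, prefixed with the ring index, e.g. "[0]: rx_ucast_packets"
 * (see bnxt_get_strings() below).
 */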
static const char * const bnxt_ring_rx_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_errors",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
};

static const char * const bnxt_ring_tx_stats_str[] = {
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_errors",
	"tx_discards",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
	"rx_tpa_events",
};

static const char * const bnxt_rx_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_resets",
	"rx_buf_errors",
};

static const char * const bnxt_cmn_sw_stats_str[] = {
	"missed_irqs",
};

#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),	\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)			\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),	\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES	\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)

#define BNXT_TX_STATS_EXT_COS_ENTRIES	\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),	\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),		\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
	RX_NETPOLL_DISCARDS,
};

static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

#define NUM_RING_RX_SW_STATS	ARRAY_SIZE(bnxt_rx_sw_stats_str)
#define NUM_RING_CMN_SW_STATS	ARRAY_SIZE(bnxt_cmn_sw_stats_str)
#define NUM_RING_RX_HW_STATS	ARRAY_SIZE(bnxt_ring_rx_stats_str)
#define NUM_RING_TX_HW_STATS	ARRAY_SIZE(bnxt_ring_tx_stats_str)

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
	BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))

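/* Total "ethtool -S" entry count.  Per-ring counters scale with the ring
 * counts; port and extended port stats are added when the firmware reports
 * them.  A rough worked example (hypothetical configuration): with 8
 * combined rings, no XDP, TPA off and no port stats, the count is
 * (8 + 3) * 8 RX + 8 * 8 TX + 1 * 8 common + 9 ring-error stats = 169.
 */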
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
	if (BNXT_SUPPORTS_TPA(bp)) {
		if (bp->max_tpa_v2) {
			if (BNXT_CHIP_P5(bp))
				return BNXT_NUM_TPA_RING_STATS_P5;
			return BNXT_NUM_TPA_RING_STATS_P7;
		}
		return BNXT_NUM_TPA_RING_STATS;
	}
	return 0;
}

static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
	int rx, tx, cmn;

	rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
	     bnxt_get_num_tpa_ring_stats(bp);
	tx = NUM_RING_TX_HW_STATS;
	cmn = NUM_RING_CMN_SW_STATS;
	return rx * bp->rx_nr_rings +
	       tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) +
	       cmn * bp->cp_nr_rings;
}

static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = bnxt_get_num_ring_stats(bp);
	int len;

	num_stats += BNXT_NUM_RING_ERR_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		len = min_t(int, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		num_stats += len;
		len = min_t(int, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		num_stats += len;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	return num_stats;
}

static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

static bool is_rx_ring(struct bnxt *bp, int ring_num)
{
	return ring_num < bp->rx_nr_rings;
}

static bool is_tx_ring(struct bnxt *bp, int ring_num)
{
	int tx_base = 0;

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		tx_base = bp->rx_nr_rings;

	if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
		return true;
	return false;
}

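/* Fill the "ethtool -S" value buffer.  Ordering must match
 * bnxt_get_strings() exactly: per-ring HW stats, TPA stats, per-ring SW
 * stats, ring error totals, then port and extended port stats.  Example
 * (hypothetical interface name):
 *
 *	ethtool -S eth0 | grep rx_ucast
 */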
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		u64 *sw_stats = cpr->stats.sw_stats;
		u64 *sw;
		int k;

		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
				buf[j] = sw_stats[k];
		}
		if (is_tx_ring(bp, i)) {
			k = NUM_RING_RX_HW_STATS;
			for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
			     j++, k++)
				buf[j] = sw_stats[k];
		}
		if (!tpa_stats || !is_rx_ring(bp, i))
			goto skip_tpa_ring_stats;

		k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
		for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
			   tpa_stats; j++, k++)
			buf[j] = sw_stats[k];

skip_tpa_ring_stats:
		sw = (u64 *)&cpr->sw_stats->rx;
		if (is_rx_ring(bp, i)) {
			for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
				buf[j] = sw[k];
		}

		sw = (u64 *)&cpr->sw_stats->cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];
	}

	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
			buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
		u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
		u32 len;

		len = min_t(u32, bp->fw_rx_stats_ext_size,
			    ARRAY_SIZE(bnxt_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(rx_port_stats_ext +
				   bnxt_port_stats_ext_arr[i].offset);
		}
		len = min_t(u32, bp->fw_tx_stats_ext_size,
			    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
		for (i = 0; i < len; i++, j++) {
			buf[j] = *(tx_port_stats_ext +
				   bnxt_tx_port_stats_ext_arr[i].offset);
		}
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(rx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos_idx[i];

				buf[j] = *(tx_port_stats_ext + n);
			}
		}
	}
}

static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_rx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			if (is_tx_ring(bp, i)) {
				num_str = NUM_RING_TX_HW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_ring_tx_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = bnxt_get_num_tpa_ring_stats(bp);
			if (!num_str || !is_rx_ring(bp, i))
				goto skip_tpa_stats;

			if (bp->max_tpa_v2)
				str = bnxt_ring_tpa2_stats_str;
			else
				str = bnxt_ring_tpa_stats_str;

			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			if (is_rx_ring(bp, i)) {
				num_str = NUM_RING_RX_SW_STATS;
				for (j = 0; j < num_str; j++) {
					sprintf(buf, "[%d]: %s", i,
						bnxt_rx_sw_stats_str[j]);
					buf += ETH_GSTRING_LEN;
				}
			}
			num_str = NUM_RING_CMN_SW_STATS;
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_cmn_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
			strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			u32 len;

			len = min_t(u32, bp->fw_rx_stats_ext_size,
				    ARRAY_SIZE(bnxt_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			len = min_t(u32, bp->fw_tx_stats_ext_size,
				    ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
			for (i = 0; i < len; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

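/* Report ring size limits and current sizes ("ethtool -g").  With
 * aggregation rings enabled, jumbo/TPA buffers land on a separate ring and
 * TCP data split is reported as enabled.  Example (hypothetical name):
 *
 *	ethtool -g eth0
 */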
static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
		ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	} else {
		ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
		ering->rx_jumbo_max_pending = 0;
		kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
	}
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;
}

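/* Change ring sizes ("ethtool -G").  The NIC is closed and reopened around
 * the update when the interface is running.  Example (hypothetical name):
 *
 *	ethtool -G eth0 rx 2048 tx 1024
 */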
static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}

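/* Report channel (ring) limits and counts ("ethtool -l").  The maximum
 * combined count is bounded by both RX and TX ring limits and, when the
 * resource manager is active, by the firmware's max_tx_sch_inputs.
 */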
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs, tx_grps;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (netif_running(dev) && BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = bp->num_tc;
	tx_grps = max(tcs, 1);
	if (bp->tx_nr_rings_xdp)
		tx_grps++;
	max_tx_rings /= tx_grps;
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else {
		if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			channel->rx_count = bp->rx_nr_rings;
			channel->tx_count = bp->tx_nr_rings_per_tc;
		}
	}
}

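/* Change channel counts ("ethtool -L").  Either combined channels or
 * separate rx/tx counts may be given, not both; XDP requires combined mode.
 * Example (hypothetical interface name):
 *
 *	ethtool -L eth0 combined 8
 */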
static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;
	int tx_cp;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = bp->num_tc;

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}

	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
	    netif_is_rxfh_configured(dev)) {
		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
		return -EINVAL;
	}

	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * before PF unload
			 */
		}
		bnxt_close_nic(bp, true, false);
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
	bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) :
			       tx_cp + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VFs
			 * to re-enable
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

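/* The filter tables below are walked under rcu_read_lock(); entries with
 * zeroed flags or with BNXT_FLTR_FW_DELETED set are skipped, since those
 * are presumably in the process of being torn down.
 */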
static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[],
				     int tbl_size, u32 *ids, u32 start,
				     u32 id_cnt)
{
	int i, j = start;

	if (j >= id_cnt)
		return j;
	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (!fltr->flags ||
			    test_bit(BNXT_FLTR_FW_DELETED, &fltr->state))
				continue;
			ids[j++] = fltr->sw_id;
			if (j == id_cnt)
				return j;
		}
	}
	return j;
}

static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
						      struct hlist_head tbl[],
						      int tbl_size, u32 id)
{
	int i;

	for (i = 0; i < tbl_size; i++) {
		struct hlist_head *head;
		struct bnxt_filter_base *fltr;

		head = &tbl[i];
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->flags && fltr->sw_id == id)
				return fltr;
		}
	}
	return NULL;
}

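/* ETHTOOL_GRXCLSRLALL: collect the IDs of all L2 and ntuple classification
 * rules.  From userspace, "ethtool -n eth0" with no rule argument lists
 * them (hypothetical interface name).
 */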
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	u32 count;

	cmd->data = bp->ntp_fltr_count;
	rcu_read_lock();
	count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
					  cmd->rule_cnt);
	cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
						  BNXT_NTP_FLTR_HASH_SIZE,
						  rule_locs, count,
						  cmd->rule_cnt);
	rcu_read_unlock();

	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	int rc = -EINVAL;

	if (fs->location >= bp->max_fltr)
		return rc;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE,
					  fs->location);
	if (fltr_base) {
		struct ethhdr *h_ether = &fs->h_u.ether_spec;
		struct ethhdr *m_ether = &fs->m_u.ether_spec;
		struct bnxt_l2_filter *l2_fltr;
		struct bnxt_l2_key *l2_key;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		l2_key = &l2_fltr->l2_key;
		fs->flow_type = ETHER_FLOW;
		ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
		eth_broadcast_addr(m_ether->h_dest);
		if (l2_key->vlan) {
			struct ethtool_flow_ext *m_ext = &fs->m_ext;
			struct ethtool_flow_ext *h_ext = &fs->h_ext;

			fs->flow_type |= FLOW_EXT;
			m_ext->vlan_tci = htons(0xfff);
			h_ext->vlan_tci = htons(l2_key->vlan);
		}
		if (fltr_base->flags & BNXT_ACT_RING_DST)
			fs->ring_cookie = fltr_base->rxq;
		if (fltr_base->flags & BNXT_ACT_FUNC_DST)
			fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
					  ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
		rcu_read_unlock();
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE,
					  fs->location);
	if (!fltr_base) {
		rcu_read_unlock();
		return rc;
	}
	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);

	fkeys = &fltr->fkeys;
	fmasks = &fltr->fmasks;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_ICMP ||
		    fkeys->basic.ip_proto == IPPROTO_RAW) {
			fs->flow_type = IP_USER_FLOW;
			fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
			if (fkeys->basic.ip_proto == IPPROTO_ICMP)
				fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
			else
				fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW;
			fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V4_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V4_FLOW;
		} else {
			goto fltr_err;
		}

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
		if (fs->flow_type == TCP_V4_FLOW ||
		    fs->flow_type == UDP_V4_FLOW) {
			fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
		}
	} else {
		if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 ||
		    fkeys->basic.ip_proto == IPPROTO_RAW) {
			fs->flow_type = IPV6_USER_FLOW;
			if (fkeys->basic.ip_proto == IPPROTO_ICMPV6)
				fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
			else
				fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW;
			fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
		} else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
			fs->flow_type = TCP_V6_FLOW;
		} else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
			fs->flow_type = UDP_V6_FLOW;
		} else {
			goto fltr_err;
		}

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
			fmasks->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		*(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
			fmasks->addrs.v6addrs.dst;
		if (fs->flow_type == TCP_V6_FLOW ||
		    fs->flow_type == UDP_V6_FLOW) {
			fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
			fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
			fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
			fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
		}
	}

	if (fltr->base.flags & BNXT_ACT_DROP) {
		fs->ring_cookie = RX_CLS_FLOW_DISC;
	} else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
		fs->flow_type |= FLOW_RSS;
		cmd->rss_context = fltr->base.fw_vnic_id;
	} else {
		fs->ring_cookie = fltr->base.rxq;
	}
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}

static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
							u32 index)
{
	struct ethtool_rxfh_context *ctx;

	ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
	if (!ctx)
		return NULL;
	return ethtool_rxfh_context_priv(ctx);
}

static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
				     struct bnxt_vnic_info *vnic)
{
	int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);

	vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
	vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
					     vnic->rss_table_size,
					     &vnic->rss_table_dma_addr,
					     GFP_KERNEL);
	if (!vnic->rss_table)
		return -ENOMEM;

	vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
	vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	return 0;
}

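/* Insert an L2 (MAC + optional VLAN) steering rule.  Only an all-ones MAC
 * mask is accepted, and P5+ chips do not support this rule type.  Example
 * (hypothetical interface name and MAC):
 *
 *	ethtool -N eth0 flow-type ether dst 00:11:22:33:44:55 action 2
 */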
static int bnxt_add_l2_cls_rule(struct bnxt *bp,
				struct ethtool_rx_flow_spec *fs)
{
	u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	struct ethhdr *h_ether = &fs->h_u.ether_spec;
	struct ethhdr *m_ether = &fs->m_u.ether_spec;
	struct bnxt_l2_filter *fltr;
	struct bnxt_l2_key key;
	u16 vnic_id;
	u8 flags;
	int rc;

	if (BNXT_CHIP_P5_PLUS(bp))
		return -EOPNOTSUPP;

	if (!is_broadcast_ether_addr(m_ether->h_dest))
		return -EINVAL;
	ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
	key.vlan = 0;
	if (fs->flow_type & FLOW_EXT) {
		struct ethtool_flow_ext *m_ext = &fs->m_ext;
		struct ethtool_flow_ext *h_ext = &fs->h_ext;

		if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
			return -EINVAL;
		key.vlan = ntohs(h_ext->vlan_tci);
	}

	if (vf) {
		flags = BNXT_ACT_FUNC_DST;
		vnic_id = 0xffff;
		vf--;
	} else {
		flags = BNXT_ACT_RING_DST;
		vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
	}
	fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
	if (IS_ERR(fltr))
		return PTR_ERR(fltr);

	fltr->base.fw_vnic_id = vnic_id;
	fltr->base.rxq = ring;
	fltr->base.vf_idx = vf;
	rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
	if (rc)
		bnxt_del_l2_filter(bp, fltr);
	else
		fs->location = fltr->base.sw_id;
	return rc;
}

static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
					struct ethtool_usrip4_spec *ip_mask)
{
	if (ip_mask->l4_4_bytes || ip_mask->tos ||
	    ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
	    ip_mask->proto != BNXT_IP_PROTO_FULL_MASK ||
	    (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP))
		return false;
	return true;
}

static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
					struct ethtool_usrip6_spec *ip_mask)
{
	if (ip_mask->l4_4_bytes || ip_mask->tclass ||
	    ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK ||
	    (ip_spec->l4_proto != IPPROTO_RAW &&
	     ip_spec->l4_proto != IPPROTO_ICMPV6))
		return false;
	return true;
}

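/* Insert an ntuple rule.  Supported flow types are TCP/UDP over IPv4/IPv6
 * and the raw-IP "user" flows; a rule whose masks are all zero is
 * rejected.  Examples (hypothetical interface name and addresses):
 *
 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 3
 *	ethtool -N eth0 flow-type udp6 src-ip 2001:db8::1 action -1	# drop
 */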
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
				    struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_ntuple_filter *new_fltr, *fltr;
	u32 flow_type = fs->flow_type & 0xff;
	struct bnxt_l2_filter *l2_fltr;
	struct bnxt_flow_masks *fmasks;
	struct flow_keys *fkeys;
	u32 idx, ring;
	int rc;
	u8 vf;

	if (!bp->vnic_info)
		return -EAGAIN;

	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
		return -EOPNOTSUPP;

	if (flow_type == IP_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
						 &fs->m_u.usr_ip4_spec))
			return -EOPNOTSUPP;
	}

	if (flow_type == IPV6_USER_FLOW) {
		if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
						 &fs->m_u.usr_ip6_spec))
			return -EOPNOTSUPP;
	}

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
	if (!new_fltr)
		return -ENOMEM;

	l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
	atomic_inc(&l2_fltr->refcnt);
	new_fltr->l2_fltr = l2_fltr;
	fmasks = &new_fltr->fmasks;
	fkeys = &new_fltr->fkeys;

	rc = -EOPNOTSUPP;
	switch (flow_type) {
	case IP_USER_FLOW: {
		struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
		struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;

		fkeys->basic.ip_proto = ip_spec->proto;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		break;
	}
	case TCP_V4_FLOW:
	case UDP_V4_FLOW: {
		struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
		struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V4_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IP);
		fkeys->addrs.v4addrs.src = ip_spec->ip4src;
		fmasks->addrs.v4addrs.src = ip_mask->ip4src;
		fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
		fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	case IPV6_USER_FLOW: {
		struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
		struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;

		fkeys->basic.ip_proto = ip_spec->l4_proto;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);
		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		break;
	}
	case TCP_V6_FLOW:
	case UDP_V6_FLOW: {
		struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec;
		struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec;

		fkeys->basic.ip_proto = IPPROTO_TCP;
		if (flow_type == UDP_V6_FLOW)
			fkeys->basic.ip_proto = IPPROTO_UDP;
		fkeys->basic.n_proto = htons(ETH_P_IPV6);

		fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
		fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
		fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
		fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
		fkeys->ports.src = ip_spec->psrc;
		fmasks->ports.src = ip_mask->psrc;
		fkeys->ports.dst = ip_spec->pdst;
		fmasks->ports.dst = ip_mask->pdst;
		break;
	}
	default:
		rc = -EOPNOTSUPP;
		goto ntuple_err;
	}
	if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
		goto ntuple_err;

	idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
	rcu_read_lock();
	fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx);
	if (fltr) {
		rcu_read_unlock();
		rc = -EEXIST;
		goto ntuple_err;
	}
	rcu_read_unlock();

	new_fltr->base.flags = BNXT_ACT_NO_AGING;
	if (fs->flow_type & FLOW_RSS) {
		struct bnxt_rss_ctx *rss_ctx;

		new_fltr->base.fw_vnic_id = 0;
		new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
		rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
		if (rss_ctx) {
			new_fltr->base.fw_vnic_id = rss_ctx->index;
		} else {
			rc = -EINVAL;
			goto ntuple_err;
		}
	}
	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		new_fltr->base.flags |= BNXT_ACT_DROP;
	else
		new_fltr->base.rxq = ring;
	__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
	rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
	if (!rc) {
		rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr);
		if (rc) {
			bnxt_del_ntp_filter(bp, new_fltr);
			return rc;
		}
		fs->location = new_fltr->base.sw_id;
		return 0;
	}

ntuple_err:
	atomic_dec(&l2_fltr->refcnt);
	kfree(new_fltr);
	return rc;
}

static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	u32 ring, flow_type;
	int rc;
	u8 vf;

	if (!netif_running(bp->dev))
		return -EAGAIN;
	if (!(bp->flags & BNXT_FLAG_RFS))
		return -EPERM;
	if (fs->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	flow_type = fs->flow_type;
	if ((flow_type == IP_USER_FLOW ||
	     flow_type == IPV6_USER_FLOW) &&
	    !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
		return -EOPNOTSUPP;
	if (flow_type & FLOW_MAC_EXT)
		return -EINVAL;
	flow_type &= ~FLOW_EXT;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
		return bnxt_add_ntuple_cls_rule(bp, cmd);

	ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
	vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
	if (BNXT_VF(bp) && vf)
		return -EINVAL;
	if (BNXT_PF(bp) && vf > bp->pf.active_vfs)
		return -EINVAL;
	if (!vf && ring >= bp->rx_nr_rings)
		return -EINVAL;

	if (flow_type == ETHER_FLOW)
		rc = bnxt_add_l2_cls_rule(bp, fs);
	else
		rc = bnxt_add_ntuple_cls_rule(bp, cmd);
	return rc;
}

static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct bnxt_filter_base *fltr_base;
	struct bnxt_ntuple_filter *fltr;
	u32 id = fs->location;

	rcu_read_lock();
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, id);
	if (fltr_base) {
		struct bnxt_l2_filter *l2_fltr;

		l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
		rcu_read_unlock();
		bnxt_hwrm_l2_filter_free(bp, l2_fltr);
		bnxt_del_l2_filter(bp, l2_fltr);
		return 0;
	}
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
					  BNXT_NTP_FLTR_HASH_SIZE, id);
	if (!fltr_base) {
		rcu_read_unlock();
		return -ENOENT;
	}

	fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
	if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();
	bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr);
	bnxt_del_ntp_filter(bp, fltr);
	return 0;
}

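/* RSS hash field configuration.  The helpers below translate between the
 * firmware's VNIC_RSS_CFG_REQ_HASH_TYPE_* bits and ethtool's RXH_* flags.
 * Query example (hypothetical interface name):
 *
 *	ethtool -n eth0 rx-flow-hash tcp4
 */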
static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V4_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case AH_ESP_V6_FLOW:
		if (bp->rss_hash_cfg &
		    (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
		     VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)

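/* Set the hashed fields for a flow type ("ethtool -N ... rx-flow-hash").
 * Only 4-tuple (sdfn: IP src/dst plus L4 ports), 2-tuple (sd) or none are
 * accepted, matching the RXH_4TUPLE/RXH_2TUPLE masks above.  Example
 * (hypothetical interface name):
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 */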
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (cmd->flow_type == AH_ESP_V4_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
	} else if (cmd->flow_type == AH_ESP_V6_FLOW) {
		if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
				   !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
			return -EINVAL;
		rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
				  VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
					VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
		bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}

static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->rx_nr_rings;
		break;

	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->ntp_fltr_count;
		cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
		break;

	case ETHTOOL_GRXCLSRLALL:
		rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
		break;

	case ETHTOOL_GRXCLSRULE:
		rc = bnxt_grxclsrule(bp, cmd);
		break;

	case ETHTOOL_GRXFH:
		rc = bnxt_grxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}

	return rc;
}

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		rc = bnxt_srxfh(bp, cmd);
		break;

	case ETHTOOL_SRXCLSRLINS:
		rc = bnxt_srxclsrlins(bp, cmd);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		rc = bnxt_srxclsrldel(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

1787 u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1788 {
1789 struct bnxt *bp = netdev_priv(dev);
1790
1791 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
1792 return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
1793 BNXT_RSS_TABLE_ENTRIES_P5;
1794 return HW_HASH_INDEX_SIZE;
1795 }
1796
1797 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1798 {
1799 return HW_HASH_KEY_SIZE;
1800 }
1801
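/* Report the RSS hash function, indirection table and hash key, either
 * for the default VNIC or, when rxfh->rss_context is non-zero, for an
 * additional RSS context previously created through the ethtool rxfh
 * context API (e.g. "ethtool -X eth0 context new", assumed CLI form).
 */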
1802 static int bnxt_get_rxfh(struct net_device *dev,
1803 struct ethtool_rxfh_param *rxfh)
1804 {
1805 struct bnxt_rss_ctx *rss_ctx = NULL;
1806 struct bnxt *bp = netdev_priv(dev);
1807 u32 *indir_tbl = bp->rss_indir_tbl;
1808 struct bnxt_vnic_info *vnic;
1809 u32 i, tbl_size;
1810
1811 rxfh->hfunc = ETH_RSS_HASH_TOP;
1812
1813 if (!bp->vnic_info)
1814 return 0;
1815
1816 vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
1817 if (rxfh->rss_context) {
1818 struct ethtool_rxfh_context *ctx;
1819
1820 ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
1821 if (!ctx)
1822 return -EINVAL;
1823 indir_tbl = ethtool_rxfh_context_indir(ctx);
1824 rss_ctx = ethtool_rxfh_context_priv(ctx);
1825 vnic = &rss_ctx->vnic;
1826 }
1827
1828 if (rxfh->indir && indir_tbl) {
1829 tbl_size = bnxt_get_rxfh_indir_size(dev);
1830 for (i = 0; i < tbl_size; i++)
1831 rxfh->indir[i] = indir_tbl[i];
1832 }
1833
1834 if (rxfh->key && vnic->rss_hash_key)
1835 memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1836
1837 return 0;
1838 }
1839
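/* Apply a new hash key and/or indirection table.  A NULL rss_ctx means
 * the default RSS context: the key and table are stored in the bnxt
 * device itself.  Unused trailing indirection table entries are zeroed
 * so stale values cannot leak when the table shrinks.
 */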
1840 static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
1841 struct bnxt_rss_ctx *rss_ctx,
1842 const struct ethtool_rxfh_param *rxfh)
1843 {
1844 if (rxfh->key) {
1845 if (rss_ctx) {
1846 memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
1847 HW_HASH_KEY_SIZE);
1848 } else {
1849 memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
1850 bp->rss_hash_key_updated = true;
1851 }
1852 }
1853 if (rxfh->indir) {
1854 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
1855 u32 *indir_tbl = bp->rss_indir_tbl;
1856
1857 if (rss_ctx)
1858 indir_tbl = ethtool_rxfh_context_indir(ctx);
1859 for (i = 0; i < tbl_size; i++)
1860 indir_tbl[i] = rxfh->indir[i];
1861 pad = bp->rss_indir_tbl_entries - tbl_size;
1862 if (pad)
1863 memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
1864 }
1865 }
1866
1867 static int bnxt_rxfh_context_check(struct bnxt *bp,
1868 const struct ethtool_rxfh_param *rxfh,
1869 struct netlink_ext_ack *extack)
1870 {
1871 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
1872 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
1873 return -EOPNOTSUPP;
1874 }
1875
1876 if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
1877 NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
1878 return -EOPNOTSUPP;
1879 }
1880
1881 if (!netif_running(bp->dev)) {
1882 NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
1883 return -EAGAIN;
1884 }
1885
1886 return 0;
1887 }
1888
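/* Create an additional RSS context: validate capabilities and limits,
 * carve out a VNIC with its own RSS table, seed it with the default
 * indirection table and the current hash key, then apply any key/table
 * overrides from @rxfh before programming the hardware.  On any failure
 * the partially constructed context is torn down again.
 */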
1889 static int bnxt_create_rxfh_context(struct net_device *dev,
1890 struct ethtool_rxfh_context *ctx,
1891 const struct ethtool_rxfh_param *rxfh,
1892 struct netlink_ext_ack *extack)
1893 {
1894 struct bnxt *bp = netdev_priv(dev);
1895 struct bnxt_rss_ctx *rss_ctx;
1896 struct bnxt_vnic_info *vnic;
1897 int rc;
1898
1899 rc = bnxt_rxfh_context_check(bp, rxfh, extack);
1900 if (rc)
1901 return rc;
1902
1903 if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
1904 NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
1905 BNXT_MAX_ETH_RSS_CTX);
1906 return -EINVAL;
1907 }
1908
1909 if (!bnxt_rfs_capable(bp, true)) {
1910 NL_SET_ERR_MSG_MOD(extack, "Out hardware resources");
1911 return -ENOMEM;
1912 }
1913
1914 rss_ctx = ethtool_rxfh_context_priv(ctx);
1915
1916 bp->num_rss_ctx++;
1917
1918 vnic = &rss_ctx->vnic;
1919 vnic->rss_ctx = ctx;
1920 vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
1921 vnic->vnic_id = BNXT_VNIC_ID_INVALID;
1922 rc = bnxt_alloc_vnic_rss_table(bp, vnic);
1923 if (rc)
1924 goto out;
1925
1926 /* Populate defaults in the context */
1927 bnxt_set_dflt_rss_indir_tbl(bp, ctx);
1928 ctx->hfunc = ETH_RSS_HASH_TOP;
1929 memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
1930 memcpy(ethtool_rxfh_context_key(ctx),
1931 bp->rss_hash_key, HW_HASH_KEY_SIZE);
1932
1933 rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
1934 if (rc) {
1935 NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
1936 goto out;
1937 }
1938
1939 rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
1940 if (rc) {
1941 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
1942 goto out;
1943 }
1944 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
1945
1946 rc = __bnxt_setup_vnic_p5(bp, vnic);
1947 if (rc) {
1948 NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
1949 goto out;
1950 }
1951
1952 rss_ctx->index = rxfh->rss_context;
1953 return 0;
1954 out:
1955 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
1956 return rc;
1957 }
1958
1959 static int bnxt_modify_rxfh_context(struct net_device *dev,
1960 struct ethtool_rxfh_context *ctx,
1961 const struct ethtool_rxfh_param *rxfh,
1962 struct netlink_ext_ack *extack)
1963 {
1964 struct bnxt *bp = netdev_priv(dev);
1965 struct bnxt_rss_ctx *rss_ctx;
1966 int rc;
1967
1968 rc = bnxt_rxfh_context_check(bp, rxfh, extack);
1969 if (rc)
1970 return rc;
1971
1972 rss_ctx = ethtool_rxfh_context_priv(ctx);
1973
1974 bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
1975
1976 return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
1977 }
1978
1979 static int bnxt_remove_rxfh_context(struct net_device *dev,
1980 struct ethtool_rxfh_context *ctx,
1981 u32 rss_context,
1982 struct netlink_ext_ack *extack)
1983 {
1984 struct bnxt *bp = netdev_priv(dev);
1985 struct bnxt_rss_ctx *rss_ctx;
1986
1987 rss_ctx = ethtool_rxfh_context_priv(ctx);
1988
1989 bnxt_del_one_rss_ctx(bp, rss_ctx, true);
1990 return 0;
1991 }
1992
1993 static int bnxt_set_rxfh(struct net_device *dev,
1994 struct ethtool_rxfh_param *rxfh,
1995 struct netlink_ext_ack *extack)
1996 {
1997 struct bnxt *bp = netdev_priv(dev);
1998 int rc = 0;
1999
2000 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
2001 return -EOPNOTSUPP;
2002
2003 bnxt_modify_rss(bp, NULL, NULL, rxfh);
2004
2005 if (netif_running(bp->dev)) {
2006 bnxt_close_nic(bp, false, false);
2007 rc = bnxt_open_nic(bp, false, false);
2008 }
2009 return rc;
2010 }
2011
2012 static void bnxt_get_drvinfo(struct net_device *dev,
2013 struct ethtool_drvinfo *info)
2014 {
2015 struct bnxt *bp = netdev_priv(dev);
2016
2017 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
2018 strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
2019 strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
2020 info->n_stats = bnxt_get_num_stats(bp);
2021 info->testinfo_len = bp->num_tests;
2022 /* TODO CHIMP_FW: eeprom dump details */
2023 info->eedump_len = 0;
2024 /* TODO CHIMP FW: reg dump details */
2025 info->regdump_len = 0;
2026 }
2027
2028 static int bnxt_get_regs_len(struct net_device *dev)
2029 {
2030 struct bnxt *bp = netdev_priv(dev);
2031 int reg_len;
2032
2033 if (!BNXT_PF(bp))
2034 return -EOPNOTSUPP;
2035
2036 reg_len = BNXT_PXP_REG_LEN;
2037
2038 if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
2039 reg_len += sizeof(struct pcie_ctx_hw_stats);
2040
2041 return reg_len;
2042 }
2043
2044 #define BNXT_PCIE_32B_ENTRY(start, end) \
2045 { offsetof(struct pcie_ctx_hw_stats, start), \
2046 offsetof(struct pcie_ctx_hw_stats, end) }
2047
2048 static const struct {
2049 u16 start;
2050 u16 end;
2051 } bnxt_pcie_32b_entries[] = {
2052 BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
2053 };
2054
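/* Most counters in struct pcie_ctx_hw_stats are 64-bit little-endian;
 * the table above marks the ranges that are packed as 32-bit values
 * (currently only the LTSSM histogram).  bnxt_get_regs() walks the
 * response with the matching element width so each counter is
 * byte-swapped correctly on big-endian hosts.
 */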
2055 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2056 void *_p)
2057 {
2058 struct pcie_ctx_hw_stats *hw_pcie_stats;
2059 struct hwrm_pcie_qstats_input *req;
2060 struct bnxt *bp = netdev_priv(dev);
2061 dma_addr_t hw_pcie_stats_addr;
2062 int rc;
2063
2064 regs->version = 0;
2065 bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
2066
2067 if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
2068 return;
2069
2070 if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
2071 return;
2072
2073 hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
2074 &hw_pcie_stats_addr);
2075 if (!hw_pcie_stats) {
2076 hwrm_req_drop(bp, req);
2077 return;
2078 }
2079
2080 regs->version = 1;
2081 hwrm_req_hold(bp, req); /* hold on to slice */
2082 req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
2083 req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
2084 rc = hwrm_req_send(bp, req);
2085 if (!rc) {
2086 u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
2087 u8 *src = (u8 *)hw_pcie_stats;
2088 int i, j;
2089
2090 for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
2091 if (i >= bnxt_pcie_32b_entries[j].start &&
2092 i <= bnxt_pcie_32b_entries[j].end) {
2093 u32 *dst32 = (u32 *)(dst + i);
2094
2095 *dst32 = le32_to_cpu(*(__le32 *)(src + i));
2096 i += 4;
2097 if (i > bnxt_pcie_32b_entries[j].end &&
2098 j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
2099 j++;
2100 } else {
2101 u64 *dst64 = (u64 *)(dst + i);
2102
2103 *dst64 = le64_to_cpu(*(__le64 *)(src + i));
2104 i += 8;
2105 }
2106 }
2107 }
2108 hwrm_req_drop(bp, req);
2109 }
2110
2111 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2112 {
2113 struct bnxt *bp = netdev_priv(dev);
2114
2115 wol->supported = 0;
2116 wol->wolopts = 0;
2117 memset(&wol->sopass, 0, sizeof(wol->sopass));
2118 if (bp->flags & BNXT_FLAG_WOL_CAP) {
2119 wol->supported = WAKE_MAGIC;
2120 if (bp->wol)
2121 wol->wolopts = WAKE_MAGIC;
2122 }
2123 }
2124
2125 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2126 {
2127 struct bnxt *bp = netdev_priv(dev);
2128
2129 if (wol->wolopts & ~WAKE_MAGIC)
2130 return -EINVAL;
2131
2132 if (wol->wolopts & WAKE_MAGIC) {
2133 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
2134 return -EINVAL;
2135 if (!bp->wol) {
2136 if (bnxt_hwrm_alloc_wol_fltr(bp))
2137 return -EBUSY;
2138 bp->wol = 1;
2139 }
2140 } else {
2141 if (bp->wol) {
2142 if (bnxt_hwrm_free_wol_fltr(bp))
2143 return -EBUSY;
2144 bp->wol = 0;
2145 }
2146 }
2147 return 0;
2148 }
2149
2150 /* TODO: support 25GB, 40GB, 50GB with different cable type */
2151 void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
2152 {
2153 linkmode_zero(mode);
2154
2155 if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
2156 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
2157 if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
2158 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
2159 if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
2160 linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
2161 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
2162 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
2163 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
2164 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
2165 }
2166
2167 enum bnxt_media_type {
2168 BNXT_MEDIA_UNKNOWN = 0,
2169 BNXT_MEDIA_TP,
2170 BNXT_MEDIA_CR,
2171 BNXT_MEDIA_SR,
2172 BNXT_MEDIA_LR_ER_FR,
2173 BNXT_MEDIA_KR,
2174 BNXT_MEDIA_KX,
2175 BNXT_MEDIA_X,
2176 __BNXT_MEDIA_END,
2177 };
2178
2179 static const enum bnxt_media_type bnxt_phy_types[] = {
2180 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
2181 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
2182 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
2183 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
2184 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
2185 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
2186 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
2187 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
2188 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
2189 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
2190 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
2191 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
2192 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
2193 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
2194 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
2195 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2196 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2197 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
2198 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
2199 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
2200 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2201 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2202 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
2203 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
2204 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
2205 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
2206 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
2207 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
2208 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2209 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2210 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
2211 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
2212 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
2213 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
2214 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
2215 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
2216 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
2217 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
2218 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR,
2219 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR,
2220 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR,
2221 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR,
2222 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR,
2223 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR,
2224 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
2225 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
2226 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR,
2227 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR,
2228 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR,
2229 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR,
2230 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR,
2231 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR,
2232 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
2233 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
2234 };
2235
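/* Map firmware PHY types onto the generic media classes above.  Array
 * slots left unset default to 0, i.e. BNXT_MEDIA_UNKNOWN, so new or
 * unrecognized firmware PHY types degrade gracefully.
 */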
2236 static enum bnxt_media_type
2237 bnxt_get_media(struct bnxt_link_info *link_info)
2238 {
2239 switch (link_info->media_type) {
2240 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
2241 return BNXT_MEDIA_TP;
2242 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
2243 return BNXT_MEDIA_CR;
2244 default:
2245 if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
2246 return bnxt_phy_types[link_info->phy_type];
2247 return BNXT_MEDIA_UNKNOWN;
2248 }
2249 }
2250
2251 enum bnxt_link_speed_indices {
2252 BNXT_LINK_SPEED_UNKNOWN = 0,
2253 BNXT_LINK_SPEED_100MB_IDX,
2254 BNXT_LINK_SPEED_1GB_IDX,
2255 BNXT_LINK_SPEED_10GB_IDX,
2256 BNXT_LINK_SPEED_25GB_IDX,
2257 BNXT_LINK_SPEED_40GB_IDX,
2258 BNXT_LINK_SPEED_50GB_IDX,
2259 BNXT_LINK_SPEED_100GB_IDX,
2260 BNXT_LINK_SPEED_200GB_IDX,
2261 BNXT_LINK_SPEED_400GB_IDX,
2262 __BNXT_LINK_SPEED_END
2263 };
2264
2265 static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
2266 {
2267 switch (speed) {
2268 case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
2269 case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
2270 case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
2271 case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
2272 case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
2273 case BNXT_LINK_SPEED_50GB:
2274 case BNXT_LINK_SPEED_50GB_PAM4:
2275 return BNXT_LINK_SPEED_50GB_IDX;
2276 case BNXT_LINK_SPEED_100GB:
2277 case BNXT_LINK_SPEED_100GB_PAM4:
2278 case BNXT_LINK_SPEED_100GB_PAM4_112:
2279 return BNXT_LINK_SPEED_100GB_IDX;
2280 case BNXT_LINK_SPEED_200GB:
2281 case BNXT_LINK_SPEED_200GB_PAM4:
2282 case BNXT_LINK_SPEED_200GB_PAM4_112:
2283 return BNXT_LINK_SPEED_200GB_IDX;
2284 case BNXT_LINK_SPEED_400GB:
2285 case BNXT_LINK_SPEED_400GB_PAM4:
2286 case BNXT_LINK_SPEED_400GB_PAM4_112:
2287 return BNXT_LINK_SPEED_400GB_IDX;
2288 default: return BNXT_LINK_SPEED_UNKNOWN;
2289 }
2290 }
2291
2292 static const enum ethtool_link_mode_bit_indices
2293 bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
2294 [BNXT_LINK_SPEED_100MB_IDX] = {
2295 {
2296 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2297 },
2298 },
2299 [BNXT_LINK_SPEED_1GB_IDX] = {
2300 {
2301 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
2302 /* historically baseT, but DAC is more correctly baseX */
2303 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
2304 [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2305 [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
2306 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2307 },
2308 },
2309 [BNXT_LINK_SPEED_10GB_IDX] = {
2310 {
2311 [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2312 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
2313 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
2314 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
2315 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2316 [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2317 },
2318 },
2319 [BNXT_LINK_SPEED_25GB_IDX] = {
2320 {
2321 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2322 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2323 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2324 },
2325 },
2326 [BNXT_LINK_SPEED_40GB_IDX] = {
2327 {
2328 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2329 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2330 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2331 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2332 },
2333 },
2334 [BNXT_LINK_SPEED_50GB_IDX] = {
2335 [BNXT_SIG_MODE_NRZ] = {
2336 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2337 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2338 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2339 },
2340 [BNXT_SIG_MODE_PAM4] = {
2341 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
2342 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
2343 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
2344 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
2345 },
2346 },
2347 [BNXT_LINK_SPEED_100GB_IDX] = {
2348 [BNXT_SIG_MODE_NRZ] = {
2349 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2350 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2351 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2352 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2353 },
2354 [BNXT_SIG_MODE_PAM4] = {
2355 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
2356 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
2357 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
2358 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
2359 },
2360 [BNXT_SIG_MODE_PAM4_112] = {
2361 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
2362 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
2363 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
2364 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
2365 },
2366 },
2367 [BNXT_LINK_SPEED_200GB_IDX] = {
2368 [BNXT_SIG_MODE_PAM4] = {
2369 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
2370 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
2371 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
2372 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
2373 },
2374 [BNXT_SIG_MODE_PAM4_112] = {
2375 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
2376 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
2377 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
2378 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
2379 },
2380 },
2381 [BNXT_LINK_SPEED_400GB_IDX] = {
2382 [BNXT_SIG_MODE_PAM4] = {
2383 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
2384 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
2385 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
2386 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
2387 },
2388 [BNXT_SIG_MODE_PAM4_112] = {
2389 [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
2390 [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
2391 [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
2392 [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
2393 },
2394 },
2395 };
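/* The table is indexed as [speed][signal mode][media]; for example,
 * bnxt_link_modes[BNXT_LINK_SPEED_50GB_IDX][BNXT_SIG_MODE_PAM4]
 * [BNXT_MEDIA_CR] resolves to ETHTOOL_LINK_MODE_50000baseCR_Full_BIT.
 * Unpopulated combinations are 0 and are treated as unknown.
 */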
2396
2397 #define BNXT_LINK_MODE_UNKNOWN -1
2398
2399 static enum ethtool_link_mode_bit_indices
2400 bnxt_get_link_mode(struct bnxt_link_info *link_info)
2401 {
2402 enum ethtool_link_mode_bit_indices link_mode;
2403 enum bnxt_link_speed_indices speed;
2404 enum bnxt_media_type media;
2405 u8 sig_mode;
2406
2407 if (link_info->phy_link_status != BNXT_LINK_LINK)
2408 return BNXT_LINK_MODE_UNKNOWN;
2409
2410 media = bnxt_get_media(link_info);
2411 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
2412 speed = bnxt_fw_speed_idx(link_info->link_speed);
2413 sig_mode = link_info->active_fec_sig_mode &
2414 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
2415 } else {
2416 speed = bnxt_fw_speed_idx(link_info->req_link_speed);
2417 sig_mode = link_info->req_signal_mode;
2418 }
2419 if (sig_mode >= BNXT_SIG_MODE_MAX)
2420 return BNXT_LINK_MODE_UNKNOWN;
2421
2422 /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
2423 * link mode, but since no such devices exist, the zeroes in the
2424 * map can be conveniently used to represent unknown link modes.
2425 */
2426 link_mode = bnxt_link_modes[speed][sig_mode][media];
2427 if (!link_mode)
2428 return BNXT_LINK_MODE_UNKNOWN;
2429
2430 switch (link_mode) {
2431 case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
2432 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2433 link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
2434 break;
2435 case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
2436 if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2437 link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
2438 break;
2439 default:
2440 break;
2441 }
2442
2443 return link_mode;
2444 }
2445
2446 static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
2447 struct ethtool_link_ksettings *lk_ksettings)
2448 {
2449 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2450
2451 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
2452 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2453 lk_ksettings->link_modes.supported);
2454 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2455 lk_ksettings->link_modes.supported);
2456 }
2457
2458 if (link_info->support_auto_speeds || link_info->support_auto_speeds2 ||
2459 link_info->support_pam4_auto_speeds)
2460 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2461 lk_ksettings->link_modes.supported);
2462
2463 if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2464 return;
2465
2466 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
2467 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2468 lk_ksettings->link_modes.advertising);
2469 if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
2470 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2471 lk_ksettings->link_modes.advertising);
2472 if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
2473 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2474 lk_ksettings->link_modes.lp_advertising);
2475 if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
2476 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2477 lk_ksettings->link_modes.lp_advertising);
2478 }
2479
2480 static const u16 bnxt_nrz_speed_masks[] = {
2481 [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
2482 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
2483 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
2484 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
2485 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
2486 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
2487 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
2488 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2489 };
2490
2491 static const u16 bnxt_pam4_speed_masks[] = {
2492 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
2493 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
2494 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
2495 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2496 };
2497
2498 static const u16 bnxt_nrz_speeds2_masks[] = {
2499 [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB,
2500 [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB,
2501 [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB,
2502 [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB,
2503 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB,
2504 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB,
2505 [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
2506 };
2507
2508 static const u16 bnxt_pam4_speeds2_masks[] = {
2509 [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4,
2510 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4,
2511 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4,
2512 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4,
2513 };
2514
2515 static const u16 bnxt_pam4_112_speeds2_masks[] = {
2516 [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112,
2517 [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112,
2518 [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112,
2519 };
2520
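/* Two generations of firmware speed masks exist: the legacy per-signal
 * mode masks (NRZ and PAM4) and the unified "speeds2" masks used when
 * BNXT_PHY_FL_SPEEDS2 is set, where a single field encodes NRZ, PAM4
 * and PAM4-112 speeds.  The zero sentinel entries above only pad the
 * arrays so that every legal speed index stays within bounds.
 */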
2521 static enum bnxt_link_speed_indices
2522 bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk)
2523 {
2524 const u16 *speeds;
2525 int idx, len;
2526
2527 switch (sig_mode) {
2528 case BNXT_SIG_MODE_NRZ:
2529 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2530 speeds = bnxt_nrz_speeds2_masks;
2531 len = ARRAY_SIZE(bnxt_nrz_speeds2_masks);
2532 } else {
2533 speeds = bnxt_nrz_speed_masks;
2534 len = ARRAY_SIZE(bnxt_nrz_speed_masks);
2535 }
2536 break;
2537 case BNXT_SIG_MODE_PAM4:
2538 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2539 speeds = bnxt_pam4_speeds2_masks;
2540 len = ARRAY_SIZE(bnxt_pam4_speeds2_masks);
2541 } else {
2542 speeds = bnxt_pam4_speed_masks;
2543 len = ARRAY_SIZE(bnxt_pam4_speed_masks);
2544 }
2545 break;
2546 case BNXT_SIG_MODE_PAM4_112:
2547 speeds = bnxt_pam4_112_speeds2_masks;
2548 len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks);
2549 break;
2550 default:
2551 return BNXT_LINK_SPEED_UNKNOWN;
2552 }
2553
2554 for (idx = 0; idx < len; idx++) {
2555 if (speeds[idx] == speed_msk)
2556 return idx;
2557 }
2558
2559 return BNXT_LINK_SPEED_UNKNOWN;
2560 }
2561
2562 #define BNXT_FW_SPEED_MSK_BITS 16
2563
2564 static void
2565 __bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2566 u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2567 {
2568 enum ethtool_link_mode_bit_indices link_mode;
2569 enum bnxt_link_speed_indices speed;
2570 u8 bit;
2571
2572 for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
2573 speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit);
2574 if (!speed)
2575 continue;
2576
2577 link_mode = bnxt_link_modes[speed][sig_mode][media];
2578 if (!link_mode)
2579 continue;
2580
2581 linkmode_set_bit(link_mode, et_mask);
2582 }
2583 }
2584
2585 static void
2586 bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
2587 u8 sig_mode, u16 phy_flags, unsigned long *et_mask)
2588 {
2589 if (media) {
2590 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2591 et_mask);
2592 return;
2593 }
2594
2595 /* list speeds for all media if unknown */
2596 for (media = 1; media < __BNXT_MEDIA_END; media++)
2597 __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags,
2598 et_mask);
2599 }
2600
2601 static void
2602 bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info,
2603 enum bnxt_media_type media,
2604 struct ethtool_link_ksettings *lk_ksettings)
2605 {
2606 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2607 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2608 u16 phy_flags = bp->phy_flags;
2609
2610 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2611 sp_nrz = link_info->support_speeds2;
2612 sp_pam4 = link_info->support_speeds2;
2613 sp_pam4_112 = link_info->support_speeds2;
2614 } else {
2615 sp_nrz = link_info->support_speeds;
2616 sp_pam4 = link_info->support_pam4_speeds;
2617 }
2618 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2619 lk_ksettings->link_modes.supported);
2620 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2621 lk_ksettings->link_modes.supported);
2622 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2623 phy_flags, lk_ksettings->link_modes.supported);
2624 }
2625
2626 static void
2627 bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info,
2628 enum bnxt_media_type media,
2629 struct ethtool_link_ksettings *lk_ksettings)
2630 {
2631 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2632 u16 sp_nrz, sp_pam4, sp_pam4_112 = 0;
2633 u16 phy_flags = bp->phy_flags;
2634
2635 sp_nrz = link_info->advertising;
2636 if (phy_flags & BNXT_PHY_FL_SPEEDS2) {
2637 sp_pam4 = link_info->advertising;
2638 sp_pam4_112 = link_info->advertising;
2639 } else {
2640 sp_pam4 = link_info->advertising_pam4;
2641 }
2642 bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags,
2643 lk_ksettings->link_modes.advertising);
2644 bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags,
2645 lk_ksettings->link_modes.advertising);
2646 bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112,
2647 phy_flags, lk_ksettings->link_modes.advertising);
2648 }
2649
2650 static void
2651 bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info,
2652 enum bnxt_media_type media,
2653 struct ethtool_link_ksettings *lk_ksettings)
2654 {
2655 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2656 u16 phy_flags = bp->phy_flags;
2657
2658 bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media,
2659 BNXT_SIG_MODE_NRZ, phy_flags,
2660 lk_ksettings->link_modes.lp_advertising);
2661 bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media,
2662 BNXT_SIG_MODE_PAM4, phy_flags,
2663 lk_ksettings->link_modes.lp_advertising);
2664 }
2665
2666 static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
2667 u16 speed_msk, const unsigned long *et_mask,
2668 enum ethtool_link_mode_bit_indices mode)
2669 {
2670 bool mode_desired = linkmode_test_bit(mode, et_mask);
2671
2672 if (!mode)
2673 return;
2674
2675 /* enabled speeds for installed media should override */
2676 if (installed_media && mode_desired) {
2677 *speeds |= speed_msk;
2678 *delta |= speed_msk;
2679 return;
2680 }
2681
2682 /* many to one mapping, only allow one change per fw_speed bit */
2683 if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
2684 *speeds ^= speed_msk;
2685 *delta |= speed_msk;
2686 }
2687 }
2688
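/* Translate an ethtool advertising mask into the firmware speed masks.
 * Every (speed, media) pair is scanned so that a user-supplied mode for
 * any legal media is honored, while modes matching the installed media
 * take precedence via the installed_media handling in
 * bnxt_update_speed().
 */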
2689 static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
2690 const unsigned long *et_mask)
2691 {
2692 struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
2693 u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks;
2694 enum bnxt_media_type media = bnxt_get_media(link_info);
2695 u16 *adv, *adv_pam4, *adv_pam4_112 = NULL;
2696 u32 delta_pam4_112 = 0;
2697 u32 delta_pam4 = 0;
2698 u32 delta_nrz = 0;
2699 int i, m;
2700
2701 adv = &link_info->advertising;
2702 if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) {
2703 adv_pam4 = &link_info->advertising;
2704 adv_pam4_112 = &link_info->advertising;
2705 sp_msks = bnxt_nrz_speeds2_masks;
2706 sp_pam4_msks = bnxt_pam4_speeds2_masks;
2707 sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks;
2708 } else {
2709 adv_pam4 = &link_info->advertising_pam4;
2710 sp_msks = bnxt_nrz_speed_masks;
2711 sp_pam4_msks = bnxt_pam4_speed_masks;
2712 }
2713 for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
2714 /* accept any legal media from user */
2715 for (m = 1; m < __BNXT_MEDIA_END; m++) {
2716 bnxt_update_speed(&delta_nrz, m == media,
2717 adv, sp_msks[i], et_mask,
2718 bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
2719 bnxt_update_speed(&delta_pam4, m == media,
2720 adv_pam4, sp_pam4_msks[i], et_mask,
2721 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
2722 if (!adv_pam4_112)
2723 continue;
2724
2725 bnxt_update_speed(&delta_pam4_112, m == media,
2726 adv_pam4_112, sp_pam4_112_msks[i], et_mask,
2727 bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]);
2728 }
2729 }
2730 }
2731
2732 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
2733 struct ethtool_link_ksettings *lk_ksettings)
2734 {
2735 u16 fec_cfg = link_info->fec_cfg;
2736
2737 if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
2738 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2739 lk_ksettings->link_modes.advertising);
2740 return;
2741 }
2742 if (fec_cfg & BNXT_FEC_ENC_BASE_R)
2743 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2744 lk_ksettings->link_modes.advertising);
2745 if (fec_cfg & BNXT_FEC_ENC_RS)
2746 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2747 lk_ksettings->link_modes.advertising);
2748 if (fec_cfg & BNXT_FEC_ENC_LLRS)
2749 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2750 lk_ksettings->link_modes.advertising);
2751 }
2752
2753 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
2754 struct ethtool_link_ksettings *lk_ksettings)
2755 {
2756 u16 fec_cfg = link_info->fec_cfg;
2757
2758 if (fec_cfg & BNXT_FEC_NONE) {
2759 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2760 lk_ksettings->link_modes.supported);
2761 return;
2762 }
2763 if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
2764 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2765 lk_ksettings->link_modes.supported);
2766 if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
2767 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2768 lk_ksettings->link_modes.supported);
2769 if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
2770 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
2771 lk_ksettings->link_modes.supported);
2772 }
2773
2774 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
2775 {
2776 switch (fw_link_speed) {
2777 case BNXT_LINK_SPEED_100MB:
2778 return SPEED_100;
2779 case BNXT_LINK_SPEED_1GB:
2780 return SPEED_1000;
2781 case BNXT_LINK_SPEED_2_5GB:
2782 return SPEED_2500;
2783 case BNXT_LINK_SPEED_10GB:
2784 return SPEED_10000;
2785 case BNXT_LINK_SPEED_20GB:
2786 return SPEED_20000;
2787 case BNXT_LINK_SPEED_25GB:
2788 return SPEED_25000;
2789 case BNXT_LINK_SPEED_40GB:
2790 return SPEED_40000;
2791 case BNXT_LINK_SPEED_50GB:
2792 case BNXT_LINK_SPEED_50GB_PAM4:
2793 return SPEED_50000;
2794 case BNXT_LINK_SPEED_100GB:
2795 case BNXT_LINK_SPEED_100GB_PAM4:
2796 case BNXT_LINK_SPEED_100GB_PAM4_112:
2797 return SPEED_100000;
2798 case BNXT_LINK_SPEED_200GB:
2799 case BNXT_LINK_SPEED_200GB_PAM4:
2800 case BNXT_LINK_SPEED_200GB_PAM4_112:
2801 return SPEED_200000;
2802 case BNXT_LINK_SPEED_400GB:
2803 case BNXT_LINK_SPEED_400GB_PAM4:
2804 case BNXT_LINK_SPEED_400GB_PAM4_112:
2805 return SPEED_400000;
2806 default:
2807 return SPEED_UNKNOWN;
2808 }
2809 }
2810
2811 static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
2812 struct bnxt_link_info *link_info)
2813 {
2814 struct ethtool_link_settings *base = &lk_ksettings->base;
2815
2816 if (link_info->link_state == BNXT_LINK_STATE_UP) {
2817 base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
2818 base->duplex = DUPLEX_HALF;
2819 if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
2820 base->duplex = DUPLEX_FULL;
2821 lk_ksettings->lanes = link_info->active_lanes;
2822 } else if (!link_info->autoneg) {
2823 base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
2824 base->duplex = DUPLEX_HALF;
2825 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
2826 base->duplex = DUPLEX_FULL;
2827 }
2828 }
2829
2830 static int bnxt_get_link_ksettings(struct net_device *dev,
2831 struct ethtool_link_ksettings *lk_ksettings)
2832 {
2833 struct ethtool_link_settings *base = &lk_ksettings->base;
2834 enum ethtool_link_mode_bit_indices link_mode;
2835 struct bnxt *bp = netdev_priv(dev);
2836 struct bnxt_link_info *link_info;
2837 enum bnxt_media_type media;
2838
2839 ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
2840 ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
2841 ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
2842 base->duplex = DUPLEX_UNKNOWN;
2843 base->speed = SPEED_UNKNOWN;
2844 link_info = &bp->link_info;
2845
2846 mutex_lock(&bp->link_lock);
2847 bnxt_get_ethtool_modes(link_info, lk_ksettings);
2848 media = bnxt_get_media(link_info);
2849 bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings);
2850 bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
2851 link_mode = bnxt_get_link_mode(link_info);
2852 if (link_mode != BNXT_LINK_MODE_UNKNOWN)
2853 ethtool_params_from_link_mode(lk_ksettings, link_mode);
2854 else
2855 bnxt_get_default_speeds(lk_ksettings, link_info);
2856
2857 if (link_info->autoneg) {
2858 bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
2859 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2860 lk_ksettings->link_modes.advertising);
2861 base->autoneg = AUTONEG_ENABLE;
2862 bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings);
2863 if (link_info->phy_link_status == BNXT_LINK_LINK)
2864 bnxt_get_all_ethtool_lp_speeds(link_info, media,
2865 lk_ksettings);
2866 } else {
2867 base->autoneg = AUTONEG_DISABLE;
2868 }
2869
2870 base->port = PORT_NONE;
2871 if (media == BNXT_MEDIA_TP) {
2872 base->port = PORT_TP;
2873 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
2874 lk_ksettings->link_modes.supported);
2875 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
2876 lk_ksettings->link_modes.advertising);
2877 } else if (media == BNXT_MEDIA_KR) {
2878 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
2879 lk_ksettings->link_modes.supported);
2880 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
2881 lk_ksettings->link_modes.advertising);
2882 } else {
2883 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
2884 lk_ksettings->link_modes.supported);
2885 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
2886 lk_ksettings->link_modes.advertising);
2887
2888 if (media == BNXT_MEDIA_CR)
2889 base->port = PORT_DA;
2890 else
2891 base->port = PORT_FIBRE;
2892 }
2893 base->phy_address = link_info->phy_addr;
2894 mutex_unlock(&bp->link_lock);
2895
2896 return 0;
2897 }
2898
2899 static int
2900 bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
2901 {
2902 struct bnxt *bp = netdev_priv(dev);
2903 struct bnxt_link_info *link_info = &bp->link_info;
2904 u16 support_pam4_spds = link_info->support_pam4_speeds;
2905 u16 support_spds2 = link_info->support_speeds2;
2906 u16 support_spds = link_info->support_speeds;
2907 u8 sig_mode = BNXT_SIG_MODE_NRZ;
2908 u32 lanes_needed = 1;
2909 u16 fw_speed = 0;
2910
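/* lanes == 0 means the user did not constrain the lane count; a
 * non-zero value selects between encodings that share a speed, e.g.
 * 50G as NRZ over 2 lanes versus PAM4 over 1 lane.
 */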
2911 switch (ethtool_speed) {
2912 case SPEED_100:
2913 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
2914 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
2915 break;
2916 case SPEED_1000:
2917 if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) ||
2918 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB))
2919 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
2920 break;
2921 case SPEED_2500:
2922 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
2923 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
2924 break;
2925 case SPEED_10000:
2926 if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) ||
2927 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB))
2928 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
2929 break;
2930 case SPEED_20000:
2931 if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
2932 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
2933 lanes_needed = 2;
2934 }
2935 break;
2936 case SPEED_25000:
2937 if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) ||
2938 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB))
2939 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
2940 break;
2941 case SPEED_40000:
2942 if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) ||
2943 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) {
2944 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
2945 lanes_needed = 4;
2946 }
2947 break;
2948 case SPEED_50000:
2949 if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) ||
2950 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) &&
2951 lanes != 1) {
2952 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
2953 lanes_needed = 2;
2954 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
2955 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
2956 sig_mode = BNXT_SIG_MODE_PAM4;
2957 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) {
2958 fw_speed = BNXT_LINK_SPEED_50GB_PAM4;
2959 sig_mode = BNXT_SIG_MODE_PAM4;
2960 }
2961 break;
2962 case SPEED_100000:
2963 if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) ||
2964 (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) &&
2965 lanes != 2 && lanes != 1) {
2966 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
2967 lanes_needed = 4;
2968 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
2969 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
2970 sig_mode = BNXT_SIG_MODE_PAM4;
2971 lanes_needed = 2;
2972 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) &&
2973 lanes != 1) {
2974 fw_speed = BNXT_LINK_SPEED_100GB_PAM4;
2975 sig_mode = BNXT_SIG_MODE_PAM4;
2976 lanes_needed = 2;
2977 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) {
2978 fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112;
2979 sig_mode = BNXT_SIG_MODE_PAM4_112;
2980 }
2981 break;
2982 case SPEED_200000:
2983 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
2984 fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
2985 sig_mode = BNXT_SIG_MODE_PAM4;
2986 lanes_needed = 4;
2987 } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) &&
2988 lanes != 2) {
2989 fw_speed = BNXT_LINK_SPEED_200GB_PAM4;
2990 sig_mode = BNXT_SIG_MODE_PAM4;
2991 lanes_needed = 4;
2992 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) {
2993 fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112;
2994 sig_mode = BNXT_SIG_MODE_PAM4_112;
2995 lanes_needed = 2;
2996 }
2997 break;
2998 case SPEED_400000:
2999 if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) &&
3000 lanes != 4) {
3001 fw_speed = BNXT_LINK_SPEED_400GB_PAM4;
3002 sig_mode = BNXT_SIG_MODE_PAM4;
3003 lanes_needed = 8;
3004 } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) {
3005 fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112;
3006 sig_mode = BNXT_SIG_MODE_PAM4_112;
3007 lanes_needed = 4;
3008 }
3009 break;
3010 }
3011
3012 if (!fw_speed) {
3013 netdev_err(dev, "unsupported speed!\n");
3014 return -EINVAL;
3015 }
3016
3017 if (lanes && lanes != lanes_needed) {
3018 netdev_err(dev, "unsupported number of lanes for speed\n");
3019 return -EINVAL;
3020 }
3021
3022 if (link_info->req_link_speed == fw_speed &&
3023 link_info->req_signal_mode == sig_mode &&
3024 link_info->autoneg == 0)
3025 return -EALREADY;
3026
3027 link_info->req_link_speed = fw_speed;
3028 link_info->req_signal_mode = sig_mode;
3029 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
3030 link_info->autoneg = 0;
3031 link_info->advertising = 0;
3032 link_info->advertising_pam4 = 0;
3033
3034 return 0;
3035 }
3036
3037 u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
3038 {
3039 u16 fw_speed_mask = 0;
3040
3041 if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
3042 linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
3043 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
3044
3045 if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
3046 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
3047 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
3048
3049 if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
3050 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
3051
3052 if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
3053 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
3054
3055 return fw_speed_mask;
3056 }
3057
3058 static int bnxt_set_link_ksettings(struct net_device *dev,
3059 const struct ethtool_link_ksettings *lk_ksettings)
3060 {
3061 struct bnxt *bp = netdev_priv(dev);
3062 struct bnxt_link_info *link_info = &bp->link_info;
3063 const struct ethtool_link_settings *base = &lk_ksettings->base;
3064 bool set_pause = false;
3065 u32 speed, lanes = 0;
3066 int rc = 0;
3067
3068 if (!BNXT_PHY_CFG_ABLE(bp))
3069 return -EOPNOTSUPP;
3070
3071 mutex_lock(&bp->link_lock);
3072 if (base->autoneg == AUTONEG_ENABLE) {
3073 bnxt_set_ethtool_speeds(link_info,
3074 lk_ksettings->link_modes.advertising);
3075 link_info->autoneg |= BNXT_AUTONEG_SPEED;
3076 if (!link_info->advertising && !link_info->advertising_pam4) {
3077 link_info->advertising = link_info->support_auto_speeds;
3078 link_info->advertising_pam4 =
3079 link_info->support_pam4_auto_speeds;
3080 }
3081 /* any change to autoneg will cause a link change, so the driver
3082 * should restore the original pause setting once autoneg is enabled
3083 */
3084 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
3085 set_pause = true;
3086 } else {
3087 u8 phy_type = link_info->phy_type;
3088
3089 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
3090 phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
3091 link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
3092 netdev_err(dev, "10GBase-T devices must autoneg\n");
3093 rc = -EINVAL;
3094 goto set_setting_exit;
3095 }
3096 if (base->duplex == DUPLEX_HALF) {
3097 netdev_err(dev, "HALF DUPLEX is not supported!\n");
3098 rc = -EINVAL;
3099 goto set_setting_exit;
3100 }
3101 speed = base->speed;
3102 lanes = lk_ksettings->lanes;
3103 rc = bnxt_force_link_speed(dev, speed, lanes);
3104 if (rc) {
3105 if (rc == -EALREADY)
3106 rc = 0;
3107 goto set_setting_exit;
3108 }
3109 }
3110
3111 if (netif_running(dev))
3112 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
3113
3114 set_setting_exit:
3115 mutex_unlock(&bp->link_lock);
3116 return rc;
3117 }
3118
3119 static int bnxt_get_fecparam(struct net_device *dev,
3120 struct ethtool_fecparam *fec)
3121 {
3122 struct bnxt *bp = netdev_priv(dev);
3123 struct bnxt_link_info *link_info;
3124 u8 active_fec;
3125 u16 fec_cfg;
3126
3127 link_info = &bp->link_info;
3128 fec_cfg = link_info->fec_cfg;
3129 active_fec = link_info->active_fec_sig_mode &
3130 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
3131 if (fec_cfg & BNXT_FEC_NONE) {
3132 fec->fec = ETHTOOL_FEC_NONE;
3133 fec->active_fec = ETHTOOL_FEC_NONE;
3134 return 0;
3135 }
3136 if (fec_cfg & BNXT_FEC_AUTONEG)
3137 fec->fec |= ETHTOOL_FEC_AUTO;
3138 if (fec_cfg & BNXT_FEC_ENC_BASE_R)
3139 fec->fec |= ETHTOOL_FEC_BASER;
3140 if (fec_cfg & BNXT_FEC_ENC_RS)
3141 fec->fec |= ETHTOOL_FEC_RS;
3142 if (fec_cfg & BNXT_FEC_ENC_LLRS)
3143 fec->fec |= ETHTOOL_FEC_LLRS;
3144
3145 switch (active_fec) {
3146 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
3147 fec->active_fec |= ETHTOOL_FEC_BASER;
3148 break;
3149 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
3150 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
3151 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
3152 fec->active_fec |= ETHTOOL_FEC_RS;
3153 break;
3154 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
3155 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
3156 fec->active_fec |= ETHTOOL_FEC_LLRS;
3157 break;
3158 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
3159 fec->active_fec |= ETHTOOL_FEC_OFF;
3160 break;
3161 }
3162 return 0;
3163 }
3164
3165 static void bnxt_get_fec_stats(struct net_device *dev,
3166 struct ethtool_fec_stats *fec_stats)
3167 {
3168 struct bnxt *bp = netdev_priv(dev);
3169 u64 *rx;
3170
3171 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
3172 return;
3173
3174 rx = bp->rx_port_stats_ext.sw_stats;
3175 fec_stats->corrected_bits.total =
3176 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
3177
3178 if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
3179 return;
3180
3181 fec_stats->corrected_blocks.total =
3182 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
3183 fec_stats->uncorrectable_blocks.total =
3184 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
3185 }
3186
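/* Convert a forced ethtool FEC selection to firmware flags.  Only one
 * encoding is applied, in fixed priority order: BaseR, then RS, then
 * LLRS; FEC autonegotiation is explicitly disabled.
 */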
3187 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
3188 u32 fec)
3189 {
3190 u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
3191
3192 if (fec & ETHTOOL_FEC_BASER)
3193 fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
3194 else if (fec & ETHTOOL_FEC_RS)
3195 fw_fec |= BNXT_FEC_RS_ON(link_info);
3196 else if (fec & ETHTOOL_FEC_LLRS)
3197 fw_fec |= BNXT_FEC_LLRS_ON;
3198 return fw_fec;
3199 }
3200
3201 static int bnxt_set_fecparam(struct net_device *dev,
3202 struct ethtool_fecparam *fecparam)
3203 {
3204 struct hwrm_port_phy_cfg_input *req;
3205 struct bnxt *bp = netdev_priv(dev);
3206 struct bnxt_link_info *link_info;
3207 u32 new_cfg, fec = fecparam->fec;
3208 u16 fec_cfg;
3209 int rc;
3210
3211 link_info = &bp->link_info;
3212 fec_cfg = link_info->fec_cfg;
3213 if (fec_cfg & BNXT_FEC_NONE)
3214 return -EOPNOTSUPP;
3215
3216 if (fec & ETHTOOL_FEC_OFF) {
3217 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
3218 BNXT_FEC_ALL_OFF(link_info);
3219 goto apply_fec;
3220 }
3221 if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
3222 ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
3223 ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
3224 ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
3225 return -EINVAL;
3226
3227 if (fec & ETHTOOL_FEC_AUTO) {
3228 if (!link_info->autoneg)
3229 return -EINVAL;
3230 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
3231 } else {
3232 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
3233 }
3234
3235 apply_fec:
3236 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
3237 if (rc)
3238 return rc;
3239 req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
3240 rc = hwrm_req_send(bp, req);
3241 /* update current settings */
3242 if (!rc) {
3243 mutex_lock(&bp->link_lock);
3244 bnxt_update_link(bp, false);
3245 mutex_unlock(&bp->link_lock);
3246 }
3247 return rc;
3248 }
3249
3250 static void bnxt_get_pauseparam(struct net_device *dev,
3251 struct ethtool_pauseparam *epause)
3252 {
3253 struct bnxt *bp = netdev_priv(dev);
3254 struct bnxt_link_info *link_info = &bp->link_info;
3255
3256 if (BNXT_VF(bp))
3257 return;
3258 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
3259 epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
3260 epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
3261 }
3262
3263 static void bnxt_get_pause_stats(struct net_device *dev,
3264 struct ethtool_pause_stats *epstat)
3265 {
3266 struct bnxt *bp = netdev_priv(dev);
3267 u64 *rx, *tx;
3268
3269 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
3270 return;
3271
3272 rx = bp->port_stats.sw_stats;
3273 tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3274
3275 epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
3276 epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
3277 }
3278
3279 static int bnxt_set_pauseparam(struct net_device *dev,
3280 struct ethtool_pauseparam *epause)
3281 {
3282 int rc = 0;
3283 struct bnxt *bp = netdev_priv(dev);
3284 struct bnxt_link_info *link_info = &bp->link_info;
3285
3286 if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
3287 return -EOPNOTSUPP;
3288
3289 mutex_lock(&bp->link_lock);
3290 if (epause->autoneg) {
3291 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
3292 rc = -EINVAL;
3293 goto pause_exit;
3294 }
3295
3296 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
3297 link_info->req_flow_ctrl = 0;
3298 } else {
3299 /* when transitioning from autoneg pause to forced pause,
3300 * force a link change
3301 */
3302 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
3303 link_info->force_link_chng = true;
3304 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
3305 link_info->req_flow_ctrl = 0;
3306 }
3307 if (epause->rx_pause)
3308 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
3309
3310 if (epause->tx_pause)
3311 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
3312
3313 if (netif_running(dev))
3314 rc = bnxt_hwrm_set_pause(bp);
3315
3316 pause_exit:
3317 mutex_unlock(&bp->link_lock);
3318 return rc;
3319 }
3320
3321 static u32 bnxt_get_link(struct net_device *dev)
3322 {
3323 struct bnxt *bp = netdev_priv(dev);
3324
3325 /* TODO: handle MF, VF, driver close case */
3326 return BNXT_LINK_IS_UP(bp);
3327 }
3328
3329 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
3330 struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
3331 {
3332 struct hwrm_nvm_get_dev_info_output *resp;
3333 struct hwrm_nvm_get_dev_info_input *req;
3334 int rc;
3335
3336 if (BNXT_VF(bp))
3337 return -EOPNOTSUPP;
3338
3339 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
3340 if (rc)
3341 return rc;
3342
3343 resp = hwrm_req_hold(bp, req);
3344 rc = hwrm_req_send(bp, req);
3345 if (!rc)
3346 memcpy(nvm_dev_info, resp, sizeof(*resp));
3347 hwrm_req_drop(bp, req);
3348 return rc;
3349 }
3350
3351 static void bnxt_print_admin_err(struct bnxt *bp)
3352 {
3353 netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
3354 }
3355
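/* NVM items below are addressed by a directory entry (type, ordinal, ext).
 * The flash helpers locate or create an entry and DMA the payload to the
 * firmware with HWRM_NVM_WRITE; this is a summary of the code that follows,
 * not of any separate NVM specification.
 */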
3356 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
3357 u16 ext, u16 *index, u32 *item_length,
3358 u32 *data_length);
3359
3360 int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
3361 u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
3362 u32 dir_item_len, const u8 *data,
3363 size_t data_len)
3364 {
3365 struct bnxt *bp = netdev_priv(dev);
3366 struct hwrm_nvm_write_input *req;
3367 int rc;
3368
3369 rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
3370 if (rc)
3371 return rc;
3372
3373 if (data_len && data) {
3374 dma_addr_t dma_handle;
3375 u8 *kmem;
3376
3377 kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
3378 if (!kmem) {
3379 hwrm_req_drop(bp, req);
3380 return -ENOMEM;
3381 }
3382
3383 req->dir_data_length = cpu_to_le32(data_len);
3384
3385 memcpy(kmem, data, data_len);
3386 req->host_src_addr = cpu_to_le64(dma_handle);
3387 }
3388
3389 hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
3390 req->dir_type = cpu_to_le16(dir_type);
3391 req->dir_ordinal = cpu_to_le16(dir_ordinal);
3392 req->dir_ext = cpu_to_le16(dir_ext);
3393 req->dir_attr = cpu_to_le16(dir_attr);
3394 req->dir_item_length = cpu_to_le32(dir_item_len);
3395 rc = hwrm_req_send(bp, req);
3396
3397 if (rc == -EACCES)
3398 bnxt_print_admin_err(bp);
3399 return rc;
3400 }
3401
3402 int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
3403 u8 self_reset, u8 flags)
3404 {
3405 struct bnxt *bp = netdev_priv(dev);
3406 struct hwrm_fw_reset_input *req;
3407 int rc;
3408
3409 if (!bnxt_hwrm_reset_permitted(bp)) {
3410 netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by the remote driver\n");
3411 return -EPERM;
3412 }
3413
3414 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
3415 if (rc)
3416 return rc;
3417
3418 req->embedded_proc_type = proc_type;
3419 req->selfrst_status = self_reset;
3420 req->flags = flags;
3421
3422 if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
3423 rc = hwrm_req_send_silent(bp, req);
3424 } else {
3425 rc = hwrm_req_send(bp, req);
3426 if (rc == -EACCES)
3427 bnxt_print_admin_err(bp);
3428 }
3429 return rc;
3430 }
3431
3432 static int bnxt_firmware_reset(struct net_device *dev,
3433 enum bnxt_nvm_directory_type dir_type)
3434 {
3435 u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
3436 u8 proc_type, flags = 0;
3437
3438 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
3439 /* (e.g. when firmware isn't already running) */
3440 switch (dir_type) {
3441 case BNX_DIR_TYPE_CHIMP_PATCH:
3442 case BNX_DIR_TYPE_BOOTCODE:
3443 case BNX_DIR_TYPE_BOOTCODE_2:
3444 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
3445 /* Self-reset ChiMP upon next PCIe reset: */
3446 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
3447 break;
3448 case BNX_DIR_TYPE_APE_FW:
3449 case BNX_DIR_TYPE_APE_PATCH:
3450 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
3451 /* Self-reset APE upon next PCIe reset: */
3452 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
3453 break;
3454 case BNX_DIR_TYPE_KONG_FW:
3455 case BNX_DIR_TYPE_KONG_PATCH:
3456 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
3457 break;
3458 case BNX_DIR_TYPE_BONO_FW:
3459 case BNX_DIR_TYPE_BONO_PATCH:
3460 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
3461 break;
3462 default:
3463 return -EINVAL;
3464 }
3465
3466 return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
3467 }
3468
3469 static int bnxt_firmware_reset_chip(struct net_device *dev)
3470 {
3471 struct bnxt *bp = netdev_priv(dev);
3472 u8 flags = 0;
3473
3474 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
3475 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
3476
3477 return bnxt_hwrm_firmware_reset(dev,
3478 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
3479 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
3480 flags);
3481 }
3482
3483 static int bnxt_firmware_reset_ap(struct net_device *dev)
3484 {
3485 return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
3486 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
3487 0);
3488 }
3489
3490 static int bnxt_flash_firmware(struct net_device *dev,
3491 u16 dir_type,
3492 const u8 *fw_data,
3493 size_t fw_size)
3494 {
3495 int rc = 0;
3496 u16 code_type;
3497 u32 stored_crc;
3498 u32 calculated_crc;
3499 struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
3500
3501 switch (dir_type) {
3502 case BNX_DIR_TYPE_BOOTCODE:
3503 case BNX_DIR_TYPE_BOOTCODE_2:
3504 code_type = CODE_BOOT;
3505 break;
3506 case BNX_DIR_TYPE_CHIMP_PATCH:
3507 code_type = CODE_CHIMP_PATCH;
3508 break;
3509 case BNX_DIR_TYPE_APE_FW:
3510 code_type = CODE_MCTP_PASSTHRU;
3511 break;
3512 case BNX_DIR_TYPE_APE_PATCH:
3513 code_type = CODE_APE_PATCH;
3514 break;
3515 case BNX_DIR_TYPE_KONG_FW:
3516 code_type = CODE_KONG_FW;
3517 break;
3518 case BNX_DIR_TYPE_KONG_PATCH:
3519 code_type = CODE_KONG_PATCH;
3520 break;
3521 case BNX_DIR_TYPE_BONO_FW:
3522 code_type = CODE_BONO_FW;
3523 break;
3524 case BNX_DIR_TYPE_BONO_PATCH:
3525 code_type = CODE_BONO_PATCH;
3526 break;
3527 default:
3528 netdev_err(dev, "Unsupported directory entry type: %u\n",
3529 dir_type);
3530 return -EINVAL;
3531 }
3532 if (fw_size < sizeof(struct bnxt_fw_header)) {
3533 netdev_err(dev, "Invalid firmware file size: %u\n",
3534 (unsigned int)fw_size);
3535 return -EINVAL;
3536 }
3537 if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
3538 netdev_err(dev, "Invalid firmware signature: %08X\n",
3539 le32_to_cpu(header->signature));
3540 return -EINVAL;
3541 }
3542 if (header->code_type != code_type) {
3543 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
3544 code_type, header->code_type);
3545 return -EINVAL;
3546 }
3547 if (header->device != DEVICE_CUMULUS_FAMILY) {
3548 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
3549 DEVICE_CUMULUS_FAMILY, header->device);
3550 return -EINVAL;
3551 }
3552 /* Confirm the CRC32 checksum of the file: */
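/* (The trailer is a little-endian CRC-32 over all preceding bytes;
 * ~crc32(~0, buf, len) is the standard IEEE 802.3 CRC-32 with the
 * usual pre- and post-inversion.)
 */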
3553 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
3554 sizeof(stored_crc)));
3555 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
3556 if (calculated_crc != stored_crc) {
3557 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
3558 (unsigned long)stored_crc,
3559 (unsigned long)calculated_crc);
3560 return -EINVAL;
3561 }
3562 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3563 0, 0, 0, fw_data, fw_size);
3564 if (rc == 0) /* Firmware update successful */
3565 rc = bnxt_firmware_reset(dev, dir_type);
3566
3567 return rc;
3568 }
3569
3570 static int bnxt_flash_microcode(struct net_device *dev,
3571 u16 dir_type,
3572 const u8 *fw_data,
3573 size_t fw_size)
3574 {
3575 struct bnxt_ucode_trailer *trailer;
3576 u32 calculated_crc;
3577 u32 stored_crc;
3578 int rc = 0;
3579
3580 if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
3581 netdev_err(dev, "Invalid microcode file size: %u\n",
3582 (unsigned int)fw_size);
3583 return -EINVAL;
3584 }
3585 trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
3586 sizeof(*trailer)));
3587 if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
3588 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
3589 le32_to_cpu(trailer->sig));
3590 return -EINVAL;
3591 }
3592 if (le16_to_cpu(trailer->dir_type) != dir_type) {
3593 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
3594 dir_type, le16_to_cpu(trailer->dir_type));
3595 return -EINVAL;
3596 }
3597 if (le16_to_cpu(trailer->trailer_length) <
3598 sizeof(struct bnxt_ucode_trailer)) {
3599 netdev_err(dev, "Invalid microcode trailer length: %d\n",
3600 le16_to_cpu(trailer->trailer_length));
3601 return -EINVAL;
3602 }
3603
3604 /* Confirm the CRC32 checksum of the file: */
3605 stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
3606 sizeof(stored_crc)));
3607 calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
3608 if (calculated_crc != stored_crc) {
3609 netdev_err(dev,
3610 "CRC32 (%08lX) does not match calculated: %08lX\n",
3611 (unsigned long)stored_crc,
3612 (unsigned long)calculated_crc);
3613 return -EINVAL;
3614 }
3615 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3616 0, 0, 0, fw_data, fw_size);
3617
3618 return rc;
3619 }
3620
3621 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
3622 {
3623 switch (dir_type) {
3624 case BNX_DIR_TYPE_CHIMP_PATCH:
3625 case BNX_DIR_TYPE_BOOTCODE:
3626 case BNX_DIR_TYPE_BOOTCODE_2:
3627 case BNX_DIR_TYPE_APE_FW:
3628 case BNX_DIR_TYPE_APE_PATCH:
3629 case BNX_DIR_TYPE_KONG_FW:
3630 case BNX_DIR_TYPE_KONG_PATCH:
3631 case BNX_DIR_TYPE_BONO_FW:
3632 case BNX_DIR_TYPE_BONO_PATCH:
3633 return true;
3634 }
3635
3636 return false;
3637 }
3638
3639 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
3640 {
3641 switch (dir_type) {
3642 case BNX_DIR_TYPE_AVS:
3643 case BNX_DIR_TYPE_EXP_ROM_MBA:
3644 case BNX_DIR_TYPE_PCIE:
3645 case BNX_DIR_TYPE_TSCF_UCODE:
3646 case BNX_DIR_TYPE_EXT_PHY:
3647 case BNX_DIR_TYPE_CCM:
3648 case BNX_DIR_TYPE_ISCSI_BOOT:
3649 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
3650 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
3651 return true;
3652 }
3653
3654 return false;
3655 }
3656
3657 static bool bnxt_dir_type_is_executable(u16 dir_type)
3658 {
3659 return bnxt_dir_type_is_ape_bin_format(dir_type) ||
3660 bnxt_dir_type_is_other_exec_format(dir_type);
3661 }
3662
3663 static int bnxt_flash_firmware_from_file(struct net_device *dev,
3664 u16 dir_type,
3665 const char *filename)
3666 {
3667 const struct firmware *fw;
3668 int rc;
3669
3670 rc = request_firmware(&fw, filename, &dev->dev);
3671 if (rc != 0) {
3672 netdev_err(dev, "Error %d requesting firmware file: %s\n",
3673 rc, filename);
3674 return rc;
3675 }
3676 if (bnxt_dir_type_is_ape_bin_format(dir_type))
3677 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
3678 else if (bnxt_dir_type_is_other_exec_format(dir_type))
3679 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
3680 else
3681 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
3682 0, 0, 0, fw->data, fw->size);
3683 release_firmware(fw);
3684 return rc;
3685 }
3686
3687 #define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
3688 #define MSG_INVALID_PKG "PKG install error : Invalid package"
3689 #define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
3690 #define MSG_INVALID_DEV "PKG install error : Invalid device"
3691 #define MSG_INTERNAL_ERR "PKG install error : Internal error"
3692 #define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
3693 #define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
3694 #define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
3695 #define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
3696 #define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
3697
3698 static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
3699 struct netlink_ext_ack *extack)
3700 {
3701 switch (result) {
3702 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
3703 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
3704 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
3705 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
3706 case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
3707 case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
3708 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
3709 return -EINVAL;
3710 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
3711 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
3712 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
3713 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
3714 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
3715 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
3716 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
3717 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
3718 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
3719 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
3720 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
3721 case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
3722 case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
3723 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
3724 return -ENOPKG;
3725 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
3726 BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
3727 return -EPERM;
3728 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
3729 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
3730 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
3731 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
3732 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
3733 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
3734 return -EOPNOTSUPP;
3735 default:
3736 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
3737 return -EIO;
3738 }
3739 }
3740
3741 #define BNXT_PKG_DMA_SIZE 0x40000
3742 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
3743 #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
3744
3745 static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
3746 struct netlink_ext_ack *extack)
3747 {
3748 u32 item_len;
3749 int rc;
3750
3751 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
3752 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
3753 &item_len, NULL);
3754 if (rc) {
3755 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
3756 return rc;
3757 }
3758
3759 if (fw_size > item_len) {
3760 rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
3761 BNX_DIR_ORDINAL_FIRST, 0, 1,
3762 round_up(fw_size, 4096), NULL, 0);
3763 if (rc) {
3764 BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
3765 return rc;
3766 }
3767 }
3768 return 0;
3769 }
3770
3771 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
3772 u32 install_type, struct netlink_ext_ack *extack)
3773 {
3774 struct hwrm_nvm_install_update_input *install;
3775 struct hwrm_nvm_install_update_output *resp;
3776 struct hwrm_nvm_modify_input *modify;
3777 struct bnxt *bp = netdev_priv(dev);
3778 bool defrag_attempted = false;
3779 dma_addr_t dma_handle;
3780 u8 *kmem = NULL;
3781 u32 modify_len;
3782 u32 item_len;
3783 u8 cmd_err;
3784 u16 index;
3785 int rc;
3786
3787 /* resize before flashing larger image than available space */
3788 rc = bnxt_resize_update_entry(dev, fw->size, extack);
3789 if (rc)
3790 return rc;
3791
3792 bnxt_hwrm_fw_set_time(bp);
3793
3794 rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
3795 if (rc)
3796 return rc;
3797
3798 /* Try allocating a large DMA buffer first. Older fw will
3799 * cause excessive NVRAM erases when using small blocks.
3800 */
3801 modify_len = roundup_pow_of_two(fw->size);
3802 modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
3803 while (1) {
3804 kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
3805 if (!kmem && modify_len > PAGE_SIZE)
3806 modify_len /= 2;
3807 else
3808 break;
3809 }
3810 if (!kmem) {
3811 hwrm_req_drop(bp, modify);
3812 return -ENOMEM;
3813 }
3814
3815 rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
3816 if (rc) {
3817 hwrm_req_drop(bp, modify);
3818 return rc;
3819 }
3820
3821 hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
3822 hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
3823
3824 hwrm_req_hold(bp, modify);
3825 modify->host_src_addr = cpu_to_le64(dma_handle);
3826
3827 resp = hwrm_req_hold(bp, install);
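/* The install type may arrive encoded in the upper 16 bits of the
 * ethtool flash "region" word; normalize it here.  (This reading is
 * derived from the check below, not from a documented ABI.)
 */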
3828 if ((install_type & 0xffff) == 0)
3829 install_type >>= 16;
3830 install->install_type = cpu_to_le32(install_type);
3831
3832 do {
3833 u32 copied = 0, len = modify_len;
3834
3835 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
3836 BNX_DIR_ORDINAL_FIRST,
3837 BNX_DIR_EXT_NONE,
3838 &index, &item_len, NULL);
3839 if (rc) {
3840 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
3841 break;
3842 }
3843 if (fw->size > item_len) {
3844 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
3845 rc = -EFBIG;
3846 break;
3847 }
3848
3849 modify->dir_idx = cpu_to_le16(index);
3850
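/* When the image is larger than the DMA slice, it is sent in
 * modify_len-sized pieces with BATCH_MODE set, and BATCH_LAST is
 * OR'ed in on the final piece.
 */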
3851 if (fw->size > modify_len)
3852 modify->flags = BNXT_NVM_MORE_FLAG;
3853 while (copied < fw->size) {
3854 u32 balance = fw->size - copied;
3855
3856 if (balance <= modify_len) {
3857 len = balance;
3858 if (copied)
3859 modify->flags |= BNXT_NVM_LAST_FLAG;
3860 }
3861 memcpy(kmem, fw->data + copied, len);
3862 modify->len = cpu_to_le32(len);
3863 modify->offset = cpu_to_le32(copied);
3864 rc = hwrm_req_send(bp, modify);
3865 if (rc)
3866 goto pkg_abort;
3867 copied += len;
3868 }
3869
3870 rc = hwrm_req_send_silent(bp, install);
3871 if (!rc)
3872 break;
3873
3874 if (defrag_attempted) {
3875 /* We have tried to defragment already in the previous
3876 * iteration. Return with the result for INSTALL_UPDATE
3877 */
3878 break;
3879 }
3880
3881 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
3882
3883 switch (cmd_err) {
3884 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
3885 BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
3886 rc = -EALREADY;
3887 break;
3888 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
3889 install->flags =
3890 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
3891
3892 rc = hwrm_req_send_silent(bp, install);
3893 if (!rc)
3894 break;
3895
3896 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
3897
3898 if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
3899 /* FW has cleared NVM area, driver will create
3900 * UPDATE directory and try the flash again
3901 */
3902 defrag_attempted = true;
3903 install->flags = 0;
3904 rc = bnxt_flash_nvram(bp->dev,
3905 BNX_DIR_TYPE_UPDATE,
3906 BNX_DIR_ORDINAL_FIRST,
3907 0, 0, item_len, NULL, 0);
3908 if (!rc)
3909 break;
3910 }
3911 fallthrough;
3912 default:
3913 BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
3914 }
3915 } while (defrag_attempted && !rc);
3916
3917 pkg_abort:
3918 hwrm_req_drop(bp, modify);
3919 hwrm_req_drop(bp, install);
3920
3921 if (resp->result) {
3922 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
3923 (s8)resp->result, (int)resp->problem_item);
3924 rc = nvm_update_err_to_stderr(dev, resp->result, extack);
3925 }
3926 if (rc == -EACCES)
3927 bnxt_print_admin_err(bp);
3928 return rc;
3929 }
3930
3931 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
3932 u32 install_type, struct netlink_ext_ack *extack)
3933 {
3934 const struct firmware *fw;
3935 int rc;
3936
3937 rc = request_firmware(&fw, filename, &dev->dev);
3938 if (rc != 0) {
3939 netdev_err(dev, "PKG error %d requesting file: %s\n",
3940 rc, filename);
3941 return rc;
3942 }
3943
3944 rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
3945
3946 release_firmware(fw);
3947
3948 return rc;
3949 }
3950
3951 static int bnxt_flash_device(struct net_device *dev,
3952 struct ethtool_flash *flash)
3953 {
3954 if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
3955 netdev_err(dev, "flashdev not supported from a virtual function\n");
3956 return -EINVAL;
3957 }
3958
3959 if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
3960 flash->region > 0xffff)
3961 return bnxt_flash_package_from_file(dev, flash->data,
3962 flash->region, NULL);
3963
3964 return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
3965 }
3966
3967 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
3968 {
3969 struct hwrm_nvm_get_dir_info_output *output;
3970 struct hwrm_nvm_get_dir_info_input *req;
3971 struct bnxt *bp = netdev_priv(dev);
3972 int rc;
3973
3974 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
3975 if (rc)
3976 return rc;
3977
3978 output = hwrm_req_hold(bp, req);
3979 rc = hwrm_req_send(bp, req);
3980 if (!rc) {
3981 *entries = le32_to_cpu(output->entries);
3982 *length = le32_to_cpu(output->entry_length);
3983 }
3984 hwrm_req_drop(bp, req);
3985 return rc;
3986 }
3987
3988 static int bnxt_get_eeprom_len(struct net_device *dev)
3989 {
3990 struct bnxt *bp = netdev_priv(dev);
3991
3992 if (BNXT_VF(bp))
3993 return 0;
3994
3995 /* The -1 return value allows the entire 32-bit range of offsets to be
3996 * passed via the ethtool command-line utility.
3997 */
3998 return -1;
3999 }
4000
4001 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
4002 {
4003 struct bnxt *bp = netdev_priv(dev);
4004 int rc;
4005 u32 dir_entries;
4006 u32 entry_length;
4007 u8 *buf;
4008 size_t buflen;
4009 dma_addr_t dma_handle;
4010 struct hwrm_nvm_get_dir_entries_input *req;
4011
4012 rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
4013 if (rc != 0)
4014 return rc;
4015
4016 if (!dir_entries || !entry_length)
4017 return -EIO;
4018
4019 /* Insert 2 bytes of directory info (count and size of entries) */
4020 if (len < 2)
4021 return -EINVAL;
4022
4023 *data++ = dir_entries;
4024 *data++ = entry_length;
4025 len -= 2;
4026 memset(data, 0xff, len);
4027
4028 rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
4029 if (rc)
4030 return rc;
4031
4032 buflen = mul_u32_u32(dir_entries, entry_length);
4033 buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
4034 if (!buf) {
4035 hwrm_req_drop(bp, req);
4036 return -ENOMEM;
4037 }
4038 req->host_dest_addr = cpu_to_le64(dma_handle);
4039
4040 hwrm_req_hold(bp, req); /* hold the slice */
4041 rc = hwrm_req_send(bp, req);
4042 if (rc == 0)
4043 memcpy(data, buf, len > buflen ? buflen : len);
4044 hwrm_req_drop(bp, req);
4045 return rc;
4046 }
4047
4048 int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
4049 u32 length, u8 *data)
4050 {
4051 struct bnxt *bp = netdev_priv(dev);
4052 int rc;
4053 u8 *buf;
4054 dma_addr_t dma_handle;
4055 struct hwrm_nvm_read_input *req;
4056
4057 if (!length)
4058 return -EINVAL;
4059
4060 rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
4061 if (rc)
4062 return rc;
4063
4064 buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
4065 if (!buf) {
4066 hwrm_req_drop(bp, req);
4067 return -ENOMEM;
4068 }
4069
4070 req->host_dest_addr = cpu_to_le64(dma_handle);
4071 req->dir_idx = cpu_to_le16(index);
4072 req->offset = cpu_to_le32(offset);
4073 req->len = cpu_to_le32(length);
4074
4075 hwrm_req_hold(bp, req); /* hold the slice */
4076 rc = hwrm_req_send(bp, req);
4077 if (rc == 0)
4078 memcpy(data, buf, length);
4079 hwrm_req_drop(bp, req);
4080 return rc;
4081 }
4082
4083 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
4084 u16 ext, u16 *index, u32 *item_length,
4085 u32 *data_length)
4086 {
4087 struct hwrm_nvm_find_dir_entry_output *output;
4088 struct hwrm_nvm_find_dir_entry_input *req;
4089 struct bnxt *bp = netdev_priv(dev);
4090 int rc;
4091
4092 rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
4093 if (rc)
4094 return rc;
4095
4096 req->enables = 0;
4097 req->dir_idx = 0;
4098 req->dir_type = cpu_to_le16(type);
4099 req->dir_ordinal = cpu_to_le16(ordinal);
4100 req->dir_ext = cpu_to_le16(ext);
4101 req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
4102 output = hwrm_req_hold(bp, req);
4103 rc = hwrm_req_send_silent(bp, req);
4104 if (rc == 0) {
4105 if (index)
4106 *index = le16_to_cpu(output->dir_idx);
4107 if (item_length)
4108 *item_length = le32_to_cpu(output->dir_item_length);
4109 if (data_length)
4110 *data_length = le32_to_cpu(output->dir_data_length);
4111 }
4112 hwrm_req_drop(bp, req);
4113 return rc;
4114 }
4115
4116 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
4117 {
4118 char *retval = NULL;
4119 char *p;
4120 char *value;
4121 int field = 0;
4122
4123 if (datalen < 1)
4124 return NULL;
4125 /* null-terminate the log data (removing last '\n'): */
4126 data[datalen - 1] = 0;
4127 for (p = data; *p != 0; p++) {
4128 field = 0;
4129 retval = NULL;
4130 while (*p != 0 && *p != '\n') {
4131 value = p;
4132 while (*p != 0 && *p != '\t' && *p != '\n')
4133 p++;
4134 if (field == desired_field)
4135 retval = value;
4136 if (*p != '\t')
4137 break;
4138 *p = 0;
4139 field++;
4140 p++;
4141 }
4142 if (*p == 0)
4143 break;
4144 *p = 0;
4145 }
4146 return retval;
4147 }
4148
4149 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
4150 {
4151 struct bnxt *bp = netdev_priv(dev);
4152 u16 index = 0;
4153 char *pkgver;
4154 u32 pkglen;
4155 u8 *pkgbuf;
4156 int rc;
4157
4158 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
4159 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
4160 &index, NULL, &pkglen);
4161 if (rc)
4162 return rc;
4163
4164 pkgbuf = kzalloc(pkglen, GFP_KERNEL);
4165 if (!pkgbuf) {
4166 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
4167 pkglen);
4168 return -ENOMEM;
4169 }
4170
4171 rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
4172 if (rc)
4173 goto err;
4174
4175 pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
4176 pkglen);
4177 if (pkgver && *pkgver != 0 && isdigit(*pkgver))
4178 strscpy(ver, pkgver, size);
4179 else
4180 rc = -ENOENT;
4181
4182 err:
4183 kfree(pkgbuf);
4184
4185 return rc;
4186 }
4187
4188 static void bnxt_get_pkgver(struct net_device *dev)
4189 {
4190 struct bnxt *bp = netdev_priv(dev);
4191 char buf[FW_VER_STR_LEN];
4192 int len;
4193
4194 if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
4195 len = strlen(bp->fw_ver_str);
4196 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
4197 "/pkg %s", buf);
4198 }
4199 }
4200
4201 static int bnxt_get_eeprom(struct net_device *dev,
4202 struct ethtool_eeprom *eeprom,
4203 u8 *data)
4204 {
4205 u32 index;
4206 u32 offset;
4207
4208 if (eeprom->offset == 0) /* special offset value to get directory */
4209 return bnxt_get_nvram_directory(dev, eeprom->len, data);
4210
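/* The ethtool EEPROM offset packs the NVM directory index (plus one)
 * into the top byte and the byte offset within the item into the low
 * 24 bits (a reading of the code below).  For example, with a
 * hypothetical interface name:
 *
 *   ethtool -e eth0 offset 0x1000000 length 64
 *
 * would read the first 64 bytes of directory entry 0, while offset 0
 * returns the directory listing itself.
 */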
4211 index = eeprom->offset >> 24;
4212 offset = eeprom->offset & 0xffffff;
4213
4214 if (index == 0) {
4215 netdev_err(dev, "unsupported index value: %d\n", index);
4216 return -EINVAL;
4217 }
4218
4219 return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
4220 }
4221
4222 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
4223 {
4224 struct hwrm_nvm_erase_dir_entry_input *req;
4225 struct bnxt *bp = netdev_priv(dev);
4226 int rc;
4227
4228 rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
4229 if (rc)
4230 return rc;
4231
4232 req->dir_idx = cpu_to_le16(index);
4233 return hwrm_req_send(bp, req);
4234 }
4235
4236 static int bnxt_set_eeprom(struct net_device *dev,
4237 struct ethtool_eeprom *eeprom,
4238 u8 *data)
4239 {
4240 struct bnxt *bp = netdev_priv(dev);
4241 u8 index, dir_op;
4242 u16 type, ext, ordinal, attr;
4243
4244 if (!BNXT_PF(bp)) {
4245 netdev_err(dev, "NVM write not supported from a virtual function\n");
4246 return -EINVAL;
4247 }
4248
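/* Encoding handled below (derived from this function, not from a
 * documented ABI):
 *   item write:   magic  = dir_type << 16 | dir_ext
 *                 offset = dir_ordinal << 16 | dir_attr
 *   directory op: magic  = 0xffff0000 | dir_op << 8 | (dir_index + 1),
 *                 with offset == ~magic as a guard for erase (op 0x0e)
 */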
4249 type = eeprom->magic >> 16;
4250
4251 if (type == 0xffff) { /* special value for directory operations */
4252 index = eeprom->magic & 0xff;
4253 dir_op = eeprom->magic >> 8;
4254 if (index == 0)
4255 return -EINVAL;
4256 switch (dir_op) {
4257 case 0x0e: /* erase */
4258 if (eeprom->offset != ~eeprom->magic)
4259 return -EINVAL;
4260 return bnxt_erase_nvram_directory(dev, index - 1);
4261 default:
4262 return -EINVAL;
4263 }
4264 }
4265
4266 /* Create or re-write an NVM item: */
4267 if (bnxt_dir_type_is_executable(type))
4268 return -EOPNOTSUPP;
4269 ext = eeprom->magic & 0xffff;
4270 ordinal = eeprom->offset >> 16;
4271 attr = eeprom->offset & 0xffff;
4272
4273 return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
4274 eeprom->len);
4275 }
4276
4277 static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
4278 {
4279 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
4280 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
4281 struct bnxt *bp = netdev_priv(dev);
4282 struct ethtool_keee *eee = &bp->eee;
4283 struct bnxt_link_info *link_info = &bp->link_info;
4284 int rc = 0;
4285
4286 if (!BNXT_PHY_CFG_ABLE(bp))
4287 return -EOPNOTSUPP;
4288
4289 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
4290 return -EOPNOTSUPP;
4291
4292 mutex_lock(&bp->link_lock);
4293 _bnxt_fw_to_linkmode(advertising, link_info->advertising);
4294 if (!edata->eee_enabled)
4295 goto eee_ok;
4296
4297 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4298 netdev_warn(dev, "EEE requires autoneg\n");
4299 rc = -EINVAL;
4300 goto eee_exit;
4301 }
4302 if (edata->tx_lpi_enabled) {
4303 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
4304 edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
4305 netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
4306 bp->lpi_tmr_lo, bp->lpi_tmr_hi);
4307 rc = -EINVAL;
4308 goto eee_exit;
4309 } else if (!bp->lpi_tmr_hi) {
4310 edata->tx_lpi_timer = eee->tx_lpi_timer;
4311 }
4312 }
4313 if (linkmode_empty(edata->advertised)) {
4314 linkmode_and(edata->advertised, advertising, eee->supported);
4315 } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
4316 netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
4317 rc = -EINVAL;
4318 goto eee_exit;
4319 }
4320
4321 linkmode_copy(eee->advertised, edata->advertised);
4322 eee->tx_lpi_enabled = edata->tx_lpi_enabled;
4323 eee->tx_lpi_timer = edata->tx_lpi_timer;
4324 eee_ok:
4325 eee->eee_enabled = edata->eee_enabled;
4326
4327 if (netif_running(dev))
4328 rc = bnxt_hwrm_set_link_setting(bp, false, true);
4329
4330 eee_exit:
4331 mutex_unlock(&bp->link_lock);
4332 return rc;
4333 }
4334
4335 static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
4336 {
4337 struct bnxt *bp = netdev_priv(dev);
4338
4339 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
4340 return -EOPNOTSUPP;
4341
4342 *edata = bp->eee;
4343 if (!bp->eee.eee_enabled) {
4344 /* Preserve tx_lpi_timer so that the last value will be used
4345 * by default when it is re-enabled.
4346 */
4347 linkmode_zero(edata->advertised);
4348 edata->tx_lpi_enabled = 0;
4349 }
4350
4351 if (!bp->eee.eee_active)
4352 linkmode_zero(edata->lp_advertised);
4353
4354 return 0;
4355 }
4356
4357 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
4358 u16 page_number, u8 bank,
4359 u16 start_addr, u16 data_length,
4360 u8 *buf)
4361 {
4362 struct hwrm_port_phy_i2c_read_output *output;
4363 struct hwrm_port_phy_i2c_read_input *req;
4364 int rc, byte_offset = 0;
4365
4366 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
4367 if (rc)
4368 return rc;
4369
4370 output = hwrm_req_hold(bp, req);
4371 req->i2c_slave_addr = i2c_addr;
4372 req->page_number = cpu_to_le16(page_number);
4373 req->port_id = cpu_to_le16(bp->pf.port_id);
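/* Firmware returns at most BNXT_MAX_PHY_I2C_RESP_SIZE bytes per
 * HWRM_PORT_PHY_I2C_READ, so larger requests are issued as a series
 * of chunked reads at increasing page offsets.
 */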
4374 do {
4375 u16 xfer_size;
4376
4377 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
4378 data_length -= xfer_size;
4379 req->page_offset = cpu_to_le16(start_addr + byte_offset);
4380 req->data_length = xfer_size;
4381 req->enables =
4382 cpu_to_le32((start_addr + byte_offset ?
4383 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
4384 0) |
4385 (bank ?
4386 PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
4387 0));
4388 rc = hwrm_req_send(bp, req);
4389 if (!rc)
4390 memcpy(buf + byte_offset, output->data, xfer_size);
4391 byte_offset += xfer_size;
4392 } while (!rc && data_length > 0);
4393 hwrm_req_drop(bp, req);
4394
4395 return rc;
4396 }
4397
4398 static int bnxt_get_module_info(struct net_device *dev,
4399 struct ethtool_modinfo *modinfo)
4400 {
4401 u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
4402 struct bnxt *bp = netdev_priv(dev);
4403 int rc;
4404
4405 /* No point in going further if the PHY status indicates
4406 * that the module is not inserted, is powered down, or
4407 * is of type 10GBase-T
4408 */
4409 if (bp->link_info.module_status >
4410 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
4411 return -EOPNOTSUPP;
4412
4413 /* This feature is not supported in older firmware versions */
4414 if (bp->hwrm_spec_code < 0x10202)
4415 return -EOPNOTSUPP;
4416
4417 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
4418 SFF_DIAG_SUPPORT_OFFSET + 1,
4419 data);
4420 if (!rc) {
4421 u8 module_id = data[0];
4422 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
4423
4424 switch (module_id) {
4425 case SFF_MODULE_ID_SFP:
4426 modinfo->type = ETH_MODULE_SFF_8472;
4427 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
4428 if (!diag_supported)
4429 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
4430 break;
4431 case SFF_MODULE_ID_QSFP:
4432 case SFF_MODULE_ID_QSFP_PLUS:
4433 modinfo->type = ETH_MODULE_SFF_8436;
4434 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
4435 break;
4436 case SFF_MODULE_ID_QSFP28:
4437 modinfo->type = ETH_MODULE_SFF_8636;
4438 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
4439 break;
4440 default:
4441 rc = -EOPNOTSUPP;
4442 break;
4443 }
4444 }
4445 return rc;
4446 }
4447
4448 static int bnxt_get_module_eeprom(struct net_device *dev,
4449 struct ethtool_eeprom *eeprom,
4450 u8 *data)
4451 {
4452 struct bnxt *bp = netdev_priv(dev);
4453 u16 start = eeprom->offset, length = eeprom->len;
4454 int rc = 0;
4455
4456 memset(data, 0, eeprom->len);
4457
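/* The flat address space presented to ethtool is the 256-byte A0
 * page followed by the A2 diagnostics page; offsets at or beyond
 * ETH_MODULE_SFF_8436_LEN are redirected to A2 below.
 */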
4458 /* Read A0 portion of the EEPROM */
4459 if (start < ETH_MODULE_SFF_8436_LEN) {
4460 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
4461 length = ETH_MODULE_SFF_8436_LEN - start;
4462 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
4463 start, length, data);
4464 if (rc)
4465 return rc;
4466 start += length;
4467 data += length;
4468 length = eeprom->len - length;
4469 }
4470
4471 /* Read A2 portion of the EEPROM */
4472 if (length) {
4473 start -= ETH_MODULE_SFF_8436_LEN;
4474 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
4475 start, length, data);
4476 }
4477 return rc;
4478 }
4479
4480 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
4481 {
4482 if (bp->link_info.module_status <=
4483 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
4484 return 0;
4485
4486 switch (bp->link_info.module_status) {
4487 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
4488 NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
4489 break;
4490 case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
4491 NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
4492 break;
4493 case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
4494 NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
4495 break;
4496 default:
4497 NL_SET_ERR_MSG_MOD(extack, "Unknown error");
4498 break;
4499 }
4500 return -EINVAL;
4501 }
4502
4503 static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
4504 const struct ethtool_module_eeprom *page_data,
4505 struct netlink_ext_ack *extack)
4506 {
4507 struct bnxt *bp = netdev_priv(dev);
4508 int rc;
4509
4510 rc = bnxt_get_module_status(bp, extack);
4511 if (rc)
4512 return rc;
4513
4514 if (bp->hwrm_spec_code < 0x10202) {
4515 NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
4516 return -EINVAL;
4517 }
4518
4519 if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
4520 NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
4521 return -EINVAL;
4522 }
4523
4524 rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
4525 page_data->page, page_data->bank,
4526 page_data->offset,
4527 page_data->length,
4528 page_data->data);
4529 if (rc) {
4530 NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
4531 return rc;
4532 }
4533 return page_data->length;
4534 }
4535
4536 static int bnxt_nway_reset(struct net_device *dev)
4537 {
4538 int rc = 0;
4539
4540 struct bnxt *bp = netdev_priv(dev);
4541 struct bnxt_link_info *link_info = &bp->link_info;
4542
4543 if (!BNXT_PHY_CFG_ABLE(bp))
4544 return -EOPNOTSUPP;
4545
4546 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
4547 return -EINVAL;
4548
4549 if (netif_running(dev))
4550 rc = bnxt_hwrm_set_link_setting(bp, true, false);
4551
4552 return rc;
4553 }
4554
4555 static int bnxt_set_phys_id(struct net_device *dev,
4556 enum ethtool_phys_id_state state)
4557 {
4558 struct hwrm_port_led_cfg_input *req;
4559 struct bnxt *bp = netdev_priv(dev);
4560 struct bnxt_pf_info *pf = &bp->pf;
4561 struct bnxt_led_cfg *led_cfg;
4562 u8 led_state;
4563 __le16 duration;
4564 int rc, i;
4565
4566 if (!bp->num_leds || BNXT_VF(bp))
4567 return -EOPNOTSUPP;
4568
4569 if (state == ETHTOOL_ID_ACTIVE) {
4570 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
4571 duration = cpu_to_le16(500);
4572 } else if (state == ETHTOOL_ID_INACTIVE) {
4573 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
4574 duration = cpu_to_le16(0);
4575 } else {
4576 return -EINVAL;
4577 }
4578 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
4579 if (rc)
4580 return rc;
4581
4582 req->port_id = cpu_to_le16(pf->port_id);
4583 req->num_leds = bp->num_leds;
4584 led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
4585 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
4586 req->enables |= BNXT_LED_DFLT_ENABLES(i);
4587 led_cfg->led_id = bp->leds[i].led_id;
4588 led_cfg->led_state = led_state;
4589 led_cfg->led_blink_on = duration;
4590 led_cfg->led_blink_off = duration;
4591 led_cfg->led_group_id = bp->leds[i].led_group_id;
4592 }
4593 return hwrm_req_send(bp, req);
4594 }
4595
4596 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
4597 {
4598 struct hwrm_selftest_irq_input *req;
4599 int rc;
4600
4601 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
4602 if (rc)
4603 return rc;
4604
4605 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4606 return hwrm_req_send(bp, req);
4607 }
4608
4609 static int bnxt_test_irq(struct bnxt *bp)
4610 {
4611 int i;
4612
4613 for (i = 0; i < bp->cp_nr_rings; i++) {
4614 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
4615 int rc;
4616
4617 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
4618 if (rc)
4619 return rc;
4620 }
4621 return 0;
4622 }
4623
4624 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
4625 {
4626 struct hwrm_port_mac_cfg_input *req;
4627 int rc;
4628
4629 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
4630 if (rc)
4631 return rc;
4632
4633 req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
4634 if (enable)
4635 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
4636 else
4637 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
4638 return hwrm_req_send(bp, req);
4639 }
4640
4641 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
4642 {
4643 struct hwrm_port_phy_qcaps_output *resp;
4644 struct hwrm_port_phy_qcaps_input *req;
4645 int rc;
4646
4647 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
4648 if (rc)
4649 return rc;
4650
4651 resp = hwrm_req_hold(bp, req);
4652 rc = hwrm_req_send(bp, req);
4653 if (!rc)
4654 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
4655
4656 hwrm_req_drop(bp, req);
4657 return rc;
4658 }
4659
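/* PHY loopback cannot be relied on while autoneg is enabled unless the
 * PHY supports loopback with autoneg (BNXT_PHY_FL_AN_PHY_LPBK), so a
 * supported speed is forced first.  This rationale is a best-effort
 * reading of the code below.
 */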
4660 static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
4661 struct hwrm_port_phy_cfg_input *req)
4662 {
4663 struct bnxt_link_info *link_info = &bp->link_info;
4664 u16 fw_advertising;
4665 u16 fw_speed;
4666 int rc;
4667
4668 if (!link_info->autoneg ||
4669 (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
4670 return 0;
4671
4672 rc = bnxt_query_force_speeds(bp, &fw_advertising);
4673 if (rc)
4674 return rc;
4675
4676 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
4677 if (BNXT_LINK_IS_UP(bp))
4678 fw_speed = bp->link_info.link_speed;
4679 else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
4680 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
4681 else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
4682 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
4683 else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
4684 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
4685 else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
4686 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
4687
4688 req->force_link_speed = cpu_to_le16(fw_speed);
4689 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
4690 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4691 rc = hwrm_req_send(bp, req);
4692 req->flags = 0;
4693 req->force_link_speed = cpu_to_le16(0);
4694 return rc;
4695 }
4696
4697 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
4698 {
4699 struct hwrm_port_phy_cfg_input *req;
4700 int rc;
4701
4702 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
4703 if (rc)
4704 return rc;
4705
4706 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
4707 hwrm_req_hold(bp, req);
4708
4709 if (enable) {
4710 bnxt_disable_an_for_lpbk(bp, req);
4711 if (ext)
4712 req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
4713 else
4714 req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
4715 } else {
4716 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
4717 }
4718 req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
4719 rc = hwrm_req_send(bp, req);
4720 hwrm_req_drop(bp, req);
4721 return rc;
4722 }
4723
4724 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4725 u32 raw_cons, int pkt_size)
4726 {
4727 struct bnxt_napi *bnapi = cpr->bnapi;
4728 struct bnxt_rx_ring_info *rxr;
4729 struct bnxt_sw_rx_bd *rx_buf;
4730 struct rx_cmp *rxcmp;
4731 u16 cp_cons, cons;
4732 u8 *data;
4733 u32 len;
4734 int i;
4735
4736 rxr = bnapi->rx_ring;
4737 cp_cons = RING_CMP(raw_cons);
4738 rxcmp = (struct rx_cmp *)
4739 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
4740 cons = rxcmp->rx_cmp_opaque;
4741 rx_buf = &rxr->rx_buf_ring[cons];
4742 data = rx_buf->data_ptr;
4743 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
4744 if (len != pkt_size)
4745 return -EIO;
4746 i = ETH_ALEN;
4747 if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
4748 return -EIO;
4749 i += ETH_ALEN;
4750 for ( ; i < pkt_size; i++) {
4751 if (data[i] != (u8)(i & 0xff))
4752 return -EIO;
4753 }
4754 return 0;
4755 }
4756
4757 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
4758 int pkt_size)
4759 {
4760 struct tx_cmp *txcmp;
4761 int rc = -EIO;
4762 u32 raw_cons;
4763 u32 cons;
4764 int i;
4765
4766 raw_cons = cpr->cp_raw_cons;
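/* Poll for the loopback completion for at most ~1 ms
 * (200 iterations x 5 us).
 */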
4767 for (i = 0; i < 200; i++) {
4768 cons = RING_CMP(raw_cons);
4769 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
4770
4771 if (!TX_CMP_VALID(txcmp, raw_cons)) {
4772 udelay(5);
4773 continue;
4774 }
4775
4776 /* The valid test of the entry must be done first before
4777 * reading any further.
4778 */
4779 dma_rmb();
4780 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP ||
4781 TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) {
4782 rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
4783 raw_cons = NEXT_RAW_CMP(raw_cons);
4784 raw_cons = NEXT_RAW_CMP(raw_cons);
4785 break;
4786 }
4787 raw_cons = NEXT_RAW_CMP(raw_cons);
4788 }
4789 cpr->cp_raw_cons = raw_cons;
4790 return rc;
4791 }
4792
4793 static int bnxt_run_loopback(struct bnxt *bp)
4794 {
4795 struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
4796 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4797 struct bnxt_cp_ring_info *cpr;
4798 int pkt_size, i = 0;
4799 struct sk_buff *skb;
4800 dma_addr_t map;
4801 u8 *data;
4802 int rc;
4803
4804 cpr = &rxr->bnapi->cp_ring;
4805 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
4806 cpr = rxr->rx_cpr;
4807 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
4808 skb = netdev_alloc_skb(bp->dev, pkt_size);
4809 if (!skb)
4810 return -ENOMEM;
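/* Build the test frame: destination and source MAC are both the
 * device address, followed by an incrementing byte pattern that
 * bnxt_rx_loopback() verifies on receive.
 */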
4811 data = skb_put(skb, pkt_size);
4812 ether_addr_copy(&data[i], bp->dev->dev_addr);
4813 i += ETH_ALEN;
4814 ether_addr_copy(&data[i], bp->dev->dev_addr);
4815 i += ETH_ALEN;
4816 for ( ; i < pkt_size; i++)
4817 data[i] = (u8)(i & 0xff);
4818
4819 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
4820 DMA_TO_DEVICE);
4821 if (dma_mapping_error(&bp->pdev->dev, map)) {
4822 dev_kfree_skb(skb);
4823 return -EIO;
4824 }
4825 bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
4826
4827 /* Sync BD data before updating doorbell */
4828 wmb();
4829
4830 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
4831 rc = bnxt_poll_loopback(bp, cpr, pkt_size);
4832
4833 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
4834 dev_kfree_skb(skb);
4835 return rc;
4836 }
4837
4838 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
4839 {
4840 struct hwrm_selftest_exec_output *resp;
4841 struct hwrm_selftest_exec_input *req;
4842 int rc;
4843
4844 rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
4845 if (rc)
4846 return rc;
4847
4848 hwrm_req_timeout(bp, req, bp->test_info->timeout);
4849 req->flags = test_mask;
4850
4851 resp = hwrm_req_hold(bp, req);
4852 rc = hwrm_req_send(bp, req);
4853 *test_results = resp->test_success;
4854 hwrm_req_drop(bp, req);
4855 return rc;
4856 }
4857
4858 #define BNXT_DRV_TESTS 4
4859 #define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
4860 #define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
4861 #define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
4862 #define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3)
4863
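/* ethtool self-test entry point.  buf[] reports one u64 per test: 0 on
 * pass, non-zero on failure.  Firmware tests come first, followed by the
 * four driver tests defined above (MAC/PHY/external loopback and IRQ).
 */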
4864 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
4865 u64 *buf)
4866 {
4867 struct bnxt *bp = netdev_priv(dev);
4868 bool do_ext_lpbk = false;
4869 bool offline = false;
4870 u8 test_results = 0;
4871 u8 test_mask = 0;
4872 int rc = 0, i;
4873
4874 if (!bp->num_tests || !BNXT_PF(bp))
4875 return;
4876
4877 memset(buf, 0, sizeof(u64) * bp->num_tests);
4878 if (etest->flags & ETH_TEST_FL_OFFLINE &&
4879 bnxt_ulp_registered(bp->edev)) {
4880 etest->flags |= ETH_TEST_FL_FAILED;
4881 netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
4882 return;
4883 }
4884
4885 if (!netif_running(dev)) {
4886 etest->flags |= ETH_TEST_FL_FAILED;
4887 return;
4888 }
4889
4890 if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
4891 (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
4892 do_ext_lpbk = true;
4893
4894 if (etest->flags & ETH_TEST_FL_OFFLINE) {
4895 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
4896 etest->flags |= ETH_TEST_FL_FAILED;
4897 netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
4898 return;
4899 }
4900 offline = true;
4901 }
4902
4903 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
4904 u8 bit_val = 1 << i;
4905
4906 if (!(bp->test_info->offline_mask & bit_val))
4907 test_mask |= bit_val;
4908 else if (offline)
4909 test_mask |= bit_val;
4910 }
4911 if (!offline) {
4912 bnxt_run_fw_tests(bp, test_mask, &test_results);
4913 } else {
4914 bnxt_close_nic(bp, true, false);
4915 bnxt_run_fw_tests(bp, test_mask, &test_results);
4916
4917 buf[BNXT_MACLPBK_TEST_IDX] = 1;
4918 bnxt_hwrm_mac_loopback(bp, true);
4919 msleep(250);
4920 rc = bnxt_half_open_nic(bp);
4921 if (rc) {
4922 bnxt_hwrm_mac_loopback(bp, false);
4923 etest->flags |= ETH_TEST_FL_FAILED;
4924 return;
4925 }
4926 if (bnxt_run_loopback(bp))
4927 etest->flags |= ETH_TEST_FL_FAILED;
4928 else
4929 buf[BNXT_MACLPBK_TEST_IDX] = 0;
4930
4931 bnxt_hwrm_mac_loopback(bp, false);
4932 bnxt_hwrm_phy_loopback(bp, true, false);
4933 msleep(1000);
4934 if (bnxt_run_loopback(bp)) {
4935 buf[BNXT_PHYLPBK_TEST_IDX] = 1;
4936 etest->flags |= ETH_TEST_FL_FAILED;
4937 }
4938 if (do_ext_lpbk) {
4939 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
4940 bnxt_hwrm_phy_loopback(bp, true, true);
4941 msleep(1000);
4942 if (bnxt_run_loopback(bp)) {
4943 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
4944 etest->flags |= ETH_TEST_FL_FAILED;
4945 }
4946 }
4947 bnxt_hwrm_phy_loopback(bp, false, false);
4948 bnxt_half_close_nic(bp);
4949 rc = bnxt_open_nic(bp, true, true);
4950 }
4951 if (rc || bnxt_test_irq(bp)) {
4952 buf[BNXT_IRQ_TEST_IDX] = 1;
4953 etest->flags |= ETH_TEST_FL_FAILED;
4954 }
4955 for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
4956 u8 bit_val = 1 << i;
4957
4958 if ((test_mask & bit_val) && !(test_results & bit_val)) {
4959 buf[i] = 1;
4960 etest->flags |= ETH_TEST_FL_FAILED;
4961 }
4962 }
4963 }
4964
4965 static int bnxt_reset(struct net_device *dev, u32 *flags)
4966 {
4967 struct bnxt *bp = netdev_priv(dev);
4968 bool reload = false;
4969 u32 req = *flags;
4970
4971 if (!req)
4972 return -EINVAL;
4973
4974 if (!BNXT_PF(bp)) {
4975 netdev_err(dev, "Reset is not supported from a VF\n");
4976 return -EOPNOTSUPP;
4977 }
4978
4979 if (pci_vfs_assigned(bp->pdev) &&
4980 !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
4981 netdev_err(dev,
4982 "Reset not allowed when VFs are assigned to VMs\n");
4983 return -EBUSY;
4984 }
4985
4986 if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
4987 /* This feature is not supported in older firmware versions */
4988 if (bp->hwrm_spec_code >= 0x10803) {
4989 if (!bnxt_firmware_reset_chip(dev)) {
4990 netdev_info(dev, "Firmware reset request successful.\n");
4991 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
4992 reload = true;
4993 *flags &= ~BNXT_FW_RESET_CHIP;
4994 }
4995 } else if (req == BNXT_FW_RESET_CHIP) {
4996 return -EOPNOTSUPP; /* only request, fail hard */
4997 }
4998 }
4999
5000 if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
5001 /* This feature is not supported in older firmware versions */
5002 if (bp->hwrm_spec_code >= 0x10803) {
5003 if (!bnxt_firmware_reset_ap(dev)) {
5004 netdev_info(dev, "Reset application processor successful.\n");
5005 reload = true;
5006 *flags &= ~BNXT_FW_RESET_AP;
5007 }
5008 } else if (req == BNXT_FW_RESET_AP) {
5009 return -EOPNOTSUPP; /* only request, fail hard */
5010 }
5011 }
5012
5013 if (reload)
5014 netdev_info(dev, "Reload driver to complete reset\n");
5015
5016 return 0;
5017 }
5018
5019 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
5020 {
5021 struct bnxt *bp = netdev_priv(dev);
5022
5023 if (dump->flag > BNXT_DUMP_CRASH) {
5024 netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
5025 return -EINVAL;
5026 }
5027
5028 if (dump->flag == BNXT_DUMP_CRASH) {
5029 if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
5030 (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
5031 netdev_info(dev,
5032 "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
5033 return -EOPNOTSUPP;
5034 } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
5035 netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
5036 return -EOPNOTSUPP;
5037 }
5038 }
5039
5040 bp->dump_flag = dump->flag;
5041 return 0;
5042 }
5043
5044 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
5045 {
5046 struct bnxt *bp = netdev_priv(dev);
5047
5048 if (bp->hwrm_spec_code < 0x10801)
5049 return -EOPNOTSUPP;
5050
5051 dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
5052 bp->ver_resp.hwrm_fw_min_8b << 16 |
5053 bp->ver_resp.hwrm_fw_bld_8b << 8 |
5054 bp->ver_resp.hwrm_fw_rsvd_8b;
5055
5056 dump->flag = bp->dump_flag;
5057 dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
5058 return 0;
5059 }
5060
5061 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
5062 void *buf)
5063 {
5064 struct bnxt *bp = netdev_priv(dev);
5065
5066 if (bp->hwrm_spec_code < 0x10801)
5067 return -EOPNOTSUPP;
5068
5069 memset(buf, 0, dump->len);
5070
5071 dump->flag = bp->dump_flag;
5072 return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
5073 }
5074
5075 static int bnxt_get_ts_info(struct net_device *dev,
5076 struct kernel_ethtool_ts_info *info)
5077 {
5078 struct bnxt *bp = netdev_priv(dev);
5079 struct bnxt_ptp_cfg *ptp;
5080
5081 ptp = bp->ptp_cfg;
5082 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
5083
5084 if (!ptp)
5085 return 0;
5086
5087 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
5088 SOF_TIMESTAMPING_RX_HARDWARE |
5089 SOF_TIMESTAMPING_RAW_HARDWARE;
5090 if (ptp->ptp_clock)
5091 info->phc_index = ptp_clock_index(ptp->ptp_clock);
5092
5093 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5094
5095 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5096 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5097 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5098
5099 if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
5100 info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
5101 return 0;
5102 }
5103
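/* One-time ethtool setup.  Fetches the package version if the firmware
 * did not report one, then queries HWRM_SELFTEST_QLIST on the PF to size
 * and name the self-test table: the firmware-defined tests plus the
 * BNXT_DRV_TESTS driver-level loopback and interrupt tests, capped at
 * BNXT_MAX_TEST entries.
 */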
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp;
	struct hwrm_selftest_qlist_input *req;
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	bp->num_tests = 0;
	if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
		return;

	test_info = bp->test_info;
	if (!test_info) {
		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
		if (!test_info)
			return;
		bp->test_info = test_info;
	}

	if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
		return;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send_silent(bp, req);
	if (rc)
		goto ethtool_init_exit;

	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test_name[i];

		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt_test (offline)");
		} else {
			snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
				 fw_str, test_info->offline_mask & (1 << i) ?
					 "offline" : "online");
		}
	}

ethtool_init_exit:
	hwrm_req_drop(bp, req);
}

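/* The standard statistics groups below (IEEE 802.3 PHY/MAC/control and
 * RMON) are served from the port counters that firmware mirrors into
 * host memory; they are only populated on PFs that negotiated the
 * PORT_STATS (or, for PHY and link stats, PORT_STATS_EXT) capability.
 */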
static void bnxt_get_eth_phy_stats(struct net_device *dev,
				   struct ethtool_eth_phy_stats *phy_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	phy_stats->SymbolErrorDuringCarrier =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
}

static void bnxt_get_eth_mac_stats(struct net_device *dev,
				   struct ethtool_eth_mac_stats *mac_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	mac_stats->FramesReceivedOK =
		BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
	mac_stats->FramesTransmittedOK =
		BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
	mac_stats->FrameCheckSequenceErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
	mac_stats->AlignmentErrors =
		BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
	mac_stats->OutOfRangeLengthField =
		BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
}

static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
				    struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	ctrl_stats->MACControlFramesReceived =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
}

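/* Frame-size buckets backing the RMON histogram counters below; the
 * zeroed sentinel entry terminates the list for the ethtool core.
 */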
static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 9216 },
	{ 9217, 16383 },
	{}
};

static void bnxt_get_rmon_stats(struct net_device *dev,
				struct ethtool_rmon_stats *rmon_stats,
				const struct ethtool_rmon_hist_range **ranges)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx, *tx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
		return;

	rx = bp->port_stats.sw_stats;
	tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;

	rmon_stats->jabbers =
		BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
	rmon_stats->oversize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
	rmon_stats->undersize_pkts =
		BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);

	rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
	rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
	rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
	rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
	rmon_stats->hist[4] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
	rmon_stats->hist[5] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
	rmon_stats->hist[6] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
	rmon_stats->hist[7] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
	rmon_stats->hist[8] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
	rmon_stats->hist[9] =
		BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);

	rmon_stats->hist_tx[0] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
	rmon_stats->hist_tx[1] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
	rmon_stats->hist_tx[2] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
	rmon_stats->hist_tx[3] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
	rmon_stats->hist_tx[4] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
	rmon_stats->hist_tx[5] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
	rmon_stats->hist_tx[6] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
	rmon_stats->hist_tx[7] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
	rmon_stats->hist_tx[8] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
	rmon_stats->hist_tx[9] =
		BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);

	*ranges = bnxt_rmon_ranges;
}

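/* Report hardware TX timestamping statistics accumulated by the PTP
 * code; the counters are left at their defaults when PTP is not
 * configured on this device.
 */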
static void bnxt_get_ptp_stats(struct net_device *dev,
			       struct ethtool_ts_stats *ts_stats)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	if (ptp) {
		ts_stats->pkts = ptp->stats.ts_pkts;
		ts_stats->lost = ptp->stats.ts_lost;
		ts_stats->err = atomic64_read(&ptp->stats.ts_err);
	}
}

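/* link_down_events counts link drops observed by the port, taken from
 * the extended RX port statistics (PF-only).
 */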
static void bnxt_get_link_ext_stats(struct net_device *dev,
				    struct ethtool_link_ext_stats *stats)
{
	struct bnxt *bp = netdev_priv(dev);
	u64 *rx;

	if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return;

	rx = bp->rx_port_stats_ext.sw_stats;
	stats->link_down_events =
		*(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
}

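/* Release the self-test state allocated in bnxt_ethtool_init(). */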
void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}

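/* Shared by PF and VF netdevs; callbacks that rely on PF-only port
 * statistics or optional firmware capabilities check for them
 * internally and no-op (or return an error) when unavailable.
 */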
const struct ethtool_ops bnxt_ethtool_ops = {
	.cap_link_lanes_supported = 1,
	.rxfh_per_ctx_key = 1,
	.rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
	.rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
	.rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS_IRQ |
				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
				     ETHTOOL_COALESCE_STATS_BLOCK_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_USE_CQE,
	.get_link_ksettings = bnxt_get_link_ksettings,
	.set_link_ksettings = bnxt_set_link_ksettings,
	.get_fec_stats = bnxt_get_fec_stats,
	.get_fecparam = bnxt_get_fecparam,
	.set_fecparam = bnxt_set_fecparam,
	.get_pause_stats = bnxt_get_pause_stats,
	.get_pauseparam = bnxt_get_pauseparam,
	.set_pauseparam = bnxt_set_pauseparam,
	.get_drvinfo = bnxt_get_drvinfo,
	.get_regs_len = bnxt_get_regs_len,
	.get_regs = bnxt_get_regs,
	.get_wol = bnxt_get_wol,
	.set_wol = bnxt_set_wol,
	.get_coalesce = bnxt_get_coalesce,
	.set_coalesce = bnxt_set_coalesce,
	.get_msglevel = bnxt_get_msglevel,
	.set_msglevel = bnxt_set_msglevel,
	.get_sset_count = bnxt_get_sset_count,
	.get_strings = bnxt_get_strings,
	.get_ethtool_stats = bnxt_get_ethtool_stats,
	.set_ringparam = bnxt_set_ringparam,
	.get_ringparam = bnxt_get_ringparam,
	.get_channels = bnxt_get_channels,
	.set_channels = bnxt_set_channels,
	.get_rxnfc = bnxt_get_rxnfc,
	.set_rxnfc = bnxt_set_rxnfc,
	.get_rxfh_indir_size = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size = bnxt_get_rxfh_key_size,
	.get_rxfh = bnxt_get_rxfh,
	.set_rxfh = bnxt_set_rxfh,
	.create_rxfh_context = bnxt_create_rxfh_context,
	.modify_rxfh_context = bnxt_modify_rxfh_context,
	.remove_rxfh_context = bnxt_remove_rxfh_context,
	.flash_device = bnxt_flash_device,
	.get_eeprom_len = bnxt_get_eeprom_len,
	.get_eeprom = bnxt_get_eeprom,
	.set_eeprom = bnxt_set_eeprom,
	.get_link = bnxt_get_link,
	.get_link_ext_stats = bnxt_get_link_ext_stats,
	.get_eee = bnxt_get_eee,
	.set_eee = bnxt_set_eee,
	.get_module_info = bnxt_get_module_info,
	.get_module_eeprom = bnxt_get_module_eeprom,
	.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
	.nway_reset = bnxt_nway_reset,
	.set_phys_id = bnxt_set_phys_id,
	.self_test = bnxt_self_test,
	.get_ts_info = bnxt_get_ts_info,
	.reset = bnxt_reset,
	.set_dump = bnxt_set_dump,
	.get_dump_flag = bnxt_get_dump_flag,
	.get_dump_data = bnxt_get_dump_data,
	.get_eth_phy_stats = bnxt_get_eth_phy_stats,
	.get_eth_mac_stats = bnxt_get_eth_mac_stats,
	.get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
	.get_rmon_stats = bnxt_get_rmon_stats,
	.get_ts_stats = bnxt_get_ptp_stats,
};