// SPDX-License-Identifier: GPL-2.0
/* Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/sfp.h>

#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_port.h"
#include "hinic_tx.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define SET_LINK_STR_MAX_LEN	128

#define GET_SUPPORTED_MODE	0
#define GET_ADVERTISED_MODE	1

#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode)	\
	((ecmd)->supported |=	\
	 (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode)	\
	((ecmd)->advertising |=	\
	 (1UL << hw_to_ethtool_link_mode_table[mode].link_mode_bit))
#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode)	\
	((ecmd)->supported |= SUPPORTED_##mode)
#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode)	\
	((ecmd)->advertising |= ADVERTISED_##mode)

#define COALESCE_PENDING_LIMIT_UNIT	8
#define COALESCE_TIMER_CFG_UNIT		9
#define COALESCE_ALL_QUEUE		0xFFFF
#define COALESCE_MAX_PENDING_LIMIT	(255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG		(255 * COALESCE_TIMER_CFG_UNIT)
#define OBJ_STR_MAX_LEN			32
struct hw2ethtool_link_mode {
	enum ethtool_link_mode_bit_indices link_mode_bit;
	u32 speed;
	enum hinic_link_mode hw_link_mode;
};

struct cmd_link_settings {
	u64 supported;
	u64 advertising;

	u32 speed;
	u8 duplex;
	u8 port;
	u8 autoneg;
};

static u32 hw_to_ethtool_speed[LINK_SPEED_LEVELS] = {
	SPEED_10, SPEED_100,
	SPEED_1000, SPEED_10000,
	SPEED_25000, SPEED_40000,
	SPEED_100000
};

static struct hw2ethtool_link_mode
	hw_to_ethtool_link_mode_table[HINIC_LINK_MODE_NUMBERS] = {
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
		.speed = SPEED_10000,
		.hw_link_mode = HINIC_10GE_BASE_KR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
		.speed = SPEED_40000,
		.hw_link_mode = HINIC_40GE_BASE_KR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
		.speed = SPEED_40000,
		.hw_link_mode = HINIC_40GE_BASE_CR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
		.speed = SPEED_100000,
		.hw_link_mode = HINIC_100GE_BASE_KR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		.speed = SPEED_100000,
		.hw_link_mode = HINIC_100GE_BASE_CR4,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_KR_S,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_CR_S,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_KR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		.speed = SPEED_25000,
		.hw_link_mode = HINIC_25GE_BASE_CR,
	},
	{
		.link_mode_bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
		.speed = SPEED_1000,
		.hw_link_mode = HINIC_GE_BASE_KX,
	},
};

#define LP_DEFAULT_TIME		5 /* seconds */
#define LP_PKT_LEN		1514

#define PORT_DOWN_ERR_IDX	0
enum diag_test_index {
	INTERNAL_LP_TEST = 0,
	EXTERNAL_LP_TEST = 1,
	DIAG_TEST_MAX = 2,
};

static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
			   enum hinic_speed speed)
{
	switch (speed) {
	case HINIC_SPEED_10MB_LINK:
		link_ksettings->base.speed = SPEED_10;
		break;

	case HINIC_SPEED_100MB_LINK:
		link_ksettings->base.speed = SPEED_100;
		break;

	case HINIC_SPEED_1000MB_LINK:
		link_ksettings->base.speed = SPEED_1000;
		break;

	case HINIC_SPEED_10GB_LINK:
		link_ksettings->base.speed = SPEED_10000;
		break;

	case HINIC_SPEED_25GB_LINK:
		link_ksettings->base.speed = SPEED_25000;
		break;

	case HINIC_SPEED_40GB_LINK:
		link_ksettings->base.speed = SPEED_40000;
		break;

	case HINIC_SPEED_100GB_LINK:
		link_ksettings->base.speed = SPEED_100000;
		break;

	default:
		link_ksettings->base.speed = SPEED_UNKNOWN;
		break;
	}
}

static int hinic_get_link_mode_index(enum hinic_link_mode link_mode)
{
	int i = 0;

	for (i = 0; i < HINIC_LINK_MODE_NUMBERS; i++) {
		if (link_mode == hw_to_ethtool_link_mode_table[i].hw_link_mode)
			break;
	}

	return i;
}

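/* Translate a hardware link-mode bitmap into ethtool link modes: each bit set
 * in hw_link_mode is looked up in hw_to_ethtool_link_mode_table and added to
 * either the supported or the advertised mask, depending on @name.
 */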
static void hinic_add_ethtool_link_mode(struct cmd_link_settings *link_settings,
					enum hinic_link_mode hw_link_mode,
					u32 name)
{
	enum hinic_link_mode link_mode;
	int idx = 0;

	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
		if (hw_link_mode & ((u32)1 << link_mode)) {
			idx = hinic_get_link_mode_index(link_mode);
			if (idx >= HINIC_LINK_MODE_NUMBERS)
				continue;

			if (name == GET_SUPPORTED_MODE)
				ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE
					(link_settings, idx);
			else
				ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE
					(link_settings, idx);
		}
	}
}

static void hinic_link_port_type(struct cmd_link_settings *link_settings,
				 enum hinic_port_type port_type)
{
	switch (port_type) {
	case HINIC_PORT_ELEC:
	case HINIC_PORT_TP:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP);
		link_settings->port = PORT_TP;
		break;

	case HINIC_PORT_AOC:
	case HINIC_PORT_FIBRE:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
		link_settings->port = PORT_FIBRE;
		break;

	case HINIC_PORT_COPPER:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE);
		link_settings->port = PORT_DA;
		break;

	case HINIC_PORT_BACKPLANE:
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane);
		ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane);
		link_settings->port = PORT_NONE;
		break;

	default:
		link_settings->port = PORT_OTHER;
		break;
	}
}

static int hinic_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings
				    *link_ksettings)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_link_mode_cmd link_mode = { 0 };
	struct hinic_pause_config pause_info = { 0 };
	struct cmd_link_settings settings = { 0 };
	enum hinic_port_link_state link_state;
	struct hinic_port_cap port_cap;
	int err;

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);

	link_ksettings->base.speed = SPEED_UNKNOWN;
	link_ksettings->base.autoneg = AUTONEG_DISABLE;
	link_ksettings->base.duplex = DUPLEX_UNKNOWN;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return err;

	hinic_link_port_type(&settings, port_cap.port_type);
	link_ksettings->base.port = settings.port;

	err = hinic_port_link_state(nic_dev, &link_state);
	if (err)
		return err;

	if (link_state == HINIC_LINK_STATE_UP) {
		set_link_speed(link_ksettings, port_cap.speed);
		link_ksettings->base.duplex =
			(port_cap.duplex == HINIC_DUPLEX_FULL) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}

	if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED))
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE)
		link_ksettings->base.autoneg = AUTONEG_ENABLE;

	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
	if (err || link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
		return -EIO;

	hinic_add_ethtool_link_mode(&settings, link_mode.supported,
				    GET_SUPPORTED_MODE);
	hinic_add_ethtool_link_mode(&settings, link_mode.advertised,
				    GET_ADVERTISED_MODE);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
		err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
		if (err)
			return err;
		ETHTOOL_ADD_SUPPORTED_LINK_MODE(&settings, Pause);
		if (pause_info.rx_pause && pause_info.tx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
		} else if (pause_info.tx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
		} else if (pause_info.rx_pause) {
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Pause);
			ETHTOOL_ADD_ADVERTISED_LINK_MODE(&settings, Asym_Pause);
		}
	}

	bitmap_copy(link_ksettings->link_modes.supported,
		    (unsigned long *)&settings.supported,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_copy(link_ksettings->link_modes.advertising,
		    (unsigned long *)&settings.advertising,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);

	return 0;
}

static int hinic_ethtool_to_hw_speed_level(u32 speed)
{
	int i;

	for (i = 0; i < LINK_SPEED_LEVELS; i++) {
		if (hw_to_ethtool_speed[i] == speed)
			break;
	}

	return i;
}

static bool hinic_is_support_speed(enum hinic_link_mode supported_link,
				   u32 speed)
{
	enum hinic_link_mode link_mode;
	int idx;

	for (link_mode = 0; link_mode < HINIC_LINK_MODE_NUMBERS; link_mode++) {
		if (!(supported_link & ((u32)1 << link_mode)))
			continue;

		idx = hinic_get_link_mode_index(link_mode);
		if (idx >= HINIC_LINK_MODE_NUMBERS)
			continue;

		if (hw_to_ethtool_link_mode_table[idx].speed == speed)
			return true;
	}

	return false;
}

static bool hinic_is_speed_legal(struct hinic_dev *nic_dev, u32 speed)
{
	struct hinic_link_mode_cmd link_mode = { 0 };
	struct net_device *netdev = nic_dev->netdev;
	enum nic_speed_level speed_level = 0;
	int err;

	err = hinic_get_link_mode(nic_dev->hwdev, &link_mode);
	if (err)
		return false;

	if (link_mode.supported == HINIC_SUPPORTED_UNKNOWN ||
	    link_mode.advertised == HINIC_SUPPORTED_UNKNOWN)
		return false;

	speed_level = hinic_ethtool_to_hw_speed_level(speed);
	if (speed_level >= LINK_SPEED_LEVELS ||
	    !hinic_is_support_speed(link_mode.supported, speed)) {
		netif_err(nic_dev, drv, netdev,
			  "Unsupported speed: %d\n", speed);
		return false;
	}

	return true;
}

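/* Work out which link-settings fields actually need to be written to
 * hardware: autoneg is always requested when the port supports it, and a
 * speed is only accepted when autoneg is disabled and the speed is supported
 * by the port.
 */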
static int get_link_settings_type(struct hinic_dev *nic_dev,
				  u8 autoneg, u32 speed, u32 *set_settings)
{
	struct hinic_port_cap port_cap = { 0 };
	int err;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return err;

	/* always set autonegotiation */
	if (port_cap.autoneg_cap)
		*set_settings |= HILINK_LINK_SET_AUTONEG;

	if (autoneg == AUTONEG_ENABLE) {
		if (!port_cap.autoneg_cap) {
			netif_err(nic_dev, drv, nic_dev->netdev, "Autoneg not supported\n");
			return -EOPNOTSUPP;
		}
	} else if (speed != (u32)SPEED_UNKNOWN) {
		/* set speed only when autoneg is disabled */
		if (!hinic_is_speed_legal(nic_dev, speed))
			return -EINVAL;
		*set_settings |= HILINK_LINK_SET_SPEED;
	} else {
		netif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int set_link_settings_separate_cmd(struct hinic_dev *nic_dev,
					  u32 set_settings, u8 autoneg,
					  u32 speed)
{
	enum nic_speed_level speed_level = 0;
	int err = 0;

	if (set_settings & HILINK_LINK_SET_AUTONEG) {
		err = hinic_set_autoneg(nic_dev->hwdev,
					(autoneg == AUTONEG_ENABLE));
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev, "%s autoneg failed\n",
				  (autoneg == AUTONEG_ENABLE) ?
				  "Enable" : "Disable");
		else
			netif_info(nic_dev, drv, nic_dev->netdev, "%s autoneg successfully\n",
				   (autoneg == AUTONEG_ENABLE) ?
				   "Enable" : "Disable");
	}

	if (!err && (set_settings & HILINK_LINK_SET_SPEED)) {
		speed_level = hinic_ethtool_to_hw_speed_level(speed);
		err = hinic_set_speed(nic_dev->hwdev, speed_level);
		if (err)
			netif_err(nic_dev, drv, nic_dev->netdev, "Set speed %d failed\n",
				  speed);
		else
			netif_info(nic_dev, drv, nic_dev->netdev, "Set speed %d successfully\n",
				   speed);
	}

	return err;
}

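/* Program the requested link settings through the single combined
 * link-settings management command; if the firmware reports that command as
 * unsupported (HINIC_MGMT_CMD_UNSUPPORTED), fall back to the separate
 * autoneg and speed commands in set_link_settings_separate_cmd().
 */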
static int hinic_set_settings_to_hw(struct hinic_dev *nic_dev,
				    u32 set_settings, u8 autoneg, u32 speed)
{
	struct hinic_link_ksettings_info settings = {0};
	char set_link_str[SET_LINK_STR_MAX_LEN] = {0};
	struct net_device *netdev = nic_dev->netdev;
	enum nic_speed_level speed_level = 0;
	size_t len;
	int err;

	err = snprintf(set_link_str, SET_LINK_STR_MAX_LEN, "%s",
		       (set_settings & HILINK_LINK_SET_AUTONEG) ?
		       (autoneg ? "autoneg enable " : "autoneg disable ") : "");
	if (err < 0 || err >= SET_LINK_STR_MAX_LEN) {
		netif_err(nic_dev, drv, netdev, "Failed to snprintf link state, function return(%d) and dest_len(%d)\n",
			  err, SET_LINK_STR_MAX_LEN);
		return -EFAULT;
	}

	if (set_settings & HILINK_LINK_SET_SPEED) {
		speed_level = hinic_ethtool_to_hw_speed_level(speed);
		/* append to the existing string instead of passing the
		 * destination buffer as its own source argument
		 */
		len = strlen(set_link_str);
		err = snprintf(set_link_str + len, SET_LINK_STR_MAX_LEN - len,
			       "speed %d ", speed);
		if (err <= 0 || err >= SET_LINK_STR_MAX_LEN - len) {
			netif_err(nic_dev, drv, netdev, "Failed to snprintf link speed, function return(%d) and dest_len(%d)\n",
				  err, SET_LINK_STR_MAX_LEN);
			return -EFAULT;
		}
	}

	settings.func_id = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
	settings.valid_bitmap = set_settings;
	settings.autoneg = autoneg;
	settings.speed = speed_level;

	err = hinic_set_link_settings(nic_dev->hwdev, &settings);
	if (err != HINIC_MGMT_CMD_UNSUPPORTED) {
		if (err)
			netif_err(nic_dev, drv, netdev, "Set %s failed\n",
				  set_link_str);
		else
			netif_info(nic_dev, drv, netdev, "Set %s successfully\n",
				   set_link_str);

		return err;
	}

	return set_link_settings_separate_cmd(nic_dev, set_settings, autoneg,
					      speed);
}

static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u32 set_settings = 0;
	int err;

	err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings);
	if (err)
		return err;

	if (set_settings)
		err = hinic_set_settings_to_hw(nic_dev, set_settings,
					       autoneg, speed);
	else
		netif_info(nic_dev, drv, netdev, "Nothing changed, exit without setting anything\n");

	return err;
}

static int hinic_set_link_ksettings(struct net_device *netdev, const struct
				    ethtool_link_ksettings *link_settings)
{
	/* only support to set autoneg and speed */
	return set_link_settings(netdev, link_settings->base.autoneg,
				 link_settings->base.speed);
}

static void hinic_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *info)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 mgmt_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	int err;

	strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info));

	err = hinic_get_mgmt_version(nic_dev, mgmt_ver);
	if (err)
		return;

	snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver);
}

static void hinic_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	ring->rx_max_pending = HINIC_MAX_QUEUE_DEPTH;
	ring->tx_max_pending = HINIC_MAX_QUEUE_DEPTH;
	ring->rx_pending = nic_dev->rq_depth;
	ring->tx_pending = nic_dev->sq_depth;
}

static int check_ringparam_valid(struct hinic_dev *nic_dev,
				 struct ethtool_ringparam *ring)
{
	if (ring->rx_jumbo_pending || ring->rx_mini_pending) {
		netif_err(nic_dev, drv, nic_dev->netdev,
			  "Unsupported rx_jumbo_pending/rx_mini_pending\n");
		return -EINVAL;
	}

	if (ring->tx_pending > HINIC_MAX_QUEUE_DEPTH ||
	    ring->tx_pending < HINIC_MIN_QUEUE_DEPTH ||
	    ring->rx_pending > HINIC_MAX_QUEUE_DEPTH ||
	    ring->rx_pending < HINIC_MIN_QUEUE_DEPTH) {
		netif_err(nic_dev, drv, nic_dev->netdev,
			  "Queue depth out of range [%d-%d]\n",
			  HINIC_MIN_QUEUE_DEPTH, HINIC_MAX_QUEUE_DEPTH);
		return -EINVAL;
	}

	return 0;
}

static int hinic_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 new_sq_depth, new_rq_depth;
	int err;

	err = check_ringparam_valid(nic_dev, ring);
	if (err)
		return err;

	new_sq_depth = (u16)(1U << (u16)ilog2(ring->tx_pending));
	new_rq_depth = (u16)(1U << (u16)ilog2(ring->rx_pending));

	if (new_sq_depth == nic_dev->sq_depth &&
	    new_rq_depth == nic_dev->rq_depth)
		return 0;

	netif_info(nic_dev, drv, netdev,
		   "Change Tx/Rx ring depth from %d/%d to %d/%d\n",
		   nic_dev->sq_depth, nic_dev->rq_depth,
		   new_sq_depth, new_rq_depth);

	nic_dev->sq_depth = new_sq_depth;
	nic_dev->rq_depth = new_rq_depth;

	if (netif_running(netdev)) {
		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
		err = hinic_close(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to close netdev\n");
			return -EFAULT;
		}

		err = hinic_open(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to open netdev\n");
			return -EFAULT;
		}
	}

	return 0;
}

static int __hinic_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coal, u16 queue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_intr_coal_info *rx_intr_coal_info;
	struct hinic_intr_coal_info *tx_intr_coal_info;

	if (queue == COALESCE_ALL_QUEUE) {
		/* get tx/rx irq0 as default parameters */
		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0];
		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0];
	} else {
		if (queue >= nic_dev->num_qps) {
			netif_err(nic_dev, drv, netdev,
				  "Invalid queue_id: %d\n", queue);
			return -EINVAL;
		}
		rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue];
		tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue];
	}

	/* coalesce_timer is in unit of 9us */
	coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg *
			COALESCE_TIMER_CFG_UNIT;
	/* coalesced_frames is in unit of 8 */
	coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt *
			COALESCE_PENDING_LIMIT_UNIT;
	coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg *
			COALESCE_TIMER_CFG_UNIT;
	coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt *
			COALESCE_PENDING_LIMIT_UNIT;

	return 0;
}

static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
{
	if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
	    coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT ||
	    coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG ||
	    coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT)
		return -ERANGE;

	return 0;
}

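/* Cache the new coalesce parameters for one TX or RX queue and, when the
 * interface is up and the queue is in use, program them into the queue's
 * MSI-X entry via hinic_set_interrupt_cfg().
 */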
static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
			      struct hinic_intr_coal_info *coal,
			      bool set_rx_coal)
{
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_msix_config interrupt_info = {0};
	struct net_device *netdev = nic_dev->netdev;
	u16 msix_idx;
	int err;

	intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
		    &nic_dev->tx_intr_coalesce[q_id];

	intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
	intr_coal->pending_limt = coal->pending_limt;

	/* netdev not running or qp not in use,
	 * no need to set coalesce to hw
	 */
	if (!(nic_dev->flags & HINIC_INTF_UP) ||
	    q_id >= nic_dev->num_qps)
		return 0;

	msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
		   nic_dev->txqs[q_id].sq->msix_entry;
	interrupt_info.msix_index = msix_idx;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
	if (err)
		netif_warn(nic_dev, drv, netdev,
			   "Failed to set %s queue%d coalesce",
			   set_rx_coal ? "rx" : "tx", q_id);

	return err;
}

static int __set_hw_coal_param(struct hinic_dev *nic_dev,
			       struct hinic_intr_coal_info *intr_coal,
			       u16 queue, bool set_rx_coal)
{
	int err;
	u16 i;

	if (queue == COALESCE_ALL_QUEUE) {
		for (i = 0; i < nic_dev->max_qps; i++) {
			err = set_queue_coalesce(nic_dev, i, intr_coal,
						 set_rx_coal);
			if (err)
				return err;
		}
	} else {
		if (queue >= nic_dev->num_qps) {
			netif_err(nic_dev, drv, nic_dev->netdev,
				  "Invalid queue_id: %d\n", queue);
			return -EINVAL;
		}
		err = set_queue_coalesce(nic_dev, queue, intr_coal,
					 set_rx_coal);
		if (err)
			return err;
	}

	return 0;
}

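/* Convert the ethtool coalesce request into hardware units (timer ticks of
 * COALESCE_TIMER_CFG_UNIT microseconds, pending counts in multiples of
 * COALESCE_PENDING_LIMIT_UNIT frames) and apply it to one queue or, for
 * COALESCE_ALL_QUEUE, to every queue. A value that rounds down to zero
 * disables coalescing for that direction.
 */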
static int __hinic_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *coal, u16 queue)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_intr_coal_info rx_intr_coal = {0};
	struct hinic_intr_coal_info tx_intr_coal = {0};
	bool set_rx_coal = false;
	bool set_tx_coal = false;
	int err;

	err = is_coalesce_exceed_limit(coal);
	if (err)
		return err;

	if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
		rx_intr_coal.coalesce_timer_cfg =
		(u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
		rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
				COALESCE_PENDING_LIMIT_UNIT);
		set_rx_coal = true;
	}

	if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
		tx_intr_coal.coalesce_timer_cfg =
		(u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
		tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
		COALESCE_PENDING_LIMIT_UNIT);
		set_tx_coal = true;
	}

	/* setting coalesce timer or pending limit to zero will disable
	 * coalesce
	 */
	if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
			    !rx_intr_coal.pending_limt))
		netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
	if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
			    !tx_intr_coal.pending_limt))
		netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");

	if (set_rx_coal) {
		err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
		if (err)
			return err;
	}
	if (set_tx_coal) {
		err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
		if (err)
			return err;
	}
	return 0;
}

static int hinic_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *coal)
{
	return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}

static int hinic_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *coal)
{
	return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
}

static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
					struct ethtool_coalesce *coal)
{
	return __hinic_get_coalesce(netdev, coal, queue);
}

static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
					struct ethtool_coalesce *coal)
{
	return __hinic_set_coalesce(netdev, coal, queue);
}

static void hinic_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_pause_config pause_info = {0};
	struct hinic_nic_cfg *nic_cfg;
	int err;

	nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;

	err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
	if (!err) {
		pause->autoneg = pause_info.auto_neg;
		if (nic_cfg->pause_set || !pause_info.auto_neg) {
			pause->rx_pause = nic_cfg->rx_pause;
			pause->tx_pause = nic_cfg->tx_pause;
		} else {
			pause->rx_pause = pause_info.rx_pause;
			pause->tx_pause = pause_info.tx_pause;
		}
	}
}

static int hinic_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_pause_config pause_info = {0};
	struct hinic_port_cap port_cap = {0};
	int err;

	err = hinic_port_get_cap(nic_dev, &port_cap);
	if (err)
		return -EIO;

	if (pause->autoneg != port_cap.autoneg_state)
		return -EOPNOTSUPP;

	pause_info.auto_neg = pause->autoneg;
	pause_info.rx_pause = pause->rx_pause;
	pause_info.tx_pause = pause->tx_pause;

	mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
	err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
	if (err) {
		mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);
		return err;
	}
	nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true;
	nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg;
	nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause;
	nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause;
	mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex);

	return 0;
}

static void hinic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *channels)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;

	channels->max_combined = nic_dev->max_qps;
	channels->combined_count = hinic_hwdev_num_qps(hwdev);
}

static int hinic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *channels)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	unsigned int count = channels->combined_count;
	int err;

	netif_info(nic_dev, drv, netdev, "Set max combined queue number from %d to %d\n",
		   hinic_hwdev_num_qps(nic_dev->hwdev), count);

	if (netif_running(netdev)) {
		netif_info(nic_dev, drv, netdev, "Restarting netdev\n");
		hinic_close(netdev);

		nic_dev->hwdev->nic_cap.num_qps = count;

		err = hinic_open(netdev);
		if (err) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to open netdev\n");
			return -EFAULT;
		}
	} else {
		nic_dev->hwdev->nic_cap.num_qps = count;
	}

	return 0;
}

static int hinic_get_rss_hash_opts(struct hinic_dev *nic_dev,
				   struct ethtool_rxnfc *cmd)
{
	struct hinic_rss_type rss_type = { 0 };
	int err;

	cmd->data = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return 0;

	err = hinic_get_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
				 &rss_type);
	if (err)
		return err;

	cmd->data = RXH_IP_SRC | RXH_IP_DST;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (rss_type.tcp_ipv4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case TCP_V6_FLOW:
		if (rss_type.tcp_ipv6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (rss_type.udp_ipv4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (rss_type.udp_ipv6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		cmd->data = 0;
		return -EINVAL;
	}

	return 0;
}

static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd,
			       struct hinic_rss_type *rss_type)
{
	u8 rss_l4_en = 0;

	switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		rss_l4_en = 0;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		rss_l4_en = 1;
		break;
	default:
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		rss_type->tcp_ipv4 = rss_l4_en;
		break;
	case TCP_V6_FLOW:
		rss_type->tcp_ipv6 = rss_l4_en;
		break;
	case UDP_V4_FLOW:
		rss_type->udp_ipv4 = rss_l4_en;
		break;
	case UDP_V6_FLOW:
		rss_type->udp_ipv6 = rss_l4_en;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hinic_set_rss_hash_opts(struct hinic_dev *nic_dev,
				   struct ethtool_rxnfc *cmd)
{
	struct hinic_rss_type *rss_type = &nic_dev->rss_type;
	int err;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE)) {
		cmd->data = 0;
		return -EOPNOTSUPP;
	}

	/* RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
		RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SRC and DEST fields for hashing */
	if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST))
		return -EINVAL;

	err = hinic_get_rss_type(nic_dev,
				 nic_dev->rss_tmpl_idx, rss_type);
	if (err)
		return -EFAULT;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		err = set_l4_rss_hash_ops(cmd, rss_type);
		if (err)
			return err;
		break;
	case IPV4_FLOW:
		rss_type->ipv4 = 1;
		break;
	case IPV6_FLOW:
		rss_type->ipv6 = 1;
		break;
	default:
		return -EINVAL;
	}

	err = hinic_set_rss_type(nic_dev, nic_dev->rss_tmpl_idx,
				 *rss_type);
	if (err)
		return -EFAULT;

	return 0;
}

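/* Apply a new RSS indirection table and/or hash key. The user-supplied
 * values are also cached in rss_indir_user/rss_hkey_user before being
 * written to hardware.
 */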
static int __set_rss_rxfh(struct net_device *netdev,
			  const u32 *indir, const u8 *key)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err;

	if (indir) {
		if (!nic_dev->rss_indir_user) {
			nic_dev->rss_indir_user =
				kzalloc(sizeof(u32) * HINIC_RSS_INDIR_SIZE,
					GFP_KERNEL);
			if (!nic_dev->rss_indir_user)
				return -ENOMEM;
		}

		memcpy(nic_dev->rss_indir_user, indir,
		       sizeof(u32) * HINIC_RSS_INDIR_SIZE);

		err = hinic_rss_set_indir_tbl(nic_dev,
					      nic_dev->rss_tmpl_idx, indir);
		if (err)
			return -EFAULT;
	}

	if (key) {
		if (!nic_dev->rss_hkey_user) {
			nic_dev->rss_hkey_user =
				kzalloc(HINIC_RSS_KEY_SIZE * 2, GFP_KERNEL);

			if (!nic_dev->rss_hkey_user)
				return -ENOMEM;
		}

		memcpy(nic_dev->rss_hkey_user, key, HINIC_RSS_KEY_SIZE);

		err = hinic_rss_set_template_tbl(nic_dev,
						 nic_dev->rss_tmpl_idx, key);
		if (err)
			return -EFAULT;
	}

	return 0;
}

static int hinic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nic_dev->num_qps;
		break;
	case ETHTOOL_GRXFH:
		err = hinic_get_rss_hash_opts(nic_dev, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int hinic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		err = hinic_set_rss_hash_opts(nic_dev, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int hinic_get_rxfh(struct net_device *netdev,
			  u32 *indir, u8 *key, u8 *hfunc)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 hash_engine_type = 0;
	int err = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return -EOPNOTSUPP;

	if (hfunc) {
		err = hinic_rss_get_hash_engine(nic_dev,
						nic_dev->rss_tmpl_idx,
						&hash_engine_type);
		if (err)
			return -EFAULT;

		*hfunc = hash_engine_type ? ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR;
	}

	if (indir) {
		err = hinic_rss_get_indir_tbl(nic_dev,
					      nic_dev->rss_tmpl_idx, indir);
		if (err)
			return -EFAULT;
	}

	if (key)
		err = hinic_rss_get_template_tbl(nic_dev,
						 nic_dev->rss_tmpl_idx, key);

	return err;
}

static int hinic_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;

	if (!(nic_dev->flags & HINIC_RSS_ENABLE))
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)
			return -EOPNOTSUPP;

		nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ?
			HINIC_RSS_HASH_ENGINE_TYPE_XOR :
			HINIC_RSS_HASH_ENGINE_TYPE_TOEP;
		err = hinic_rss_set_hash_engine
			(nic_dev, nic_dev->rss_tmpl_idx,
			 nic_dev->rss_hash_engine);
		if (err)
			return -EFAULT;
	}

	err = __set_rss_rxfh(netdev, indir, key);

	return err;
}

static u32 hinic_get_rxfh_key_size(struct net_device *netdev)
{
	return HINIC_RSS_KEY_SIZE;
}

static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
{
	return HINIC_RSS_INDIR_SIZE;
}

#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))

#define HINIC_FUNC_STAT(_stat_item) {	\
	.name = #_stat_item, \
	.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
	.offset = offsetof(struct hinic_vport_stats, _stat_item) \
}

static struct hinic_stats hinic_function_stats[] = {
	HINIC_FUNC_STAT(tx_unicast_pkts_vport),
	HINIC_FUNC_STAT(tx_unicast_bytes_vport),
	HINIC_FUNC_STAT(tx_multicast_pkts_vport),
	HINIC_FUNC_STAT(tx_multicast_bytes_vport),
	HINIC_FUNC_STAT(tx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(tx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(rx_unicast_pkts_vport),
	HINIC_FUNC_STAT(rx_unicast_bytes_vport),
	HINIC_FUNC_STAT(rx_multicast_pkts_vport),
	HINIC_FUNC_STAT(rx_multicast_bytes_vport),
	HINIC_FUNC_STAT(rx_broadcast_pkts_vport),
	HINIC_FUNC_STAT(rx_broadcast_bytes_vport),

	HINIC_FUNC_STAT(tx_discard_vport),
	HINIC_FUNC_STAT(rx_discard_vport),
	HINIC_FUNC_STAT(tx_err_vport),
	HINIC_FUNC_STAT(rx_err_vport),
};

static char hinic_test_strings[][ETH_GSTRING_LEN] = {
	"Internal lb test (on/offline)",
	"External lb test (external_lb)",
};

#define HINIC_PORT_STAT(_stat_item) { \
	.name = #_stat_item, \
	.size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
	.offset = offsetof(struct hinic_phy_port_stats, _stat_item) \
}

static struct hinic_stats hinic_port_stats[] = {
	HINIC_PORT_STAT(mac_rx_total_pkt_num),
	HINIC_PORT_STAT(mac_rx_total_oct_num),
	HINIC_PORT_STAT(mac_rx_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_bad_oct_num),
	HINIC_PORT_STAT(mac_rx_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_good_oct_num),
	HINIC_PORT_STAT(mac_rx_uni_pkt_num),
	HINIC_PORT_STAT(mac_rx_multi_pkt_num),
	HINIC_PORT_STAT(mac_rx_broad_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_pkt_num),
	HINIC_PORT_STAT(mac_tx_total_oct_num),
	HINIC_PORT_STAT(mac_tx_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_bad_oct_num),
	HINIC_PORT_STAT(mac_tx_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_good_oct_num),
	HINIC_PORT_STAT(mac_tx_uni_pkt_num),
	HINIC_PORT_STAT(mac_tx_multi_pkt_num),
	HINIC_PORT_STAT(mac_tx_broad_pkt_num),
	HINIC_PORT_STAT(mac_rx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_rx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_rx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_rx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_rx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_rx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_rx_pause_num),
	HINIC_PORT_STAT(mac_rx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_rx_control_pkt_num),
	HINIC_PORT_STAT(mac_rx_sym_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_fcs_err_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_good_pkt_num),
	HINIC_PORT_STAT(mac_rx_send_app_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_fragment_pkt_num),
	HINIC_PORT_STAT(mac_tx_undersize_pkt_num),
	HINIC_PORT_STAT(mac_tx_undermin_pkt_num),
	HINIC_PORT_STAT(mac_tx_64_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_65_127_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_128_255_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_256_511_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num),
	HINIC_PORT_STAT(mac_tx_oversize_pkt_num),
	HINIC_PORT_STAT(mac_tx_jabber_pkt_num),
	HINIC_PORT_STAT(mac_tx_pause_num),
	HINIC_PORT_STAT(mac_tx_pfc_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num),
	HINIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num),
	HINIC_PORT_STAT(mac_tx_control_pkt_num),
	HINIC_PORT_STAT(mac_tx_err_all_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_good_pkt_num),
	HINIC_PORT_STAT(mac_tx_from_app_bad_pkt_num),
};

#define HINIC_TXQ_STAT(_stat_item) { \
	.name = "txq%d_"#_stat_item, \
	.size = sizeof_field(struct hinic_txq_stats, _stat_item), \
	.offset = offsetof(struct hinic_txq_stats, _stat_item) \
}

static struct hinic_stats hinic_tx_queue_stats[] = {
	HINIC_TXQ_STAT(pkts),
	HINIC_TXQ_STAT(bytes),
	HINIC_TXQ_STAT(tx_busy),
	HINIC_TXQ_STAT(tx_wake),
	HINIC_TXQ_STAT(tx_dropped),
	HINIC_TXQ_STAT(big_frags_pkts),
};

#define HINIC_RXQ_STAT(_stat_item) { \
	.name = "rxq%d_"#_stat_item, \
	.size = sizeof_field(struct hinic_rxq_stats, _stat_item), \
	.offset = offsetof(struct hinic_rxq_stats, _stat_item) \
}

static struct hinic_stats hinic_rx_queue_stats[] = {
	HINIC_RXQ_STAT(pkts),
	HINIC_RXQ_STAT(bytes),
	HINIC_RXQ_STAT(errors),
	HINIC_RXQ_STAT(csum_errors),
	HINIC_RXQ_STAT(other_errors),
};

static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
{
	struct hinic_txq_stats txq_stats;
	struct hinic_rxq_stats rxq_stats;
	u16 i = 0, j = 0, qid = 0;
	char *p;

	for (qid = 0; qid < nic_dev->num_qps; qid++) {
		if (!nic_dev->txqs)
			break;

		hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
		for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
			p = (char *)&txq_stats +
				hinic_tx_queue_stats[j].offset;
			data[i] = (hinic_tx_queue_stats[j].size ==
					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}

	for (qid = 0; qid < nic_dev->num_qps; qid++) {
		if (!nic_dev->rxqs)
			break;

		hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
		for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
			p = (char *)&rxq_stats +
				hinic_rx_queue_stats[j].offset;
			data[i] = (hinic_rx_queue_stats[j].size ==
					sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
		}
	}
}

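/* Fill data[] in the same order as the strings reported by
 * hinic_get_strings()/hinic_get_sset_count(): per-function (vport) stats,
 * then physical port stats, then per-queue TX and RX stats.
 */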
static void hinic_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_vport_stats vport_stats = {0};
	struct hinic_phy_port_stats *port_stats;
	u16 i = 0, j = 0;
	char *p;
	int err;

	err = hinic_get_vport_stats(nic_dev, &vport_stats);
	if (err)
		netif_err(nic_dev, drv, netdev,
			  "Failed to get vport stats from firmware\n");

	for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
		p = (char *)&vport_stats + hinic_function_stats[j].offset;
		data[i] = (hinic_function_stats[j].size ==
				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
	if (!port_stats) {
		memset(&data[i], 0,
		       ARRAY_LEN(hinic_port_stats) * sizeof(*data));
		i += ARRAY_LEN(hinic_port_stats);
		goto get_drv_stats;
	}

	err = hinic_get_phy_port_stats(nic_dev, port_stats);
	if (err)
		netif_err(nic_dev, drv, netdev,
			  "Failed to get port stats from firmware\n");

	for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
		p = (char *)port_stats + hinic_port_stats[j].offset;
		data[i] = (hinic_port_stats[j].size ==
				sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	kfree(port_stats);

get_drv_stats:
	get_drv_queue_stats(nic_dev, data + i);
}

static int hinic_get_sset_count(struct net_device *netdev, int sset)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int count, q_num;

	switch (sset) {
	case ETH_SS_TEST:
		return ARRAY_LEN(hinic_test_strings);
	case ETH_SS_STATS:
		q_num = nic_dev->num_qps;
		count = ARRAY_LEN(hinic_function_stats) +
			(ARRAY_LEN(hinic_tx_queue_stats) +
			 ARRAY_LEN(hinic_rx_queue_stats)) * q_num;

		count += ARRAY_LEN(hinic_port_stats);

		return count;
	default:
		return -EOPNOTSUPP;
	}
}

static void hinic_get_strings(struct net_device *netdev,
			      u32 stringset, u8 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	char *p = (char *)data;
	u16 i, j;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
		return;
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
			memcpy(p, hinic_function_stats[i].name,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
			memcpy(p, hinic_port_stats[i].name,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nic_dev->num_qps; i++) {
			for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
				sprintf(p, hinic_tx_queue_stats[j].name, i);
				p += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < nic_dev->num_qps; i++) {
			for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
				sprintf(p, hinic_rx_queue_stats[j].name, i);
				p += ETH_GSTRING_LEN;
			}
		}

		return;
	default:
		return;
	}
}

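/* Loopback test: build an LP_PKT_LEN byte test frame with a known data
 * pattern, transmit LP_PKT_CNT copies per iteration through
 * hinic_lb_xmit_frame() and compare what the RX path copied into
 * lb_test_rx_buf against the original frame.
 */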
static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
{
	u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
	struct net_device *netdev = nic_dev->netdev;
	struct sk_buff *skb_tmp = NULL;
	struct sk_buff *skb = NULL;
	u32 cnt = test_time * 5;
	u8 *test_data = NULL;
	u32 i;
	u8 j;

	skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
	if (!skb_tmp)
		return -ENOMEM;

	test_data = __skb_put(skb_tmp, LP_PKT_LEN);

	memset(test_data, 0xFF, 2 * ETH_ALEN);
	test_data[ETH_ALEN] = 0xFE;
	test_data[2 * ETH_ALEN] = 0x08;
	test_data[2 * ETH_ALEN + 1] = 0x0;

	for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
		test_data[i] = i & 0xFF;

	skb_tmp->queue_mapping = 0;
	skb_tmp->ip_summed = CHECKSUM_COMPLETE;
	skb_tmp->dev = netdev;

	for (i = 0; i < cnt; i++) {
		nic_dev->lb_test_rx_idx = 0;
		memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);

		for (j = 0; j < LP_PKT_CNT; j++) {
			skb = pskb_copy(skb_tmp, GFP_ATOMIC);
			if (!skb) {
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Copy skb failed for loopback test\n");
				return -ENOMEM;
			}

			/* mark index for every pkt */
			skb->data[LP_PKT_LEN - 1] = j;

			if (hinic_lb_xmit_frame(skb, netdev)) {
				dev_kfree_skb_any(skb);
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Xmit pkt failed for loopback test\n");
				return -EBUSY;
			}
		}

		/* wait till all pkts received to RX buffer */
		msleep(200);

		for (j = 0; j < LP_PKT_CNT; j++) {
			if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
				   skb_tmp->data, LP_PKT_LEN - 1) ||
			    (*(lb_test_rx_buf + j * LP_PKT_LEN +
			       LP_PKT_LEN - 1) != j)) {
				dev_kfree_skb_any(skb_tmp);
				netif_err(nic_dev, drv, netdev,
					  "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
					  j + i * LP_PKT_CNT,
					  LP_PKT_LEN - 1,
					  *(lb_test_rx_buf + j * LP_PKT_LEN +
					    LP_PKT_LEN - 1));
				return -EIO;
			}
		}
	}

	dev_kfree_skb_any(skb_tmp);
	return 0;
}

static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
		      enum diag_test_index *test_index)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_test_rx_buf = NULL;
	int err = 0;

	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
		*test_index = INTERNAL_LP_TEST;
		if (hinic_set_loopback_mode(nic_dev->hwdev,
					    HINIC_INTERNAL_LP_MODE, true)) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to set port loopback mode before loopback test\n");
			return -EIO;
		}
	} else {
		*test_index = EXTERNAL_LP_TEST;
	}

	lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
	if (!lb_test_rx_buf) {
		err = -ENOMEM;
	} else {
		nic_dev->lb_test_rx_buf = lb_test_rx_buf;
		nic_dev->lb_pkt_len = LP_PKT_LEN;
		nic_dev->flags |= HINIC_LP_TEST;
		err = hinic_run_lp_test(nic_dev, test_time);
		nic_dev->flags &= ~HINIC_LP_TEST;
		msleep(100);
		vfree(lb_test_rx_buf);
		nic_dev->lb_test_rx_buf = NULL;
	}

	if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
		if (hinic_set_loopback_mode(nic_dev->hwdev,
					    HINIC_INTERNAL_LP_MODE, false)) {
			netif_err(nic_dev, drv, netdev,
				  "Failed to cancel port loopback mode after loopback test\n");
			err = -EIO;
		}
	}

	return err;
}

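/* ethtool self-test entry point: requires the interface to be up, runs the
 * internal loopback test (or the external one when ETH_TEST_FL_EXTERNAL_LB
 * is set) with the queues quiesced, and restores the carrier state
 * afterwards.
 */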
static void hinic_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	enum hinic_port_link_state link_state;
	enum diag_test_index test_index = 0;
	int err = 0;

	memset(data, 0, DIAG_TEST_MAX * sizeof(u64));

	/* don't support loopback test when netdev is closed. */
	if (!(nic_dev->flags & HINIC_INTF_UP)) {
		netif_err(nic_dev, drv, netdev,
			  "Do not support loopback test when netdev is closed\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
		data[PORT_DOWN_ERR_IDX] = 1;
		return;
	}

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
			 &test_index);
	if (err) {
		eth_test->flags |= ETH_TEST_FL_FAILED;
		data[test_index] = 1;
	}

	netif_tx_wake_all_queues(netdev);

	err = hinic_port_link_state(nic_dev, &link_state);
	if (!err && link_state == HINIC_LINK_STATE_UP)
		netif_carrier_on(netdev);
}

static int hinic_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	int err = 0;
	u8 port;

	port = nic_dev->hwdev->port_id;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		err = hinic_set_led_status(nic_dev->hwdev, port,
					   HINIC_LED_TYPE_LINK,
					   HINIC_LED_MODE_FORCE_2HZ);
		if (err)
			netif_err(nic_dev, drv, netdev,
				  "Set LED blinking in 2HZ failed\n");
		break;

	case ETHTOOL_ID_INACTIVE:
		err = hinic_reset_led_status(nic_dev->hwdev, port);
		if (err)
			netif_err(nic_dev, drv, netdev,
				  "Reset LED to original status failed\n");
		break;

	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int hinic_get_module_info(struct net_device *netdev,
				 struct ethtool_modinfo *modinfo)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 sfp_type_ext;
	u8 sfp_type;
	int err;

	err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
	if (err)
		return err;

	switch (sfp_type) {
	case SFF8024_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	case SFF8024_ID_QSFP_8438:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		break;
	case SFF8024_ID_QSFP_8436_8636:
		if (sfp_type_ext >= 0x3) {
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;

		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
		}
		break;
	case SFF8024_ID_QSFP28_8636:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
		break;
	default:
		netif_warn(nic_dev, drv, netdev,
			   "Optical module unknown: 0x%x\n", sfp_type);
		return -EINVAL;
	}

	return 0;
}

static int hinic_get_module_eeprom(struct net_device *netdev,
				   struct ethtool_eeprom *ee, u8 *data)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
	u16 len;
	int err;

	if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
		return -EINVAL;

	memset(data, 0, ee->len);

	err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
	if (err)
		return err;

	memcpy(data, sfp_data + ee->offset, ee->len);

	return 0;
}

static int
hinic_get_link_ext_state(struct net_device *netdev,
			 struct ethtool_link_ext_state_info *link_ext_state_info)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	if (netif_carrier_ok(netdev))
		return -ENODATA;

	if (nic_dev->cable_unplugged)
		link_ext_state_info->link_ext_state =
			ETHTOOL_LINK_EXT_STATE_NO_CABLE;
	else if (nic_dev->module_unrecognized)
		link_ext_state_info->link_ext_state =
			ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH;

	return 0;
}

static const struct ethtool_ops hinic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,

	.get_link_ksettings = hinic_get_link_ksettings,
	.set_link_ksettings = hinic_set_link_ksettings,
	.get_drvinfo = hinic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ext_state = hinic_get_link_ext_state,
	.get_ringparam = hinic_get_ringparam,
	.set_ringparam = hinic_set_ringparam,
	.get_coalesce = hinic_get_coalesce,
	.set_coalesce = hinic_set_coalesce,
	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
	.get_pauseparam = hinic_get_pauseparam,
	.set_pauseparam = hinic_set_pauseparam,
	.get_channels = hinic_get_channels,
	.set_channels = hinic_set_channels,
	.get_rxnfc = hinic_get_rxnfc,
	.set_rxnfc = hinic_set_rxnfc,
	.get_rxfh_key_size = hinic_get_rxfh_key_size,
	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
	.get_rxfh = hinic_get_rxfh,
	.set_rxfh = hinic_set_rxfh,
	.get_sset_count = hinic_get_sset_count,
	.get_ethtool_stats = hinic_get_ethtool_stats,
	.get_strings = hinic_get_strings,
	.self_test = hinic_diag_test,
	.set_phys_id = hinic_set_phys_id,
	.get_module_info = hinic_get_module_info,
	.get_module_eeprom = hinic_get_module_eeprom,
};

static const struct ethtool_ops hinicvf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES |
				     ETHTOOL_COALESCE_TX_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES,

	.get_link_ksettings = hinic_get_link_ksettings,
	.get_drvinfo = hinic_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = hinic_get_ringparam,
	.set_ringparam = hinic_set_ringparam,
	.get_coalesce = hinic_get_coalesce,
	.set_coalesce = hinic_set_coalesce,
	.get_per_queue_coalesce = hinic_get_per_queue_coalesce,
	.set_per_queue_coalesce = hinic_set_per_queue_coalesce,
	.get_channels = hinic_get_channels,
	.set_channels = hinic_set_channels,
	.get_rxnfc = hinic_get_rxnfc,
	.set_rxnfc = hinic_set_rxnfc,
	.get_rxfh_key_size = hinic_get_rxfh_key_size,
	.get_rxfh_indir_size = hinic_get_rxfh_indir_size,
	.get_rxfh = hinic_get_rxfh,
	.set_rxfh = hinic_set_rxfh,
	.get_sset_count = hinic_get_sset_count,
	.get_ethtool_stats = hinic_get_ethtool_stats,
	.get_strings = hinic_get_strings,
};

void hinic_set_ethtool_ops(struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		netdev->ethtool_ops = &hinic_ethtool_ops;
	else
		netdev->ethtool_ops = &hinicvf_ethtool_ops;
}