1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 /* Intel(R) Ethernet Connection E800 Series Linux Driver */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <generated/utsrelease.h>
9 #include <linux/crash_dump.h>
10 #include "ice.h"
11 #include "ice_base.h"
12 #include "ice_lib.h"
13 #include "ice_fltr.h"
14 #include "ice_dcb_lib.h"
15 #include "ice_dcb_nl.h"
16 #include "ice_devlink.h"
17 /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
18 * ice tracepoint functions. This must be done exactly once across the
19 * ice driver.
20 */
21 #define CREATE_TRACE_POINTS
22 #include "ice_trace.h"
23 #include "ice_eswitch.h"
24 #include "ice_tc_lib.h"
25 #include "ice_vsi_vlan_ops.h"
26
27 #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
28 static const char ice_driver_string[] = DRV_SUMMARY;
29 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
30
31 /* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
32 #define ICE_DDP_PKG_PATH "intel/ice/ddp/"
33 #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
34
35 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
36 MODULE_DESCRIPTION(DRV_SUMMARY);
37 MODULE_LICENSE("GPL v2");
38 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
39
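/* -1 selects the driver's default message level (see netif_msg_init()) */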
40 static int debug = -1;
41 module_param(debug, int, 0644);
42 #ifndef CONFIG_DYNAMIC_DEBUG
43 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
44 #else
45 MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
46 #endif /* !CONFIG_DYNAMIC_DEBUG */
47
48 static DEFINE_IDA(ice_aux_ida);
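/* static branch taken when XDP Tx rings can be shared between CPUs and
 * therefore need locking
 */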
49 DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
50 EXPORT_SYMBOL(ice_xdp_locking_key);
51
52 /**
53 * ice_hw_to_dev - Get device pointer from the hardware structure
54 * @hw: pointer to the device HW structure
55 *
56 * Used to access the device pointer from compilation units which can't easily
57 * include the definition of struct ice_pf without leading to circular header
58 * dependencies.
59 */
60 struct device *ice_hw_to_dev(struct ice_hw *hw)
61 {
62 struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
63
64 return &pf->pdev->dev;
65 }
66
67 static struct workqueue_struct *ice_wq;
68 static const struct net_device_ops ice_netdev_safe_mode_ops;
69 static const struct net_device_ops ice_netdev_ops;
70
71 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
72
73 static void ice_vsi_release_all(struct ice_pf *pf);
74
75 static int ice_rebuild_channels(struct ice_pf *pf);
76 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
77
78 static int
79 ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
80 void *cb_priv, enum tc_setup_type type, void *type_data,
81 void *data,
82 void (*cleanup)(struct flow_block_cb *block_cb));
83
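/**
 * netif_is_ice - check whether a net_device is handled by the ice driver
 * @dev: pointer to the net_device to check, may be NULL
 */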
84 bool netif_is_ice(struct net_device *dev)
85 {
86 return dev && (dev->netdev_ops == &ice_netdev_ops);
87 }
88
89 /**
90 * ice_get_tx_pending - returns number of Tx descriptors not processed
91 * @ring: the ring of descriptors
92 */
93 static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
94 {
95 u16 head, tail;
96
97 head = ring->next_to_clean;
98 tail = ring->next_to_use;
99
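/* if next_to_use has wrapped around below next_to_clean, add ring->count
 * to account for the wrap
 */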
100 if (head != tail)
101 return (head < tail) ?
102 tail - head : (tail + ring->count - head);
103 return 0;
104 }
105
106 /**
107 * ice_check_for_hang_subtask - check for and recover hung queues
108 * @pf: pointer to PF struct
109 */
110 static void ice_check_for_hang_subtask(struct ice_pf *pf)
111 {
112 struct ice_vsi *vsi = NULL;
113 struct ice_hw *hw;
114 unsigned int i;
115 int packets;
116 u32 v;
117
118 ice_for_each_vsi(pf, v)
119 if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
120 vsi = pf->vsi[v];
121 break;
122 }
123
124 if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
125 return;
126
127 if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
128 return;
129
130 hw = &vsi->back->hw;
131
132 ice_for_each_txq(vsi, i) {
133 struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
134
135 if (!tx_ring)
136 continue;
137 if (ice_ring_ch_enabled(tx_ring))
138 continue;
139
140 if (tx_ring->desc) {
141 /* If packet counter has not changed the queue is
142 * likely stalled, so force an interrupt for this
143 * queue.
144 *
145 * prev_pkt would be negative if there was no
146 * pending work.
147 */
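/* mask the 64-bit counter down to a non-negative value for comparison
 * with prev_pkt (-1 means idle)
 */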
148 packets = tx_ring->stats.pkts & INT_MAX;
149 if (tx_ring->tx_stats.prev_pkt == packets) {
150 /* Trigger sw interrupt to revive the queue */
151 ice_trigger_sw_intr(hw, tx_ring->q_vector);
152 continue;
153 }
154
155 /* Memory barrier between read of packet count and call
156 * to ice_get_tx_pending()
157 */
158 smp_rmb();
159 tx_ring->tx_stats.prev_pkt =
160 ice_get_tx_pending(tx_ring) ? packets : -1;
161 }
162 }
163 }
164
165 /**
166 * ice_init_mac_fltr - Set initial MAC filters
167 * @pf: board private structure
168 *
169 * Set initial set of MAC filters for PF VSI; configure filters for permanent
170 * address and broadcast address. If an error is encountered, netdevice will be
171 * unregistered.
172 */
173 static int ice_init_mac_fltr(struct ice_pf *pf)
174 {
175 struct ice_vsi *vsi;
176 u8 *perm_addr;
177
178 vsi = ice_get_main_vsi(pf);
179 if (!vsi)
180 return -EINVAL;
181
182 perm_addr = vsi->port_info->mac.perm_addr;
183 return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
184 }
185
186 /**
187 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
188 * @netdev: the net device on which the sync is happening
189 * @addr: MAC address to sync
190 *
191 * This is a callback function which is called by the in kernel device sync
192 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
193 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
194 * MAC filters to the hardware.
195 */
196 static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
197 {
198 struct ice_netdev_priv *np = netdev_priv(netdev);
199 struct ice_vsi *vsi = np->vsi;
200
201 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
202 ICE_FWD_TO_VSI))
203 return -EINVAL;
204
205 return 0;
206 }
207
208 /**
209 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
210 * @netdev: the net device on which the unsync is happening
211 * @addr: MAC address to unsync
212 *
213 * This is a callback function which is called by the in kernel device unsync
214 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
215 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
216 * delete the MAC filters from the hardware.
217 */
218 static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
219 {
220 struct ice_netdev_priv *np = netdev_priv(netdev);
221 struct ice_vsi *vsi = np->vsi;
222
223 /* Under some circumstances, we might receive a request to delete our
224 * own device address from our uc list. Because we store the device
225 * address in the VSI's MAC filter list, we need to ignore such
226 * requests and not delete our device address from this list.
227 */
228 if (ether_addr_equal(addr, netdev->dev_addr))
229 return 0;
230
231 if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
232 ICE_FWD_TO_VSI))
233 return -EINVAL;
234
235 return 0;
236 }
237
238 /**
239 * ice_vsi_fltr_changed - check if filter state changed
240 * @vsi: VSI to be checked
241 *
242 * returns true if filter state has changed, false otherwise.
243 */
244 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
245 {
246 return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
247 test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
248 }
249
250 /**
251 * ice_set_promisc - Enable promiscuous mode for a given PF
252 * @vsi: the VSI being configured
253 * @promisc_m: mask of promiscuous config bits
254 *
255 */
256 static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
257 {
258 int status;
259
260 if (vsi->type != ICE_VSI_PF)
261 return 0;
262
263 if (ice_vsi_has_non_zero_vlans(vsi)) {
264 promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
265 status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
266 promisc_m);
267 } else {
268 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
269 promisc_m, 0);
270 }
271 if (status && status != -EEXIST)
272 return status;
273
274 netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
275 vsi->vsi_num, promisc_m);
276 return 0;
277 }
278
279 /**
280 * ice_clear_promisc - Disable promiscuous mode for a given PF
281 * @vsi: the VSI being configured
282 * @promisc_m: mask of promiscuous config bits
283 *
284 */
285 static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
286 {
287 int status;
288
289 if (vsi->type != ICE_VSI_PF)
290 return 0;
291
292 if (ice_vsi_has_non_zero_vlans(vsi)) {
293 promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
294 status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
295 promisc_m);
296 } else {
297 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
298 promisc_m, 0);
299 }
300
301 netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
302 vsi->vsi_num, promisc_m);
303 return status;
304 }
305
306 /**
307 * ice_get_devlink_port - Get devlink port from netdev
308 * @netdev: the netdevice structure
309 */
310 static struct devlink_port *ice_get_devlink_port(struct net_device *netdev)
311 {
312 struct ice_pf *pf = ice_netdev_to_pf(netdev);
313
314 if (!ice_is_switchdev_running(pf))
315 return NULL;
316
317 return &pf->devlink_port;
318 }
319
320 /**
321 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
322 * @vsi: ptr to the VSI
323 *
324 * Push any outstanding VSI filter changes through the AdminQ.
325 */
326 static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
327 {
328 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
329 struct device *dev = ice_pf_to_dev(vsi->back);
330 struct net_device *netdev = vsi->netdev;
331 bool promisc_forced_on = false;
332 struct ice_pf *pf = vsi->back;
333 struct ice_hw *hw = &pf->hw;
334 u32 changed_flags = 0;
335 int err;
336
337 if (!vsi->netdev)
338 return -EINVAL;
339
340 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
341 usleep_range(1000, 2000);
342
343 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
344 vsi->current_netdev_flags = vsi->netdev->flags;
345
346 INIT_LIST_HEAD(&vsi->tmp_sync_list);
347 INIT_LIST_HEAD(&vsi->tmp_unsync_list);
348
349 if (ice_vsi_fltr_changed(vsi)) {
350 clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
351 clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
352
353 /* grab the netdev's addr_list_lock */
354 netif_addr_lock_bh(netdev);
355 __dev_uc_sync(netdev, ice_add_mac_to_sync_list,
356 ice_add_mac_to_unsync_list);
357 __dev_mc_sync(netdev, ice_add_mac_to_sync_list,
358 ice_add_mac_to_unsync_list);
359 /* our temp lists are populated. release lock */
360 netif_addr_unlock_bh(netdev);
361 }
362
363 /* Remove MAC addresses in the unsync list */
364 err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
365 ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
366 if (err) {
367 netdev_err(netdev, "Failed to delete MAC filters\n");
368 /* if we failed because of alloc failures, just bail */
369 if (err == -ENOMEM)
370 goto out;
371 }
372
373 /* Add MAC addresses in the sync list */
374 err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
375 ice_fltr_free_list(dev, &vsi->tmp_sync_list);
376 /* If the filter was added successfully or already exists, do not treat
377 * it as an error. Instead, continue processing the rest of the
378 * function.
379 */
380 if (err && err != -EEXIST) {
381 netdev_err(netdev, "Failed to add MAC filters\n");
382 /* If there is no more space for new umac filters, VSI
383 * should go into promiscuous mode. There should be some
384 * space reserved for promiscuous filters.
385 */
386 if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
387 !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
388 vsi->state)) {
389 promisc_forced_on = true;
390 netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
391 vsi->vsi_num);
392 } else {
393 goto out;
394 }
395 }
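/* any recoverable filter-add error was handled above (or forced promisc
 * on); clear err before evaluating promiscuous mode changes
 */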
396 err = 0;
397 /* check for changes in promiscuous modes */
398 if (changed_flags & IFF_ALLMULTI) {
399 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
400 err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
401 if (err) {
402 vsi->current_netdev_flags &= ~IFF_ALLMULTI;
403 goto out_promisc;
404 }
405 } else {
406 /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
407 err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
408 if (err) {
409 vsi->current_netdev_flags |= IFF_ALLMULTI;
410 goto out_promisc;
411 }
412 }
413 }
414
415 if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
416 test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
417 clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
418 if (vsi->current_netdev_flags & IFF_PROMISC) {
419 /* Apply Rx filter rule to get traffic from wire */
420 if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
421 err = ice_set_dflt_vsi(vsi);
422 if (err && err != -EEXIST) {
423 netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
424 err, vsi->vsi_num);
425 vsi->current_netdev_flags &=
426 ~IFF_PROMISC;
427 goto out_promisc;
428 }
429 err = 0;
430 vlan_ops->dis_rx_filtering(vsi);
431
432 /* promiscuous mode implies allmulticast so
433 * that VSIs that are in promiscuous mode are
434 * subscribed to multicast packets coming to
435 * the port
436 */
437 err = ice_set_promisc(vsi,
438 ICE_MCAST_PROMISC_BITS);
439 if (err)
440 goto out_promisc;
441 }
442 } else {
443 /* Clear Rx filter to remove traffic from wire */
444 if (ice_is_vsi_dflt_vsi(vsi)) {
445 err = ice_clear_dflt_vsi(vsi);
446 if (err) {
447 netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
448 err, vsi->vsi_num);
449 vsi->current_netdev_flags |=
450 IFF_PROMISC;
451 goto out_promisc;
452 }
453 if (vsi->netdev->features &
454 NETIF_F_HW_VLAN_CTAG_FILTER)
455 vlan_ops->ena_rx_filtering(vsi);
456 }
457
458 /* disable allmulti here, but only if allmulti is not
459 * still enabled for the netdev
460 */
461 if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
462 err = ice_clear_promisc(vsi,
463 ICE_MCAST_PROMISC_BITS);
464 if (err) {
465 netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
466 err, vsi->vsi_num);
467 }
468 }
469 }
470 }
471 goto exit;
472
473 out_promisc:
474 set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
475 goto exit;
476 out:
477 /* if something went wrong then set the changed flag so we try again */
478 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
479 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
480 exit:
481 clear_bit(ICE_CFG_BUSY, vsi->state);
482 return err;
483 }
484
485 /**
486 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
487 * @pf: board private structure
488 */
489 static void ice_sync_fltr_subtask(struct ice_pf *pf)
490 {
491 int v;
492
493 if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
494 return;
495
496 clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
497
498 ice_for_each_vsi(pf, v)
499 if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
500 ice_vsi_sync_fltr(pf->vsi[v])) {
501 /* come back and try again later */
502 set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
503 break;
504 }
505 }
506
507 /**
508 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
509 * @pf: the PF
510 * @locked: is the rtnl_lock already held
511 */
512 static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
513 {
514 int node;
515 int v;
516
517 ice_for_each_vsi(pf, v)
518 if (pf->vsi[v])
519 ice_dis_vsi(pf->vsi[v], locked);
520
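/* clear the aggregator node VSI counts; they are repopulated as VSIs are
 * set up again
 */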
521 for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
522 pf->pf_agg_node[node].num_vsis = 0;
523
524 for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
525 pf->vf_agg_node[node].num_vsis = 0;
526 }
527
528 /**
529 * ice_clear_sw_switch_recipes - clear switch recipes
530 * @pf: board private structure
531 *
532 * Mark switch recipes as not created in sw structures. There are cases where
533 * rules (especially advanced rules) need to be restored, either re-read from
534 * hardware or added again, for example after a reset. The 'recp_created' flag
535 * prevents this from happening and needs to be cleared upfront.
536 */
537 static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
538 {
539 struct ice_sw_recipe *recp;
540 u8 i;
541
542 recp = pf->hw.switch_info->recp_list;
543 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
544 recp[i].recp_created = false;
545 }
546
547 /**
548 * ice_prepare_for_reset - prep for reset
549 * @pf: board private structure
550 * @reset_type: reset type requested
551 *
552 * Inform or close all dependent features in prep for reset.
553 */
554 static void
555 ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
556 {
557 struct ice_hw *hw = &pf->hw;
558 struct ice_vsi *vsi;
559 struct ice_vf *vf;
560 unsigned int bkt;
561
562 dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
563
564 /* already prepared for reset */
565 if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
566 return;
567
568 ice_unplug_aux_dev(pf);
569
570 /* Notify VFs of impending reset */
571 if (ice_check_sq_alive(hw, &hw->mailboxq))
572 ice_vc_notify_reset(pf);
573
574 /* Disable VFs until reset is completed */
575 mutex_lock(&pf->vfs.table_lock);
576 ice_for_each_vf(pf, bkt, vf)
577 ice_set_vf_state_dis(vf);
578 mutex_unlock(&pf->vfs.table_lock);
579
580 if (ice_is_eswitch_mode_switchdev(pf)) {
581 if (reset_type != ICE_RESET_PFR)
582 ice_clear_sw_switch_recipes(pf);
583 }
584
585 /* release ADQ specific HW and SW resources */
586 vsi = ice_get_main_vsi(pf);
587 if (!vsi)
588 goto skip;
589
590 /* to be on safe side, reset orig_rss_size so that normal flow
591 * of deciding rss_size can take precedence
592 */
593 vsi->orig_rss_size = 0;
594
595 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
596 if (reset_type == ICE_RESET_PFR) {
597 vsi->old_ena_tc = vsi->all_enatc;
598 vsi->old_numtc = vsi->all_numtc;
599 } else {
600 ice_remove_q_channels(vsi, true);
601
602 /* other reset types do not support channel rebuild,
603 * so reset the related info
604 */
605 vsi->old_ena_tc = 0;
606 vsi->all_enatc = 0;
607 vsi->old_numtc = 0;
608 vsi->all_numtc = 0;
609 vsi->req_txq = 0;
610 vsi->req_rxq = 0;
611 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
612 memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
613 }
614 }
615 skip:
616
617 /* clear SW filtering DB */
618 ice_clear_hw_tbls(hw);
619 /* disable the VSIs and their queues that are not already DOWN */
620 ice_pf_dis_all_vsi(pf, false);
621
622 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
623 ice_ptp_prepare_for_reset(pf);
624
625 if (ice_is_feature_supported(pf, ICE_F_GNSS))
626 ice_gnss_exit(pf);
627
628 if (hw->port_info)
629 ice_sched_clear_port(hw->port_info);
630
631 ice_shutdown_all_ctrlq(hw);
632
633 set_bit(ICE_PREPARED_FOR_RESET, pf->state);
634 }
635
636 /**
637 * ice_do_reset - Initiate one of many types of resets
638 * @pf: board private structure
639 * @reset_type: reset type requested before this function was called.
640 */
641 static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
642 {
643 struct device *dev = ice_pf_to_dev(pf);
644 struct ice_hw *hw = &pf->hw;
645
646 dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
647
648 ice_prepare_for_reset(pf, reset_type);
649
650 /* trigger the reset */
651 if (ice_reset(hw, reset_type)) {
652 dev_err(dev, "reset %d failed\n", reset_type);
653 set_bit(ICE_RESET_FAILED, pf->state);
654 clear_bit(ICE_RESET_OICR_RECV, pf->state);
655 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
656 clear_bit(ICE_PFR_REQ, pf->state);
657 clear_bit(ICE_CORER_REQ, pf->state);
658 clear_bit(ICE_GLOBR_REQ, pf->state);
659 wake_up(&pf->reset_wait_queue);
660 return;
661 }
662
663 /* PFR is a bit of a special case because it doesn't result in an OICR
664 * interrupt. So for PFR, rebuild after the reset and clear the reset-
665 * associated state bits.
666 */
667 if (reset_type == ICE_RESET_PFR) {
668 pf->pfr_count++;
669 ice_rebuild(pf, reset_type);
670 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
671 clear_bit(ICE_PFR_REQ, pf->state);
672 wake_up(&pf->reset_wait_queue);
673 ice_reset_all_vfs(pf);
674 }
675 }
676
677 /**
678 * ice_reset_subtask - Set up for resetting the device and driver
679 * @pf: board private structure
680 */
681 static void ice_reset_subtask(struct ice_pf *pf)
682 {
683 enum ice_reset_req reset_type = ICE_RESET_INVAL;
684
685 /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
686 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
687 * of reset is pending and sets bits in pf->state indicating the reset
688 * type and ICE_RESET_OICR_RECV. If the latter bit is set, prepare for the
689 * pending reset if we have not already done so. (For PF software-initiated
690 * global resets the software should already be prepared, as indicated by
691 * ICE_PREPARED_FOR_RESET; for global resets initiated by firmware or by
692 * software on other PFs, that bit is not set, so prepare for the reset now.)
693 * Then poll for reset completion, rebuild, and return.
694 */
695 if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
696 /* Perform the largest reset requested */
697 if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
698 reset_type = ICE_RESET_CORER;
699 if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
700 reset_type = ICE_RESET_GLOBR;
701 if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
702 reset_type = ICE_RESET_EMPR;
703 /* return if no valid reset type requested */
704 if (reset_type == ICE_RESET_INVAL)
705 return;
706 ice_prepare_for_reset(pf, reset_type);
707
708 /* make sure we are ready to rebuild */
709 if (ice_check_reset(&pf->hw)) {
710 set_bit(ICE_RESET_FAILED, pf->state);
711 } else {
712 /* done with reset. start rebuild */
713 pf->hw.reset_ongoing = false;
714 ice_rebuild(pf, reset_type);
715 /* clear bit to resume normal operations, but
716 * ICE_NEEDS_RESTART bit is set in case rebuild failed
717 */
718 clear_bit(ICE_RESET_OICR_RECV, pf->state);
719 clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
720 clear_bit(ICE_PFR_REQ, pf->state);
721 clear_bit(ICE_CORER_REQ, pf->state);
722 clear_bit(ICE_GLOBR_REQ, pf->state);
723 wake_up(&pf->reset_wait_queue);
724 ice_reset_all_vfs(pf);
725 }
726
727 return;
728 }
729
730 /* No pending resets to finish processing. Check for new resets */
731 if (test_bit(ICE_PFR_REQ, pf->state))
732 reset_type = ICE_RESET_PFR;
733 if (test_bit(ICE_CORER_REQ, pf->state))
734 reset_type = ICE_RESET_CORER;
735 if (test_bit(ICE_GLOBR_REQ, pf->state))
736 reset_type = ICE_RESET_GLOBR;
737 /* If no valid reset type requested just return */
738 if (reset_type == ICE_RESET_INVAL)
739 return;
740
741 /* reset if not already down or busy */
742 if (!test_bit(ICE_DOWN, pf->state) &&
743 !test_bit(ICE_CFG_BUSY, pf->state)) {
744 ice_do_reset(pf, reset_type);
745 }
746 }
747
748 /**
749 * ice_print_topo_conflict - print topology conflict message
750 * @vsi: the VSI whose topology status is being checked
751 */
752 static void ice_print_topo_conflict(struct ice_vsi *vsi)
753 {
754 switch (vsi->port_info->phy.link_info.topo_media_conflict) {
755 case ICE_AQ_LINK_TOPO_CONFLICT:
756 case ICE_AQ_LINK_MEDIA_CONFLICT:
757 case ICE_AQ_LINK_TOPO_UNREACH_PRT:
758 case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
759 case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
760 netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
761 break;
762 case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
763 if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
764 netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
765 else
766 netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
767 break;
768 default:
769 break;
770 }
771 }
772
773 /**
774 * ice_print_link_msg - print link up or down message
775 * @vsi: the VSI whose link status is being queried
776 * @isup: boolean for if the link is now up or down
777 */
778 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
779 {
780 struct ice_aqc_get_phy_caps_data *caps;
781 const char *an_advertised;
782 const char *fec_req;
783 const char *speed;
784 const char *fec;
785 const char *fc;
786 const char *an;
787 int status;
788
789 if (!vsi)
790 return;
791
792 if (vsi->current_isup == isup)
793 return;
794
795 vsi->current_isup = isup;
796
797 if (!isup) {
798 netdev_info(vsi->netdev, "NIC Link is Down\n");
799 return;
800 }
801
802 switch (vsi->port_info->phy.link_info.link_speed) {
803 case ICE_AQ_LINK_SPEED_100GB:
804 speed = "100 G";
805 break;
806 case ICE_AQ_LINK_SPEED_50GB:
807 speed = "50 G";
808 break;
809 case ICE_AQ_LINK_SPEED_40GB:
810 speed = "40 G";
811 break;
812 case ICE_AQ_LINK_SPEED_25GB:
813 speed = "25 G";
814 break;
815 case ICE_AQ_LINK_SPEED_20GB:
816 speed = "20 G";
817 break;
818 case ICE_AQ_LINK_SPEED_10GB:
819 speed = "10 G";
820 break;
821 case ICE_AQ_LINK_SPEED_5GB:
822 speed = "5 G";
823 break;
824 case ICE_AQ_LINK_SPEED_2500MB:
825 speed = "2.5 G";
826 break;
827 case ICE_AQ_LINK_SPEED_1000MB:
828 speed = "1 G";
829 break;
830 case ICE_AQ_LINK_SPEED_100MB:
831 speed = "100 M";
832 break;
833 default:
834 speed = "Unknown ";
835 break;
836 }
837
838 switch (vsi->port_info->fc.current_mode) {
839 case ICE_FC_FULL:
840 fc = "Rx/Tx";
841 break;
842 case ICE_FC_TX_PAUSE:
843 fc = "Tx";
844 break;
845 case ICE_FC_RX_PAUSE:
846 fc = "Rx";
847 break;
848 case ICE_FC_NONE:
849 fc = "None";
850 break;
851 default:
852 fc = "Unknown";
853 break;
854 }
855
856 /* Get FEC mode based on negotiated link info */
857 switch (vsi->port_info->phy.link_info.fec_info) {
858 case ICE_AQ_LINK_25G_RS_528_FEC_EN:
859 case ICE_AQ_LINK_25G_RS_544_FEC_EN:
860 fec = "RS-FEC";
861 break;
862 case ICE_AQ_LINK_25G_KR_FEC_EN:
863 fec = "FC-FEC/BASE-R";
864 break;
865 default:
866 fec = "NONE";
867 break;
868 }
869
870 /* check if autoneg completed, might be false due to not supported */
871 if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
872 an = "True";
873 else
874 an = "False";
875
876 /* Get FEC mode requested based on PHY caps last SW configuration */
877 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
878 if (!caps) {
879 fec_req = "Unknown";
880 an_advertised = "Unknown";
881 goto done;
882 }
883
884 status = ice_aq_get_phy_caps(vsi->port_info, false,
885 ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
886 if (status)
887 netdev_info(vsi->netdev, "Get phy capability failed.\n");
888
889 an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
890
891 if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
892 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
893 fec_req = "RS-FEC";
894 else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
895 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
896 fec_req = "FC-FEC/BASE-R";
897 else
898 fec_req = "NONE";
899
900 kfree(caps);
901
902 done:
903 netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
904 speed, fec_req, fec, an_advertised, an, fc);
905 ice_print_topo_conflict(vsi);
906 }
907
908 /**
909 * ice_vsi_link_event - update the VSI's netdev
910 * @vsi: the VSI on which the link event occurred
911 * @link_up: whether or not the VSI needs to be set up or down
912 */
913 static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
914 {
915 if (!vsi)
916 return;
917
918 if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
919 return;
920
921 if (vsi->type == ICE_VSI_PF) {
922 if (link_up == netif_carrier_ok(vsi->netdev))
923 return;
924
925 if (link_up) {
926 netif_carrier_on(vsi->netdev);
927 netif_tx_wake_all_queues(vsi->netdev);
928 } else {
929 netif_carrier_off(vsi->netdev);
930 netif_tx_stop_all_queues(vsi->netdev);
931 }
932 }
933 }
934
935 /**
936 * ice_set_dflt_mib - send a default config MIB to the FW
937 * @pf: private PF struct
938 *
939 * This function sends a default configuration MIB to the FW.
940 *
941 * If this function errors out at any point, the driver is still able to
942 * function. The main impact is that LFC may not operate as expected.
943 * Therefore an error state in this function should be treated with a DBG
944 * message and continue on with driver rebuild/reenable.
945 */
946 static void ice_set_dflt_mib(struct ice_pf *pf)
947 {
948 struct device *dev = ice_pf_to_dev(pf);
949 u8 mib_type, *buf, *lldpmib = NULL;
950 u16 len, typelen, offset = 0;
951 struct ice_lldp_org_tlv *tlv;
952 struct ice_hw *hw = &pf->hw;
953 u32 ouisubtype;
954
955 mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
956 lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
957 if (!lldpmib) {
958 dev_dbg(dev, "%s Failed to allocate MIB memory\n",
959 __func__);
960 return;
961 }
962
963 /* Add ETS CFG TLV */
964 tlv = (struct ice_lldp_org_tlv *)lldpmib;
965 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
966 ICE_IEEE_ETS_TLV_LEN);
967 tlv->typelen = htons(typelen);
968 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
969 ICE_IEEE_SUBTYPE_ETS_CFG);
970 tlv->ouisubtype = htonl(ouisubtype);
971
972 buf = tlv->tlvinfo;
973 buf[0] = 0;
974
975 /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
976 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
977 * Octets 13 - 20 are TSA values - leave as zeros
978 */
979 buf[5] = 0x64;
980 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
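/* advance offset by the TLV payload length plus the 2-byte type/length header */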
981 offset += len + 2;
982 tlv = (struct ice_lldp_org_tlv *)
983 ((char *)tlv + sizeof(tlv->typelen) + len);
984
985 /* Add ETS REC TLV */
986 buf = tlv->tlvinfo;
987 tlv->typelen = htons(typelen);
988
989 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
990 ICE_IEEE_SUBTYPE_ETS_REC);
991 tlv->ouisubtype = htonl(ouisubtype);
992
993 /* First octet of buf is reserved
994 * Octets 1 - 4 map UP to TC - all UPs map to zero
995 * Octets 5 - 12 are BW values - set TC 0 to 100%.
996 * Octets 13 - 20 are TSA value - leave as zeros
997 */
998 buf[5] = 0x64;
999 offset += len + 2;
1000 tlv = (struct ice_lldp_org_tlv *)
1001 ((char *)tlv + sizeof(tlv->typelen) + len);
1002
1003 /* Add PFC CFG TLV */
1004 typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
1005 ICE_IEEE_PFC_TLV_LEN);
1006 tlv->typelen = htons(typelen);
1007
1008 ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
1009 ICE_IEEE_SUBTYPE_PFC_CFG);
1010 tlv->ouisubtype = htonl(ouisubtype);
1011
1012 /* Octet 1 left as all zeros - PFC disabled */
1013 buf[0] = 0x08;
1014 len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
1015 offset += len + 2;
1016
1017 if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
1018 dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
1019
1020 kfree(lldpmib);
1021 }
1022
1023 /**
1024 * ice_check_phy_fw_load - check if PHY FW load failed
1025 * @pf: pointer to PF struct
1026 * @link_cfg_err: bitmap from the link info structure
1027 *
1028 * check if external PHY FW load failed and print an error message if it did
1029 */
1030 static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
1031 {
1032 if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
1033 clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1034 return;
1035 }
1036
1037 if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
1038 return;
1039
1040 if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
1041 dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
1042 set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
1043 }
1044 }
1045
1046 /**
1047 * ice_check_module_power
1048 * @pf: pointer to PF struct
1049 * @link_cfg_err: bitmap from the link info structure
1050 *
1051 * check module power level returned by a previous call to aq_get_link_info
1052 * and print error messages if module power level is not supported
1053 */
1054 static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
1055 {
1056 /* if module power level is supported, clear the flag */
1057 if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
1058 ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
1059 clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1060 return;
1061 }
1062
1063 /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
1064 * above block didn't clear this bit, there's nothing to do
1065 */
1066 if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
1067 return;
1068
1069 if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
1070 dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
1071 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1072 } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
1073 dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
1074 set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
1075 }
1076 }
1077
1078 /**
1079 * ice_check_link_cfg_err - check if link configuration failed
1080 * @pf: pointer to the PF struct
1081 * @link_cfg_err: bitmap from the link info structure
1082 *
1083 * print if any link configuration failure happens due to the value in the
1084 * link_cfg_err parameter in the link info structure
1085 */
1086 static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
1087 {
1088 ice_check_module_power(pf, link_cfg_err);
1089 ice_check_phy_fw_load(pf, link_cfg_err);
1090 }
1091
1092 /**
1093 * ice_link_event - process the link event
1094 * @pf: PF that the link event is associated with
1095 * @pi: port_info for the port that the link event is associated with
1096 * @link_up: true if the physical link is up and false if it is down
1097 * @link_speed: current link speed received from the link event
1098 *
1099 * Returns 0 on success and negative on failure
1100 */
1101 static int
1102 ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
1103 u16 link_speed)
1104 {
1105 struct device *dev = ice_pf_to_dev(pf);
1106 struct ice_phy_info *phy_info;
1107 struct ice_vsi *vsi;
1108 u16 old_link_speed;
1109 bool old_link;
1110 int status;
1111
1112 phy_info = &pi->phy;
1113 phy_info->link_info_old = phy_info->link_info;
1114
1115 old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
1116 old_link_speed = phy_info->link_info_old.link_speed;
1117
1118 /* update the link info structures and re-enable link events,
1119 * don't bail on failure since other bookkeeping is still needed
1120 */
1121 status = ice_update_link_info(pi);
1122 if (status)
1123 dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
1124 pi->lport, status,
1125 ice_aq_str(pi->hw->adminq.sq_last_status));
1126
1127 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
1128
1129 /* Check if the link state is up after updating link info, and treat
1130 * this event as an UP event since the link is actually UP now.
1131 */
1132 if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
1133 link_up = true;
1134
1135 vsi = ice_get_main_vsi(pf);
1136 if (!vsi || !vsi->port_info)
1137 return -EINVAL;
1138
1139 /* turn off PHY if media was removed */
1140 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
1141 !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
1142 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
1143 ice_set_link(vsi, false);
1144 }
1145
1146 /* nothing to do if both the link state and the speed are unchanged */
1147 if (link_up == old_link && link_speed == old_link_speed)
1148 return 0;
1149
1150 if (!ice_is_e810(&pf->hw))
1151 ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
1152
1153 if (ice_is_dcb_active(pf)) {
1154 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1155 ice_dcb_rebuild(pf);
1156 } else {
1157 if (link_up)
1158 ice_set_dflt_mib(pf);
1159 }
1160 ice_vsi_link_event(vsi, link_up);
1161 ice_print_link_msg(vsi, link_up);
1162
1163 ice_vc_notify_link_state(pf);
1164
1165 return 0;
1166 }
1167
1168 /**
1169 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
1170 * @pf: board private structure
1171 */
1172 static void ice_watchdog_subtask(struct ice_pf *pf)
1173 {
1174 int i;
1175
1176 /* if interface is down do nothing */
1177 if (test_bit(ICE_DOWN, pf->state) ||
1178 test_bit(ICE_CFG_BUSY, pf->state))
1179 return;
1180
1181 /* make sure we don't do these things too often */
1182 if (time_before(jiffies,
1183 pf->serv_tmr_prev + pf->serv_tmr_period))
1184 return;
1185
1186 pf->serv_tmr_prev = jiffies;
1187
1188 /* Update the stats for active netdevs so the network stack
1189 * can look at updated numbers whenever it cares to
1190 */
1191 ice_update_pf_stats(pf);
1192 ice_for_each_vsi(pf, i)
1193 if (pf->vsi[i] && pf->vsi[i]->netdev)
1194 ice_update_vsi_stats(pf->vsi[i]);
1195 }
1196
1197 /**
1198 * ice_init_link_events - enable/initialize link events
1199 * @pi: pointer to the port_info instance
1200 *
1201 * Returns -EIO on failure, 0 on success
1202 */
1203 static int ice_init_link_events(struct ice_port_info *pi)
1204 {
1205 u16 mask;
1206
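/* mask off (disable) every link event except the ones listed below,
 * which remain enabled
 */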
1207 mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
1208 ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
1209 ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
1210
1211 if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
1212 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
1213 pi->lport);
1214 return -EIO;
1215 }
1216
1217 if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
1218 dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
1219 pi->lport);
1220 return -EIO;
1221 }
1222
1223 return 0;
1224 }
1225
1226 /**
1227 * ice_handle_link_event - handle link event via ARQ
1228 * @pf: PF that the link event is associated with
1229 * @event: event structure containing link status info
1230 */
1231 static int
1232 ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1233 {
1234 struct ice_aqc_get_link_status_data *link_data;
1235 struct ice_port_info *port_info;
1236 int status;
1237
1238 link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
1239 port_info = pf->hw.port_info;
1240 if (!port_info)
1241 return -EINVAL;
1242
1243 status = ice_link_event(pf, port_info,
1244 !!(link_data->link_info & ICE_AQ_LINK_UP),
1245 le16_to_cpu(link_data->link_speed));
1246 if (status)
1247 dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
1248 status);
1249
1250 return status;
1251 }
1252
1253 enum ice_aq_task_state {
1254 ICE_AQ_TASK_WAITING = 0,
1255 ICE_AQ_TASK_COMPLETE,
1256 ICE_AQ_TASK_CANCELED,
1257 };
1258
1259 struct ice_aq_task {
1260 struct hlist_node entry;
1261
1262 u16 opcode;
1263 struct ice_rq_event_info *event;
1264 enum ice_aq_task_state state;
1265 };
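/* Hypothetical usage sketch for the wait helper below (not an actual caller
 * here, names assumed): declare a struct ice_rq_event_info, optionally point
 * its msg_buf at a buffer of buf_len bytes, then call
 * ice_aq_wait_for_event(pf, opcode, msecs_to_jiffies(3000), &event);
 * a return of 0 means the event arrived, -ETIMEDOUT or -ECANCELED means it
 * did not.
 */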
1266
1267 /**
1268 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
1269 * @pf: pointer to the PF private structure
1270 * @opcode: the opcode to wait for
1271 * @timeout: how long to wait, in jiffies
1272 * @event: storage for the event info
1273 *
1274 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
1275 * current thread will be put to sleep until the specified event occurs or
1276 * until the given timeout is reached.
1277 *
1278 * To obtain only the descriptor contents, pass an event without an allocated
1279 * msg_buf. If the complete data buffer is desired, allocate the
1280 * event->msg_buf with enough space ahead of time.
1281 *
1282 * Returns: zero on success, or a negative error code on failure.
1283 */
1284 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
1285 struct ice_rq_event_info *event)
1286 {
1287 struct device *dev = ice_pf_to_dev(pf);
1288 struct ice_aq_task *task;
1289 unsigned long start;
1290 long ret;
1291 int err;
1292
1293 task = kzalloc(sizeof(*task), GFP_KERNEL);
1294 if (!task)
1295 return -ENOMEM;
1296
1297 INIT_HLIST_NODE(&task->entry);
1298 task->opcode = opcode;
1299 task->event = event;
1300 task->state = ICE_AQ_TASK_WAITING;
1301
1302 spin_lock_bh(&pf->aq_wait_lock);
1303 hlist_add_head(&task->entry, &pf->aq_wait_list);
1304 spin_unlock_bh(&pf->aq_wait_lock);
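/* from this point on ice_aq_check_events() can complete the task, possibly
 * before we even start sleeping below
 */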
1305
1306 start = jiffies;
1307
1308 ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
1309 timeout);
1310 switch (task->state) {
1311 case ICE_AQ_TASK_WAITING:
1312 err = ret < 0 ? ret : -ETIMEDOUT;
1313 break;
1314 case ICE_AQ_TASK_CANCELED:
1315 err = ret < 0 ? ret : -ECANCELED;
1316 break;
1317 case ICE_AQ_TASK_COMPLETE:
1318 err = ret < 0 ? ret : 0;
1319 break;
1320 default:
1321 WARN(1, "Unexpected AdminQ wait task state %u", task->state);
1322 err = -EINVAL;
1323 break;
1324 }
1325
1326 dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
1327 jiffies_to_msecs(jiffies - start),
1328 jiffies_to_msecs(timeout),
1329 opcode);
1330
1331 spin_lock_bh(&pf->aq_wait_lock);
1332 hlist_del(&task->entry);
1333 spin_unlock_bh(&pf->aq_wait_lock);
1334 kfree(task);
1335
1336 return err;
1337 }
1338
1339 /**
1340 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
1341 * @pf: pointer to the PF private structure
1342 * @opcode: the opcode of the event
1343 * @event: the event to check
1344 *
1345 * Loops over the current list of pending threads waiting for an AdminQ event.
1346 * For each matching task, copy the contents of the event into the task
1347 * structure and wake up the thread.
1348 *
1349 * If multiple threads wait for the same opcode, they will all be woken up.
1350 *
1351 * Note that event->msg_buf will only be duplicated if the event has a buffer
1352 * with enough space already allocated. Otherwise, only the descriptor and
1353 * message length will be copied.
1356 */
1357 static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
1358 struct ice_rq_event_info *event)
1359 {
1360 struct ice_rq_event_info *task_ev;
1361 struct ice_aq_task *task;
1362 bool found = false;
1363
1364 spin_lock_bh(&pf->aq_wait_lock);
1365 hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
1366 if (task->state || task->opcode != opcode)
1367 continue;
1368
1369 task_ev = task->event;
1370 memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
1371 task_ev->msg_len = event->msg_len;
1372
1373 /* Only copy the data buffer if a destination was set */
1374 if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
1375 memcpy(task_ev->msg_buf, event->msg_buf,
1376 event->buf_len);
1377 task_ev->buf_len = event->buf_len;
1378 }
1379
1380 task->state = ICE_AQ_TASK_COMPLETE;
1381 found = true;
1382 }
1383 spin_unlock_bh(&pf->aq_wait_lock);
1384
1385 if (found)
1386 wake_up(&pf->aq_wait_queue);
1387 }
1388
1389 /**
1390 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
1391 * @pf: the PF private structure
1392 *
1393 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
1394 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
1395 */
1396 static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
1397 {
1398 struct ice_aq_task *task;
1399
1400 spin_lock_bh(&pf->aq_wait_lock);
1401 hlist_for_each_entry(task, &pf->aq_wait_list, entry)
1402 task->state = ICE_AQ_TASK_CANCELED;
1403 spin_unlock_bh(&pf->aq_wait_lock);
1404
1405 wake_up(&pf->aq_wait_queue);
1406 }
1407
1408 /**
1409 * __ice_clean_ctrlq - helper function to clean controlq rings
1410 * @pf: ptr to struct ice_pf
1411 * @q_type: specific Control queue type
1412 */
1413 static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
1414 {
1415 struct device *dev = ice_pf_to_dev(pf);
1416 struct ice_rq_event_info event;
1417 struct ice_hw *hw = &pf->hw;
1418 struct ice_ctl_q_info *cq;
1419 u16 pending, i = 0;
1420 const char *qtype;
1421 u32 oldval, val;
1422
1423 /* Do not clean control queue if/when PF reset fails */
1424 if (test_bit(ICE_RESET_FAILED, pf->state))
1425 return 0;
1426
1427 switch (q_type) {
1428 case ICE_CTL_Q_ADMIN:
1429 cq = &hw->adminq;
1430 qtype = "Admin";
1431 break;
1432 case ICE_CTL_Q_SB:
1433 cq = &hw->sbq;
1434 qtype = "Sideband";
1435 break;
1436 case ICE_CTL_Q_MAILBOX:
1437 cq = &hw->mailboxq;
1438 qtype = "Mailbox";
1439 /* we are going to try to detect a malicious VF, so set the
1440 * state to begin detection
1441 */
1442 hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
1443 break;
1444 default:
1445 dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
1446 return 0;
1447 }
1448
1449 /* check for error indications - PF_xx_AxQLEN register layout for
1450 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
1451 */
1452 val = rd32(hw, cq->rq.len);
1453 if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1454 PF_FW_ARQLEN_ARQCRIT_M)) {
1455 oldval = val;
1456 if (val & PF_FW_ARQLEN_ARQVFE_M)
1457 dev_dbg(dev, "%s Receive Queue VF Error detected\n",
1458 qtype);
1459 if (val & PF_FW_ARQLEN_ARQOVFL_M) {
1460 dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
1461 qtype);
1462 }
1463 if (val & PF_FW_ARQLEN_ARQCRIT_M)
1464 dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
1465 qtype);
1466 val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
1467 PF_FW_ARQLEN_ARQCRIT_M);
1468 if (oldval != val)
1469 wr32(hw, cq->rq.len, val);
1470 }
1471
1472 val = rd32(hw, cq->sq.len);
1473 if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1474 PF_FW_ATQLEN_ATQCRIT_M)) {
1475 oldval = val;
1476 if (val & PF_FW_ATQLEN_ATQVFE_M)
1477 dev_dbg(dev, "%s Send Queue VF Error detected\n",
1478 qtype);
1479 if (val & PF_FW_ATQLEN_ATQOVFL_M) {
1480 dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
1481 qtype);
1482 }
1483 if (val & PF_FW_ATQLEN_ATQCRIT_M)
1484 dev_dbg(dev, "%s Send Queue Critical Error detected\n",
1485 qtype);
1486 val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
1487 PF_FW_ATQLEN_ATQCRIT_M);
1488 if (oldval != val)
1489 wr32(hw, cq->sq.len, val);
1490 }
1491
1492 event.buf_len = cq->rq_buf_size;
1493 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
1494 if (!event.msg_buf)
1495 return 0;
1496
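/* drain events from the receive queue, bounded by the ICE_DFLT_IRQ_WORK budget */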
1497 do {
1498 u16 opcode;
1499 int ret;
1500
1501 ret = ice_clean_rq_elem(hw, cq, &event, &pending);
1502 if (ret == -EALREADY)
1503 break;
1504 if (ret) {
1505 dev_err(dev, "%s Receive Queue event error %d\n", qtype,
1506 ret);
1507 break;
1508 }
1509
1510 opcode = le16_to_cpu(event.desc.opcode);
1511
1512 /* Notify any thread that might be waiting for this event */
1513 ice_aq_check_events(pf, opcode, &event);
1514
1515 switch (opcode) {
1516 case ice_aqc_opc_get_link_status:
1517 if (ice_handle_link_event(pf, &event))
1518 dev_err(dev, "Could not handle link event\n");
1519 break;
1520 case ice_aqc_opc_event_lan_overflow:
1521 ice_vf_lan_overflow_event(pf, &event);
1522 break;
1523 case ice_mbx_opc_send_msg_to_pf:
1524 if (!ice_is_malicious_vf(pf, &event, i, pending))
1525 ice_vc_process_vf_msg(pf, &event);
1526 break;
1527 case ice_aqc_opc_fw_logging:
1528 ice_output_fw_log(hw, &event.desc, event.msg_buf);
1529 break;
1530 case ice_aqc_opc_lldp_set_mib_change:
1531 ice_dcb_process_lldp_set_mib_change(pf, &event);
1532 break;
1533 default:
1534 dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
1535 qtype, opcode);
1536 break;
1537 }
1538 } while (pending && (i++ < ICE_DFLT_IRQ_WORK));
1539
1540 kfree(event.msg_buf);
1541
1542 return pending && (i == ICE_DFLT_IRQ_WORK);
1543 }
1544
1545 /**
1546 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
1547 * @hw: pointer to hardware info
1548 * @cq: control queue information
1549 *
1550 * returns true if there are pending messages in a queue, false if there aren't
1551 */
1552 static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
1553 {
1554 u16 ntu;
1555
1556 ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1557 return cq->rq.next_to_clean != ntu;
1558 }
1559
1560 /**
1561 * ice_clean_adminq_subtask - clean the AdminQ rings
1562 * @pf: board private structure
1563 */
1564 static void ice_clean_adminq_subtask(struct ice_pf *pf)
1565 {
1566 struct ice_hw *hw = &pf->hw;
1567
1568 if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
1569 return;
1570
1571 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
1572 return;
1573
1574 clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
1575
1576 /* There might be a situation where new messages arrive to a control
1577 * queue between processing the last message and clearing the
1578 * EVENT_PENDING bit. So before exiting, check queue head again (using
1579 * ice_ctrlq_pending) and process new messages if any.
1580 */
1581 if (ice_ctrlq_pending(hw, &hw->adminq))
1582 __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
1583
1584 ice_flush(hw);
1585 }
1586
1587 /**
1588 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
1589 * @pf: board private structure
1590 */
1591 static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
1592 {
1593 struct ice_hw *hw = &pf->hw;
1594
1595 if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
1596 return;
1597
1598 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
1599 return;
1600
1601 clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
1602
1603 if (ice_ctrlq_pending(hw, &hw->mailboxq))
1604 __ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);
1605
1606 ice_flush(hw);
1607 }
1608
1609 /**
1610 * ice_clean_sbq_subtask - clean the Sideband Queue rings
1611 * @pf: board private structure
1612 */
1613 static void ice_clean_sbq_subtask(struct ice_pf *pf)
1614 {
1615 struct ice_hw *hw = &pf->hw;
1616
1617 /* Nothing to do here if sideband queue is not supported */
1618 if (!ice_is_sbq_supported(hw)) {
1619 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1620 return;
1621 }
1622
1623 if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
1624 return;
1625
1626 if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
1627 return;
1628
1629 clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
1630
1631 if (ice_ctrlq_pending(hw, &hw->sbq))
1632 __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
1633
1634 ice_flush(hw);
1635 }
1636
1637 /**
1638 * ice_service_task_schedule - schedule the service task to wake up
1639 * @pf: board private structure
1640 *
1641 * If not already scheduled, this puts the task into the work queue.
1642 */
1643 void ice_service_task_schedule(struct ice_pf *pf)
1644 {
1645 if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
1646 !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
1647 !test_bit(ICE_NEEDS_RESTART, pf->state))
1648 queue_work(ice_wq, &pf->serv_task);
1649 }
1650
1651 /**
1652 * ice_service_task_complete - finish up the service task
1653 * @pf: board private structure
1654 */
1655 static void ice_service_task_complete(struct ice_pf *pf)
1656 {
1657 WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));
1658
1659 /* force memory (pf->state) to sync before next service task */
1660 smp_mb__before_atomic();
1661 clear_bit(ICE_SERVICE_SCHED, pf->state);
1662 }
1663
1664 /**
1665 * ice_service_task_stop - stop service task and cancel works
1666 * @pf: board private structure
1667 *
1668 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
1669 * 1 otherwise.
1670 */
1671 static int ice_service_task_stop(struct ice_pf *pf)
1672 {
1673 int ret;
1674
1675 ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
1676
1677 if (pf->serv_tmr.function)
1678 del_timer_sync(&pf->serv_tmr);
1679 if (pf->serv_task.func)
1680 cancel_work_sync(&pf->serv_task);
1681
1682 clear_bit(ICE_SERVICE_SCHED, pf->state);
1683 return ret;
1684 }
1685
1686 /**
1687 * ice_service_task_restart - restart service task and schedule works
1688 * @pf: board private structure
1689 *
1690 * This function is needed for suspend and resume works (e.g WoL scenario)
1691 */
1692 static void ice_service_task_restart(struct ice_pf *pf)
1693 {
1694 clear_bit(ICE_SERVICE_DIS, pf->state);
1695 ice_service_task_schedule(pf);
1696 }
1697
1698 /**
1699 * ice_service_timer - timer callback to schedule service task
1700 * @t: pointer to timer_list
1701 */
1702 static void ice_service_timer(struct timer_list *t)
1703 {
1704 struct ice_pf *pf = from_timer(pf, t, serv_tmr);
1705
1706 mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
1707 ice_service_task_schedule(pf);
1708 }
1709
1710 /**
1711 * ice_handle_mdd_event - handle malicious driver detect event
1712 * @pf: pointer to the PF structure
1713 *
1714 * Called from service task. OICR interrupt handler indicates MDD event.
1715 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1716 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1717 * disable the queue, the PF can be configured to reset the VF using ethtool
1718 * private flag mdd-auto-reset-vf.
1719 */
1720 static void ice_handle_mdd_event(struct ice_pf *pf)
1721 {
1722 struct device *dev = ice_pf_to_dev(pf);
1723 struct ice_hw *hw = &pf->hw;
1724 struct ice_vf *vf;
1725 unsigned int bkt;
1726 u32 reg;
1727
1728 if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1729 /* Since the VF MDD event logging is rate limited, check if
1730 * there are pending MDD events.
1731 */
1732 ice_print_vfs_mdd_events(pf);
1733 return;
1734 }
1735
1736 /* find what triggered an MDD event */
1737 reg = rd32(hw, GL_MDET_TX_PQM);
1738 if (reg & GL_MDET_TX_PQM_VALID_M) {
1739 u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1740 GL_MDET_TX_PQM_PF_NUM_S;
1741 u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1742 GL_MDET_TX_PQM_VF_NUM_S;
1743 u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1744 GL_MDET_TX_PQM_MAL_TYPE_S;
1745 u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1746 GL_MDET_TX_PQM_QNUM_S);
1747
1748 if (netif_msg_tx_err(pf))
1749 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1750 event, queue, pf_num, vf_num);
1751 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1752 }
1753
1754 reg = rd32(hw, GL_MDET_TX_TCLAN);
1755 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1756 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1757 GL_MDET_TX_TCLAN_PF_NUM_S;
1758 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1759 GL_MDET_TX_TCLAN_VF_NUM_S;
1760 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1761 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1762 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1763 GL_MDET_TX_TCLAN_QNUM_S);
1764
1765 if (netif_msg_tx_err(pf))
1766 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1767 event, queue, pf_num, vf_num);
1768 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1769 }
1770
1771 reg = rd32(hw, GL_MDET_RX);
1772 if (reg & GL_MDET_RX_VALID_M) {
1773 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1774 GL_MDET_RX_PF_NUM_S;
1775 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1776 GL_MDET_RX_VF_NUM_S;
1777 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1778 GL_MDET_RX_MAL_TYPE_S;
1779 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1780 GL_MDET_RX_QNUM_S);
1781
1782 if (netif_msg_rx_err(pf))
1783 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1784 event, queue, pf_num, vf_num);
1785 wr32(hw, GL_MDET_RX, 0xffffffff);
1786 }
1787
1788 /* check to see if this PF caused an MDD event */
1789 reg = rd32(hw, PF_MDET_TX_PQM);
1790 if (reg & PF_MDET_TX_PQM_VALID_M) {
1791 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1792 if (netif_msg_tx_err(pf))
1793 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1794 }
1795
1796 reg = rd32(hw, PF_MDET_TX_TCLAN);
1797 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1798 wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1799 if (netif_msg_tx_err(pf))
1800 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1801 }
1802
1803 reg = rd32(hw, PF_MDET_RX);
1804 if (reg & PF_MDET_RX_VALID_M) {
1805 wr32(hw, PF_MDET_RX, 0xFFFF);
1806 if (netif_msg_rx_err(pf))
1807 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1808 }
1809
1810 /* Check to see if one of the VFs caused an MDD event, and then
1811 * increment counters and set print pending
1812 */
1813 mutex_lock(&pf->vfs.table_lock);
1814 ice_for_each_vf(pf, bkt, vf) {
1815 reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
1816 if (reg & VP_MDET_TX_PQM_VALID_M) {
1817 wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
1818 vf->mdd_tx_events.count++;
1819 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1820 if (netif_msg_tx_err(pf))
1821 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1822 vf->vf_id);
1823 }
1824
1825 reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
1826 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1827 wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
1828 vf->mdd_tx_events.count++;
1829 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1830 if (netif_msg_tx_err(pf))
1831 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1832 vf->vf_id);
1833 }
1834
1835 reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
1836 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1837 wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
1838 vf->mdd_tx_events.count++;
1839 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1840 if (netif_msg_tx_err(pf))
1841 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1842 vf->vf_id);
1843 }
1844
1845 reg = rd32(hw, VP_MDET_RX(vf->vf_id));
1846 if (reg & VP_MDET_RX_VALID_M) {
1847 wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
1848 vf->mdd_rx_events.count++;
1849 set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1850 if (netif_msg_rx_err(pf))
1851 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1852 vf->vf_id);
1853
1854 /* Since the queue is disabled on VF Rx MDD events, the
1855 * PF can be configured to reset the VF through ethtool
1856 * private flag mdd-auto-reset-vf.
1857 */
1858 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1859 /* VF MDD event counters will be cleared by
1860 * reset, so print the event prior to reset.
1861 */
1862 ice_print_vf_rx_mdd_event(vf);
1863 ice_reset_vf(vf, ICE_VF_RESET_LOCK);
1864 }
1865 }
1866 }
1867 mutex_unlock(&pf->vfs.table_lock);
1868
1869 ice_print_vfs_mdd_events(pf);
1870 }
1871
1872 /**
1873 * ice_force_phys_link_state - Force the physical link state
1874 * @vsi: VSI to force the physical link state to up/down
1875 * @link_up: true/false indicates to set the physical link to up/down
1876 *
1877 * Force the physical link state by getting the current PHY capabilities from
1878 * hardware and setting the PHY config based on the determined capabilities. If
1879 * link changes, a link event will be triggered because both the Enable Automatic
1880 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1881 *
1882 * Returns 0 on success, negative on failure
1883 */
1884 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1885 {
1886 struct ice_aqc_get_phy_caps_data *pcaps;
1887 struct ice_aqc_set_phy_cfg_data *cfg;
1888 struct ice_port_info *pi;
1889 struct device *dev;
1890 int retcode;
1891
1892 if (!vsi || !vsi->port_info || !vsi->back)
1893 return -EINVAL;
1894 if (vsi->type != ICE_VSI_PF)
1895 return 0;
1896
1897 dev = ice_pf_to_dev(vsi->back);
1898
1899 pi = vsi->port_info;
1900
1901 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1902 if (!pcaps)
1903 return -ENOMEM;
1904
1905 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1906 NULL);
1907 if (retcode) {
1908 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1909 vsi->vsi_num, retcode);
1910 retcode = -EIO;
1911 goto out;
1912 }
1913
1914 /* No change in link */
1915 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1916 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1917 goto out;
1918
1919 /* Use the current user PHY configuration. The current user PHY
1920 * configuration is initialized during probe from PHY capabilities
1921 * software mode, and updated on set PHY configuration.
1922 */
1923 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1924 if (!cfg) {
1925 retcode = -ENOMEM;
1926 goto out;
1927 }
1928
1929 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1930 if (link_up)
1931 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1932 else
1933 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1934
1935 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1936 if (retcode) {
1937 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1938 vsi->vsi_num, retcode);
1939 retcode = -EIO;
1940 }
1941
1942 kfree(cfg);
1943 out:
1944 kfree(pcaps);
1945 return retcode;
1946 }
1947
1948 /**
1949 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1950 * @pi: port info structure
1951 *
1952 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1953 */
1954 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1955 {
1956 struct ice_aqc_get_phy_caps_data *pcaps;
1957 struct ice_pf *pf = pi->hw->back;
1958 int err;
1959
1960 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1961 if (!pcaps)
1962 return -ENOMEM;
1963
1964 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
1965 pcaps, NULL);
1966
1967 if (err) {
1968 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1969 goto out;
1970 }
1971
1972 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1973 pf->nvm_phy_type_lo = pcaps->phy_type_low;
1974
1975 out:
1976 kfree(pcaps);
1977 return err;
1978 }
1979
1980 /**
1981 * ice_init_link_dflt_override - Initialize link default override
1982 * @pi: port info structure
1983 *
1984 * Initialize link default override and PHY total port shutdown during probe
1985 */
1986 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1987 {
1988 struct ice_link_default_override_tlv *ldo;
1989 struct ice_pf *pf = pi->hw->back;
1990
1991 ldo = &pf->link_dflt_override;
1992 if (ice_get_link_default_override(ldo, pi))
1993 return;
1994
1995 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1996 return;
1997
1998 /* Enable Total Port Shutdown (override/replace link-down-on-close
1999 * ethtool private flag) for ports with Port Disable bit set.
2000 */
2001 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
2002 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
2003 }
2004
2005 /**
2006 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
2007 * @pi: port info structure
2008 *
2009 * If default override is enabled, initialize the user PHY cfg speed and FEC
2010 * settings using the default override mask from the NVM.
2011 *
2012 * The PHY should only be configured with the default override settings the
2013 * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
2014 * is used to indicate that the user PHY cfg default override is initialized
2015 * and the PHY has not been configured with the default override settings. The
2016 * state is set here, and cleared in ice_configure_phy the first time the PHY is
2017 * configured.
2018 *
2019 * This function should be called only if the FW doesn't support default
2020 * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
2021 */
2022 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
2023 {
2024 struct ice_link_default_override_tlv *ldo;
2025 struct ice_aqc_set_phy_cfg_data *cfg;
2026 struct ice_phy_info *phy = &pi->phy;
2027 struct ice_pf *pf = pi->hw->back;
2028
2029 ldo = &pf->link_dflt_override;
2030
2031 /* If link default override is enabled, use it to mask NVM PHY capabilities
2032 * for speed and FEC default configuration.
2033 */
2034 cfg = &phy->curr_user_phy_cfg;
2035
2036 if (ldo->phy_type_low || ldo->phy_type_high) {
2037 cfg->phy_type_low = pf->nvm_phy_type_lo &
2038 cpu_to_le64(ldo->phy_type_low);
2039 cfg->phy_type_high = pf->nvm_phy_type_hi &
2040 cpu_to_le64(ldo->phy_type_high);
2041 }
2042 cfg->link_fec_opt = ldo->fec_options;
2043 phy->curr_user_fec_req = ICE_FEC_AUTO;
2044
2045 set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
2046 }
2047
2048 /**
2049 * ice_init_phy_user_cfg - Initialize the PHY user configuration
2050 * @pi: port info structure
2051 *
2052 * Initialize the current user PHY configuration, speed, FEC, and FC requested
2053 * mode to default. The PHY defaults are from get PHY capabilities topology
2054 * with media so call when media is first available. An error is returned if
2055 * called when media is not available. The PHY initialization completed state is
2056 * set here.
2057 *
2058 * These configurations are used when setting PHY
2059 * configuration. The user PHY configuration is updated on set PHY
2060 * configuration. Returns 0 on success, negative on failure
2061 */
2062 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
2063 {
2064 struct ice_aqc_get_phy_caps_data *pcaps;
2065 struct ice_phy_info *phy = &pi->phy;
2066 struct ice_pf *pf = pi->hw->back;
2067 int err;
2068
2069 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2070 return -EIO;
2071
2072 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2073 if (!pcaps)
2074 return -ENOMEM;
2075
2076 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2077 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2078 pcaps, NULL);
2079 else
2080 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2081 pcaps, NULL);
2082 if (err) {
2083 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
2084 goto err_out;
2085 }
2086
2087 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
2088
2089 /* check if lenient mode is supported and enabled */
2090 if (ice_fw_supports_link_override(pi->hw) &&
2091 !(pcaps->module_compliance_enforcement &
2092 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
2093 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
2094
2095 /* if the FW supports default PHY configuration mode, then the driver
2096 * does not have to apply link override settings. If not,
2097 * initialize user PHY configuration with link override values
2098 */
2099 if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
2100 (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
2101 ice_init_phy_cfg_dflt_override(pi);
2102 goto out;
2103 }
2104 }
2105
2106 /* if link default override is not enabled, set user flow control and
2107 * FEC settings based on what get_phy_caps returned
2108 */
2109 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
2110 pcaps->link_fec_options);
2111 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
2112
2113 out:
2114 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
2115 set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
2116 err_out:
2117 kfree(pcaps);
2118 return err;
2119 }
2120
2121 /**
2122 * ice_configure_phy - configure PHY
2123 * @vsi: VSI of PHY
2124 *
2125 * Set the PHY configuration. If the current PHY configuration is the same as
2126 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
2127 * configure the PHY based on the get PHY capabilities for topology with media.
2128 */
2129 static int ice_configure_phy(struct ice_vsi *vsi)
2130 {
2131 struct device *dev = ice_pf_to_dev(vsi->back);
2132 struct ice_port_info *pi = vsi->port_info;
2133 struct ice_aqc_get_phy_caps_data *pcaps;
2134 struct ice_aqc_set_phy_cfg_data *cfg;
2135 struct ice_phy_info *phy = &pi->phy;
2136 struct ice_pf *pf = vsi->back;
2137 int err;
2138
2139 /* Ensure we have media as we cannot configure a medialess port */
2140 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
2141 return -ENOMEDIUM;
2142
2143 ice_print_topo_conflict(vsi);
2144
2145 if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
2146 phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
2147 return -EPERM;
2148
2149 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
2150 return ice_force_phys_link_state(vsi, true);
2151
2152 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
2153 if (!pcaps)
2154 return -ENOMEM;
2155
2156 /* Get current PHY config */
2157 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
2158 NULL);
2159 if (err) {
2160 dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
2161 vsi->vsi_num, err);
2162 goto done;
2163 }
2164
2165 /* If PHY enable link is configured and configuration has not changed,
2166 * there's nothing to do
2167 */
2168 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
2169 ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
2170 goto done;
2171
2172 /* Use PHY topology as baseline for configuration */
2173 memset(pcaps, 0, sizeof(*pcaps));
2174 if (ice_fw_supports_report_dflt_cfg(pi->hw))
2175 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2176 pcaps, NULL);
2177 else
2178 err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2179 pcaps, NULL);
2180 if (err) {
2181 dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
2182 vsi->vsi_num, err);
2183 goto done;
2184 }
2185
2186 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2187 if (!cfg) {
2188 err = -ENOMEM;
2189 goto done;
2190 }
2191
2192 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2193
2194 /* Speed - If default override pending, use curr_user_phy_cfg set in
2195 * ice_init_phy_cfg_dflt_override.
2196 */
2197 if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2198 vsi->back->state)) {
2199 cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2200 cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2201 } else {
2202 u64 phy_low = 0, phy_high = 0;
2203
2204 ice_update_phy_type(&phy_low, &phy_high,
2205 pi->phy.curr_user_speed_req);
2206 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2207 cfg->phy_type_high = pcaps->phy_type_high &
2208 cpu_to_le64(phy_high);
2209 }
2210
2211 /* Can't provide what was requested; use PHY capabilities */
2212 if (!cfg->phy_type_low && !cfg->phy_type_high) {
2213 cfg->phy_type_low = pcaps->phy_type_low;
2214 cfg->phy_type_high = pcaps->phy_type_high;
2215 }
2216
2217 /* FEC */
2218 ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2219
2220 /* Can't provide what was requested; use PHY capabilities */
2221 if (cfg->link_fec_opt !=
2222 (cfg->link_fec_opt & pcaps->link_fec_options)) {
2223 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2224 cfg->link_fec_opt = pcaps->link_fec_options;
2225 }
2226
2227 /* Flow Control - always supported; no need to check against
2228 * capabilities
2229 */
2230 ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2231
2232 /* Enable link and link update */
2233 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2234
2235 err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2236 if (err)
2237 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
2238 vsi->vsi_num, err);
2239
2240 kfree(cfg);
2241 done:
2242 kfree(pcaps);
2243 return err;
2244 }
2245
2246 /**
2247 * ice_check_media_subtask - Check for media
2248 * @pf: pointer to PF struct
2249 *
2250 * If media is available, then initialize the PHY user configuration if it has
2251 * not been done already, and configure the PHY if the interface is up.
2252 */
2253 static void ice_check_media_subtask(struct ice_pf *pf)
2254 {
2255 struct ice_port_info *pi;
2256 struct ice_vsi *vsi;
2257 int err;
2258
2259 /* No need to check for media if it's already present */
2260 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2261 return;
2262
2263 vsi = ice_get_main_vsi(pf);
2264 if (!vsi)
2265 return;
2266
2267 /* Refresh link info and check if media is present */
2268 pi = vsi->port_info;
2269 err = ice_update_link_info(pi);
2270 if (err)
2271 return;
2272
2273 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
2274
2275 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2276 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2277 ice_init_phy_user_cfg(pi);
2278
2279 /* PHY settings are reset on media insertion, reconfigure
2280 * PHY to preserve settings.
2281 */
2282 if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2283 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2284 return;
2285
2286 err = ice_configure_phy(vsi);
2287 if (!err)
2288 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2289
2290 /* A Link Status Event will be generated; the event handler
2291 * will complete bringing the interface up
2292 */
2293 }
2294 }
2295
2296 /**
2297 * ice_service_task - manage and run subtasks
2298 * @work: pointer to work_struct contained by the PF struct
2299 */
2300 static void ice_service_task(struct work_struct *work)
2301 {
2302 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2303 unsigned long start_time = jiffies;
2304
2305 /* subtasks */
2306
2307 /* process reset requests first */
2308 ice_reset_subtask(pf);
2309
2310 /* bail if a reset/recovery cycle is pending or rebuild failed */
2311 if (ice_is_reset_in_progress(pf->state) ||
2312 test_bit(ICE_SUSPENDED, pf->state) ||
2313 test_bit(ICE_NEEDS_RESTART, pf->state)) {
2314 ice_service_task_complete(pf);
2315 return;
2316 }
2317
2318 if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2319 struct iidc_event *event;
2320
2321 event = kzalloc(sizeof(*event), GFP_KERNEL);
2322 if (event) {
2323 set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2324 /* report the entire OICR value to AUX driver */
2325 swap(event->reg, pf->oicr_err_reg);
2326 ice_send_event_to_aux(pf, event);
2327 kfree(event);
2328 }
2329 }
2330
2331 /* unplug aux dev per request, if an unplug request came in
2332 * while processing a plug request, this will handle it
2333 */
2334 if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2335 ice_unplug_aux_dev(pf);
2336
2337 /* Plug aux device per request */
2338 if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2339 ice_plug_aux_dev(pf);
2340
2341 if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2342 struct iidc_event *event;
2343
2344 event = kzalloc(sizeof(*event), GFP_KERNEL);
2345 if (event) {
2346 set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2347 ice_send_event_to_aux(pf, event);
2348 kfree(event);
2349 }
2350 }
2351
2352 ice_clean_adminq_subtask(pf);
2353 ice_check_media_subtask(pf);
2354 ice_check_for_hang_subtask(pf);
2355 ice_sync_fltr_subtask(pf);
2356 ice_handle_mdd_event(pf);
2357 ice_watchdog_subtask(pf);
2358
2359 if (ice_is_safe_mode(pf)) {
2360 ice_service_task_complete(pf);
2361 return;
2362 }
2363
2364 ice_process_vflr_event(pf);
2365 ice_clean_mailboxq_subtask(pf);
2366 ice_clean_sbq_subtask(pf);
2367 ice_sync_arfs_fltrs(pf);
2368 ice_flush_fdir_ctx(pf);
2369
2370 /* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2371 ice_service_task_complete(pf);
2372
2373 /* If the tasks have taken longer than one service timer period
2374 * or there is more work to be done, reset the service timer to
2375 * schedule the service task now.
2376 */
2377 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2378 test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2379 test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2380 test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2381 test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2382 test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2383 test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2384 mod_timer(&pf->serv_tmr, jiffies);
2385 }
2386
2387 /**
2388 * ice_set_ctrlq_len - helper function to set controlq length
2389 * @hw: pointer to the HW instance
2390 */
2391 static void ice_set_ctrlq_len(struct ice_hw *hw)
2392 {
2393 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2394 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2395 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2396 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
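/* the mailbox Rx queue is sized to the maximum the ARQLEN field allows */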
2397 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2398 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2399 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2400 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2401 hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2402 hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2403 hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2404 hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2405 }
2406
2407 /**
2408 * ice_schedule_reset - schedule a reset
2409 * @pf: board private structure
2410 * @reset: reset being requested
2411 */
2412 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2413 {
2414 struct device *dev = ice_pf_to_dev(pf);
2415
2416 /* bail out if earlier reset has failed */
2417 if (test_bit(ICE_RESET_FAILED, pf->state)) {
2418 dev_dbg(dev, "earlier reset has failed\n");
2419 return -EIO;
2420 }
2421 /* bail if reset/recovery already in progress */
2422 if (ice_is_reset_in_progress(pf->state)) {
2423 dev_dbg(dev, "Reset already in progress\n");
2424 return -EBUSY;
2425 }
2426
2427 switch (reset) {
2428 case ICE_RESET_PFR:
2429 set_bit(ICE_PFR_REQ, pf->state);
2430 break;
2431 case ICE_RESET_CORER:
2432 set_bit(ICE_CORER_REQ, pf->state);
2433 break;
2434 case ICE_RESET_GLOBR:
2435 set_bit(ICE_GLOBR_REQ, pf->state);
2436 break;
2437 default:
2438 return -EINVAL;
2439 }
2440
2441 ice_service_task_schedule(pf);
2442 return 0;
2443 }
2444
2445 /**
2446 * ice_irq_affinity_notify - Callback for affinity changes
2447 * @notify: context as to what irq was changed
2448 * @mask: the new affinity mask
2449 *
2450 * This is a callback function used by the irq_set_affinity_notifier function
2451 * so that we may register to receive changes to the irq affinity masks.
2452 */
2453 static void
2454 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2455 const cpumask_t *mask)
2456 {
2457 struct ice_q_vector *q_vector =
2458 container_of(notify, struct ice_q_vector, affinity_notify);
2459
2460 cpumask_copy(&q_vector->affinity_mask, mask);
2461 }
2462
2463 /**
2464 * ice_irq_affinity_release - Callback for affinity notifier release
2465 * @ref: internal core kernel usage
2466 *
2467 * This is a callback function used by the irq_set_affinity_notifier function
2468 * to inform the current notification subscriber that they will no longer
2469 * receive notifications.
2470 */
2471 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2472
2473 /**
2474 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2475 * @vsi: the VSI being configured
2476 */
2477 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2478 {
2479 struct ice_hw *hw = &vsi->back->hw;
2480 int i;
2481
2482 ice_for_each_q_vector(vsi, i)
2483 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2484
2485 ice_flush(hw);
2486 return 0;
2487 }
2488
2489 /**
2490 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2491 * @vsi: the VSI being configured
2492 * @basename: name for the vector
2493 */
2494 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2495 {
2496 int q_vectors = vsi->num_q_vectors;
2497 struct ice_pf *pf = vsi->back;
2498 int base = vsi->base_vector;
2499 struct device *dev;
2500 int rx_int_idx = 0;
2501 int tx_int_idx = 0;
2502 int vector, err;
2503 int irq_num;
2504
2505 dev = ice_pf_to_dev(pf);
2506 for (vector = 0; vector < q_vectors; vector++) {
2507 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2508
2509 irq_num = pf->msix_entries[base + vector].vector;
2510
2511 if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
2512 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2513 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2514 tx_int_idx++;
2515 } else if (q_vector->rx.rx_ring) {
2516 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2517 "%s-%s-%d", basename, "rx", rx_int_idx++);
2518 } else if (q_vector->tx.tx_ring) {
2519 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2520 "%s-%s-%d", basename, "tx", tx_int_idx++);
2521 } else {
2522 /* skip this unused q_vector */
2523 continue;
2524 }
2525 if (vsi->type == ICE_VSI_CTRL && vsi->vf)
2526 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2527 IRQF_SHARED, q_vector->name,
2528 q_vector);
2529 else
2530 err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2531 0, q_vector->name, q_vector);
2532 if (err) {
2533 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2534 err);
2535 goto free_q_irqs;
2536 }
2537
2538 /* register for affinity change notifications */
2539 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2540 struct irq_affinity_notify *affinity_notify;
2541
2542 affinity_notify = &q_vector->affinity_notify;
2543 affinity_notify->notify = ice_irq_affinity_notify;
2544 affinity_notify->release = ice_irq_affinity_release;
2545 irq_set_affinity_notifier(irq_num, affinity_notify);
2546 }
2547
2548 /* assign the mask for this irq */
2549 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2550 }
2551
2552 err = ice_set_cpu_rx_rmap(vsi);
2553 if (err) {
2554 netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
2555 vsi->vsi_num, ERR_PTR(err));
2556 goto free_q_irqs;
2557 }
2558
2559 vsi->irqs_ready = true;
2560 return 0;
2561
2562 free_q_irqs:
2563 while (vector) {
2564 vector--;
2565 irq_num = pf->msix_entries[base + vector].vector;
2566 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2567 irq_set_affinity_notifier(irq_num, NULL);
2568 irq_set_affinity_hint(irq_num, NULL);
2569 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2570 }
2571 return err;
2572 }
2573
2574 /**
2575 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2576 * @vsi: VSI to setup Tx rings used by XDP
2577 *
2578 * Return 0 on success and negative value on error
2579 */
2580 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2581 {
2582 struct device *dev = ice_pf_to_dev(vsi->back);
2583 struct ice_tx_desc *tx_desc;
2584 int i, j;
2585
2586 ice_for_each_xdp_txq(vsi, i) {
2587 u16 xdp_q_idx = vsi->alloc_txq + i;
2588 struct ice_tx_ring *xdp_ring;
2589
2590 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2591
2592 if (!xdp_ring)
2593 goto free_xdp_rings;
2594
2595 xdp_ring->q_index = xdp_q_idx;
2596 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2597 xdp_ring->vsi = vsi;
2598 xdp_ring->netdev = NULL;
2599 xdp_ring->dev = dev;
2600 xdp_ring->count = vsi->num_tx_desc;
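/* RS/DD thresholds start at the end of the first quarter of the ring */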
2601 xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
2602 xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
2603 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2604 if (ice_setup_tx_ring(xdp_ring))
2605 goto free_xdp_rings;
2606 ice_set_ring_xdp(xdp_ring);
2607 spin_lock_init(&xdp_ring->tx_lock);
2608 for (j = 0; j < xdp_ring->count; j++) {
2609 tx_desc = ICE_TX_DESC(xdp_ring, j);
2610 tx_desc->cmd_type_offset_bsz = 0;
2611 }
2612 }
2613
2614 return 0;
2615
2616 free_xdp_rings:
2617 for (; i >= 0; i--)
2618 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2619 ice_free_tx_ring(vsi->xdp_rings[i]);
2620 return -ENOMEM;
2621 }
2622
2623 /**
2624 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2625 * @vsi: VSI to set the bpf prog on
2626 * @prog: the bpf prog pointer
2627 */
2628 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2629 {
2630 struct bpf_prog *old_prog;
2631 int i;
2632
2633 old_prog = xchg(&vsi->xdp_prog, prog);
2634 if (old_prog)
2635 bpf_prog_put(old_prog);
2636
2637 ice_for_each_rxq(vsi, i)
2638 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2639 }
2640
2641 /**
2642 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2643 * @vsi: VSI to bring up Tx rings used by XDP
2644 * @prog: bpf program that will be assigned to VSI
2645 *
2646 * Return 0 on success and negative value on error
2647 */
2648 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2649 {
2650 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2651 int xdp_rings_rem = vsi->num_xdp_txq;
2652 struct ice_pf *pf = vsi->back;
2653 struct ice_qs_cfg xdp_qs_cfg = {
2654 .qs_mutex = &pf->avail_q_mutex,
2655 .pf_map = pf->avail_txqs,
2656 .pf_map_size = pf->max_pf_txqs,
2657 .q_count = vsi->num_xdp_txq,
2658 .scatter_count = ICE_MAX_SCATTER_TXQS,
2659 .vsi_map = vsi->txq_map,
2660 .vsi_map_offset = vsi->alloc_txq,
2661 .mapping_mode = ICE_VSI_MAP_CONTIG
2662 };
2663 struct device *dev;
2664 int i, v_idx;
2665 int status;
2666
2667 dev = ice_pf_to_dev(pf);
2668 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2669 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2670 if (!vsi->xdp_rings)
2671 return -ENOMEM;
2672
2673 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2674 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2675 goto err_map_xdp;
2676
2677 if (static_key_enabled(&ice_xdp_locking_key))
2678 netdev_warn(vsi->netdev,
2679 "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
2680
2681 if (ice_xdp_alloc_setup_rings(vsi))
2682 goto clear_xdp_rings;
2683
2684 /* follow the logic from ice_vsi_map_rings_to_vectors */
2685 ice_for_each_q_vector(vsi, v_idx) {
2686 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2687 int xdp_rings_per_v, q_id, q_base;
2688
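/* spread the remaining XDP rings evenly across the remaining vectors */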
2689 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2690 vsi->num_q_vectors - v_idx);
2691 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2692
2693 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2694 struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
2695
2696 xdp_ring->q_vector = q_vector;
2697 xdp_ring->next = q_vector->tx.tx_ring;
2698 q_vector->tx.tx_ring = xdp_ring;
2699 }
2700 xdp_rings_rem -= xdp_rings_per_v;
2701 }
2702
2703 ice_for_each_rxq(vsi, i) {
2704 if (static_key_enabled(&ice_xdp_locking_key)) {
2705 vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
2706 } else {
2707 struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
2708 struct ice_tx_ring *ring;
2709
2710 ice_for_each_tx_ring(ring, q_vector->tx) {
2711 if (ice_ring_is_xdp(ring)) {
2712 vsi->rx_rings[i]->xdp_ring = ring;
2713 break;
2714 }
2715 }
2716 }
2717 ice_tx_xsk_pool(vsi, i);
2718 }
2719
2720 /* omit the scheduler update if in reset path; XDP queues will be
2721 * taken into account at the end of ice_vsi_rebuild, where
2722 * ice_cfg_vsi_lan is being called
2723 */
2724 if (ice_is_reset_in_progress(pf->state))
2725 return 0;
2726
2727 /* tell the Tx scheduler that right now we have
2728 * additional queues
2729 */
2730 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2731 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2732
2733 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2734 max_txqs);
2735 if (status) {
2736 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
2737 status);
2738 goto clear_xdp_rings;
2739 }
2740
2741 /* assign the prog only when it's not already present on VSI;
2742 * this flow is a subject of both ethtool -L and ndo_bpf flows;
2743 * VSI rebuild that happens under ethtool -L can expose us to
2744 * the bpf_prog refcount issues as we would be swapping same
2745 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2746 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2747 * this is not harmful as dev_xdp_install bumps the refcount
2748 * before calling the op exposed by the driver;
2749 */
2750 if (!ice_is_xdp_ena_vsi(vsi))
2751 ice_vsi_assign_bpf_prog(vsi, prog);
2752
2753 return 0;
2754 clear_xdp_rings:
2755 ice_for_each_xdp_txq(vsi, i)
2756 if (vsi->xdp_rings[i]) {
2757 kfree_rcu(vsi->xdp_rings[i], rcu);
2758 vsi->xdp_rings[i] = NULL;
2759 }
2760
2761 err_map_xdp:
2762 mutex_lock(&pf->avail_q_mutex);
2763 ice_for_each_xdp_txq(vsi, i) {
2764 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2765 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2766 }
2767 mutex_unlock(&pf->avail_q_mutex);
2768
2769 devm_kfree(dev, vsi->xdp_rings);
2770 return -ENOMEM;
2771 }
2772
2773 /**
2774 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2775 * @vsi: VSI to remove XDP rings
2776 *
2777 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2778 * resources
2779 */
2780 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2781 {
2782 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2783 struct ice_pf *pf = vsi->back;
2784 int i, v_idx;
2785
2786 /* q_vectors are freed in reset path so there's no point in detaching
2787 * rings; in case of rebuild being triggered not from reset bits
2788 * in pf->state won't be set, so additionally check first q_vector
2789 * against NULL
2790 */
2791 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2792 goto free_qmap;
2793
2794 ice_for_each_q_vector(vsi, v_idx) {
2795 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2796 struct ice_tx_ring *ring;
2797
2798 ice_for_each_tx_ring(ring, q_vector->tx)
2799 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2800 break;
2801
2802 /* restore the value of last node prior to XDP setup */
2803 q_vector->tx.tx_ring = ring;
2804 }
2805
2806 free_qmap:
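/* return the XDP Tx queue indices to the PF-wide queue pool */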
2807 mutex_lock(&pf->avail_q_mutex);
2808 ice_for_each_xdp_txq(vsi, i) {
2809 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2810 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2811 }
2812 mutex_unlock(&pf->avail_q_mutex);
2813
2814 ice_for_each_xdp_txq(vsi, i)
2815 if (vsi->xdp_rings[i]) {
2816 if (vsi->xdp_rings[i]->desc) {
2817 synchronize_rcu();
2818 ice_free_tx_ring(vsi->xdp_rings[i]);
2819 }
2820 kfree_rcu(vsi->xdp_rings[i], rcu);
2821 vsi->xdp_rings[i] = NULL;
2822 }
2823
2824 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2825 vsi->xdp_rings = NULL;
2826
2827 if (static_key_enabled(&ice_xdp_locking_key))
2828 static_branch_dec(&ice_xdp_locking_key);
2829
2830 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2831 return 0;
2832
2833 ice_vsi_assign_bpf_prog(vsi, NULL);
2834
2835 /* notify Tx scheduler that we destroyed XDP queues and bring
2836 * back the old number of child nodes
2837 */
2838 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2839 max_txqs[i] = vsi->num_txq;
2840
2841 /* change number of XDP Tx queues to 0 */
2842 vsi->num_xdp_txq = 0;
2843
2844 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2845 max_txqs);
2846 }
2847
2848 /**
2849 * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2850 * @vsi: VSI to schedule napi on
2851 */
2852 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2853 {
2854 int i;
2855
2856 ice_for_each_rxq(vsi, i) {
2857 struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
2858
2859 if (rx_ring->xsk_pool)
2860 napi_schedule(&rx_ring->q_vector->napi);
2861 }
2862 }
2863
2864 /**
2865 * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
2866 * @vsi: VSI to determine the count of XDP Tx qs
2867 *
2868 * Returns 0 if the number of available Tx queues is at least half the CPU
2869 * count, -ENOMEM otherwise
2870 */
2871 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
2872 {
2873 u16 avail = ice_get_avail_txq_count(vsi->back);
2874 u16 cpus = num_possible_cpus();
2875
2876 if (avail < cpus / 2)
2877 return -ENOMEM;
2878
2879 vsi->num_xdp_txq = min_t(u16, avail, cpus);
2880
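/* with fewer XDP Tx queues than CPUs the rings are shared between CPUs,
 * so enable the static key that turns on per-ring Tx locking
 */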
2881 if (vsi->num_xdp_txq < cpus)
2882 static_branch_inc(&ice_xdp_locking_key);
2883
2884 return 0;
2885 }
2886
2887 /**
2888 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2889 * @vsi: VSI to setup XDP for
2890 * @prog: XDP program
2891 * @extack: netlink extended ack
2892 */
2893 static int
2894 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2895 struct netlink_ext_ack *extack)
2896 {
2897 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2898 bool if_running = netif_running(vsi->netdev);
2899 int ret = 0, xdp_ring_err = 0;
2900
2901 if (frame_size > vsi->rx_buf_len) {
2902 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2903 return -EOPNOTSUPP;
2904 }
2905
2906 /* need to stop netdev while setting up the program for Rx rings */
2907 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2908 ret = ice_down(vsi);
2909 if (ret) {
2910 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2911 return ret;
2912 }
2913 }
2914
2915 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2916 xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
2917 if (xdp_ring_err) {
2918 NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
2919 } else {
2920 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2921 if (xdp_ring_err)
2922 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2923 }
2924 /* reallocate Rx queues that are used for zero-copy */
2925 xdp_ring_err = ice_realloc_zc_buf(vsi, true);
2926 if (xdp_ring_err)
2927 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
2928 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2929 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2930 if (xdp_ring_err)
2931 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2932 /* reallocate Rx queues that were used for zero-copy */
2933 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
2934 if (xdp_ring_err)
2935 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
2936 } else {
2937 /* safe to call even when prog == vsi->xdp_prog as
2938 * dev_xdp_install in net/core/dev.c incremented prog's
2939 * refcount so corresponding bpf_prog_put won't cause
2940 * underflow
2941 */
2942 ice_vsi_assign_bpf_prog(vsi, prog);
2943 }
2944
2945 if (if_running)
2946 ret = ice_up(vsi);
2947
2948 if (!ret && prog)
2949 ice_vsi_rx_napi_schedule(vsi);
2950
2951 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2952 }
2953
2954 /**
2955 * ice_xdp_safe_mode - XDP handler for safe mode
2956 * @dev: netdevice
2957 * @xdp: XDP command
2958 */
2959 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2960 struct netdev_bpf *xdp)
2961 {
2962 NL_SET_ERR_MSG_MOD(xdp->extack,
2963 "Please provide working DDP firmware package in order to use XDP\n"
2964 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2965 return -EOPNOTSUPP;
2966 }
2967
2968 /**
2969 * ice_xdp - implements XDP handler
2970 * @dev: netdevice
2971 * @xdp: XDP command
2972 */
2973 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2974 {
2975 struct ice_netdev_priv *np = netdev_priv(dev);
2976 struct ice_vsi *vsi = np->vsi;
2977
2978 if (vsi->type != ICE_VSI_PF) {
2979 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2980 return -EINVAL;
2981 }
2982
2983 switch (xdp->command) {
2984 case XDP_SETUP_PROG:
2985 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2986 case XDP_SETUP_XSK_POOL:
2987 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2988 xdp->xsk.queue_id);
2989 default:
2990 return -EINVAL;
2991 }
2992 }
2993
2994 /**
2995 * ice_ena_misc_vector - enable the non-queue interrupts
2996 * @pf: board private structure
2997 */
2998 static void ice_ena_misc_vector(struct ice_pf *pf)
2999 {
3000 struct ice_hw *hw = &pf->hw;
3001 u32 val;
3002
3003 /* Disable anti-spoof detection interrupt to prevent spurious event
3004 * interrupts during a function reset. Anti-spoof functionality is
3005 * still supported.
3006 */
3007 val = rd32(hw, GL_MDCK_TX_TDPU);
3008 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
3009 wr32(hw, GL_MDCK_TX_TDPU, val);
3010
3011 /* clear things first */
3012 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
3013 rd32(hw, PFINT_OICR); /* read to clear */
3014
3015 val = (PFINT_OICR_ECC_ERR_M |
3016 PFINT_OICR_MAL_DETECT_M |
3017 PFINT_OICR_GRST_M |
3018 PFINT_OICR_PCI_EXCEPTION_M |
3019 PFINT_OICR_VFLR_M |
3020 PFINT_OICR_HMC_ERR_M |
3021 PFINT_OICR_PE_PUSH_M |
3022 PFINT_OICR_PE_CRITERR_M);
3023
3024 wr32(hw, PFINT_OICR_ENA, val);
3025
3026 /* SW_ITR_IDX = 0, but don't change INTENA */
3027 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
3028 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
3029 }
3030
3031 /**
3032 * ice_misc_intr - misc interrupt handler
3033 * @irq: interrupt number
3034 * @data: pointer to the PF structure
3035 */
3036 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
3037 {
3038 struct ice_pf *pf = (struct ice_pf *)data;
3039 struct ice_hw *hw = &pf->hw;
3040 irqreturn_t ret = IRQ_NONE;
3041 struct device *dev;
3042 u32 oicr, ena_mask;
3043
3044 dev = ice_pf_to_dev(pf);
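/* assume the control queues need servicing; the cleanup subtasks will
 * check the actual queues and clear these bits
 */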
3045 set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
3046 set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
3047 set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
3048
3049 oicr = rd32(hw, PFINT_OICR);
3050 ena_mask = rd32(hw, PFINT_OICR_ENA);
3051
3052 if (oicr & PFINT_OICR_SWINT_M) {
3053 ena_mask &= ~PFINT_OICR_SWINT_M;
3054 pf->sw_int_count++;
3055 }
3056
3057 if (oicr & PFINT_OICR_MAL_DETECT_M) {
3058 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
3059 set_bit(ICE_MDD_EVENT_PENDING, pf->state);
3060 }
3061 if (oicr & PFINT_OICR_VFLR_M) {
3062 /* disable any further VFLR event notifications */
3063 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
3064 u32 reg = rd32(hw, PFINT_OICR_ENA);
3065
3066 reg &= ~PFINT_OICR_VFLR_M;
3067 wr32(hw, PFINT_OICR_ENA, reg);
3068 } else {
3069 ena_mask &= ~PFINT_OICR_VFLR_M;
3070 set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
3071 }
3072 }
3073
3074 if (oicr & PFINT_OICR_GRST_M) {
3075 u32 reset;
3076
3077 /* we have a reset warning */
3078 ena_mask &= ~PFINT_OICR_GRST_M;
3079 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
3080 GLGEN_RSTAT_RESET_TYPE_S;
3081
3082 if (reset == ICE_RESET_CORER)
3083 pf->corer_count++;
3084 else if (reset == ICE_RESET_GLOBR)
3085 pf->globr_count++;
3086 else if (reset == ICE_RESET_EMPR)
3087 pf->empr_count++;
3088 else
3089 dev_dbg(dev, "Invalid reset type %d\n", reset);
3090
3091 /* If a reset cycle isn't already in progress, we set a bit in
3092 * pf->state so that the service task can start a reset/rebuild.
3093 */
3094 if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
3095 if (reset == ICE_RESET_CORER)
3096 set_bit(ICE_CORER_RECV, pf->state);
3097 else if (reset == ICE_RESET_GLOBR)
3098 set_bit(ICE_GLOBR_RECV, pf->state);
3099 else
3100 set_bit(ICE_EMPR_RECV, pf->state);
3101
3102 /* There are a couple of different bits at play here.
3103 * hw->reset_ongoing indicates whether the hardware is
3104 * in reset. This is set to true when a reset interrupt
3105 * is received and set back to false after the driver
3106 * has determined that the hardware is out of reset.
3107 *
3108 * ICE_RESET_OICR_RECV in pf->state indicates
3109 * that a post reset rebuild is required before the
3110 * driver is operational again. This is set above.
3111 *
3112 * As this is the start of the reset/rebuild cycle, set
3113 * both to indicate that.
3114 */
3115 hw->reset_ongoing = true;
3116 }
3117 }
3118
3119 if (oicr & PFINT_OICR_TSYN_TX_M) {
3120 ena_mask &= ~PFINT_OICR_TSYN_TX_M;
3121 if (!hw->reset_ongoing) {
3122 set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
3123 ret = IRQ_WAKE_THREAD;
3124 }
3125 }
3126
3127 if (oicr & PFINT_OICR_TSYN_EVNT_M) {
3128 u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
3129 u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
3130
3131 ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
3132
3133 if (hw->func_caps.ts_func_info.src_tmr_owned) {
3134 /* Save EVENTs from GLTSYN register */
3135 pf->ptp.ext_ts_irq |= gltsyn_stat &
3136 (GLTSYN_STAT_EVENT0_M |
3137 GLTSYN_STAT_EVENT1_M |
3138 GLTSYN_STAT_EVENT2_M);
3139
3140 set_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread);
3141 ret = IRQ_WAKE_THREAD;
3142 }
3143 }
3144
3145 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
3146 if (oicr & ICE_AUX_CRIT_ERR) {
3147 pf->oicr_err_reg |= oicr;
3148 set_bit(ICE_AUX_ERR_PENDING, pf->state);
3149 ena_mask &= ~ICE_AUX_CRIT_ERR;
3150 }
3151
3152 /* Report any remaining unexpected interrupts */
3153 oicr &= ena_mask;
3154 if (oicr) {
3155 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
3156 /* If a critical error is pending there is no choice but to
3157 * reset the device.
3158 */
3159 if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
3160 PFINT_OICR_ECC_ERR_M)) {
3161 set_bit(ICE_PFR_REQ, pf->state);
3162 ice_service_task_schedule(pf);
3163 }
3164 }
3165 if (!ret)
3166 ret = IRQ_HANDLED;
3167
3168 ice_service_task_schedule(pf);
3169 ice_irq_dynamic_ena(hw, NULL, NULL);
3170
3171 return ret;
3172 }
3173
3174 /**
3175 * ice_misc_intr_thread_fn - misc interrupt thread function
3176 * @irq: interrupt number
3177 * @data: pointer to the PF structure
3178 */
3179 static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
3180 {
3181 struct ice_pf *pf = data;
3182
3183 if (ice_is_reset_in_progress(pf->state))
3184 return IRQ_HANDLED;
3185
3186 if (test_and_clear_bit(ICE_MISC_THREAD_EXTTS_EVENT, pf->misc_thread))
3187 ice_ptp_extts_event(pf);
3188
3189 if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
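/* poll until all outstanding Tx timestamps have been processed */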
3190 while (!ice_ptp_process_ts(pf))
3191 usleep_range(50, 100);
3192 }
3193
3194 return IRQ_HANDLED;
3195 }
3196
3197 /**
3198 * ice_dis_ctrlq_interrupts - disable control queue interrupts
3199 * @hw: pointer to HW structure
3200 */
3201 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
3202 {
3203 /* disable Admin queue Interrupt causes */
3204 wr32(hw, PFINT_FW_CTL,
3205 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
3206
3207 /* disable Mailbox queue Interrupt causes */
3208 wr32(hw, PFINT_MBX_CTL,
3209 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
3210
3211 wr32(hw, PFINT_SB_CTL,
3212 rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
3213
3214 /* disable Control queue Interrupt causes */
3215 wr32(hw, PFINT_OICR_CTL,
3216 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
3217
3218 ice_flush(hw);
3219 }
3220
3221 /**
3222 * ice_free_irq_msix_misc - Unroll misc vector setup
3223 * @pf: board private structure
3224 */
3225 static void ice_free_irq_msix_misc(struct ice_pf *pf)
3226 {
3227 struct ice_hw *hw = &pf->hw;
3228
3229 ice_dis_ctrlq_interrupts(hw);
3230
3231 /* disable OICR interrupt */
3232 wr32(hw, PFINT_OICR_ENA, 0);
3233 ice_flush(hw);
3234
3235 if (pf->msix_entries) {
3236 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
3237 devm_free_irq(ice_pf_to_dev(pf),
3238 pf->msix_entries[pf->oicr_idx].vector, pf);
3239 }
3240
3241 pf->num_avail_sw_msix += 1;
3242 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
3243 }
3244
3245 /**
3246 * ice_ena_ctrlq_interrupts - enable control queue interrupts
3247 * @hw: pointer to HW structure
3248 * @reg_idx: HW vector index to associate the control queue interrupts with
3249 */
3250 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
3251 {
3252 u32 val;
3253
3254 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
3255 PFINT_OICR_CTL_CAUSE_ENA_M);
3256 wr32(hw, PFINT_OICR_CTL, val);
3257
3258 /* enable Admin queue Interrupt causes */
3259 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
3260 PFINT_FW_CTL_CAUSE_ENA_M);
3261 wr32(hw, PFINT_FW_CTL, val);
3262
3263 /* enable Mailbox queue Interrupt causes */
3264 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
3265 PFINT_MBX_CTL_CAUSE_ENA_M);
3266 wr32(hw, PFINT_MBX_CTL, val);
3267
3268 /* enable Sideband queue Interrupt causes */
3269 val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
3270 PFINT_SB_CTL_CAUSE_ENA_M);
3271 wr32(hw, PFINT_SB_CTL, val);
3272
3273 ice_flush(hw);
3274 }
3275
3276 /**
3277 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3278 * @pf: board private structure
3279 *
3280 * This sets up the handler for MSIX 0, which is used to manage the
3281 * non-queue interrupts, e.g. AdminQ and errors. This is not used
3282 * when in MSI or Legacy interrupt mode.
3283 */
3284 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3285 {
3286 struct device *dev = ice_pf_to_dev(pf);
3287 struct ice_hw *hw = &pf->hw;
3288 int oicr_idx, err = 0;
3289
3290 if (!pf->int_name[0])
3291 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3292 dev_driver_string(dev), dev_name(dev));
3293
3294 /* Do not request IRQ but do enable OICR interrupt since settings are
3295 * lost during reset. Note that this function is called only during
3296 * rebuild path and not while reset is in progress.
3297 */
3298 if (ice_is_reset_in_progress(pf->state))
3299 goto skip_req_irq;
3300
3301 /* reserve one vector in irq_tracker for misc interrupts */
3302 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3303 if (oicr_idx < 0)
3304 return oicr_idx;
3305
3306 pf->num_avail_sw_msix -= 1;
3307 pf->oicr_idx = (u16)oicr_idx;
3308
3309 err = devm_request_threaded_irq(dev,
3310 pf->msix_entries[pf->oicr_idx].vector,
3311 ice_misc_intr, ice_misc_intr_thread_fn,
3312 0, pf->int_name, pf);
3313 if (err) {
3314 dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
3315 pf->int_name, err);
3316 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3317 pf->num_avail_sw_msix += 1;
3318 return err;
3319 }
3320
3321 skip_req_irq:
3322 ice_ena_misc_vector(pf);
3323
3324 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3325 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3326 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3327
3328 ice_flush(hw);
3329 ice_irq_dynamic_ena(hw, NULL, NULL);
3330
3331 return 0;
3332 }
3333
3334 /**
3335 * ice_napi_add - register NAPI handler for the VSI
3336 * @vsi: VSI for which NAPI handler is to be registered
3337 *
3338 * This function is only called in the driver's load path. Registering the NAPI
3339 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3340 * reset/rebuild, etc.)
3341 */
3342 static void ice_napi_add(struct ice_vsi *vsi)
3343 {
3344 int v_idx;
3345
3346 if (!vsi->netdev)
3347 return;
3348
3349 ice_for_each_q_vector(vsi, v_idx)
3350 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3351 ice_napi_poll);
3352 }
3353
3354 /**
3355 * ice_set_ops - set netdev and ethtool ops for the given netdev
3356 * @netdev: netdev instance
3357 */
3358 static void ice_set_ops(struct net_device *netdev)
3359 {
3360 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3361
3362 if (ice_is_safe_mode(pf)) {
3363 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3364 ice_set_ethtool_safe_mode_ops(netdev);
3365 return;
3366 }
3367
3368 netdev->netdev_ops = &ice_netdev_ops;
3369 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3370 ice_set_ethtool_ops(netdev);
3371 }
3372
3373 /**
3374 * ice_set_netdev_features - set features for the given netdev
3375 * @netdev: netdev instance
3376 */
3377 static void ice_set_netdev_features(struct net_device *netdev)
3378 {
3379 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3380 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
3381 netdev_features_t csumo_features;
3382 netdev_features_t vlano_features;
3383 netdev_features_t dflt_features;
3384 netdev_features_t tso_features;
3385
3386 if (ice_is_safe_mode(pf)) {
3387 /* safe mode */
3388 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3389 netdev->hw_features = netdev->features;
3390 return;
3391 }
3392
3393 dflt_features = NETIF_F_SG |
3394 NETIF_F_HIGHDMA |
3395 NETIF_F_NTUPLE |
3396 NETIF_F_RXHASH;
3397
3398 csumo_features = NETIF_F_RXCSUM |
3399 NETIF_F_IP_CSUM |
3400 NETIF_F_SCTP_CRC |
3401 NETIF_F_IPV6_CSUM;
3402
3403 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3404 NETIF_F_HW_VLAN_CTAG_TX |
3405 NETIF_F_HW_VLAN_CTAG_RX;
3406
3407 /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
3408 if (is_dvm_ena)
3409 vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
3410
3411 tso_features = NETIF_F_TSO |
3412 NETIF_F_TSO_ECN |
3413 NETIF_F_TSO6 |
3414 NETIF_F_GSO_GRE |
3415 NETIF_F_GSO_UDP_TUNNEL |
3416 NETIF_F_GSO_GRE_CSUM |
3417 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3418 NETIF_F_GSO_PARTIAL |
3419 NETIF_F_GSO_IPXIP4 |
3420 NETIF_F_GSO_IPXIP6 |
3421 NETIF_F_GSO_UDP_L4;
3422
3423 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3424 NETIF_F_GSO_GRE_CSUM;
3425 /* set features that user can change */
3426 netdev->hw_features = dflt_features | csumo_features |
3427 vlano_features | tso_features;
3428
3429 /* add support for HW_CSUM on packets with MPLS header */
3430 netdev->mpls_features = NETIF_F_HW_CSUM |
3431 NETIF_F_TSO |
3432 NETIF_F_TSO6;
3433
3434 /* enable features */
3435 netdev->features |= netdev->hw_features;
3436
3437 netdev->hw_features |= NETIF_F_HW_TC;
3438 netdev->hw_features |= NETIF_F_LOOPBACK;
3439
3440 /* encap and VLAN devices inherit default, csumo and tso features */
3441 netdev->hw_enc_features |= dflt_features | csumo_features |
3442 tso_features;
3443 netdev->vlan_features |= dflt_features | csumo_features |
3444 tso_features;
3445
3446 /* advertise support but don't enable by default since only one type of
3447 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
3448 * type turns on, the other has to be turned off. This is enforced by the
3449 * ice_fix_features() ndo callback.
3450 */
3451 if (is_dvm_ena)
3452 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
3453 NETIF_F_HW_VLAN_STAG_TX;
3454
3455 /* Leave CRC / FCS stripping enabled by default, but allow the value to
3456 * be changed at runtime
3457 */
3458 netdev->hw_features |= NETIF_F_RXFCS;
3459 }
3460
3461 /**
3462 * ice_cfg_netdev - Allocate, configure and register a netdev
3463 * @vsi: the VSI associated with the new netdev
3464 *
3465 * Returns 0 on success, negative value on failure
3466 */
3467 static int ice_cfg_netdev(struct ice_vsi *vsi)
3468 {
3469 struct ice_netdev_priv *np;
3470 struct net_device *netdev;
3471 u8 mac_addr[ETH_ALEN];
3472
3473 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3474 vsi->alloc_rxq);
3475 if (!netdev)
3476 return -ENOMEM;
3477
3478 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3479 vsi->netdev = netdev;
3480 np = netdev_priv(netdev);
3481 np->vsi = vsi;
3482
3483 ice_set_netdev_features(netdev);
3484
3485 ice_set_ops(netdev);
3486
3487 if (vsi->type == ICE_VSI_PF) {
3488 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3489 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3490 eth_hw_addr_set(netdev, mac_addr);
3491 ether_addr_copy(netdev->perm_addr, mac_addr);
3492 }
3493
3494 netdev->priv_flags |= IFF_UNICAST_FLT;
3495
3496 /* Setup netdev TC information */
3497 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3498
3499 /* set up the watchdog timeout value to be 5 seconds */
3500 netdev->watchdog_timeo = 5 * HZ;
3501
3502 netdev->min_mtu = ETH_MIN_MTU;
3503 netdev->max_mtu = ICE_MAX_MTU;
3504
3505 return 0;
3506 }
3507
3508 /**
3509 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3510 * @lut: Lookup table
3511 * @rss_table_size: Lookup table size
3512 * @rss_size: Range of queue number for hashing
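 *
 * As an illustration, with rss_table_size = 8 and rss_size = 3 the LUT is
 * filled as { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading hash buckets round-robin
 * across the first three queues.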
3513 */
3514 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3515 {
3516 u16 i;
3517
3518 for (i = 0; i < rss_table_size; i++)
3519 lut[i] = i % rss_size;
3520 }
3521
3522 /**
3523 * ice_pf_vsi_setup - Set up a PF VSI
3524 * @pf: board private structure
3525 * @pi: pointer to the port_info instance
3526 *
3527 * Returns pointer to the successfully allocated VSI software struct
3528 * on success, otherwise returns NULL on failure.
3529 */
3530 static struct ice_vsi *
3531 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3532 {
3533 return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
3534 }
3535
3536 static struct ice_vsi *
3537 ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
3538 struct ice_channel *ch)
3539 {
3540 return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
3541 }
3542
3543 /**
3544 * ice_ctrl_vsi_setup - Set up a control VSI
3545 * @pf: board private structure
3546 * @pi: pointer to the port_info instance
3547 *
3548 * Returns pointer to the successfully allocated VSI software struct
3549 * on success, otherwise returns NULL on failure.
3550 */
3551 static struct ice_vsi *
3552 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3553 {
3554 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
3555 }
3556
3557 /**
3558 * ice_lb_vsi_setup - Set up a loopback VSI
3559 * @pf: board private structure
3560 * @pi: pointer to the port_info instance
3561 *
3562 * Returns pointer to the successfully allocated VSI software struct
3563 * on success, otherwise returns NULL on failure.
3564 */
3565 struct ice_vsi *
3566 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3567 {
3568 return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
3569 }
3570
3571 /**
3572 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3573 * @netdev: network interface to be adjusted
3574 * @proto: VLAN TPID
3575 * @vid: VLAN ID to be added
3576 *
3577 * net_device_ops implementation for adding VLAN IDs
3578 */
3579 static int
3580 ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3581 {
3582 struct ice_netdev_priv *np = netdev_priv(netdev);
3583 struct ice_vsi_vlan_ops *vlan_ops;
3584 struct ice_vsi *vsi = np->vsi;
3585 struct ice_vlan vlan;
3586 int ret;
3587
3588 /* VLAN 0 is added by default during load/reset */
3589 if (!vid)
3590 return 0;
3591
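	/* serialize against other paths that take ICE_CFG_BUSY; sleeping here
	 * is fine since this ndo callback runs in process context under RTNL
	 */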
3592 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3593 usleep_range(1000, 2000);
3594
3595 /* Add multicast promisc rule for the VLAN ID to be added if
3596 * all-multicast is currently enabled.
3597 */
3598 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3599 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3600 ICE_MCAST_VLAN_PROMISC_BITS,
3601 vid);
3602 if (ret)
3603 goto finish;
3604 }
3605
3606 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3607
3608 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3609 * packets aren't pruned by the device's internal switch on Rx
3610 */
3611 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3612 ret = vlan_ops->add_vlan(vsi, &vlan);
3613 if (ret)
3614 goto finish;
3615
3616 	/* If all-multicast is currently enabled and this VLAN ID is the only
3617 	 * one besides VLAN 0, we have to update the look-up type of the multicast
3618 	 * promisc rule for VLAN 0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
3619 */
3620 if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
3621 ice_vsi_num_non_zero_vlans(vsi) == 1) {
3622 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3623 ICE_MCAST_PROMISC_BITS, 0);
3624 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3625 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3626 }
3627
3628 finish:
3629 clear_bit(ICE_CFG_BUSY, vsi->state);
3630
3631 return ret;
3632 }
3633
3634 /**
3635 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3636 * @netdev: network interface to be adjusted
3637 * @proto: VLAN TPID
3638 * @vid: VLAN ID to be removed
3639 *
3640 * net_device_ops implementation for removing VLAN IDs
3641 */
3642 static int
3643 ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3644 {
3645 struct ice_netdev_priv *np = netdev_priv(netdev);
3646 struct ice_vsi_vlan_ops *vlan_ops;
3647 struct ice_vsi *vsi = np->vsi;
3648 struct ice_vlan vlan;
3649 int ret;
3650
3651 /* don't allow removal of VLAN 0 */
3652 if (!vid)
3653 return 0;
3654
3655 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
3656 usleep_range(1000, 2000);
3657
3658 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3659 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3660 if (ret) {
3661 netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
3662 vsi->vsi_num);
3663 vsi->current_netdev_flags |= IFF_ALLMULTI;
3664 }
3665
3666 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3667
3668 /* Make sure VLAN delete is successful before updating VLAN
3669 * information
3670 */
3671 vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
3672 ret = vlan_ops->del_vlan(vsi, &vlan);
3673 if (ret)
3674 goto finish;
3675
3676 /* Remove multicast promisc rule for the removed VLAN ID if
3677 * all-multicast is enabled.
3678 */
3679 if (vsi->current_netdev_flags & IFF_ALLMULTI)
3680 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3681 ICE_MCAST_VLAN_PROMISC_BITS, vid);
3682
3683 if (!ice_vsi_has_non_zero_vlans(vsi)) {
3684 /* Update look-up type of multicast promisc rule for VLAN 0
3685 * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
3686 * all-multicast is enabled and VLAN 0 is the only VLAN rule.
3687 */
3688 if (vsi->current_netdev_flags & IFF_ALLMULTI) {
3689 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3690 ICE_MCAST_VLAN_PROMISC_BITS,
3691 0);
3692 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
3693 ICE_MCAST_PROMISC_BITS, 0);
3694 }
3695 }
3696
3697 finish:
3698 clear_bit(ICE_CFG_BUSY, vsi->state);
3699
3700 return ret;
3701 }
3702
3703 /**
3704  * ice_rep_indr_tc_block_unbind - clean up indirect TC block private data
3705 * @cb_priv: indirection block private data
3706 */
3707 static void ice_rep_indr_tc_block_unbind(void *cb_priv)
3708 {
3709 struct ice_indr_block_priv *indr_priv = cb_priv;
3710
3711 list_del(&indr_priv->list);
3712 kfree(indr_priv);
3713 }
3714
3715 /**
3716 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3717 * @vsi: VSI struct which has the netdev
3718 */
3719 static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
3720 {
3721 struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
3722
3723 flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
3724 ice_rep_indr_tc_block_unbind);
3725 }
3726
3727 /**
3728 * ice_tc_indir_block_remove - clean indirect TC block notifications
3729 * @pf: PF structure
3730 */
3731 static void ice_tc_indir_block_remove(struct ice_pf *pf)
3732 {
3733 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
3734
3735 if (!pf_vsi)
3736 return;
3737
3738 ice_tc_indir_block_unregister(pf_vsi);
3739 }
3740
3741 /**
3742 * ice_tc_indir_block_register - Register TC indirect block notifications
3743 * @vsi: VSI struct which has the netdev
3744 *
3745 * Returns 0 on success, negative value on failure
3746 */
3747 static int ice_tc_indir_block_register(struct ice_vsi *vsi)
3748 {
3749 struct ice_netdev_priv *np;
3750
3751 if (!vsi || !vsi->netdev)
3752 return -EINVAL;
3753
3754 np = netdev_priv(vsi->netdev);
3755
3756 INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
3757 return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
3758 }
3759
3760 /**
3761 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3762 * @pf: board private structure
3763 *
3764 * Returns 0 on success, negative value on failure
3765 */
3766 static int ice_setup_pf_sw(struct ice_pf *pf)
3767 {
3768 struct device *dev = ice_pf_to_dev(pf);
3769 bool dvm = ice_is_dvm_ena(&pf->hw);
3770 struct ice_vsi *vsi;
3771 int status;
3772
3773 if (ice_is_reset_in_progress(pf->state))
3774 return -EBUSY;
3775
3776 status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
3777 if (status)
3778 return -EIO;
3779
3780 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3781 if (!vsi)
3782 return -ENOMEM;
3783
3784 /* init channel list */
3785 INIT_LIST_HEAD(&vsi->ch_list);
3786
3787 status = ice_cfg_netdev(vsi);
3788 if (status)
3789 goto unroll_vsi_setup;
3790 /* netdev has to be configured before setting frame size */
3791 ice_vsi_cfg_frame_size(vsi);
3792
3793 /* init indirect block notifications */
3794 status = ice_tc_indir_block_register(vsi);
3795 if (status) {
3796 dev_err(dev, "Failed to register netdev notifier\n");
3797 goto unroll_cfg_netdev;
3798 }
3799
3800 /* Setup DCB netlink interface */
3801 ice_dcbnl_setup(vsi);
3802
3803 /* registering the NAPI handler requires both the queues and
3804 * netdev to be created, which are done in ice_pf_vsi_setup()
3805 * and ice_cfg_netdev() respectively
3806 */
3807 ice_napi_add(vsi);
3808
3809 status = ice_init_mac_fltr(pf);
3810 if (status)
3811 goto unroll_napi_add;
3812
3813 return 0;
3814
3815 unroll_napi_add:
3816 ice_tc_indir_block_unregister(vsi);
3817 unroll_cfg_netdev:
3818 if (vsi) {
3819 ice_napi_del(vsi);
3820 if (vsi->netdev) {
3821 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3822 free_netdev(vsi->netdev);
3823 vsi->netdev = NULL;
3824 }
3825 }
3826
3827 unroll_vsi_setup:
3828 ice_vsi_release(vsi);
3829 return status;
3830 }
3831
3832 /**
3833  * ice_get_avail_q_count - Get count of available (unused) queues
3834 * @pf_qmap: bitmap to get queue use count from
3835 * @lock: pointer to a mutex that protects access to pf_qmap
3836 * @size: size of the bitmap
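 *
 * Counts the clear (unused) bits in @pf_qmap while holding @lock. For
 * example, with a map of size 8 in which only bits 0 and 3 are set, this
 * returns 6.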
3837 */
3838 static u16
3839 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3840 {
3841 unsigned long bit;
3842 u16 count = 0;
3843
3844 mutex_lock(lock);
3845 for_each_clear_bit(bit, pf_qmap, size)
3846 count++;
3847 mutex_unlock(lock);
3848
3849 return count;
3850 }
3851
3852 /**
3853  * ice_get_avail_txq_count - Get count of available Tx queues
3854 * @pf: pointer to an ice_pf instance
3855 */
3856 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3857 {
3858 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3859 pf->max_pf_txqs);
3860 }
3861
3862 /**
3863  * ice_get_avail_rxq_count - Get count of available Rx queues
3864 * @pf: pointer to an ice_pf instance
3865 */
3866 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3867 {
3868 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3869 pf->max_pf_rxqs);
3870 }
3871
3872 /**
3873  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3874 * @pf: board private structure to initialize
3875 */
3876 static void ice_deinit_pf(struct ice_pf *pf)
3877 {
3878 ice_service_task_stop(pf);
3879 mutex_destroy(&pf->adev_mutex);
3880 mutex_destroy(&pf->sw_mutex);
3881 mutex_destroy(&pf->tc_mutex);
3882 mutex_destroy(&pf->avail_q_mutex);
3883 mutex_destroy(&pf->vfs.table_lock);
3884
3885 if (pf->avail_txqs) {
3886 bitmap_free(pf->avail_txqs);
3887 pf->avail_txqs = NULL;
3888 }
3889
3890 if (pf->avail_rxqs) {
3891 bitmap_free(pf->avail_rxqs);
3892 pf->avail_rxqs = NULL;
3893 }
3894
3895 if (pf->ptp.clock)
3896 ptp_clock_unregister(pf->ptp.clock);
3897 }
3898
3899 /**
3900 * ice_set_pf_caps - set PFs capability flags
3901 * @pf: pointer to the PF instance
3902 */
3903 static void ice_set_pf_caps(struct ice_pf *pf)
3904 {
3905 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3906
3907 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3908 if (func_caps->common_cap.rdma)
3909 set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3910 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3911 if (func_caps->common_cap.dcb)
3912 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3913 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3914 if (func_caps->common_cap.sr_iov_1_1) {
3915 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3916 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
3917 ICE_MAX_SRIOV_VFS);
3918 }
3919 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3920 if (func_caps->common_cap.rss_table_size)
3921 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3922
3923 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3924 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3925 u16 unused;
3926
3927 /* ctrl_vsi_idx will be set to a valid value when flow director
3928 * is setup by ice_init_fdir
3929 */
3930 pf->ctrl_vsi_idx = ICE_NO_VSI;
3931 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3932 /* force guaranteed filter pool for PF */
3933 ice_alloc_fd_guar_item(&pf->hw, &unused,
3934 func_caps->fd_fltr_guar);
3935 /* force shared filter pool for PF */
3936 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3937 func_caps->fd_fltr_best_effort);
3938 }
3939
3940 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3941 if (func_caps->common_cap.ieee_1588)
3942 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3943
3944 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3945 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3946 }
3947
3948 /**
3949 * ice_init_pf - Initialize general software structures (struct ice_pf)
3950 * @pf: board private structure to initialize
3951 */
3952 static int ice_init_pf(struct ice_pf *pf)
3953 {
3954 ice_set_pf_caps(pf);
3955
3956 mutex_init(&pf->sw_mutex);
3957 mutex_init(&pf->tc_mutex);
3958 mutex_init(&pf->adev_mutex);
3959
3960 INIT_HLIST_HEAD(&pf->aq_wait_list);
3961 spin_lock_init(&pf->aq_wait_lock);
3962 init_waitqueue_head(&pf->aq_wait_queue);
3963
3964 init_waitqueue_head(&pf->reset_wait_queue);
3965
3966 /* setup service timer and periodic service task */
3967 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3968 pf->serv_tmr_period = HZ;
3969 INIT_WORK(&pf->serv_task, ice_service_task);
3970 clear_bit(ICE_SERVICE_SCHED, pf->state);
3971
3972 mutex_init(&pf->avail_q_mutex);
3973 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3974 if (!pf->avail_txqs)
3975 return -ENOMEM;
3976
3977 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3978 if (!pf->avail_rxqs) {
3979 bitmap_free(pf->avail_txqs);
3980 pf->avail_txqs = NULL;
3981 return -ENOMEM;
3982 }
3983
3984 mutex_init(&pf->vfs.table_lock);
3985 hash_init(pf->vfs.table);
3986
3987 return 0;
3988 }
3989
3990 /**
3991 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
3992 * @pf: board private structure
3993 * @v_remain: number of remaining MSI-X vectors to be distributed
3994 *
3995  * Reduce the usage of MSI-X vectors when the entire request cannot be
3996  * fulfilled. pf->num_lan_msix and pf->num_rdma_msix are set based on the
3997  * number of remaining vectors.
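 *
 * Hypothetical example: if ICE_RDMA_NUM_AEQ_MSIX were 4 and 16 vectors
 * remained, the even-split branch would assign RDMA (16 - 4) / 2 + 4 = 10
 * vectors and LAN the remaining 6.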
3998 */
3999 static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
4000 {
4001 int v_rdma;
4002
4003 if (!ice_is_rdma_ena(pf)) {
4004 pf->num_lan_msix = v_remain;
4005 return;
4006 }
4007
4008 /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
4009 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
4010
4011 if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
4012 dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
4013 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
4014
4015 pf->num_rdma_msix = 0;
4016 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
4017 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
4018 (v_remain - v_rdma < v_rdma)) {
4019 /* Support minimum RDMA and give remaining vectors to LAN MSIX */
4020 pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
4021 pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
4022 } else {
4023 /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
4024 */
4025 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
4026 ICE_RDMA_NUM_AEQ_MSIX;
4027 pf->num_lan_msix = v_remain - pf->num_rdma_msix;
4028 }
4029 }
4030
4031 /**
4032 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
4033 * @pf: board private structure
4034 *
4035 * Compute the number of MSIX vectors wanted and request from the OS. Adjust
4036 * device usage if there are not enough vectors. Return the number of vectors
4037 * reserved or negative on failure.
4038 */
4039 static int ice_ena_msix_range(struct ice_pf *pf)
4040 {
4041 int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
4042 struct device *dev = ice_pf_to_dev(pf);
4043 int err, i;
4044
4045 hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
4046 num_cpus = num_online_cpus();
4047
4048 /* LAN miscellaneous handler */
4049 v_other = ICE_MIN_LAN_OICR_MSIX;
4050
4051 /* Flow Director */
4052 if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
4053 v_other += ICE_FDIR_MSIX;
4054
4055 /* switchdev */
4056 v_other += ICE_ESWITCH_MSIX;
4057
4058 v_wanted = v_other;
4059
4060 /* LAN traffic */
4061 pf->num_lan_msix = num_cpus;
4062 v_wanted += pf->num_lan_msix;
4063
4064 /* RDMA auxiliary driver */
4065 if (ice_is_rdma_ena(pf)) {
4066 pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
4067 v_wanted += pf->num_rdma_msix;
4068 }
4069
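	/* At this point v_wanted is the miscellaneous vectors (OICR, optional
	 * flow director, eswitch) plus one LAN vector per online CPU plus, if
	 * RDMA is supported, one RDMA vector per online CPU plus the AEQ ones.
	 */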
4070 if (v_wanted > hw_num_msix) {
4071 int v_remain;
4072
4073 dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
4074 v_wanted, hw_num_msix);
4075
4076 if (hw_num_msix < ICE_MIN_MSIX) {
4077 err = -ERANGE;
4078 goto exit_err;
4079 }
4080
4081 v_remain = hw_num_msix - v_other;
4082 if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
4083 v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
4084 v_remain = ICE_MIN_LAN_TXRX_MSIX;
4085 }
4086
4087 ice_reduce_msix_usage(pf, v_remain);
4088 v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
4089
4090 dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
4091 pf->num_lan_msix);
4092 if (ice_is_rdma_ena(pf))
4093 dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
4094 pf->num_rdma_msix);
4095 }
4096
4097 pf->msix_entries = devm_kcalloc(dev, v_wanted,
4098 sizeof(*pf->msix_entries), GFP_KERNEL);
4099 if (!pf->msix_entries) {
4100 err = -ENOMEM;
4101 goto exit_err;
4102 }
4103
4104 for (i = 0; i < v_wanted; i++)
4105 pf->msix_entries[i].entry = i;
4106
4107 /* actually reserve the vectors */
4108 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
4109 ICE_MIN_MSIX, v_wanted);
4110 if (v_actual < 0) {
4111 dev_err(dev, "unable to reserve MSI-X vectors\n");
4112 err = v_actual;
4113 goto msix_err;
4114 }
4115
4116 if (v_actual < v_wanted) {
4117 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
4118 v_wanted, v_actual);
4119
4120 if (v_actual < ICE_MIN_MSIX) {
4121 /* error if we can't get minimum vectors */
4122 pci_disable_msix(pf->pdev);
4123 err = -ERANGE;
4124 goto msix_err;
4125 } else {
4126 int v_remain = v_actual - v_other;
4127
4128 if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
4129 v_remain = ICE_MIN_LAN_TXRX_MSIX;
4130
4131 ice_reduce_msix_usage(pf, v_remain);
4132
4133 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
4134 pf->num_lan_msix);
4135
4136 if (ice_is_rdma_ena(pf))
4137 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
4138 pf->num_rdma_msix);
4139 }
4140 }
4141
4142 return v_actual;
4143
4144 msix_err:
4145 devm_kfree(dev, pf->msix_entries);
4146
4147 exit_err:
4148 pf->num_rdma_msix = 0;
4149 pf->num_lan_msix = 0;
4150 return err;
4151 }
4152
4153 /**
4154 * ice_dis_msix - Disable MSI-X interrupt setup in OS
4155 * @pf: board private structure
4156 */
4157 static void ice_dis_msix(struct ice_pf *pf)
4158 {
4159 pci_disable_msix(pf->pdev);
4160 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
4161 pf->msix_entries = NULL;
4162 }
4163
4164 /**
4165 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
4166 * @pf: board private structure
4167 */
4168 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
4169 {
4170 ice_dis_msix(pf);
4171
4172 if (pf->irq_tracker) {
4173 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
4174 pf->irq_tracker = NULL;
4175 }
4176 }
4177
4178 /**
4179 * ice_init_interrupt_scheme - Determine proper interrupt scheme
4180 * @pf: board private structure to initialize
4181 */
4182 static int ice_init_interrupt_scheme(struct ice_pf *pf)
4183 {
4184 int vectors;
4185
4186 vectors = ice_ena_msix_range(pf);
4187
4188 if (vectors < 0)
4189 return vectors;
4190
4191 /* set up vector assignment tracking */
4192 pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
4193 struct_size(pf->irq_tracker, list, vectors),
4194 GFP_KERNEL);
4195 if (!pf->irq_tracker) {
4196 ice_dis_msix(pf);
4197 return -ENOMEM;
4198 }
4199
4200 	/* populate the SW interrupt pool with the number of OS-granted IRQs */
4201 pf->num_avail_sw_msix = (u16)vectors;
4202 pf->irq_tracker->num_entries = (u16)vectors;
4203 pf->irq_tracker->end = pf->irq_tracker->num_entries;
4204
4205 return 0;
4206 }
4207
4208 /**
4209 * ice_is_wol_supported - check if WoL is supported
4210 * @hw: pointer to hardware info
4211 *
4212 * Check if WoL is supported based on the HW configuration.
4213 * Returns true if NVM supports and enables WoL for this port, false otherwise
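 *
 * For example, if the WoL control word read back as 0x0002, only the port
 * with lport 1 would have its bit set and thus report WoL as unsupported.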
4214 */
4215 bool ice_is_wol_supported(struct ice_hw *hw)
4216 {
4217 u16 wol_ctrl;
4218
4219 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
4220 * word) indicates WoL is not supported on the corresponding PF ID.
4221 */
4222 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
4223 return false;
4224
4225 return !(BIT(hw->port_info->lport) & wol_ctrl);
4226 }
4227
4228 /**
4229 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4230 * @vsi: VSI being changed
4231 * @new_rx: new number of Rx queues
4232 * @new_tx: new number of Tx queues
4233 * @locked: is adev device_lock held
4234 *
4235  * Only change the number of queues if new_tx or new_rx is non-zero.
4236 *
4237 * Returns 0 on success.
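 *
 * This is typically reached from the ethtool set_channels path (e.g.
 * "ethtool -L <iface> combined 8"), which requests matching Rx/Tx counts.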
4238 */
4239 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
4240 {
4241 struct ice_pf *pf = vsi->back;
4242 int err = 0, timeout = 50;
4243
4244 if (!new_rx && !new_tx)
4245 return -EINVAL;
4246
4247 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
4248 timeout--;
4249 if (!timeout)
4250 return -EBUSY;
4251 usleep_range(1000, 2000);
4252 }
4253
4254 if (new_tx)
4255 vsi->req_txq = (u16)new_tx;
4256 if (new_rx)
4257 vsi->req_rxq = (u16)new_rx;
4258
4259 /* set for the next time the netdev is started */
4260 if (!netif_running(vsi->netdev)) {
4261 ice_vsi_rebuild(vsi, false);
4262 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
4263 goto done;
4264 }
4265
4266 ice_vsi_close(vsi);
4267 ice_vsi_rebuild(vsi, false);
4268 ice_pf_dcb_recfg(pf, locked);
4269 ice_vsi_open(vsi);
4270 done:
4271 clear_bit(ICE_CFG_BUSY, pf->state);
4272 return err;
4273 }
4274
4275 /**
4276 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4277 * @pf: PF to configure
4278 *
4279 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
4280 * VSI can still Tx/Rx VLAN tagged packets.
4281 */
4282 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
4283 {
4284 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4285 struct ice_vsi_ctx *ctxt;
4286 struct ice_hw *hw;
4287 int status;
4288
4289 if (!vsi)
4290 return;
4291
4292 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
4293 if (!ctxt)
4294 return;
4295
4296 hw = &pf->hw;
4297 ctxt->info = vsi->info;
4298
4299 ctxt->info.valid_sections =
4300 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
4301 ICE_AQ_VSI_PROP_SECURITY_VALID |
4302 ICE_AQ_VSI_PROP_SW_VALID);
4303
4304 /* disable VLAN anti-spoof */
4305 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4306 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4307
4308 /* disable VLAN pruning and keep all other settings */
4309 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
4310
4311 /* allow all VLANs on Tx and don't strip on Rx */
4312 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
4313 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
4314
4315 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
4316 if (status) {
4317 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
4318 status, ice_aq_str(hw->adminq.sq_last_status));
4319 } else {
4320 vsi->info.sec_flags = ctxt->info.sec_flags;
4321 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
4322 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
4323 }
4324
4325 kfree(ctxt);
4326 }
4327
4328 /**
4329 * ice_log_pkg_init - log result of DDP package load
4330 * @hw: pointer to hardware info
4331 * @state: state of package load
4332 */
4333 static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
4334 {
4335 struct ice_pf *pf = hw->back;
4336 struct device *dev;
4337
4338 dev = ice_pf_to_dev(pf);
4339
4340 switch (state) {
4341 case ICE_DDP_PKG_SUCCESS:
4342 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
4343 hw->active_pkg_name,
4344 hw->active_pkg_ver.major,
4345 hw->active_pkg_ver.minor,
4346 hw->active_pkg_ver.update,
4347 hw->active_pkg_ver.draft);
4348 break;
4349 case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
4350 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
4351 hw->active_pkg_name,
4352 hw->active_pkg_ver.major,
4353 hw->active_pkg_ver.minor,
4354 hw->active_pkg_ver.update,
4355 hw->active_pkg_ver.draft);
4356 break;
4357 case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
4358 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
4359 hw->active_pkg_name,
4360 hw->active_pkg_ver.major,
4361 hw->active_pkg_ver.minor,
4362 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4363 break;
4364 case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
4365 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
4366 hw->active_pkg_name,
4367 hw->active_pkg_ver.major,
4368 hw->active_pkg_ver.minor,
4369 hw->active_pkg_ver.update,
4370 hw->active_pkg_ver.draft,
4371 hw->pkg_name,
4372 hw->pkg_ver.major,
4373 hw->pkg_ver.minor,
4374 hw->pkg_ver.update,
4375 hw->pkg_ver.draft);
4376 break;
4377 case ICE_DDP_PKG_FW_MISMATCH:
4378 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
4379 break;
4380 case ICE_DDP_PKG_INVALID_FILE:
4381 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
4382 break;
4383 case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
4384 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
4385 break;
4386 case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
4387 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
4388 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
4389 break;
4390 case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
4391 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
4392 break;
4393 case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
4394 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
4395 break;
4396 case ICE_DDP_PKG_LOAD_ERROR:
4397 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
4398 /* poll for reset to complete */
4399 if (ice_check_reset(hw))
4400 dev_err(dev, "Error resetting device. Please reload the driver\n");
4401 break;
4402 case ICE_DDP_PKG_ERR:
4403 default:
4404 dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
4405 break;
4406 }
4407 }
4408
4409 /**
4410 * ice_load_pkg - load/reload the DDP Package file
4411 * @firmware: firmware structure when firmware requested or NULL for reload
4412 * @pf: pointer to the PF instance
4413 *
4414 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
4415 * initialize HW tables.
4416 */
4417 static void
4418 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
4419 {
4420 enum ice_ddp_state state = ICE_DDP_PKG_ERR;
4421 struct device *dev = ice_pf_to_dev(pf);
4422 struct ice_hw *hw = &pf->hw;
4423
4424 /* Load DDP Package */
4425 if (firmware && !hw->pkg_copy) {
4426 state = ice_copy_and_init_pkg(hw, firmware->data,
4427 firmware->size);
4428 ice_log_pkg_init(hw, state);
4429 } else if (!firmware && hw->pkg_copy) {
4430 /* Reload package during rebuild after CORER/GLOBR reset */
4431 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4432 ice_log_pkg_init(hw, state);
4433 } else {
4434 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4435 }
4436
4437 if (!ice_is_init_pkg_successful(state)) {
4438 /* Safe Mode */
4439 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4440 return;
4441 }
4442
4443 	/* A successful package download is the precondition for advanced
4444 	 * features, hence set the ICE_FLAG_ADV_FEATURES flag
4445 */
4446 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4447 }
4448
4449 /**
4450 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4451 * @pf: pointer to the PF structure
4452 *
4453 * There is no error returned here because the driver should be able to handle
4454 * 128 Byte cache lines, so we only print a warning in case issues are seen,
4455 * specifically with Tx.
4456 */
4457 static void ice_verify_cacheline_size(struct ice_pf *pf)
4458 {
4459 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4460 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4461 ICE_CACHE_LINE_BYTES);
4462 }
4463
4464 /**
4465 * ice_send_version - update firmware with driver version
4466 * @pf: PF struct
4467 *
4468 * Returns 0 on success, else error code
4469 */
4470 static int ice_send_version(struct ice_pf *pf)
4471 {
4472 struct ice_driver_ver dv;
4473
4474 dv.major_ver = 0xff;
4475 dv.minor_ver = 0xff;
4476 dv.build_ver = 0xff;
4477 dv.subbuild_ver = 0;
4478 strscpy((char *)dv.driver_string, UTS_RELEASE,
4479 sizeof(dv.driver_string));
4480 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4481 }
4482
4483 /**
4484 * ice_init_fdir - Initialize flow director VSI and configuration
4485 * @pf: pointer to the PF instance
4486 *
4487 * returns 0 on success, negative on error
4488 */
4489 static int ice_init_fdir(struct ice_pf *pf)
4490 {
4491 struct device *dev = ice_pf_to_dev(pf);
4492 struct ice_vsi *ctrl_vsi;
4493 int err;
4494
4495 /* Side Band Flow Director needs to have a control VSI.
4496 * Allocate it and store it in the PF.
4497 */
4498 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4499 if (!ctrl_vsi) {
4500 dev_dbg(dev, "could not create control VSI\n");
4501 return -ENOMEM;
4502 }
4503
4504 err = ice_vsi_open_ctrl(ctrl_vsi);
4505 if (err) {
4506 dev_dbg(dev, "could not open control VSI\n");
4507 goto err_vsi_open;
4508 }
4509
4510 mutex_init(&pf->hw.fdir_fltr_lock);
4511
4512 err = ice_fdir_create_dflt_rules(pf);
4513 if (err)
4514 goto err_fdir_rule;
4515
4516 return 0;
4517
4518 err_fdir_rule:
4519 ice_fdir_release_flows(&pf->hw);
4520 ice_vsi_close(ctrl_vsi);
4521 err_vsi_open:
4522 ice_vsi_release(ctrl_vsi);
4523 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4524 pf->vsi[pf->ctrl_vsi_idx] = NULL;
4525 pf->ctrl_vsi_idx = ICE_NO_VSI;
4526 }
4527 return err;
4528 }
4529
4530 /**
4531 * ice_get_opt_fw_name - return optional firmware file name or NULL
4532 * @pf: pointer to the PF instance
4533 */
4534 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4535 {
4536 	/* The optional firmware name is the same as the default, with an added
4537 	 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
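	 * (e.g. for a hypothetical DSN of 0x0123456789abcdef the driver would
	 * look for "intel/ice/ddp/ice-0123456789abcdef.pkg")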
4538 */
4539 struct pci_dev *pdev = pf->pdev;
4540 char *opt_fw_filename;
4541 u64 dsn;
4542
4543 /* Determine the name of the optional file using the DSN (two
4544 * dwords following the start of the DSN Capability).
4545 */
4546 dsn = pci_get_dsn(pdev);
4547 if (!dsn)
4548 return NULL;
4549
4550 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4551 if (!opt_fw_filename)
4552 return NULL;
4553
4554 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4555 ICE_DDP_PKG_PATH, dsn);
4556
4557 return opt_fw_filename;
4558 }
4559
4560 /**
4561 * ice_request_fw - Device initialization routine
4562 * @pf: pointer to the PF instance
4563 */
4564 static void ice_request_fw(struct ice_pf *pf)
4565 {
4566 char *opt_fw_filename = ice_get_opt_fw_name(pf);
4567 const struct firmware *firmware = NULL;
4568 struct device *dev = ice_pf_to_dev(pf);
4569 int err = 0;
4570
4571 	/* An optional device-specific DDP package (if present) overrides the
4572 	 * default DDP package file. The kernel logs a debug message if the file
4573 	 * doesn't exist and warning messages for other errors.
4574 */
4575 if (opt_fw_filename) {
4576 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4577 if (err) {
4578 kfree(opt_fw_filename);
4579 goto dflt_pkg_load;
4580 }
4581
4582 /* request for firmware was successful. Download to device */
4583 ice_load_pkg(firmware, pf);
4584 kfree(opt_fw_filename);
4585 release_firmware(firmware);
4586 return;
4587 }
4588
4589 dflt_pkg_load:
4590 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4591 if (err) {
4592 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4593 return;
4594 }
4595
4596 /* request for firmware was successful. Download to device */
4597 ice_load_pkg(firmware, pf);
4598 release_firmware(firmware);
4599 }
4600
4601 /**
4602 * ice_print_wake_reason - show the wake up cause in the log
4603 * @pf: pointer to the PF struct
4604 */
4605 static void ice_print_wake_reason(struct ice_pf *pf)
4606 {
4607 u32 wus = pf->wakeup_reason;
4608 const char *wake_str;
4609
4610 /* if no wake event, nothing to print */
4611 if (!wus)
4612 return;
4613
4614 if (wus & PFPM_WUS_LNKC_M)
4615 wake_str = "Link\n";
4616 else if (wus & PFPM_WUS_MAG_M)
4617 wake_str = "Magic Packet\n";
4618 else if (wus & PFPM_WUS_MNG_M)
4619 wake_str = "Management\n";
4620 else if (wus & PFPM_WUS_FW_RST_WK_M)
4621 wake_str = "Firmware Reset\n";
4622 else
4623 wake_str = "Unknown\n";
4624
4625 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4626 }
4627
4628 /**
4629 * ice_register_netdev - register netdev and devlink port
4630 * @pf: pointer to the PF struct
4631 */
4632 static int ice_register_netdev(struct ice_pf *pf)
4633 {
4634 struct ice_vsi *vsi;
4635 int err = 0;
4636
4637 vsi = ice_get_main_vsi(pf);
4638 if (!vsi || !vsi->netdev)
4639 return -EIO;
4640
4641 err = ice_devlink_create_pf_port(pf);
4642 if (err)
4643 goto err_devlink_create;
4644
4645 err = register_netdev(vsi->netdev);
4646 if (err)
4647 goto err_register_netdev;
4648
4649 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4650 netif_carrier_off(vsi->netdev);
4651 netif_tx_stop_all_queues(vsi->netdev);
4652
4653 devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
4654
4655 return 0;
4656 err_register_netdev:
4657 ice_devlink_destroy_pf_port(pf);
4658 err_devlink_create:
4659 free_netdev(vsi->netdev);
4660 vsi->netdev = NULL;
4661 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4662 return err;
4663 }
4664
4665 /**
4666 * ice_probe - Device initialization routine
4667 * @pdev: PCI device information struct
4668 * @ent: entry in ice_pci_tbl
4669 *
4670 * Returns 0 on success, negative on failure
4671 */
4672 static int
4673 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4674 {
4675 struct device *dev = &pdev->dev;
4676 struct ice_pf *pf;
4677 struct ice_hw *hw;
4678 int i, err;
4679
4680 if (pdev->is_virtfn) {
4681 dev_err(dev, "can't probe a virtual function\n");
4682 return -EINVAL;
4683 }
4684
4685 	/* When running under a kdump kernel, initiate a reset before enabling the
4686 * device in order to clear out any pending DMA transactions. These
4687 * transactions can cause some systems to machine check when doing
4688 * the pcim_enable_device() below.
4689 */
4690 if (is_kdump_kernel()) {
4691 pci_save_state(pdev);
4692 pci_clear_master(pdev);
4693 err = pcie_flr(pdev);
4694 if (err)
4695 return err;
4696 pci_restore_state(pdev);
4697 }
4698
4699 /* this driver uses devres, see
4700 * Documentation/driver-api/driver-model/devres.rst
4701 */
4702 err = pcim_enable_device(pdev);
4703 if (err)
4704 return err;
4705
4706 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4707 if (err) {
4708 dev_err(dev, "BAR0 I/O map error %d\n", err);
4709 return err;
4710 }
4711
4712 pf = ice_allocate_pf(dev);
4713 if (!pf)
4714 return -ENOMEM;
4715
4716 /* initialize Auxiliary index to invalid value */
4717 pf->aux_idx = -1;
4718
4719 /* set up for high or low DMA */
4720 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4721 if (err) {
4722 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4723 return err;
4724 }
4725
4726 pci_set_master(pdev);
4727
4728 pf->pdev = pdev;
4729 pci_set_drvdata(pdev, pf);
4730 set_bit(ICE_DOWN, pf->state);
4731 /* Disable service task until DOWN bit is cleared */
4732 set_bit(ICE_SERVICE_DIS, pf->state);
4733
4734 hw = &pf->hw;
4735 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4736 pci_save_state(pdev);
4737
4738 hw->back = pf;
4739 hw->vendor_id = pdev->vendor;
4740 hw->device_id = pdev->device;
4741 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4742 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4743 hw->subsystem_device_id = pdev->subsystem_device;
4744 hw->bus.device = PCI_SLOT(pdev->devfn);
4745 hw->bus.func = PCI_FUNC(pdev->devfn);
4746 ice_set_ctrlq_len(hw);
4747
4748 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4749
4750 #ifndef CONFIG_DYNAMIC_DEBUG
4751 if (debug < -1)
4752 hw->debug_mask = debug;
4753 #endif
4754
4755 err = ice_init_hw(hw);
4756 if (err) {
4757 dev_err(dev, "ice_init_hw failed: %d\n", err);
4758 err = -EIO;
4759 goto err_exit_unroll;
4760 }
4761
4762 ice_init_feature_support(pf);
4763
4764 ice_request_fw(pf);
4765
4766 	/* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4767 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4768 * true
4769 */
4770 if (ice_is_safe_mode(pf)) {
4771 /* we already got function/device capabilities but these don't
4772 * reflect what the driver needs to do in safe mode. Instead of
4773 * adding conditional logic everywhere to ignore these
4774 * device/function capabilities, override them.
4775 */
4776 ice_set_safe_mode_caps(hw);
4777 }
4778
4779 err = ice_init_pf(pf);
4780 if (err) {
4781 dev_err(dev, "ice_init_pf failed: %d\n", err);
4782 goto err_init_pf_unroll;
4783 }
4784
4785 ice_devlink_init_regions(pf);
4786
4787 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4788 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4789 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4790 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
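	/* expose one udp_tunnel_nic table per tunnel type supported by the
	 * loaded DDP package, sized by the package-provided entry counts
	 */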
4791 i = 0;
4792 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4793 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4794 pf->hw.tnl.valid_count[TNL_VXLAN];
4795 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4796 UDP_TUNNEL_TYPE_VXLAN;
4797 i++;
4798 }
4799 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4800 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4801 pf->hw.tnl.valid_count[TNL_GENEVE];
4802 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4803 UDP_TUNNEL_TYPE_GENEVE;
4804 i++;
4805 }
4806
4807 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4808 if (!pf->num_alloc_vsi) {
4809 err = -EIO;
4810 goto err_init_pf_unroll;
4811 }
4812 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4813 dev_warn(&pf->pdev->dev,
4814 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4815 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4816 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4817 }
4818
4819 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4820 GFP_KERNEL);
4821 if (!pf->vsi) {
4822 err = -ENOMEM;
4823 goto err_init_pf_unroll;
4824 }
4825
4826 err = ice_init_interrupt_scheme(pf);
4827 if (err) {
4828 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4829 err = -EIO;
4830 goto err_init_vsi_unroll;
4831 }
4832
4833 	/* In case of MSI-X we are going to set up the misc vector right here
4834 	 * to handle admin queue events etc. In case of legacy and MSI,
4835 	 * the misc functionality and queue processing are combined in
4836 	 * the same vector, which gets set up at open.
4837 */
4838 err = ice_req_irq_msix_misc(pf);
4839 if (err) {
4840 dev_err(dev, "setup of misc vector failed: %d\n", err);
4841 goto err_init_interrupt_unroll;
4842 }
4843
4844 /* create switch struct for the switch element created by FW on boot */
4845 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4846 if (!pf->first_sw) {
4847 err = -ENOMEM;
4848 goto err_msix_misc_unroll;
4849 }
4850
4851 if (hw->evb_veb)
4852 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4853 else
4854 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4855
4856 pf->first_sw->pf = pf;
4857
4858 /* record the sw_id available for later use */
4859 pf->first_sw->sw_id = hw->port_info->sw_id;
4860
4861 err = ice_setup_pf_sw(pf);
4862 if (err) {
4863 dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4864 goto err_alloc_sw_unroll;
4865 }
4866
4867 clear_bit(ICE_SERVICE_DIS, pf->state);
4868
4869 /* tell the firmware we are up */
4870 err = ice_send_version(pf);
4871 if (err) {
4872 dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4873 UTS_RELEASE, err);
4874 goto err_send_version_unroll;
4875 }
4876
4877 /* since everything is good, start the service timer */
4878 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4879
4880 err = ice_init_link_events(pf->hw.port_info);
4881 if (err) {
4882 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4883 goto err_send_version_unroll;
4884 }
4885
4886 /* not a fatal error if this fails */
4887 err = ice_init_nvm_phy_type(pf->hw.port_info);
4888 if (err)
4889 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4890
4891 /* not a fatal error if this fails */
4892 err = ice_update_link_info(pf->hw.port_info);
4893 if (err)
4894 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4895
4896 ice_init_link_dflt_override(pf->hw.port_info);
4897
4898 ice_check_link_cfg_err(pf,
4899 pf->hw.port_info->phy.link_info.link_cfg_err);
4900
4901 /* if media available, initialize PHY settings */
4902 if (pf->hw.port_info->phy.link_info.link_info &
4903 ICE_AQ_MEDIA_AVAILABLE) {
4904 /* not a fatal error if this fails */
4905 err = ice_init_phy_user_cfg(pf->hw.port_info);
4906 if (err)
4907 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4908
4909 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4910 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4911
4912 if (vsi)
4913 ice_configure_phy(vsi);
4914 }
4915 } else {
4916 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4917 }
4918
4919 ice_verify_cacheline_size(pf);
4920
4921 /* Save wakeup reason register for later use */
4922 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4923
4924 /* check for a power management event */
4925 ice_print_wake_reason(pf);
4926
4927 /* clear wake status, all bits */
4928 wr32(hw, PFPM_WUS, U32_MAX);
4929
4930 /* Disable WoL at init, wait for user to enable */
4931 device_set_wakeup_enable(dev, false);
4932
4933 if (ice_is_safe_mode(pf)) {
4934 ice_set_safe_mode_vlan_cfg(pf);
4935 goto probe_done;
4936 }
4937
4938 /* initialize DDP driven features */
4939 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4940 ice_ptp_init(pf);
4941
4942 if (ice_is_feature_supported(pf, ICE_F_GNSS))
4943 ice_gnss_init(pf);
4944
4945 /* Note: Flow director init failure is non-fatal to load */
4946 if (ice_init_fdir(pf))
4947 dev_err(dev, "could not initialize flow director\n");
4948
4949 /* Note: DCB init failure is non-fatal to load */
4950 if (ice_init_pf_dcb(pf, false)) {
4951 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4952 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4953 } else {
4954 ice_cfg_lldp_mib_change(&pf->hw, true);
4955 }
4956
4957 if (ice_init_lag(pf))
4958 dev_warn(dev, "Failed to init link aggregation support\n");
4959
4960 /* print PCI link speed and width */
4961 pcie_print_link_status(pf->pdev);
4962
4963 probe_done:
4964 err = ice_register_netdev(pf);
4965 if (err)
4966 goto err_netdev_reg;
4967
4968 err = ice_devlink_register_params(pf);
4969 if (err)
4970 goto err_netdev_reg;
4971
4972 /* ready to go, so clear down state bit */
4973 clear_bit(ICE_DOWN, pf->state);
4974 if (ice_is_rdma_ena(pf)) {
4975 pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4976 if (pf->aux_idx < 0) {
4977 dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4978 err = -ENOMEM;
4979 goto err_devlink_reg_param;
4980 }
4981
4982 err = ice_init_rdma(pf);
4983 if (err) {
4984 dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4985 err = -EIO;
4986 goto err_init_aux_unroll;
4987 }
4988 } else {
4989 dev_warn(dev, "RDMA is not supported on this device\n");
4990 }
4991
4992 ice_devlink_register(pf);
4993 return 0;
4994
4995 err_init_aux_unroll:
4996 pf->adev = NULL;
4997 ida_free(&ice_aux_ida, pf->aux_idx);
4998 err_devlink_reg_param:
4999 ice_devlink_unregister_params(pf);
5000 err_netdev_reg:
5001 err_send_version_unroll:
5002 ice_vsi_release_all(pf);
5003 err_alloc_sw_unroll:
5004 set_bit(ICE_SERVICE_DIS, pf->state);
5005 set_bit(ICE_DOWN, pf->state);
5006 devm_kfree(dev, pf->first_sw);
5007 err_msix_misc_unroll:
5008 ice_free_irq_msix_misc(pf);
5009 err_init_interrupt_unroll:
5010 ice_clear_interrupt_scheme(pf);
5011 err_init_vsi_unroll:
5012 devm_kfree(dev, pf->vsi);
5013 err_init_pf_unroll:
5014 ice_deinit_pf(pf);
5015 ice_devlink_destroy_regions(pf);
5016 ice_deinit_hw(hw);
5017 err_exit_unroll:
5018 pci_disable_device(pdev);
5019 return err;
5020 }
5021
5022 /**
5023 * ice_set_wake - enable or disable Wake on LAN
5024 * @pf: pointer to the PF struct
5025 *
5026 * Simple helper for WoL control
5027 */
5028 static void ice_set_wake(struct ice_pf *pf)
5029 {
5030 struct ice_hw *hw = &pf->hw;
5031 bool wol = pf->wol_ena;
5032
5033 /* clear wake state, otherwise new wake events won't fire */
5034 wr32(hw, PFPM_WUS, U32_MAX);
5035
5036 /* enable / disable APM wake up, no RMW needed */
5037 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
5038
5039 /* set magic packet filter enabled */
5040 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
5041 }
5042
5043 /**
5044 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5045 * @pf: pointer to the PF struct
5046 *
5047 * Issue firmware command to enable multicast magic wake, making
5048 * sure that any locally administered address (LAA) is used for
5049 * wake, and that PF reset doesn't undo the LAA.
5050 */
5051 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
5052 {
5053 struct device *dev = ice_pf_to_dev(pf);
5054 struct ice_hw *hw = &pf->hw;
5055 u8 mac_addr[ETH_ALEN];
5056 struct ice_vsi *vsi;
5057 int status;
5058 u8 flags;
5059
5060 if (!pf->wol_ena)
5061 return;
5062
5063 vsi = ice_get_main_vsi(pf);
5064 if (!vsi)
5065 return;
5066
5067 /* Get current MAC address in case it's an LAA */
5068 if (vsi->netdev)
5069 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
5070 else
5071 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
5072
5073 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
5074 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
5075 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
5076
5077 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
5078 if (status)
5079 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
5080 status, ice_aq_str(hw->adminq.sq_last_status));
5081 }
5082
5083 /**
5084 * ice_remove - Device removal routine
5085 * @pdev: PCI device information struct
5086 */
5087 static void ice_remove(struct pci_dev *pdev)
5088 {
5089 struct ice_pf *pf = pci_get_drvdata(pdev);
5090 int i;
5091
5092 ice_devlink_unregister(pf);
5093 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
5094 if (!ice_is_reset_in_progress(pf->state))
5095 break;
5096 msleep(100);
5097 }
5098
5099 ice_tc_indir_block_remove(pf);
5100
5101 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
5102 set_bit(ICE_VF_RESETS_DISABLED, pf->state);
5103 ice_free_vfs(pf);
5104 }
5105
5106 ice_service_task_stop(pf);
5107
5108 ice_aq_cancel_waiting_tasks(pf);
5109 ice_unplug_aux_dev(pf);
5110 if (pf->aux_idx >= 0)
5111 ida_free(&ice_aux_ida, pf->aux_idx);
5112 ice_devlink_unregister_params(pf);
5113 set_bit(ICE_DOWN, pf->state);
5114
5115 ice_deinit_lag(pf);
5116 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
5117 ice_ptp_release(pf);
5118 if (ice_is_feature_supported(pf, ICE_F_GNSS))
5119 ice_gnss_exit(pf);
5120 if (!ice_is_safe_mode(pf))
5121 ice_remove_arfs(pf);
5122 ice_setup_mc_magic_wake(pf);
5123 ice_vsi_release_all(pf);
5124 	mutex_destroy(&pf->hw.fdir_fltr_lock);
5125 ice_set_wake(pf);
5126 ice_free_irq_msix_misc(pf);
5127 ice_for_each_vsi(pf, i) {
5128 if (!pf->vsi[i])
5129 continue;
5130 ice_vsi_free_q_vectors(pf->vsi[i]);
5131 }
5132 ice_deinit_pf(pf);
5133 ice_devlink_destroy_regions(pf);
5134 ice_deinit_hw(&pf->hw);
5135
5136 /* Issue a PFR as part of the prescribed driver unload flow. Do not
5137 * do it via ice_schedule_reset() since there is no need to rebuild
5138 * and the service task is already stopped.
5139 */
5140 ice_reset(&pf->hw, ICE_RESET_PFR);
5141 pci_wait_for_pending_transaction(pdev);
5142 ice_clear_interrupt_scheme(pf);
5143 pci_disable_device(pdev);
5144 }
5145
5146 /**
5147 * ice_shutdown - PCI callback for shutting down device
5148 * @pdev: PCI device information struct
5149 */
5150 static void ice_shutdown(struct pci_dev *pdev)
5151 {
5152 struct ice_pf *pf = pci_get_drvdata(pdev);
5153
5154 ice_remove(pdev);
5155
5156 if (system_state == SYSTEM_POWER_OFF) {
5157 pci_wake_from_d3(pdev, pf->wol_ena);
5158 pci_set_power_state(pdev, PCI_D3hot);
5159 }
5160 }
5161
5162 #ifdef CONFIG_PM
5163 /**
5164 * ice_prepare_for_shutdown - prep for PCI shutdown
5165 * @pf: board private structure
5166 *
5167 * Inform or close all dependent features in prep for PCI device shutdown
5168 */
5169 static void ice_prepare_for_shutdown(struct ice_pf *pf)
5170 {
5171 struct ice_hw *hw = &pf->hw;
5172 u32 v;
5173
5174 /* Notify VFs of impending reset */
5175 if (ice_check_sq_alive(hw, &hw->mailboxq))
5176 ice_vc_notify_reset(pf);
5177
5178 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
5179
5180 /* disable the VSIs and their queues that are not already DOWN */
5181 ice_pf_dis_all_vsi(pf, false);
5182
5183 ice_for_each_vsi(pf, v)
5184 if (pf->vsi[v])
5185 pf->vsi[v]->vsi_num = 0;
5186
5187 ice_shutdown_all_ctrlq(hw);
5188 }
5189
5190 /**
5191 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5192 * @pf: board private structure to reinitialize
5193 *
5194  * This routine reinitializes the interrupt scheme that was cleared during
5195  * the power management suspend callback.
5196 *
5197 * This should be called during resume routine to re-allocate the q_vectors
5198 * and reacquire interrupts.
5199 */
5200 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
5201 {
5202 struct device *dev = ice_pf_to_dev(pf);
5203 int ret, v;
5204
5205 	/* Since we cleared the interrupt scheme during suspend, we need to
5206 	 * set it back up during resume.
5207 */
5208
5209 ret = ice_init_interrupt_scheme(pf);
5210 if (ret) {
5211 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
5212 return ret;
5213 }
5214
5215 /* Remap vectors and rings, after successful re-init interrupts */
5216 ice_for_each_vsi(pf, v) {
5217 if (!pf->vsi[v])
5218 continue;
5219
5220 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
5221 if (ret)
5222 goto err_reinit;
5223 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
5224 }
5225
5226 ret = ice_req_irq_msix_misc(pf);
5227 if (ret) {
5228 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
5229 ret);
5230 goto err_reinit;
5231 }
5232
5233 return 0;
5234
5235 err_reinit:
5236 while (v--)
5237 if (pf->vsi[v])
5238 ice_vsi_free_q_vectors(pf->vsi[v]);
5239
5240 return ret;
5241 }
5242
5243 /**
5244  * ice_suspend - PM callback for suspending the device
5245 * @dev: generic device information structure
5246 *
5247 * Power Management callback to quiesce the device and prepare
5248 * for D3 transition.
5249 */
5250 static int __maybe_unused ice_suspend(struct device *dev)
5251 {
5252 struct pci_dev *pdev = to_pci_dev(dev);
5253 struct ice_pf *pf;
5254 int disabled, v;
5255
5256 pf = pci_get_drvdata(pdev);
5257
5258 if (!ice_pf_state_is_nominal(pf)) {
5259 dev_err(dev, "Device is not ready, no need to suspend it\n");
5260 return -EBUSY;
5261 }
5262
5263 /* Stop watchdog tasks until resume completion.
5264 * Even though it is most likely that the service task is
5265 * disabled if the device is suspended or down, the service task's
5266 * state is controlled by a different state bit, and we should
5267 * store and honor whatever state that bit is in at this point.
5268 */
5269 disabled = ice_service_task_stop(pf);
5270
5271 ice_unplug_aux_dev(pf);
5272
5273 	/* Already suspended? Then there is nothing to do */
5274 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
5275 if (!disabled)
5276 ice_service_task_restart(pf);
5277 return 0;
5278 }
5279
5280 if (test_bit(ICE_DOWN, pf->state) ||
5281 ice_is_reset_in_progress(pf->state)) {
5282 dev_err(dev, "can't suspend device in reset or already down\n");
5283 if (!disabled)
5284 ice_service_task_restart(pf);
5285 return 0;
5286 }
5287
5288 ice_setup_mc_magic_wake(pf);
5289
5290 ice_prepare_for_shutdown(pf);
5291
5292 ice_set_wake(pf);
5293
5294 /* Free vectors, clear the interrupt scheme and release IRQs
5295 * for proper hibernation, especially with large number of CPUs.
5296 * Otherwise hibernation might fail when mapping all the vectors back
5297 * to CPU0.
5298 */
5299 ice_free_irq_msix_misc(pf);
5300 ice_for_each_vsi(pf, v) {
5301 if (!pf->vsi[v])
5302 continue;
5303 ice_vsi_free_q_vectors(pf->vsi[v]);
5304 }
5305 ice_clear_interrupt_scheme(pf);
5306
5307 pci_save_state(pdev);
5308 pci_wake_from_d3(pdev, pf->wol_ena);
5309 pci_set_power_state(pdev, PCI_D3hot);
5310 return 0;
5311 }
5312
5313 /**
5314 * ice_resume - PM callback for waking up from D3
5315 * @dev: generic device information structure
5316 */
5317 static int __maybe_unused ice_resume(struct device *dev)
5318 {
5319 struct pci_dev *pdev = to_pci_dev(dev);
5320 enum ice_reset_req reset_type;
5321 struct ice_pf *pf;
5322 struct ice_hw *hw;
5323 int ret;
5324
5325 pci_set_power_state(pdev, PCI_D0);
5326 pci_restore_state(pdev);
5327 pci_save_state(pdev);
5328
5329 if (!pci_device_is_present(pdev))
5330 return -ENODEV;
5331
5332 ret = pci_enable_device_mem(pdev);
5333 if (ret) {
5334 dev_err(dev, "Cannot enable device after suspend\n");
5335 return ret;
5336 }
5337
5338 pf = pci_get_drvdata(pdev);
5339 hw = &pf->hw;
5340
5341 pf->wakeup_reason = rd32(hw, PFPM_WUS);
5342 ice_print_wake_reason(pf);
5343
5344 /* We cleared the interrupt scheme when we suspended, so we need to
5345 * restore it now to resume device functionality.
5346 */
5347 ret = ice_reinit_interrupt_scheme(pf);
5348 if (ret)
5349 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
5350
5351 clear_bit(ICE_DOWN, pf->state);
5352 /* Now perform PF reset and rebuild */
5353 reset_type = ICE_RESET_PFR;
5354 /* re-enable service task for reset, but allow reset to schedule it */
5355 clear_bit(ICE_SERVICE_DIS, pf->state);
5356
5357 if (ice_schedule_reset(pf, reset_type))
5358 dev_err(dev, "Reset during resume failed.\n");
5359
5360 clear_bit(ICE_SUSPENDED, pf->state);
5361 ice_service_task_restart(pf);
5362
5363 /* Restart the service task */
5364 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5365
5366 return 0;
5367 }
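/* Resume ordering at a glance (descriptive note, summarizing the function
 * above): restore PCI power and config state, read back the wake-up status
 * register, rebuild the interrupt scheme that ice_suspend() tore down, then
 * schedule a PF reset so the rebuild path restores VSIs and queues before
 * the service task is restarted.
 */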
5368 #endif /* CONFIG_PM */
5369
5370 /**
5371 * ice_pci_err_detected - warning that PCI error has been detected
5372 * @pdev: PCI device information struct
5373 * @err: the type of PCI error
5374 *
5375 * Called to warn that something happened on the PCI bus and the error handling
5376 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
5377 */
5378 static pci_ers_result_t
5379 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
5380 {
5381 struct ice_pf *pf = pci_get_drvdata(pdev);
5382
5383 if (!pf) {
5384 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
5385 __func__, err);
5386 return PCI_ERS_RESULT_DISCONNECT;
5387 }
5388
5389 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5390 ice_service_task_stop(pf);
5391
5392 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5393 set_bit(ICE_PFR_REQ, pf->state);
5394 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5395 }
5396 }
5397
5398 return PCI_ERS_RESULT_NEED_RESET;
5399 }
5400
5401 /**
5402 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5403 * @pdev: PCI device information struct
5404 *
5405 * Called to determine if the driver can recover from the PCI slot reset by
5406 * using a register read to determine if the device is recoverable.
5407 */
5408 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
5409 {
5410 struct ice_pf *pf = pci_get_drvdata(pdev);
5411 pci_ers_result_t result;
5412 int err;
5413 u32 reg;
5414
5415 err = pci_enable_device_mem(pdev);
5416 if (err) {
5417 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
5418 err);
5419 result = PCI_ERS_RESULT_DISCONNECT;
5420 } else {
5421 pci_set_master(pdev);
5422 pci_restore_state(pdev);
5423 pci_save_state(pdev);
5424 pci_wake_from_d3(pdev, false);
5425
5426 /* Check for life */
5427 reg = rd32(&pf->hw, GLGEN_RTRIG);
5428 if (!reg)
5429 result = PCI_ERS_RESULT_RECOVERED;
5430 else
5431 result = PCI_ERS_RESULT_DISCONNECT;
5432 }
5433
5434 return result;
5435 }
5436
5437 /**
5438 * ice_pci_err_resume - restart operations after PCI error recovery
5439 * @pdev: PCI device information struct
5440 *
5441 * Called to allow the driver to bring things back up after PCI error and/or
5442 * reset recovery have finished
5443 */
5444 static void ice_pci_err_resume(struct pci_dev *pdev)
5445 {
5446 struct ice_pf *pf = pci_get_drvdata(pdev);
5447
5448 if (!pf) {
5449 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5450 __func__);
5451 return;
5452 }
5453
5454 if (test_bit(ICE_SUSPENDED, pf->state)) {
5455 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5456 __func__);
5457 return;
5458 }
5459
5460 ice_restore_all_vfs_msi_state(pdev);
5461
5462 ice_do_reset(pf, ICE_RESET_PFR);
5463 ice_service_task_restart(pf);
5464 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5465 }
5466
5467 /**
5468 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5469 * @pdev: PCI device information struct
5470 */
5471 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5472 {
5473 struct ice_pf *pf = pci_get_drvdata(pdev);
5474
5475 if (!test_bit(ICE_SUSPENDED, pf->state)) {
5476 ice_service_task_stop(pf);
5477
5478 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5479 set_bit(ICE_PFR_REQ, pf->state);
5480 ice_prepare_for_reset(pf, ICE_RESET_PFR);
5481 }
5482 }
5483 }
5484
5485 /**
5486 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5487 * @pdev: PCI device information struct
5488 */
5489 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5490 {
5491 ice_pci_err_resume(pdev);
5492 }
5493
5494 /* ice_pci_tbl - PCI Device ID Table
5495 *
5496 * Wildcard entries (PCI_ANY_ID) should come last
5497 * Last entry must be all 0s
5498 *
5499 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5500 * Class, Class Mask, private data (not used) }
5501 */
5502 static const struct pci_device_id ice_pci_tbl[] = {
5503 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5504 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5505 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5506 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5507 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5508 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5509 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5510 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5511 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5512 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5513 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5514 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5515 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5516 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5517 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5518 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5519 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5520 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5521 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5522 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5523 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5524 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5525 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5526 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5527 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5528 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
5529 /* required last entry */
5530 { 0, }
5531 };
5532 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5533
5534 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
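/* For illustration: SIMPLE_DEV_PM_OPS() above is roughly equivalent to the
 * open-coded struct below, wiring the same callbacks to all system-sleep
 * hooks (present when CONFIG_PM_SLEEP is enabled):
 *
 *	static const struct dev_pm_ops ice_pm_ops = {
 *		.suspend  = ice_suspend,  .resume  = ice_resume,
 *		.freeze   = ice_suspend,  .thaw    = ice_resume,
 *		.poweroff = ice_suspend,  .restore = ice_resume,
 *	};
 */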
5535
5536 static const struct pci_error_handlers ice_pci_err_handler = {
5537 .error_detected = ice_pci_err_detected,
5538 .slot_reset = ice_pci_err_slot_reset,
5539 .reset_prepare = ice_pci_err_reset_prepare,
5540 .reset_done = ice_pci_err_reset_done,
5541 .resume = ice_pci_err_resume
5542 };
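/* Note on ordering (PCI core behavior): during AER recovery the core calls
 * .error_detected first, then .slot_reset after the link/slot reset, and
 * finally .resume once recovery has succeeded. .reset_prepare/.reset_done
 * bracket function-level resets (e.g. FLR) initiated through the PCI core.
 */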
5543
5544 static struct pci_driver ice_driver = {
5545 .name = KBUILD_MODNAME,
5546 .id_table = ice_pci_tbl,
5547 .probe = ice_probe,
5548 .remove = ice_remove,
5549 #ifdef CONFIG_PM
5550 .driver.pm = &ice_pm_ops,
5551 #endif /* CONFIG_PM */
5552 .shutdown = ice_shutdown,
5553 .sriov_configure = ice_sriov_configure,
5554 .err_handler = &ice_pci_err_handler
5555 };
5556
5557 /**
5558 * ice_module_init - Driver registration routine
5559 *
5560 * ice_module_init is the first routine called when the driver is
5561 * loaded. All it does is register with the PCI subsystem.
5562 */
5563 static int __init ice_module_init(void)
5564 {
5565 int status;
5566
5567 pr_info("%s\n", ice_driver_string);
5568 pr_info("%s\n", ice_copyright);
5569
5570 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5571 if (!ice_wq) {
5572 pr_err("Failed to create workqueue\n");
5573 return -ENOMEM;
5574 }
5575
5576 status = pci_register_driver(&ice_driver);
5577 if (status) {
5578 pr_err("failed to register PCI driver, err %d\n", status);
5579 destroy_workqueue(ice_wq);
5580 }
5581
5582 return status;
5583 }
5584 module_init(ice_module_init);
5585
5586 /**
5587 * ice_module_exit - Driver exit cleanup routine
5588 *
5589 * ice_module_exit is called just before the driver is removed
5590 * from memory.
5591 */
5592 static void __exit ice_module_exit(void)
5593 {
5594 pci_unregister_driver(&ice_driver);
5595 destroy_workqueue(ice_wq);
5596 pr_info("module unloaded\n");
5597 }
5598 module_exit(ice_module_exit);
5599
5600 /**
5601 * ice_set_mac_address - NDO callback to set MAC address
5602 * @netdev: network interface device structure
5603 * @pi: pointer to an address structure
5604 *
5605 * Returns 0 on success, negative on failure
5606 */
5607 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5608 {
5609 struct ice_netdev_priv *np = netdev_priv(netdev);
5610 struct ice_vsi *vsi = np->vsi;
5611 struct ice_pf *pf = vsi->back;
5612 struct ice_hw *hw = &pf->hw;
5613 struct sockaddr *addr = pi;
5614 u8 old_mac[ETH_ALEN];
5615 u8 flags = 0;
5616 u8 *mac;
5617 int err;
5618
5619 mac = (u8 *)addr->sa_data;
5620
5621 if (!is_valid_ether_addr(mac))
5622 return -EADDRNOTAVAIL;
5623
5624 if (ether_addr_equal(netdev->dev_addr, mac)) {
5625 netdev_dbg(netdev, "already using mac %pM\n", mac);
5626 return 0;
5627 }
5628
5629 if (test_bit(ICE_DOWN, pf->state) ||
5630 ice_is_reset_in_progress(pf->state)) {
5631 netdev_err(netdev, "can't set mac %pM. device not ready\n",
5632 mac);
5633 return -EBUSY;
5634 }
5635
5636 if (ice_chnl_dmac_fltr_cnt(pf)) {
5637 netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
5638 mac);
5639 return -EAGAIN;
5640 }
5641
5642 netif_addr_lock_bh(netdev);
5643 ether_addr_copy(old_mac, netdev->dev_addr);
5644 /* change the netdev's MAC address */
5645 eth_hw_addr_set(netdev, mac);
5646 netif_addr_unlock_bh(netdev);
5647
5648 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5649 err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5650 if (err && err != -ENOENT) {
5651 err = -EADDRNOTAVAIL;
5652 goto err_update_filters;
5653 }
5654
5655 /* Add filter for new MAC. If filter exists, return success */
5656 err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5657 if (err == -EEXIST) {
5658 /* Although this MAC filter is already present in hardware it's
5659 * possible in some cases (e.g. bonding) that dev_addr was
5660 * modified outside of the driver and needs to be restored back
5661 * to this value.
5662 */
5663 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5664
5665 return 0;
5666 } else if (err) {
5667 /* error if the new filter addition failed */
5668 err = -EADDRNOTAVAIL;
5669 }
5670
5671 err_update_filters:
5672 if (err) {
5673 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5674 mac);
5675 netif_addr_lock_bh(netdev);
5676 eth_hw_addr_set(netdev, old_mac);
5677 netif_addr_unlock_bh(netdev);
5678 return err;
5679 }
5680
5681 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5682 netdev->dev_addr);
5683
5684 /* write new MAC address to the firmware */
5685 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5686 err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5687 if (err) {
5688 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
5689 mac, err);
5690 }
5691 return 0;
5692 }
5693
5694 /**
5695 * ice_set_rx_mode - NDO callback to set the netdev filters
5696 * @netdev: network interface device structure
5697 */
5698 static void ice_set_rx_mode(struct net_device *netdev)
5699 {
5700 struct ice_netdev_priv *np = netdev_priv(netdev);
5701 struct ice_vsi *vsi = np->vsi;
5702
5703 if (!vsi)
5704 return;
5705
5706 /* Set the flags to synchronize filters
5707 * ndo_set_rx_mode may be triggered even without a change in netdev
5708 * flags
5709 */
5710 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5711 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5712 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5713
5714 /* schedule our worker thread which will take care of
5715 * applying the new filter changes
5716 */
5717 ice_service_task_schedule(vsi->back);
5718 }
5719
5720 /**
5721 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5722 * @netdev: network interface device structure
5723 * @queue_index: Queue ID
5724 * @maxrate: maximum bandwidth in Mbps
5725 */
5726 static int
5727 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5728 {
5729 struct ice_netdev_priv *np = netdev_priv(netdev);
5730 struct ice_vsi *vsi = np->vsi;
5731 u16 q_handle;
5732 int status;
5733 u8 tc;
5734
5735 /* Validate maxrate requested is within permitted range */
5736 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5737 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5738 maxrate, queue_index);
5739 return -EINVAL;
5740 }
5741
5742 q_handle = vsi->tx_rings[queue_index]->q_handle;
5743 tc = ice_dcb_get_tc(vsi, queue_index);
5744
5745 /* Set BW back to default when the user sets maxrate to 0 */
5746 if (!maxrate)
5747 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5748 q_handle, ICE_MAX_BW);
5749 else
5750 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5751 q_handle, ICE_MAX_BW, maxrate * 1000);
5752 if (status)
5753 netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
5754 status);
5755
5756 return status;
5757 }
5758
5759 /**
5760 * ice_fdb_add - add an entry to the hardware database
5761 * @ndm: the input from the stack
5762 * @tb: pointer to array of nladdr (unused)
5763 * @dev: the net device pointer
5764 * @addr: the MAC address entry being added
5765 * @vid: VLAN ID
5766 * @flags: instructions from stack about fdb operation
5767 * @extack: netlink extended ack
5768 */
5769 static int
5770 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5771 struct net_device *dev, const unsigned char *addr, u16 vid,
5772 u16 flags, struct netlink_ext_ack __always_unused *extack)
5773 {
5774 int err;
5775
5776 if (vid) {
5777 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5778 return -EINVAL;
5779 }
5780 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5781 netdev_err(dev, "FDB only supports static addresses\n");
5782 return -EINVAL;
5783 }
5784
5785 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5786 err = dev_uc_add_excl(dev, addr);
5787 else if (is_multicast_ether_addr(addr))
5788 err = dev_mc_add_excl(dev, addr);
5789 else
5790 err = -EINVAL;
5791
5792 /* Only return duplicate errors if NLM_F_EXCL is set */
5793 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5794 err = 0;
5795
5796 return err;
5797 }
5798
5799 /**
5800 * ice_fdb_del - delete an entry from the hardware database
5801 * @ndm: the input from the stack
5802 * @tb: pointer to array of nladdr (unused)
5803 * @dev: the net device pointer
5804 * @addr: the MAC address entry being added
5805 * @vid: VLAN ID
5806 * @extack: netlink extended ack
5807 */
5808 static int
5809 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5810 struct net_device *dev, const unsigned char *addr,
5811 __always_unused u16 vid, struct netlink_ext_ack *extack)
5812 {
5813 int err;
5814
5815 if (ndm->ndm_state & NUD_PERMANENT) {
5816 netdev_err(dev, "FDB only supports static addresses\n");
5817 return -EINVAL;
5818 }
5819
5820 if (is_unicast_ether_addr(addr))
5821 err = dev_uc_del(dev, addr);
5822 else if (is_multicast_ether_addr(addr))
5823 err = dev_mc_del(dev, addr);
5824 else
5825 err = -EINVAL;
5826
5827 return err;
5828 }
5829
5830 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5831 NETIF_F_HW_VLAN_CTAG_TX | \
5832 NETIF_F_HW_VLAN_STAG_RX | \
5833 NETIF_F_HW_VLAN_STAG_TX)
5834
5835 #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
5836 NETIF_F_HW_VLAN_STAG_RX)
5837
5838 #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
5839 NETIF_F_HW_VLAN_STAG_FILTER)
5840
5841 /**
5842 * ice_fix_features - fix the netdev features flags based on device limitations
5843 * @netdev: ptr to the netdev that flags are being fixed on
5844 * @features: features that need to be checked and possibly fixed
5845 *
5846 * Make sure any fixups are made to features in this callback. This enables the
5847 * driver to not have to check unsupported configurations throughout the driver
5848 * because that's the responsibility of this callback.
5849 *
5850 * Single VLAN Mode (SVM) Supported Features:
5851 * NETIF_F_HW_VLAN_CTAG_FILTER
5852 * NETIF_F_HW_VLAN_CTAG_RX
5853 * NETIF_F_HW_VLAN_CTAG_TX
5854 *
5855 * Double VLAN Mode (DVM) Supported Features:
5856 * NETIF_F_HW_VLAN_CTAG_FILTER
5857 * NETIF_F_HW_VLAN_CTAG_RX
5858 * NETIF_F_HW_VLAN_CTAG_TX
5859 *
5860 * NETIF_F_HW_VLAN_STAG_FILTER
5861 * NETIF_F_HW_VLAN_STAG_RX
5862 * NETIF_F_HW_VLAN_STAG_TX
5863 *
5864 * Features that need fixing:
5865 * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
5866 * These are mutually exclusive as the VSI context cannot support multiple
5867 * VLAN ethertypes simultaneously for stripping and/or insertion. If this
5868 * is not done, then default to clearing the requested STAG offload
5869 * settings.
5870 *
5871 * All supported filtering has to be enabled or disabled together. For
5872 * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
5873 * together. If this is not done, then default to VLAN filtering disabled.
5874 * These are mutually exclusive as there is currently no way to
5875 * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
5876 * prune rules.
5877 */
5878 static netdev_features_t
5879 ice_fix_features(struct net_device *netdev, netdev_features_t features)
5880 {
5881 struct ice_netdev_priv *np = netdev_priv(netdev);
5882 netdev_features_t req_vlan_fltr, cur_vlan_fltr;
5883 bool cur_ctag, cur_stag, req_ctag, req_stag;
5884
5885 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
5886 cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5887 cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5888
5889 req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
5890 req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
5891 req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
5892
5893 if (req_vlan_fltr != cur_vlan_fltr) {
5894 if (ice_is_dvm_ena(&np->vsi->back->hw)) {
5895 if (req_ctag && req_stag) {
5896 features |= NETIF_VLAN_FILTERING_FEATURES;
5897 } else if (!req_ctag && !req_stag) {
5898 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5899 } else if ((!cur_ctag && req_ctag && !cur_stag) ||
5900 (!cur_stag && req_stag && !cur_ctag)) {
5901 features |= NETIF_VLAN_FILTERING_FEATURES;
5902 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
5903 } else if ((cur_ctag && !req_ctag && cur_stag) ||
5904 (cur_stag && !req_stag && cur_ctag)) {
5905 features &= ~NETIF_VLAN_FILTERING_FEATURES;
5906 netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
5907 }
5908 } else {
5909 if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
5910 netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
5911
5912 if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
5913 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5914 }
5915 }
5916
5917 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
5918 (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
5919 netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
5920 features &= ~(NETIF_F_HW_VLAN_STAG_RX |
5921 NETIF_F_HW_VLAN_STAG_TX);
5922 }
5923
5924 if (!(netdev->features & NETIF_F_RXFCS) &&
5925 (features & NETIF_F_RXFCS) &&
5926 (features & NETIF_VLAN_STRIPPING_FEATURES) &&
5927 !ice_vsi_has_non_zero_vlans(np->vsi)) {
5928 netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
5929 features &= ~NETIF_VLAN_STRIPPING_FEATURES;
5930 }
5931
5932 return features;
5933 }
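/* Worked example of the filtering fixup above with DVM enabled and both
 * filter bits currently clear: a request that sets only
 * NETIF_F_HW_VLAN_STAG_FILTER (req_stag set, req_ctag and both cur_* clear)
 * matches the third branch, so CTAG and STAG filtering are enabled together
 * and a warning is logged. Conversely, clearing only one of the two while
 * both are currently set matches the fourth branch and disables both.
 */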
5934
5935 /**
5936 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
5937 * @vsi: PF's VSI
5938 * @features: features used to determine VLAN offload settings
5939 *
5940 * First, determine the vlan_ethertype based on the VLAN offload bits in
5941 * features. Then determine if stripping and insertion should be enabled or
5942 * disabled. Finally enable or disable VLAN stripping and insertion.
5943 */
5944 static int
5945 ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
5946 {
5947 bool enable_stripping = true, enable_insertion = true;
5948 struct ice_vsi_vlan_ops *vlan_ops;
5949 int strip_err = 0, insert_err = 0;
5950 u16 vlan_ethertype = 0;
5951
5952 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5953
5954 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
5955 vlan_ethertype = ETH_P_8021AD;
5956 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
5957 vlan_ethertype = ETH_P_8021Q;
5958
5959 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
5960 enable_stripping = false;
5961 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
5962 enable_insertion = false;
5963
5964 if (enable_stripping)
5965 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
5966 else
5967 strip_err = vlan_ops->dis_stripping(vsi);
5968
5969 if (enable_insertion)
5970 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
5971 else
5972 insert_err = vlan_ops->dis_insertion(vsi);
5973
5974 if (strip_err || insert_err)
5975 return -EIO;
5976
5977 return 0;
5978 }
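/* Example of the ethertype selection above: if either NETIF_F_HW_VLAN_STAG_RX
 * or NETIF_F_HW_VLAN_STAG_TX is requested, stripping/insertion is programmed
 * for 802.1ad tags (ETH_P_8021AD, 0x88A8); otherwise, with the CTAG offloads
 * requested, 802.1Q tags (ETH_P_8021Q, 0x8100) are used. With neither group
 * set, stripping and insertion are both disabled and the ethertype stays 0.
 */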
5979
5980 /**
5981 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
5982 * @vsi: PF's VSI
5983 * @features: features used to determine VLAN filtering settings
5984 *
5985 * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
5986 * features.
5987 */
5988 static int
5989 ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
5990 {
5991 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
5992 int err = 0;
5993
5994 /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
5995 * if either bit is set
5996 */
5997 if (features &
5998 (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
5999 err = vlan_ops->ena_rx_filtering(vsi);
6000 else
6001 err = vlan_ops->dis_rx_filtering(vsi);
6002
6003 return err;
6004 }
6005
6006 /**
6007 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6008 * @netdev: ptr to the netdev being adjusted
6009 * @features: the feature set that the stack is suggesting
6010 *
6011 * Only update VLAN settings if the requested_vlan_features are different than
6012 * the current_vlan_features.
6013 */
6014 static int
6015 ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
6016 {
6017 netdev_features_t current_vlan_features, requested_vlan_features;
6018 struct ice_netdev_priv *np = netdev_priv(netdev);
6019 struct ice_vsi *vsi = np->vsi;
6020 int err;
6021
6022 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
6023 requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
6024 if (current_vlan_features ^ requested_vlan_features) {
6025 if ((features & NETIF_F_RXFCS) &&
6026 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6027 dev_err(ice_pf_to_dev(vsi->back),
6028 "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
6029 return -EIO;
6030 }
6031
6032 err = ice_set_vlan_offload_features(vsi, features);
6033 if (err)
6034 return err;
6035 }
6036
6037 current_vlan_features = netdev->features &
6038 NETIF_VLAN_FILTERING_FEATURES;
6039 requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
6040 if (current_vlan_features ^ requested_vlan_features) {
6041 err = ice_set_vlan_filtering_features(vsi, features);
6042 if (err)
6043 return err;
6044 }
6045
6046 return 0;
6047 }
6048
6049 /**
6050 * ice_set_loopback - turn on/off loopback mode on underlying PF
6051 * @vsi: ptr to VSI
6052 * @ena: flag to indicate the on/off setting
6053 */
6054 static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
6055 {
6056 bool if_running = netif_running(vsi->netdev);
6057 int ret;
6058
6059 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6060 ret = ice_down(vsi);
6061 if (ret) {
6062 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
6063 return ret;
6064 }
6065 }
6066 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
6067 if (ret)
6068 netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
6069 if (if_running)
6070 ret = ice_up(vsi);
6071
6072 return ret;
6073 }
6074
6075 /**
6076 * ice_set_features - set the netdev feature flags
6077 * @netdev: ptr to the netdev being adjusted
6078 * @features: the feature set that the stack is suggesting
6079 */
6080 static int
6081 ice_set_features(struct net_device *netdev, netdev_features_t features)
6082 {
6083 netdev_features_t changed = netdev->features ^ features;
6084 struct ice_netdev_priv *np = netdev_priv(netdev);
6085 struct ice_vsi *vsi = np->vsi;
6086 struct ice_pf *pf = vsi->back;
6087 int ret = 0;
6088
6089 /* Don't set any netdev advanced features with device in Safe Mode */
6090 if (ice_is_safe_mode(pf)) {
6091 dev_err(ice_pf_to_dev(pf),
6092 "Device is in Safe Mode - not enabling advanced netdev features\n");
6093 return ret;
6094 }
6095
6096 /* Do not change setting during reset */
6097 if (ice_is_reset_in_progress(pf->state)) {
6098 dev_err(ice_pf_to_dev(pf),
6099 "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
6100 return -EBUSY;
6101 }
6102
6103 /* Multiple features can be changed in one call so keep features in
6104 * separate if/else statements to guarantee each feature is checked
6105 */
6106 if (changed & NETIF_F_RXHASH)
6107 ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
6108
6109 ret = ice_set_vlan_features(netdev, features);
6110 if (ret)
6111 return ret;
6112
6113 /* Turn on receive of FCS aka CRC, and after setting this
6114 * flag the packet data will have the 4 byte CRC appended
6115 */
6116 if (changed & NETIF_F_RXFCS) {
6117 if ((features & NETIF_F_RXFCS) &&
6118 (features & NETIF_VLAN_STRIPPING_FEATURES)) {
6119 dev_err(ice_pf_to_dev(vsi->back),
6120 "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
6121 return -EIO;
6122 }
6123
6124 ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
6125 ret = ice_down_up(vsi);
6126 if (ret)
6127 return ret;
6128 }
6129
6130 if (changed & NETIF_F_NTUPLE) {
6131 bool ena = !!(features & NETIF_F_NTUPLE);
6132
6133 ice_vsi_manage_fdir(vsi, ena);
6134 ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
6135 }
6136
6137 /* don't turn off hw_tc_offload when ADQ is already enabled */
6138 if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
6139 dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
6140 return -EACCES;
6141 }
6142
6143 if (changed & NETIF_F_HW_TC) {
6144 bool ena = !!(features & NETIF_F_HW_TC);
6145
6146 ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
6147 clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
6148 }
6149
6150 if (changed & NETIF_F_LOOPBACK)
6151 ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
6152
6153 return ret;
6154 }
6155
6156 /**
6157 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6158 * @vsi: VSI to setup VLAN properties for
6159 */
6160 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
6161 {
6162 int err;
6163
6164 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
6165 if (err)
6166 return err;
6167
6168 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
6169 if (err)
6170 return err;
6171
6172 return ice_vsi_add_vlan_zero(vsi);
6173 }
6174
6175 /**
6176 * ice_vsi_cfg - Setup the VSI
6177 * @vsi: the VSI being configured
6178 *
6179 * Return 0 on success and negative value on error
6180 */
6181 int ice_vsi_cfg(struct ice_vsi *vsi)
6182 {
6183 int err;
6184
6185 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6186 ice_set_rx_mode(vsi->netdev);
6187
6188 err = ice_vsi_vlan_setup(vsi);
6189 if (err)
6190 return err;
6191 }
6192 ice_vsi_cfg_dcb_rings(vsi);
6193
6194 err = ice_vsi_cfg_lan_txqs(vsi);
6195 if (!err && ice_is_xdp_ena_vsi(vsi))
6196 err = ice_vsi_cfg_xdp_txqs(vsi);
6197 if (!err)
6198 err = ice_vsi_cfg_rxqs(vsi);
6199
6200 return err;
6201 }
6202
6203 /* THEORY OF MODERATION:
6204 * The ice driver hardware works differently than the hardware that DIMLIB was
6205 * originally made for. ice hardware doesn't have packet count limits that
6206 * can trigger an interrupt, but it *does* have interrupt rate limit support,
6207 * which is hard-coded to a limit of 250,000 ints/second.
6208 * If not using dynamic moderation, the INTRL value can be modified
6209 * by ethtool rx-usecs-high.
6210 */
6211 struct ice_dim {
6212 /* the throttle rate for interrupts, basically worst case delay before
6213 * an initial interrupt fires, value is stored in microseconds.
6214 */
6215 u16 itr;
6216 };
6217
6218 /* Make a different profile for Rx that doesn't allow quite so aggressive
6219 * moderation at the high end (it maxes out at 126us or about 8k interrupts a
6220 * second).
6221 */
6222 static const struct ice_dim rx_profile[] = {
6223 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6224 {8}, /* 125,000 ints/s */
6225 {16}, /* 62,500 ints/s */
6226 {62}, /* 16,129 ints/s */
6227 {126} /* 7,936 ints/s */
6228 };
6229
6230 /* The transmit profile, which has the same sorts of values
6231 * as the previous struct
6232 */
6233 static const struct ice_dim tx_profile[] = {
6234 {2}, /* 500,000 ints/s, capped at 250K by INTRL */
6235 {8}, /* 125,000 ints/s */
6236 {40}, /* 25,000 ints/s */
6237 {128}, /* 7,812 ints/s */
6238 {256} /* 3,906 ints/s */
6239 };
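/* The profile entries above are ITR values in microseconds; the approximate
 * interrupt rate follows from rate = 1,000,000 / ITR. For example, an ITR of
 * 16 us gives ~62,500 ints/s and 126 us gives ~7,936 ints/s. The 2 us entries
 * would allow 500,000 ints/s but are capped to 250,000 by the INTRL limit
 * described in the THEORY OF MODERATION comment.
 */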
6240
6241 static void ice_tx_dim_work(struct work_struct *work)
6242 {
6243 struct ice_ring_container *rc;
6244 struct dim *dim;
6245 u16 itr;
6246
6247 dim = container_of(work, struct dim, work);
6248 rc = (struct ice_ring_container *)dim->priv;
6249
6250 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
6251
6252 /* look up the values in our local table */
6253 itr = tx_profile[dim->profile_ix].itr;
6254
6255 ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
6256 ice_write_itr(rc, itr);
6257
6258 dim->state = DIM_START_MEASURE;
6259 }
6260
6261 static void ice_rx_dim_work(struct work_struct *work)
6262 {
6263 struct ice_ring_container *rc;
6264 struct dim *dim;
6265 u16 itr;
6266
6267 dim = container_of(work, struct dim, work);
6268 rc = (struct ice_ring_container *)dim->priv;
6269
6270 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
6271
6272 /* look up the values in our local table */
6273 itr = rx_profile[dim->profile_ix].itr;
6274
6275 ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
6276 ice_write_itr(rc, itr);
6277
6278 dim->state = DIM_START_MEASURE;
6279 }
6280
6281 #define ICE_DIM_DEFAULT_PROFILE_IX 1
6282
6283 /**
6284 * ice_init_moderation - set up interrupt moderation
6285 * @q_vector: the vector containing rings to be configured
6286 *
6287 * Set up interrupt moderation registers, with the intent to do the right thing
6288 * when called from reset or from probe, and whether or not dynamic moderation
6289 * is enabled or not. Take special care to write all the registers in both
6290 * dynamic moderation mode or not in order to make sure hardware is in a known
6291 * state.
6292 */
6293 static void ice_init_moderation(struct ice_q_vector *q_vector)
6294 {
6295 struct ice_ring_container *rc;
6296 bool tx_dynamic, rx_dynamic;
6297
6298 rc = &q_vector->tx;
6299 INIT_WORK(&rc->dim.work, ice_tx_dim_work);
6300 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6301 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6302 rc->dim.priv = rc;
6303 tx_dynamic = ITR_IS_DYNAMIC(rc);
6304
6305 /* set the initial TX ITR to match the above */
6306 ice_write_itr(rc, tx_dynamic ?
6307 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
6308
6309 rc = &q_vector->rx;
6310 INIT_WORK(&rc->dim.work, ice_rx_dim_work);
6311 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6312 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
6313 rc->dim.priv = rc;
6314 rx_dynamic = ITR_IS_DYNAMIC(rc);
6315
6316 /* set the initial RX ITR to match the above */
6317 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
6318 rc->itr_setting);
6319
6320 ice_set_q_vector_intrl(q_vector);
6321 }
6322
6323 /**
6324 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6325 * @vsi: the VSI being configured
6326 */
6327 static void ice_napi_enable_all(struct ice_vsi *vsi)
6328 {
6329 int q_idx;
6330
6331 if (!vsi->netdev)
6332 return;
6333
6334 ice_for_each_q_vector(vsi, q_idx) {
6335 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6336
6337 ice_init_moderation(q_vector);
6338
6339 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6340 napi_enable(&q_vector->napi);
6341 }
6342 }
6343
6344 /**
6345 * ice_up_complete - Finish the last steps of bringing up a connection
6346 * @vsi: The VSI being configured
6347 *
6348 * Return 0 on success and negative value on error
6349 */
6350 static int ice_up_complete(struct ice_vsi *vsi)
6351 {
6352 struct ice_pf *pf = vsi->back;
6353 int err;
6354
6355 ice_vsi_cfg_msix(vsi);
6356
6357 /* Enable only Rx rings, Tx rings were enabled by the FW when the
6358 * Tx queue group list was configured and the context bits were
6359 * programmed using ice_vsi_cfg_txqs
6360 */
6361 err = ice_vsi_start_all_rx_rings(vsi);
6362 if (err)
6363 return err;
6364
6365 clear_bit(ICE_VSI_DOWN, vsi->state);
6366 ice_napi_enable_all(vsi);
6367 ice_vsi_ena_irq(vsi);
6368
6369 if (vsi->port_info &&
6370 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
6371 vsi->netdev && vsi->type == ICE_VSI_PF) {
6372 ice_print_link_msg(vsi, true);
6373 netif_tx_start_all_queues(vsi->netdev);
6374 netif_carrier_on(vsi->netdev);
6375 if (!ice_is_e810(&pf->hw))
6376 ice_ptp_link_change(pf, pf->hw.pf_id, true);
6377 }
6378
6379 /* Perform an initial read of the statistics registers now to
6380 * set the baseline so counters are ready when interface is up
6381 */
6382 ice_update_eth_stats(vsi);
6383
6384 if (vsi->type == ICE_VSI_PF)
6385 ice_service_task_schedule(pf);
6386
6387 return 0;
6388 }
6389
6390 /**
6391 * ice_up - Bring the connection back up after being down
6392 * @vsi: VSI being configured
6393 */
6394 int ice_up(struct ice_vsi *vsi)
6395 {
6396 int err;
6397
6398 err = ice_vsi_cfg(vsi);
6399 if (!err)
6400 err = ice_up_complete(vsi);
6401
6402 return err;
6403 }
6404
6405 /**
6406 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6407 * @syncp: pointer to u64_stats_sync
6408 * @stats: stats that pkts and bytes count will be taken from
6409 * @pkts: packets stats counter
6410 * @bytes: bytes stats counter
6411 *
6412 * This function fetches stats from the ring considering the atomic operations
6413 * that need to be performed to read u64 values on 32-bit machines.
6414 */
6415 void
6416 ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
6417 struct ice_q_stats stats, u64 *pkts, u64 *bytes)
6418 {
6419 unsigned int start;
6420
6421 do {
6422 start = u64_stats_fetch_begin_irq(syncp);
6423 *pkts = stats.pkts;
6424 *bytes = stats.bytes;
6425 } while (u64_stats_fetch_retry_irq(syncp, start));
6426 }
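/* For reference, the writer side of this seqcount-style protection bumps the
 * counters between u64_stats_update_begin()/u64_stats_update_end() on the
 * same syncp, e.g. in the ring clean-up paths (illustrative sketch, not a
 * verbatim copy of the driver's hot path):
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.pkts += total_pkts;
 *	ring->stats.bytes += total_bytes;
 *	u64_stats_update_end(&ring->syncp);
 */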
6427
6428 /**
6429 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6430 * @vsi: the VSI to be updated
6431 * @vsi_stats: the stats struct to be updated
6432 * @rings: rings to work on
6433 * @count: number of rings
6434 */
6435 static void
6436 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
6437 struct rtnl_link_stats64 *vsi_stats,
6438 struct ice_tx_ring **rings, u16 count)
6439 {
6440 u16 i;
6441
6442 for (i = 0; i < count; i++) {
6443 struct ice_tx_ring *ring;
6444 u64 pkts = 0, bytes = 0;
6445
6446 ring = READ_ONCE(rings[i]);
6447 if (!ring)
6448 continue;
6449 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
6450 vsi_stats->tx_packets += pkts;
6451 vsi_stats->tx_bytes += bytes;
6452 vsi->tx_restart += ring->tx_stats.restart_q;
6453 vsi->tx_busy += ring->tx_stats.tx_busy;
6454 vsi->tx_linearize += ring->tx_stats.tx_linearize;
6455 }
6456 }
6457
6458 /**
6459 * ice_update_vsi_ring_stats - Update VSI stats counters
6460 * @vsi: the VSI to be updated
6461 */
6462 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
6463 {
6464 struct rtnl_link_stats64 *vsi_stats;
6465 u64 pkts, bytes;
6466 int i;
6467
6468 vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
6469 if (!vsi_stats)
6470 return;
6471
6472 /* reset non-netdev (extended) stats */
6473 vsi->tx_restart = 0;
6474 vsi->tx_busy = 0;
6475 vsi->tx_linearize = 0;
6476 vsi->rx_buf_failed = 0;
6477 vsi->rx_page_failed = 0;
6478
6479 rcu_read_lock();
6480
6481 /* update Tx rings counters */
6482 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
6483 vsi->num_txq);
6484
6485 /* update Rx rings counters */
6486 ice_for_each_rxq(vsi, i) {
6487 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
6488
6489 ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
6490 vsi_stats->rx_packets += pkts;
6491 vsi_stats->rx_bytes += bytes;
6492 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
6493 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
6494 }
6495
6496 /* update XDP Tx rings counters */
6497 if (ice_is_xdp_ena_vsi(vsi))
6498 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
6499 vsi->num_xdp_txq);
6500
6501 rcu_read_unlock();
6502
6503 vsi->net_stats.tx_packets = vsi_stats->tx_packets;
6504 vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
6505 vsi->net_stats.rx_packets = vsi_stats->rx_packets;
6506 vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
6507
6508 kfree(vsi_stats);
6509 }
6510
6511 /**
6512 * ice_update_vsi_stats - Update VSI stats counters
6513 * @vsi: the VSI to be updated
6514 */
6515 void ice_update_vsi_stats(struct ice_vsi *vsi)
6516 {
6517 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
6518 struct ice_eth_stats *cur_es = &vsi->eth_stats;
6519 struct ice_pf *pf = vsi->back;
6520
6521 if (test_bit(ICE_VSI_DOWN, vsi->state) ||
6522 test_bit(ICE_CFG_BUSY, pf->state))
6523 return;
6524
6525 /* get stats as recorded by Tx/Rx rings */
6526 ice_update_vsi_ring_stats(vsi);
6527
6528 /* get VSI stats as recorded by the hardware */
6529 ice_update_eth_stats(vsi);
6530
6531 cur_ns->tx_errors = cur_es->tx_errors;
6532 cur_ns->rx_dropped = cur_es->rx_discards;
6533 cur_ns->tx_dropped = cur_es->tx_discards;
6534 cur_ns->multicast = cur_es->rx_multicast;
6535
6536 /* update some more netdev stats if this is main VSI */
6537 if (vsi->type == ICE_VSI_PF) {
6538 cur_ns->rx_crc_errors = pf->stats.crc_errors;
6539 cur_ns->rx_errors = pf->stats.crc_errors +
6540 pf->stats.illegal_bytes +
6541 pf->stats.rx_len_errors +
6542 pf->stats.rx_undersize +
6543 pf->hw_csum_rx_error +
6544 pf->stats.rx_jabber +
6545 pf->stats.rx_fragments +
6546 pf->stats.rx_oversize;
6547 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
6548 /* record drops from the port level */
6549 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
6550 }
6551 }
6552
6553 /**
6554 * ice_update_pf_stats - Update PF port stats counters
6555 * @pf: PF whose stats need to be updated
6556 */
6557 void ice_update_pf_stats(struct ice_pf *pf)
6558 {
6559 struct ice_hw_port_stats *prev_ps, *cur_ps;
6560 struct ice_hw *hw = &pf->hw;
6561 u16 fd_ctr_base;
6562 u8 port;
6563
6564 port = hw->port_info->lport;
6565 prev_ps = &pf->stats_prev;
6566 cur_ps = &pf->stats;
6567
6568 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
6569 &prev_ps->eth.rx_bytes,
6570 &cur_ps->eth.rx_bytes);
6571
6572 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
6573 &prev_ps->eth.rx_unicast,
6574 &cur_ps->eth.rx_unicast);
6575
6576 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
6577 &prev_ps->eth.rx_multicast,
6578 &cur_ps->eth.rx_multicast);
6579
6580 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
6581 &prev_ps->eth.rx_broadcast,
6582 &cur_ps->eth.rx_broadcast);
6583
6584 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
6585 &prev_ps->eth.rx_discards,
6586 &cur_ps->eth.rx_discards);
6587
6588 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
6589 &prev_ps->eth.tx_bytes,
6590 &cur_ps->eth.tx_bytes);
6591
6592 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
6593 &prev_ps->eth.tx_unicast,
6594 &cur_ps->eth.tx_unicast);
6595
6596 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
6597 &prev_ps->eth.tx_multicast,
6598 &cur_ps->eth.tx_multicast);
6599
6600 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
6601 &prev_ps->eth.tx_broadcast,
6602 &cur_ps->eth.tx_broadcast);
6603
6604 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
6605 &prev_ps->tx_dropped_link_down,
6606 &cur_ps->tx_dropped_link_down);
6607
6608 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
6609 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
6610
6611 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
6612 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
6613
6614 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
6615 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
6616
6617 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
6618 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
6619
6620 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
6621 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
6622
6623 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
6624 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
6625
6626 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
6627 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
6628
6629 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
6630 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
6631
6632 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
6633 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
6634
6635 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
6636 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
6637
6638 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
6639 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
6640
6641 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
6642 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
6643
6644 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
6645 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
6646
6647 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
6648 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
6649
6650 fd_ctr_base = hw->fd_ctr_base;
6651
6652 ice_stat_update40(hw,
6653 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
6654 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
6655 &cur_ps->fd_sb_match);
6656 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
6657 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
6658
6659 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
6660 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
6661
6662 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
6663 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
6664
6665 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
6666 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
6667
6668 ice_update_dcb_stats(pf);
6669
6670 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
6671 &prev_ps->crc_errors, &cur_ps->crc_errors);
6672
6673 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
6674 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
6675
6676 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
6677 &prev_ps->mac_local_faults,
6678 &cur_ps->mac_local_faults);
6679
6680 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
6681 &prev_ps->mac_remote_faults,
6682 &cur_ps->mac_remote_faults);
6683
6684 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
6685 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
6686
6687 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
6688 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
6689
6690 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
6691 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
6692
6693 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
6694 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
6695
6696 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
6697 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
6698
6699 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
6700
6701 pf->stat_prev_loaded = true;
6702 }
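/* Background on the ice_stat_update40/32 helpers used above: the port-level
 * registers are free-running 40-bit (or 32-bit) counters, so each call
 * accumulates the wrap-safe delta since the previous snapshot. Roughly
 * (illustrative sketch only, not the driver's actual helper):
 *
 *	new = rd64(hw, reg) & (BIT_ULL(40) - 1);
 *	if (prev_stat_loaded)
 *		*cur_stat += (new - *prev_stat) & (BIT_ULL(40) - 1);
 *	*prev_stat = new;
 */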
6703
6704 /**
6705 * ice_get_stats64 - get statistics for network device structure
6706 * @netdev: network interface device structure
6707 * @stats: main device statistics structure
6708 */
6709 static
6710 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
6711 {
6712 struct ice_netdev_priv *np = netdev_priv(netdev);
6713 struct rtnl_link_stats64 *vsi_stats;
6714 struct ice_vsi *vsi = np->vsi;
6715
6716 vsi_stats = &vsi->net_stats;
6717
6718 if (!vsi->num_txq || !vsi->num_rxq)
6719 return;
6720
6721 /* netdev packet/byte stats come from ring counters. These are obtained
6722 * by summing up ring counters (done by ice_update_vsi_ring_stats).
6723 * But, only call the update routine and read the registers if VSI is
6724 * not down.
6725 */
6726 if (!test_bit(ICE_VSI_DOWN, vsi->state))
6727 ice_update_vsi_ring_stats(vsi);
6728 stats->tx_packets = vsi_stats->tx_packets;
6729 stats->tx_bytes = vsi_stats->tx_bytes;
6730 stats->rx_packets = vsi_stats->rx_packets;
6731 stats->rx_bytes = vsi_stats->rx_bytes;
6732
6733 /* The rest of the stats can be read from the hardware but instead we
6734 * just return values that the watchdog task has already obtained from
6735 * the hardware.
6736 */
6737 stats->multicast = vsi_stats->multicast;
6738 stats->tx_errors = vsi_stats->tx_errors;
6739 stats->tx_dropped = vsi_stats->tx_dropped;
6740 stats->rx_errors = vsi_stats->rx_errors;
6741 stats->rx_dropped = vsi_stats->rx_dropped;
6742 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6743 stats->rx_length_errors = vsi_stats->rx_length_errors;
6744 }
6745
6746 /**
6747 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6748 * @vsi: VSI having NAPI disabled
6749 */
6750 static void ice_napi_disable_all(struct ice_vsi *vsi)
6751 {
6752 int q_idx;
6753
6754 if (!vsi->netdev)
6755 return;
6756
6757 ice_for_each_q_vector(vsi, q_idx) {
6758 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6759
6760 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
6761 napi_disable(&q_vector->napi);
6762
6763 cancel_work_sync(&q_vector->tx.dim.work);
6764 cancel_work_sync(&q_vector->rx.dim.work);
6765 }
6766 }
6767
6768 /**
6769 * ice_down - Shutdown the connection
6770 * @vsi: The VSI being stopped
6771 *
6772 * Caller of this function is expected to set the vsi->state ICE_DOWN bit
6773 */
6774 int ice_down(struct ice_vsi *vsi)
6775 {
6776 int i, tx_err, rx_err, vlan_err = 0;
6777
6778 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
6779
6780 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
6781 vlan_err = ice_vsi_del_vlan_zero(vsi);
6782 if (!ice_is_e810(&vsi->back->hw))
6783 ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
6784 netif_carrier_off(vsi->netdev);
6785 netif_tx_disable(vsi->netdev);
6786 } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
6787 ice_eswitch_stop_all_tx_queues(vsi->back);
6788 }
6789
6790 ice_vsi_dis_irq(vsi);
6791
6792 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6793 if (tx_err)
6794 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6795 vsi->vsi_num, tx_err);
6796 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6797 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6798 if (tx_err)
6799 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6800 vsi->vsi_num, tx_err);
6801 }
6802
6803 rx_err = ice_vsi_stop_all_rx_rings(vsi);
6804 if (rx_err)
6805 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6806 vsi->vsi_num, rx_err);
6807
6808 ice_napi_disable_all(vsi);
6809
6810 ice_for_each_txq(vsi, i)
6811 ice_clean_tx_ring(vsi->tx_rings[i]);
6812
6813 if (ice_is_xdp_ena_vsi(vsi))
6814 ice_for_each_xdp_txq(vsi, i)
6815 ice_clean_tx_ring(vsi->xdp_rings[i]);
6816
6817 ice_for_each_rxq(vsi, i)
6818 ice_clean_rx_ring(vsi->rx_rings[i]);
6819
6820 if (tx_err || rx_err || vlan_err) {
6821 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6822 vsi->vsi_num, vsi->vsw->sw_id);
6823 return -EIO;
6824 }
6825
6826 return 0;
6827 }
6828
6829 /**
6830 * ice_down_up - shutdown the VSI connection and bring it up
6831 * @vsi: the VSI to be reconnected
6832 */
6833 int ice_down_up(struct ice_vsi *vsi)
6834 {
6835 int ret;
6836
6837 /* if DOWN already set, nothing to do */
6838 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
6839 return 0;
6840
6841 ret = ice_down(vsi);
6842 if (ret)
6843 return ret;
6844
6845 ret = ice_up(vsi);
6846 if (ret) {
6847 netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
6848 return ret;
6849 }
6850
6851 return 0;
6852 }
6853
6854 /**
6855 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6856 * @vsi: VSI having resources allocated
6857 *
6858 * Return 0 on success, negative on failure
6859 */
6860 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6861 {
6862 int i, err = 0;
6863
6864 if (!vsi->num_txq) {
6865 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6866 vsi->vsi_num);
6867 return -EINVAL;
6868 }
6869
6870 ice_for_each_txq(vsi, i) {
6871 struct ice_tx_ring *ring = vsi->tx_rings[i];
6872
6873 if (!ring)
6874 return -EINVAL;
6875
6876 if (vsi->netdev)
6877 ring->netdev = vsi->netdev;
6878 err = ice_setup_tx_ring(ring);
6879 if (err)
6880 break;
6881 }
6882
6883 return err;
6884 }
6885
6886 /**
6887 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6888 * @vsi: VSI having resources allocated
6889 *
6890 * Return 0 on success, negative on failure
6891 */
6892 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6893 {
6894 int i, err = 0;
6895
6896 if (!vsi->num_rxq) {
6897 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6898 vsi->vsi_num);
6899 return -EINVAL;
6900 }
6901
6902 ice_for_each_rxq(vsi, i) {
6903 struct ice_rx_ring *ring = vsi->rx_rings[i];
6904
6905 if (!ring)
6906 return -EINVAL;
6907
6908 if (vsi->netdev)
6909 ring->netdev = vsi->netdev;
6910 err = ice_setup_rx_ring(ring);
6911 if (err)
6912 break;
6913 }
6914
6915 return err;
6916 }
6917
6918 /**
6919 * ice_vsi_open_ctrl - open control VSI for use
6920 * @vsi: the VSI to open
6921 *
6922 * Initialization of the Control VSI
6923 *
6924 * Returns 0 on success, negative value on error
6925 */
6926 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6927 {
6928 char int_name[ICE_INT_NAME_STR_LEN];
6929 struct ice_pf *pf = vsi->back;
6930 struct device *dev;
6931 int err;
6932
6933 dev = ice_pf_to_dev(pf);
6934 /* allocate descriptors */
6935 err = ice_vsi_setup_tx_rings(vsi);
6936 if (err)
6937 goto err_setup_tx;
6938
6939 err = ice_vsi_setup_rx_rings(vsi);
6940 if (err)
6941 goto err_setup_rx;
6942
6943 err = ice_vsi_cfg(vsi);
6944 if (err)
6945 goto err_setup_rx;
6946
6947 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6948 dev_driver_string(dev), dev_name(dev));
6949 err = ice_vsi_req_irq_msix(vsi, int_name);
6950 if (err)
6951 goto err_setup_rx;
6952
6953 ice_vsi_cfg_msix(vsi);
6954
6955 err = ice_vsi_start_all_rx_rings(vsi);
6956 if (err)
6957 goto err_up_complete;
6958
6959 clear_bit(ICE_VSI_DOWN, vsi->state);
6960 ice_vsi_ena_irq(vsi);
6961
6962 return 0;
6963
6964 err_up_complete:
6965 ice_down(vsi);
6966 err_setup_rx:
6967 ice_vsi_free_rx_rings(vsi);
6968 err_setup_tx:
6969 ice_vsi_free_tx_rings(vsi);
6970
6971 return err;
6972 }
6973
6974 /**
6975 * ice_vsi_open - Called when a network interface is made active
6976 * @vsi: the VSI to open
6977 *
6978 * Initialization of the VSI
6979 *
6980 * Returns 0 on success, negative value on error
6981 */
6982 int ice_vsi_open(struct ice_vsi *vsi)
6983 {
6984 char int_name[ICE_INT_NAME_STR_LEN];
6985 struct ice_pf *pf = vsi->back;
6986 int err;
6987
6988 /* allocate descriptors */
6989 err = ice_vsi_setup_tx_rings(vsi);
6990 if (err)
6991 goto err_setup_tx;
6992
6993 err = ice_vsi_setup_rx_rings(vsi);
6994 if (err)
6995 goto err_setup_rx;
6996
6997 err = ice_vsi_cfg(vsi);
6998 if (err)
6999 goto err_setup_rx;
7000
7001 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7002 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
7003 err = ice_vsi_req_irq_msix(vsi, int_name);
7004 if (err)
7005 goto err_setup_rx;
7006
7007 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
7008
7009 if (vsi->type == ICE_VSI_PF) {
7010 /* Notify the stack of the actual queue counts. */
7011 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
7012 if (err)
7013 goto err_set_qs;
7014
7015 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
7016 if (err)
7017 goto err_set_qs;
7018 }
7019
7020 err = ice_up_complete(vsi);
7021 if (err)
7022 goto err_up_complete;
7023
7024 return 0;
7025
7026 err_up_complete:
7027 ice_down(vsi);
7028 err_set_qs:
7029 ice_vsi_free_irq(vsi);
7030 err_setup_rx:
7031 ice_vsi_free_rx_rings(vsi);
7032 err_setup_tx:
7033 ice_vsi_free_tx_rings(vsi);
7034
7035 return err;
7036 }
7037
7038 /**
7039 * ice_vsi_release_all - Delete all VSIs
7040 * @pf: PF from which all VSIs are being removed
7041 */
7042 static void ice_vsi_release_all(struct ice_pf *pf)
7043 {
7044 int err, i;
7045
7046 if (!pf->vsi)
7047 return;
7048
7049 ice_for_each_vsi(pf, i) {
7050 if (!pf->vsi[i])
7051 continue;
7052
7053 if (pf->vsi[i]->type == ICE_VSI_CHNL)
7054 continue;
7055
7056 err = ice_vsi_release(pf->vsi[i]);
7057 if (err)
7058 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
7059 i, err, pf->vsi[i]->vsi_num);
7060 }
7061 }
7062
7063 /**
7064 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7065 * @pf: pointer to the PF instance
7066 * @type: VSI type to rebuild
7067 *
7068 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7069 */
7070 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
7071 {
7072 struct device *dev = ice_pf_to_dev(pf);
7073 int i, err;
7074
7075 ice_for_each_vsi(pf, i) {
7076 struct ice_vsi *vsi = pf->vsi[i];
7077
7078 if (!vsi || vsi->type != type)
7079 continue;
7080
7081 /* rebuild the VSI */
7082 err = ice_vsi_rebuild(vsi, true);
7083 if (err) {
7084 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
7085 err, vsi->idx, ice_vsi_type_str(type));
7086 return err;
7087 }
7088
7089 /* replay filters for the VSI */
7090 err = ice_replay_vsi(&pf->hw, vsi->idx);
7091 if (err) {
7092 dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
7093 err, vsi->idx, ice_vsi_type_str(type));
7094 return err;
7095 }
7096
7097 /* Re-map HW VSI number, using VSI handle that has been
7098 * previously validated in ice_replay_vsi() call above
7099 */
7100 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
7101
7102 /* enable the VSI */
7103 err = ice_ena_vsi(vsi, false);
7104 if (err) {
7105 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
7106 err, vsi->idx, ice_vsi_type_str(type));
7107 return err;
7108 }
7109
7110 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
7111 ice_vsi_type_str(type));
7112 }
7113
7114 return 0;
7115 }
7116
7117 /**
7118 * ice_update_pf_netdev_link - Update PF netdev link status
7119 * @pf: pointer to the PF instance
7120 */
7121 static void ice_update_pf_netdev_link(struct ice_pf *pf)
7122 {
7123 bool link_up;
7124 int i;
7125
7126 ice_for_each_vsi(pf, i) {
7127 struct ice_vsi *vsi = pf->vsi[i];
7128
7129 if (!vsi || vsi->type != ICE_VSI_PF)
7130 return;
7131
7132 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
7133 if (link_up) {
7134 netif_carrier_on(pf->vsi[i]->netdev);
7135 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
7136 } else {
7137 netif_carrier_off(pf->vsi[i]->netdev);
7138 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
7139 }
7140 }
7141 }
7142
7143 /**
7144 * ice_rebuild - rebuild after reset
7145 * @pf: PF to rebuild
7146 * @reset_type: type of reset
7147 *
7148 * Do not rebuild VF VSIs in this flow because that is already handled via
7149 * ice_reset_all_vfs(). This is because the requirements for resetting a VF
7150 * after a PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we
7151 * don't want to reset/rebuild all the VF VSIs twice.
7152 */
7153 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
7154 {
7155 struct device *dev = ice_pf_to_dev(pf);
7156 struct ice_hw *hw = &pf->hw;
7157 bool dvm;
7158 int err;
7159
7160 if (test_bit(ICE_DOWN, pf->state))
7161 goto clear_recovery;
7162
7163 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
7164
7165 #define ICE_EMP_RESET_SLEEP_MS 5000
7166 if (reset_type == ICE_RESET_EMPR) {
7167 /* If an EMP reset has occurred, any previously pending flash
7168 * update will have completed. We no longer know whether or
7169 * not the NVM update EMP reset is restricted.
7170 */
7171 pf->fw_emp_reset_disabled = false;
7172
7173 msleep(ICE_EMP_RESET_SLEEP_MS);
7174 }
7175
7176 err = ice_init_all_ctrlq(hw);
7177 if (err) {
7178 dev_err(dev, "control queues init failed %d\n", err);
7179 goto err_init_ctrlq;
7180 }
7181
7182 /* if DDP was previously loaded successfully */
7183 if (!ice_is_safe_mode(pf)) {
7184 /* reload the SW DB of filter tables */
7185 if (reset_type == ICE_RESET_PFR)
7186 ice_fill_blk_tbls(hw);
7187 else
7188 /* Reload DDP Package after CORER/GLOBR reset */
7189 ice_load_pkg(NULL, pf);
7190 }
7191
7192 err = ice_clear_pf_cfg(hw);
7193 if (err) {
7194 dev_err(dev, "clear PF configuration failed %d\n", err);
7195 goto err_init_ctrlq;
7196 }
7197
7198 ice_clear_pxe_mode(hw);
7199
7200 err = ice_init_nvm(hw);
7201 if (err) {
7202 dev_err(dev, "ice_init_nvm failed %d\n", err);
7203 goto err_init_ctrlq;
7204 }
7205
7206 err = ice_get_caps(hw);
7207 if (err) {
7208 dev_err(dev, "ice_get_caps failed %d\n", err);
7209 goto err_init_ctrlq;
7210 }
7211
7212 err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
7213 if (err) {
7214 dev_err(dev, "set_mac_cfg failed %d\n", err);
7215 goto err_init_ctrlq;
7216 }
7217
7218 dvm = ice_is_dvm_ena(hw);
7219
7220 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
7221 if (err)
7222 goto err_init_ctrlq;
7223
7224 err = ice_sched_init_port(hw->port_info);
7225 if (err)
7226 goto err_sched_init_port;
7227
7228 /* start misc vector */
7229 err = ice_req_irq_msix_misc(pf);
7230 if (err) {
7231 dev_err(dev, "misc vector setup failed: %d\n", err);
7232 goto err_sched_init_port;
7233 }
7234
7235 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7236 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
7237 if (!rd32(hw, PFQF_FD_SIZE)) {
7238 u16 unused, guar, b_effort;
7239
7240 guar = hw->func_caps.fd_fltr_guar;
7241 b_effort = hw->func_caps.fd_fltr_best_effort;
7242
7243 /* force guaranteed filter pool for PF */
7244 ice_alloc_fd_guar_item(hw, &unused, guar);
7245 /* force shared filter pool for PF */
7246 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
7247 }
7248 }
7249
7250 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
7251 ice_dcb_rebuild(pf);
7252
7253 /* If the PF previously had PTP enabled, PTP init needs to happen before
7254 * the VSI rebuild; otherwise the PTP link status events will
7255 * fail.
7256 */
7257 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7258 ice_ptp_reset(pf);
7259
7260 if (ice_is_feature_supported(pf, ICE_F_GNSS))
7261 ice_gnss_init(pf);
7262
7263 /* rebuild PF VSI */
7264 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
7265 if (err) {
7266 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
7267 goto err_vsi_rebuild;
7268 }
7269
7270 /* configure PTP timestamping after VSI rebuild */
7271 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
7272 ice_ptp_cfg_timestamp(pf, false);
7273
7274 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
7275 if (err) {
7276 dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
7277 goto err_vsi_rebuild;
7278 }
7279
7280 if (reset_type == ICE_RESET_PFR) {
7281 err = ice_rebuild_channels(pf);
7282 if (err) {
7283 dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
7284 err);
7285 goto err_vsi_rebuild;
7286 }
7287 }
7288
7289 /* If Flow Director is active */
7290 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
7291 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
7292 if (err) {
7293 dev_err(dev, "control VSI rebuild failed: %d\n", err);
7294 goto err_vsi_rebuild;
7295 }
7296
7297 /* replay HW Flow Director recipes */
7298 if (hw->fdir_prof)
7299 ice_fdir_replay_flows(hw);
7300
7301 /* replay Flow Director filters */
7302 ice_fdir_replay_fltrs(pf);
7303
7304 ice_rebuild_arfs(pf);
7305 }
7306
7307 ice_update_pf_netdev_link(pf);
7308
7309 /* tell the firmware we are up */
7310 err = ice_send_version(pf);
7311 if (err) {
7312 dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
7313 err);
7314 goto err_vsi_rebuild;
7315 }
7316
7317 ice_replay_post(hw);
7318
7319 /* if we get here, reset flow is successful */
7320 clear_bit(ICE_RESET_FAILED, pf->state);
7321
7322 ice_plug_aux_dev(pf);
7323 return;
7324
7325 err_vsi_rebuild:
7326 err_sched_init_port:
7327 ice_sched_cleanup_all(hw);
7328 err_init_ctrlq:
7329 ice_shutdown_all_ctrlq(hw);
7330 set_bit(ICE_RESET_FAILED, pf->state);
7331 clear_recovery:
7332 /* set this bit in PF state to control service task scheduling */
7333 set_bit(ICE_NEEDS_RESTART, pf->state);
7334 dev_err(dev, "Rebuild failed, unload and reload driver\n");
7335 }
7336
7337 /**
7338 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
7339 * @vsi: Pointer to VSI structure
7340 */
7341 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
7342 {
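/* Note: with 8 KB+ pages or legacy-rx, 2 KB Rx buffers are used, so the XDP
 * headroom has to come out of the buffer itself; otherwise 3 KB Rx buffers
 * are used and the whole buffer is available for the frame.
 */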
7343 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
7344 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
7345 else
7346 return ICE_RXBUF_3072;
7347 }
7348
7349 /**
7350 * ice_change_mtu - NDO callback to change the MTU
7351 * @netdev: network interface device structure
7352 * @new_mtu: new value for maximum frame size
7353 *
7354 * Returns 0 on success, negative on failure
7355 */
7356 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
7357 {
7358 struct ice_netdev_priv *np = netdev_priv(netdev);
7359 struct ice_vsi *vsi = np->vsi;
7360 struct ice_pf *pf = vsi->back;
7361 u8 count = 0;
7362 int err = 0;
7363
7364 if (new_mtu == (int)netdev->mtu) {
7365 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
7366 return 0;
7367 }
7368
7369 if (ice_is_xdp_ena_vsi(vsi)) {
7370 int frame_size = ice_max_xdp_frame_size(vsi);
7371
7372 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
7373 netdev_err(netdev, "max MTU for XDP usage is %d\n",
7374 frame_size - ICE_ETH_PKT_HDR_PAD);
7375 return -EINVAL;
7376 }
7377 }
7378
7379 /* if a reset is in progress, wait for some time for it to complete */
7380 do {
7381 if (ice_is_reset_in_progress(pf->state)) {
7382 count++;
7383 usleep_range(1000, 2000);
7384 } else {
7385 break;
7386 }
7387
7388 } while (count < 100);
7389
7390 if (count == 100) {
7391 netdev_err(netdev, "can't change MTU. Device is busy\n");
7392 return -EBUSY;
7393 }
7394
7395 netdev->mtu = (unsigned int)new_mtu;
7396
7397 /* if VSI is up, bring it down and then back up */
7398 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
7399 err = ice_down(vsi);
7400 if (err) {
7401 netdev_err(netdev, "change MTU if_down err %d\n", err);
7402 return err;
7403 }
7404
7405 err = ice_up(vsi);
7406 if (err) {
7407 netdev_err(netdev, "change MTU if_up err %d\n", err);
7408 return err;
7409 }
7410 }
7411
7412 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
7413 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
7414
7415 return err;
7416 }
7417
7418 /**
7419 * ice_eth_ioctl - Access the hwtstamp interface
7420 * @netdev: network interface device structure
7421 * @ifr: interface request data
7422 * @cmd: ioctl command
7423 */
7424 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7425 {
7426 struct ice_netdev_priv *np = netdev_priv(netdev);
7427 struct ice_pf *pf = np->vsi->back;
7428
7429 switch (cmd) {
7430 case SIOCGHWTSTAMP:
7431 return ice_ptp_get_ts_config(pf, ifr);
7432 case SIOCSHWTSTAMP:
7433 return ice_ptp_set_ts_config(pf, ifr);
7434 default:
7435 return -EOPNOTSUPP;
7436 }
7437 }
7438
7439 /**
7440 * ice_aq_str - convert AQ err code to a string
7441 * @aq_err: the AQ error code to convert
7442 */
7443 const char *ice_aq_str(enum ice_aq_err aq_err)
7444 {
7445 switch (aq_err) {
7446 case ICE_AQ_RC_OK:
7447 return "OK";
7448 case ICE_AQ_RC_EPERM:
7449 return "ICE_AQ_RC_EPERM";
7450 case ICE_AQ_RC_ENOENT:
7451 return "ICE_AQ_RC_ENOENT";
7452 case ICE_AQ_RC_ENOMEM:
7453 return "ICE_AQ_RC_ENOMEM";
7454 case ICE_AQ_RC_EBUSY:
7455 return "ICE_AQ_RC_EBUSY";
7456 case ICE_AQ_RC_EEXIST:
7457 return "ICE_AQ_RC_EEXIST";
7458 case ICE_AQ_RC_EINVAL:
7459 return "ICE_AQ_RC_EINVAL";
7460 case ICE_AQ_RC_ENOSPC:
7461 return "ICE_AQ_RC_ENOSPC";
7462 case ICE_AQ_RC_ENOSYS:
7463 return "ICE_AQ_RC_ENOSYS";
7464 case ICE_AQ_RC_EMODE:
7465 return "ICE_AQ_RC_EMODE";
7466 case ICE_AQ_RC_ENOSEC:
7467 return "ICE_AQ_RC_ENOSEC";
7468 case ICE_AQ_RC_EBADSIG:
7469 return "ICE_AQ_RC_EBADSIG";
7470 case ICE_AQ_RC_ESVN:
7471 return "ICE_AQ_RC_ESVN";
7472 case ICE_AQ_RC_EBADMAN:
7473 return "ICE_AQ_RC_EBADMAN";
7474 case ICE_AQ_RC_EBADBUF:
7475 return "ICE_AQ_RC_EBADBUF";
7476 }
7477
7478 return "ICE_AQ_RC_UNKNOWN";
7479 }
7480
7481 /**
7482 * ice_set_rss_lut - Set RSS LUT
7483 * @vsi: Pointer to VSI structure
7484 * @lut: Lookup table
7485 * @lut_size: Lookup table size
7486 *
7487 * Returns 0 on success, negative on failure
7488 */
7489 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7490 {
7491 struct ice_aq_get_set_rss_lut_params params = {};
7492 struct ice_hw *hw = &vsi->back->hw;
7493 int status;
7494
7495 if (!lut)
7496 return -EINVAL;
7497
7498 params.vsi_handle = vsi->idx;
7499 params.lut_size = lut_size;
7500 params.lut_type = vsi->rss_lut_type;
7501 params.lut = lut;
7502
7503 status = ice_aq_set_rss_lut(hw, &params);
7504 if (status)
7505 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
7506 status, ice_aq_str(hw->adminq.sq_last_status));
7507
7508 return status;
7509 }
7510
7511 /**
7512 * ice_set_rss_key - Set RSS key
7513 * @vsi: Pointer to the VSI structure
7514 * @seed: RSS hash seed
7515 *
7516 * Returns 0 on success, negative on failure
7517 */
7518 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
7519 {
7520 struct ice_hw *hw = &vsi->back->hw;
7521 int status;
7522
7523 if (!seed)
7524 return -EINVAL;
7525
7526 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7527 if (status)
7528 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
7529 status, ice_aq_str(hw->adminq.sq_last_status));
7530
7531 return status;
7532 }
7533
7534 /**
7535 * ice_get_rss_lut - Get RSS LUT
7536 * @vsi: Pointer to VSI structure
7537 * @lut: Buffer to store the lookup table entries
7538 * @lut_size: Size of buffer to store the lookup table entries
7539 *
7540 * Returns 0 on success, negative on failure
7541 */
7542 int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
7543 {
7544 struct ice_aq_get_set_rss_lut_params params = {};
7545 struct ice_hw *hw = &vsi->back->hw;
7546 int status;
7547
7548 if (!lut)
7549 return -EINVAL;
7550
7551 params.vsi_handle = vsi->idx;
7552 params.lut_size = lut_size;
7553 params.lut_type = vsi->rss_lut_type;
7554 params.lut = lut;
7555
7556 status = ice_aq_get_rss_lut(hw, &params);
7557 if (status)
7558 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
7559 status, ice_aq_str(hw->adminq.sq_last_status));
7560
7561 return status;
7562 }
7563
7564 /**
7565 * ice_get_rss_key - Get RSS key
7566 * @vsi: Pointer to VSI structure
7567 * @seed: Buffer to store the key in
7568 *
7569 * Returns 0 on success, negative on failure
7570 */
7571 int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
7572 {
7573 struct ice_hw *hw = &vsi->back->hw;
7574 int status;
7575
7576 if (!seed)
7577 return -EINVAL;
7578
7579 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
7580 if (status)
7581 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
7582 status, ice_aq_str(hw->adminq.sq_last_status));
7583
7584 return status;
7585 }
7586
7587 /**
7588 * ice_bridge_getlink - Get the hardware bridge mode
7589 * @skb: skb buff
7590 * @pid: process ID
7591 * @seq: RTNL message seq
7592 * @dev: the netdev being configured
7593 * @filter_mask: filter mask passed in
7594 * @nlflags: netlink flags passed in
7595 *
7596 * Return the bridge mode (VEB/VEPA)
7597 */
7598 static int
7599 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7600 struct net_device *dev, u32 filter_mask, int nlflags)
7601 {
7602 struct ice_netdev_priv *np = netdev_priv(dev);
7603 struct ice_vsi *vsi = np->vsi;
7604 struct ice_pf *pf = vsi->back;
7605 u16 bmode;
7606
7607 bmode = pf->first_sw->bridge_mode;
7608
7609 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
7610 filter_mask, NULL);
7611 }
7612
7613 /**
7614 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
7615 * @vsi: Pointer to VSI structure
7616 * @bmode: Hardware bridge mode (VEB/VEPA)
7617 *
7618 * Returns 0 on success, negative on failure
7619 */
7620 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
7621 {
7622 struct ice_aqc_vsi_props *vsi_props;
7623 struct ice_hw *hw = &vsi->back->hw;
7624 struct ice_vsi_ctx *ctxt;
7625 int ret;
7626
7627 vsi_props = &vsi->info;
7628
7629 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
7630 if (!ctxt)
7631 return -ENOMEM;
7632
7633 ctxt->info = vsi->info;
7634
7635 if (bmode == BRIDGE_MODE_VEB)
7636 /* change from VEPA to VEB mode */
7637 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
7638 else
7639 /* change from VEB to VEPA mode */
7640 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
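/* only the switch section of the VSI context is being modified, so mark
 * just that section as valid for the update
 */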
7641 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
7642
7643 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
7644 if (ret) {
7645 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
7646 bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
7647 goto out;
7648 }
7649 /* Update sw flags for bookkeeping */
7650 vsi_props->sw_flags = ctxt->info.sw_flags;
7651
7652 out:
7653 kfree(ctxt);
7654 return ret;
7655 }
7656
7657 /**
7658 * ice_bridge_setlink - Set the hardware bridge mode
7659 * @dev: the netdev being configured
7660 * @nlh: RTNL message
7661 * @flags: bridge setlink flags
7662 * @extack: netlink extended ack
7663 *
7664 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
7665 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
7666 * not already set) for all VSIs connected to this switch, and also updates the
7667 * unicast switch filter rules for the corresponding switch of the netdev.
7668 */
7669 static int
7670 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7671 u16 __always_unused flags,
7672 struct netlink_ext_ack __always_unused *extack)
7673 {
7674 struct ice_netdev_priv *np = netdev_priv(dev);
7675 struct ice_pf *pf = np->vsi->back;
7676 struct nlattr *attr, *br_spec;
7677 struct ice_hw *hw = &pf->hw;
7678 struct ice_sw *pf_sw;
7679 int rem, v, err = 0;
7680
7681 pf_sw = pf->first_sw;
7682 /* find the attribute in the netlink message */
7683 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7684
7685 nla_for_each_nested(attr, br_spec, rem) {
7686 __u16 mode;
7687
7688 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7689 continue;
7690 mode = nla_get_u16(attr);
7691 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
7692 return -EINVAL;
7693 /* Continue if bridge mode is not being flipped */
7694 if (mode == pf_sw->bridge_mode)
7695 continue;
7696 /* Iterates through the PF VSI list and update the loopback
7697 * mode of the VSI
7698 */
7699 ice_for_each_vsi(pf, v) {
7700 if (!pf->vsi[v])
7701 continue;
7702 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
7703 if (err)
7704 return err;
7705 }
7706
7707 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
7708 /* Update the unicast switch filter rules for the corresponding
7709 * switch of the netdev
7710 */
7711 err = ice_update_sw_rule_bridge_mode(hw);
7712 if (err) {
7713 netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
7714 mode, err,
7715 ice_aq_str(hw->adminq.sq_last_status));
7716 /* revert hw->evb_veb */
7717 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
7718 return err;
7719 }
7720
7721 pf_sw->bridge_mode = mode;
7722 }
7723
7724 return 0;
7725 }
7726
7727 /**
7728 * ice_tx_timeout - Respond to a Tx Hang
7729 * @netdev: network interface device structure
7730 * @txqueue: Tx queue
7731 */
7732 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
7733 {
7734 struct ice_netdev_priv *np = netdev_priv(netdev);
7735 struct ice_tx_ring *tx_ring = NULL;
7736 struct ice_vsi *vsi = np->vsi;
7737 struct ice_pf *pf = vsi->back;
7738 u32 i;
7739
7740 pf->tx_timeout_count++;
7741
7742 /* Check if PFC is enabled for the TC to which the queue belongs.
7743 * If so, the Tx timeout is not caused by a hung queue and there is
7744 * no need to reset and rebuild.
7745 */
7746 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
7747 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
7748 txqueue);
7749 return;
7750 }
7751
7752 /* now that we have an index, find the tx_ring struct */
7753 ice_for_each_txq(vsi, i)
7754 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
7755 if (txqueue == vsi->tx_rings[i]->q_index) {
7756 tx_ring = vsi->tx_rings[i];
7757 break;
7758 }
7759
7760 /* Reset recovery level if enough time has elapsed after last timeout.
7761 * Also ensure no new reset action happens before next timeout period.
7762 */
7763 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
7764 pf->tx_timeout_recovery_level = 1;
7765 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
7766 netdev->watchdog_timeo)))
7767 return;
7768
7769 if (tx_ring) {
7770 struct ice_hw *hw = &pf->hw;
7771 u32 head, val = 0;
7772
7773 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
7774 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
7775 /* Read interrupt register */
7776 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
7777
7778 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
7779 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
7780 head, tx_ring->next_to_use, val);
7781 }
7782
7783 pf->tx_timeout_last_recovery = jiffies;
7784 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
7785 pf->tx_timeout_recovery_level, txqueue);
7786
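/* escalate the recovery action on successive timeouts: level 1 requests
 * a PF reset, level 2 a core reset, level 3 a global reset; beyond that
 * the device is taken down as unrecoverable.
 */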
7787 switch (pf->tx_timeout_recovery_level) {
7788 case 1:
7789 set_bit(ICE_PFR_REQ, pf->state);
7790 break;
7791 case 2:
7792 set_bit(ICE_CORER_REQ, pf->state);
7793 break;
7794 case 3:
7795 set_bit(ICE_GLOBR_REQ, pf->state);
7796 break;
7797 default:
7798 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
7799 set_bit(ICE_DOWN, pf->state);
7800 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
7801 set_bit(ICE_SERVICE_DIS, pf->state);
7802 break;
7803 }
7804
7805 ice_service_task_schedule(pf);
7806 pf->tx_timeout_recovery_level++;
7807 }
7808
7809 /**
7810 * ice_setup_tc_cls_flower - flower classifier offloads
7811 * @np: net device to configure
7812 * @filter_dev: device on which filter is added
7813 * @cls_flower: offload data
7814 */
7815 static int
7816 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
7817 struct net_device *filter_dev,
7818 struct flow_cls_offload *cls_flower)
7819 {
7820 struct ice_vsi *vsi = np->vsi;
7821
7822 if (cls_flower->common.chain_index)
7823 return -EOPNOTSUPP;
7824
7825 switch (cls_flower->command) {
7826 case FLOW_CLS_REPLACE:
7827 return ice_add_cls_flower(filter_dev, vsi, cls_flower);
7828 case FLOW_CLS_DESTROY:
7829 return ice_del_cls_flower(vsi, cls_flower);
7830 default:
7831 return -EINVAL;
7832 }
7833 }
7834
7835 /**
7836 * ice_setup_tc_block_cb - callback handler registered for TC block
7837 * @type: TC SETUP type
7838 * @type_data: TC flower offload data that contains user input
7839 * @cb_priv: netdev private data
7840 */
7841 static int
7842 ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
7843 {
7844 struct ice_netdev_priv *np = cb_priv;
7845
7846 switch (type) {
7847 case TC_SETUP_CLSFLOWER:
7848 return ice_setup_tc_cls_flower(np, np->vsi->netdev,
7849 type_data);
7850 default:
7851 return -EOPNOTSUPP;
7852 }
7853 }
7854
7855 /**
7856 * ice_validate_mqprio_qopt - Validate TCF input parameters
7857 * @vsi: Pointer to VSI
7858 * @mqprio_qopt: input parameters for mqprio queue configuration
7859 *
7860 * This function validates MQPRIO params, such as qcount (power of 2 wherever
7861 * needed), and makes sure the user doesn't specify a qcount or BW rate limit
7862 * for more TCs than "num_tc"
7863 */
7864 static int
7865 ice_validate_mqprio_qopt(struct ice_vsi *vsi,
7866 struct tc_mqprio_qopt_offload *mqprio_qopt)
7867 {
7868 int non_power_of_2_qcount = 0;
7869 struct ice_pf *pf = vsi->back;
7870 int max_rss_q_cnt = 0;
7871 u64 sum_min_rate = 0;
7872 struct device *dev;
7873 int i, speed;
7874 u8 num_tc;
7875
7876 if (vsi->type != ICE_VSI_PF)
7877 return -EINVAL;
7878
7879 if (mqprio_qopt->qopt.offset[0] != 0 ||
7880 mqprio_qopt->qopt.num_tc < 1 ||
7881 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
7882 return -EINVAL;
7883
7884 dev = ice_pf_to_dev(pf);
7885 vsi->ch_rss_size = 0;
7886 num_tc = mqprio_qopt->qopt.num_tc;
7887 speed = ice_get_link_speed_kbps(vsi);
7888
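/* walk each requested TC; the loop terminates via the
 * "i >= mqprio_qopt->qopt.num_tc - 1" break below once the last TC has
 * been validated
 */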
7889 for (i = 0; num_tc; i++) {
7890 int qcount = mqprio_qopt->qopt.count[i];
7891 u64 max_rate, min_rate, rem;
7892
7893 if (!qcount)
7894 return -EINVAL;
7895
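/* at most one non-power-of-2 qcount is allowed across all TCs, and it
 * must be at least as large as every power-of-2 qcount; the largest
 * qcount later becomes the channel RSS size (vsi->ch_rss_size)
 */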
7896 if (is_power_of_2(qcount)) {
7897 if (non_power_of_2_qcount &&
7898 qcount > non_power_of_2_qcount) {
7899 dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
7900 qcount, non_power_of_2_qcount);
7901 return -EINVAL;
7902 }
7903 if (qcount > max_rss_q_cnt)
7904 max_rss_q_cnt = qcount;
7905 } else {
7906 if (non_power_of_2_qcount &&
7907 qcount != non_power_of_2_qcount) {
7908 dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
7909 qcount, non_power_of_2_qcount);
7910 return -EINVAL;
7911 }
7912 if (qcount < max_rss_q_cnt) {
7913 dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
7914 qcount, max_rss_q_cnt);
7915 return -EINVAL;
7916 }
7917 max_rss_q_cnt = qcount;
7918 non_power_of_2_qcount = qcount;
7919 }
7920
7921 /* The TC command takes input in K/M/Gbps or K/M/Gbit etc. but
7922 * converts the bandwidth rate limit into Bytes/s when
7923 * passing it down to the driver. So convert input bandwidth
7924 * from Bytes/s to Kbps
7925 */
7926 max_rate = mqprio_qopt->max_rate[i];
7927 max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
7928
7929 /* min_rate is minimum guaranteed rate and it can't be zero */
7930 min_rate = mqprio_qopt->min_rate[i];
7931 min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
7932 sum_min_rate += min_rate;
7933
7934 if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
7935 dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
7936 min_rate, ICE_MIN_BW_LIMIT);
7937 return -EINVAL;
7938 }
7939
7940 if (max_rate && max_rate > speed) {
7941 dev_err(dev, "TC%d: max_rate(%llu Kbps) > link speed of %u Kbps\n",
7942 i, max_rate, speed);
7943 return -EINVAL;
7944 }
7945
7946 iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
7947 if (rem) {
7948 dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
7949 i, ICE_MIN_BW_LIMIT);
7950 return -EINVAL;
7951 }
7952
7953 iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
7954 if (rem) {
7955 dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
7956 i, ICE_MIN_BW_LIMIT);
7957 return -EINVAL;
7958 }
7959
7960 /* min_rate can't be more than max_rate, except when max_rate
7961 * is zero (implies max_rate sought is max line rate). In such
7962 * a case min_rate can be more than max.
7963 */
7964 if (max_rate && min_rate > max_rate) {
7965 dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
7966 min_rate, max_rate);
7967 return -EINVAL;
7968 }
7969
7970 if (i >= mqprio_qopt->qopt.num_tc - 1)
7971 break;
7972 if (mqprio_qopt->qopt.offset[i + 1] !=
7973 (mqprio_qopt->qopt.offset[i] + qcount))
7974 return -EINVAL;
7975 }
7976 if (vsi->num_rxq <
7977 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7978 return -EINVAL;
7979 if (vsi->num_txq <
7980 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
7981 return -EINVAL;
7982
7983 if (sum_min_rate && sum_min_rate > (u64)speed) {
7984 dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
7985 sum_min_rate, speed);
7986 return -EINVAL;
7987 }
7988
7989 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
7990 vsi->ch_rss_size = max_rss_q_cnt;
7991
7992 return 0;
7993 }
7994
7995 /**
7996 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
7997 * @pf: ptr to PF device
7998 * @vsi: ptr to VSI
7999 */
8000 static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
8001 {
8002 struct device *dev = ice_pf_to_dev(pf);
8003 bool added = false;
8004 struct ice_hw *hw;
8005 int flow;
8006
8007 if (!(vsi->num_gfltr || vsi->num_bfltr))
8008 return -EINVAL;
8009
8010 hw = &pf->hw;
8011 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
8012 struct ice_fd_hw_prof *prof;
8013 int tun, status;
8014 u64 entry_h;
8015
8016 if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
8017 hw->fdir_prof[flow]->cnt))
8018 continue;
8019
8020 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
8021 enum ice_flow_priority prio;
8022 u64 prof_id;
8023
8024 /* add this VSI to FDir profile for this flow */
8025 prio = ICE_FLOW_PRIO_NORMAL;
8026 prof = hw->fdir_prof[flow];
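/* each (flow type, tunnel segment) pair maps to its own FD profile ID */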
8027 prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
8028 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
8029 prof->vsi_h[0], vsi->idx,
8030 prio, prof->fdir_seg[tun],
8031 &entry_h);
8032 if (status) {
8033 dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
8034 vsi->idx, flow);
8035 continue;
8036 }
8037
8038 prof->entry_h[prof->cnt][tun] = entry_h;
8039 }
8040
8041 /* store VSI for filter replay and delete */
8042 prof->vsi_h[prof->cnt] = vsi->idx;
8043 prof->cnt++;
8044
8045 added = true;
8046 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
8047 flow);
8048 }
8049
8050 if (!added)
8051 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
8052
8053 return 0;
8054 }
8055
8056 /**
8057 * ice_add_channel - add a channel by adding VSI
8058 * @pf: ptr to PF device
8059 * @sw_id: underlying HW switching element ID
8060 * @ch: ptr to channel structure
8061 *
8062 * Add a channel (VSI) using add_vsi and queue_map
8063 */
8064 static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
8065 {
8066 struct device *dev = ice_pf_to_dev(pf);
8067 struct ice_vsi *vsi;
8068
8069 if (ch->type != ICE_VSI_CHNL) {
8070 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
8071 return -EINVAL;
8072 }
8073
8074 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
8075 if (!vsi || vsi->type != ICE_VSI_CHNL) {
8076 dev_err(dev, "create chnl VSI failure\n");
8077 return -EINVAL;
8078 }
8079
8080 ice_add_vsi_to_fdir(pf, vsi);
8081
8082 ch->sw_id = sw_id;
8083 ch->vsi_num = vsi->vsi_num;
8084 ch->info.mapping_flags = vsi->info.mapping_flags;
8085 ch->ch_vsi = vsi;
8086 /* set the back pointer of channel for newly created VSI */
8087 vsi->ch = ch;
8088
8089 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
8090 sizeof(vsi->info.q_mapping));
8091 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
8092 sizeof(vsi->info.tc_mapping));
8093
8094 return 0;
8095 }
8096
8097 /**
8098 * ice_chnl_cfg_res - configure channel specific resources
8099 * @vsi: the VSI being setup
8100 * @ch: ptr to channel structure
8101 *
8102 * Configure channel specific resources such as rings and vectors.
8103 */
8104 static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
8105 {
8106 int i;
8107
8108 for (i = 0; i < ch->num_txq; i++) {
8109 struct ice_q_vector *tx_q_vector, *rx_q_vector;
8110 struct ice_ring_container *rc;
8111 struct ice_tx_ring *tx_ring;
8112 struct ice_rx_ring *rx_ring;
8113
8114 tx_ring = vsi->tx_rings[ch->base_q + i];
8115 rx_ring = vsi->rx_rings[ch->base_q + i];
8116 if (!tx_ring || !rx_ring)
8117 continue;
8118
8119 /* setup ring being channel enabled */
8120 tx_ring->ch = ch;
8121 rx_ring->ch = ch;
8122
8123 /* following code block sets up vector specific attributes */
8124 tx_q_vector = tx_ring->q_vector;
8125 rx_q_vector = rx_ring->q_vector;
8126 if (!tx_q_vector && !rx_q_vector)
8127 continue;
8128
8129 if (tx_q_vector) {
8130 tx_q_vector->ch = ch;
8131 /* setup Tx and Rx ITR setting if DIM is off */
8132 rc = &tx_q_vector->tx;
8133 if (!ITR_IS_DYNAMIC(rc))
8134 ice_write_itr(rc, rc->itr_setting);
8135 }
8136 if (rx_q_vector) {
8137 rx_q_vector->ch = ch;
8138 /* setup Tx and Rx ITR setting if DIM is off */
8139 rc = &rx_q_vector->rx;
8140 if (!ITR_IS_DYNAMIC(rc))
8141 ice_write_itr(rc, rc->itr_setting);
8142 }
8143 }
8144
8145 /* it is safe to assume that, if the channel has a non-zero num_txq/num_rxq,
8146 * then the GLINT_ITR register would have been written to perform an
8147 * in-context update, hence perform a flush
8148 */
8149 if (ch->num_txq || ch->num_rxq)
8150 ice_flush(&vsi->back->hw);
8151 }
8152
8153 /**
8154 * ice_cfg_chnl_all_res - configure channel resources
8155 * @vsi: ptr to main_vsi
8156 * @ch: ptr to channel structure
8157 *
8158 * This function configures channel specific resources such as flow-director
8159 * counter index, and other resources such as queues, vectors, ITR settings
8160 */
8161 static void
8162 ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
8163 {
8164 /* configure channel (aka ADQ) resources such as queues, vectors,
8165 * ITR settings for channel specific vectors and anything else
8166 */
8167 ice_chnl_cfg_res(vsi, ch);
8168 }
8169
8170 /**
8171 * ice_setup_hw_channel - setup new channel
8172 * @pf: ptr to PF device
8173 * @vsi: the VSI being setup
8174 * @ch: ptr to channel structure
8175 * @sw_id: underlying HW switching element ID
8176 * @type: type of channel to be created (VMDq2/VF)
8177 *
8178 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8179 * and configures Tx rings accordingly
8180 */
8181 static int
8182 ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8183 struct ice_channel *ch, u16 sw_id, u8 type)
8184 {
8185 struct device *dev = ice_pf_to_dev(pf);
8186 int ret;
8187
8188 ch->base_q = vsi->next_base_q;
8189 ch->type = type;
8190
8191 ret = ice_add_channel(pf, sw_id, ch);
8192 if (ret) {
8193 dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
8194 return ret;
8195 }
8196
8197 /* configure/setup ADQ specific resources */
8198 ice_cfg_chnl_all_res(vsi, ch);
8199
8200 /* make sure to update the next_base_q so that subsequent channel's
8201 * (aka ADQ) VSI queue map is correct
8202 */
8203 vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
8204 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
8205 ch->num_rxq);
8206
8207 return 0;
8208 }
8209
8210 /**
8211 * ice_setup_channel - setup new channel using uplink element
8212 * @pf: ptr to PF device
8213 * @vsi: the VSI being setup
8214 * @ch: ptr to channel structure
8215 *
8216 * Setup new channel (VSI) based on specified type (VMDq2/VF)
8217 * and uplink switching element
8218 */
8219 static bool
8220 ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
8221 struct ice_channel *ch)
8222 {
8223 struct device *dev = ice_pf_to_dev(pf);
8224 u16 sw_id;
8225 int ret;
8226
8227 if (vsi->type != ICE_VSI_PF) {
8228 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
8229 return false;
8230 }
8231
8232 sw_id = pf->first_sw->sw_id;
8233
8234 /* create channel (VSI) */
8235 ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
8236 if (ret) {
8237 dev_err(dev, "failed to setup hw_channel\n");
8238 return false;
8239 }
8240 dev_dbg(dev, "successfully created channel()\n");
8241
8242 return ch->ch_vsi ? true : false;
8243 }
8244
8245 /**
8246 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8247 * @vsi: VSI to be configured
8248 * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
8249 * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
8250 */
8251 static int
8252 ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
8253 {
8254 int err;
8255
8256 err = ice_set_min_bw_limit(vsi, min_tx_rate);
8257 if (err)
8258 return err;
8259
8260 return ice_set_max_bw_limit(vsi, max_tx_rate);
8261 }
8262
8263 /**
8264 * ice_create_q_channel - function to create channel
8265 * @vsi: VSI to be configured
8266 * @ch: ptr to channel (it contains channel specific params)
8267 *
8268 * This function creates channel (VSI) using num_queues specified by user,
8269 * reconfigs RSS if needed.
8270 */
8271 static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
8272 {
8273 struct ice_pf *pf = vsi->back;
8274 struct device *dev;
8275
8276 if (!ch)
8277 return -EINVAL;
8278
8279 dev = ice_pf_to_dev(pf);
8280 if (!ch->num_txq || !ch->num_rxq) {
8281 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
8282 return -EINVAL;
8283 }
8284
8285 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
8286 dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
8287 vsi->cnt_q_avail, ch->num_txq);
8288 return -EINVAL;
8289 }
8290
8291 if (!ice_setup_channel(pf, vsi, ch)) {
8292 dev_info(dev, "Failed to setup channel\n");
8293 return -EINVAL;
8294 }
8295 /* configure BW rate limit */
8296 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
8297 int ret;
8298
8299 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
8300 ch->min_tx_rate);
8301 if (ret)
8302 dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
8303 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8304 else
8305 dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
8306 ch->max_tx_rate, ch->ch_vsi->vsi_num);
8307 }
8308
8309 vsi->cnt_q_avail -= ch->num_txq;
8310
8311 return 0;
8312 }
8313
8314 /**
8315 * ice_rem_all_chnl_fltrs - removes all channel filters
8316 * @pf: ptr to PF, TC-flower based filter are tracked at PF level
8317 *
8318 * Remove all advanced switch filters only if they are channel specific
8319 * tc-flower based filter
8320 */
8321 static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
8322 {
8323 struct ice_tc_flower_fltr *fltr;
8324 struct hlist_node *node;
8325
8326 /* to remove all channel filters, iterate an ordered list of filters */
8327 hlist_for_each_entry_safe(fltr, node,
8328 &pf->tc_flower_fltr_list,
8329 tc_flower_node) {
8330 struct ice_rule_query_data rule;
8331 int status;
8332
8333 /* for now process only channel specific filters */
8334 if (!ice_is_chnl_fltr(fltr))
8335 continue;
8336
8337 rule.rid = fltr->rid;
8338 rule.rule_id = fltr->rule_id;
8339 rule.vsi_handle = fltr->dest_id;
8340 status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
8341 if (status) {
8342 if (status == -ENOENT)
8343 dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
8344 rule.rule_id);
8345 else
8346 dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
8347 status);
8348 } else if (fltr->dest_vsi) {
8349 /* update advanced switch filter count */
8350 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
8351 u32 flags = fltr->flags;
8352
8353 fltr->dest_vsi->num_chnl_fltr--;
8354 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
8355 ICE_TC_FLWR_FIELD_ENC_DST_MAC))
8356 pf->num_dmac_chnl_fltrs--;
8357 }
8358 }
8359
8360 hlist_del(&fltr->tc_flower_node);
8361 kfree(fltr);
8362 }
8363 }
8364
8365 /**
8366 * ice_remove_q_channels - Remove queue channels for the TCs
8367 * @vsi: VSI to be configured
8368 * @rem_fltr: delete advanced switch filter or not
8369 *
8370 * Remove queue channels for the TCs
8371 */
8372 static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
8373 {
8374 struct ice_channel *ch, *ch_tmp;
8375 struct ice_pf *pf = vsi->back;
8376 int i;
8377
8378 /* remove all tc-flower based filters if they are channel filters only */
8379 if (rem_fltr)
8380 ice_rem_all_chnl_fltrs(pf);
8381
8382 /* remove ntuple filters since queue configuration is being changed */
8383 if (vsi->netdev->features & NETIF_F_NTUPLE) {
8384 struct ice_hw *hw = &pf->hw;
8385
8386 mutex_lock(&hw->fdir_fltr_lock);
8387 ice_fdir_del_all_fltrs(vsi);
8388 mutex_unlock(&hw->fdir_fltr_lock);
8389 }
8390
8391 /* perform cleanup for channels if they exist */
8392 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
8393 struct ice_vsi *ch_vsi;
8394
8395 list_del(&ch->list);
8396 ch_vsi = ch->ch_vsi;
8397 if (!ch_vsi) {
8398 kfree(ch);
8399 continue;
8400 }
8401
8402 /* Reset queue contexts */
8403 for (i = 0; i < ch->num_rxq; i++) {
8404 struct ice_tx_ring *tx_ring;
8405 struct ice_rx_ring *rx_ring;
8406
8407 tx_ring = vsi->tx_rings[ch->base_q + i];
8408 rx_ring = vsi->rx_rings[ch->base_q + i];
8409 if (tx_ring) {
8410 tx_ring->ch = NULL;
8411 if (tx_ring->q_vector)
8412 tx_ring->q_vector->ch = NULL;
8413 }
8414 if (rx_ring) {
8415 rx_ring->ch = NULL;
8416 if (rx_ring->q_vector)
8417 rx_ring->q_vector->ch = NULL;
8418 }
8419 }
8420
8421 /* Release FD resources for the channel VSI */
8422 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
8423
8424 /* clear the VSI from scheduler tree */
8425 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
8426
8427 /* Delete VSI from FW */
8428 ice_vsi_delete(ch->ch_vsi);
8429
8430 /* Delete VSI from PF and HW VSI arrays */
8431 ice_vsi_clear(ch->ch_vsi);
8432
8433 /* free the channel */
8434 kfree(ch);
8435 }
8436
8437 /* clear the channel VSI map which is stored in main VSI */
8438 ice_for_each_chnl_tc(i)
8439 vsi->tc_map_vsi[i] = NULL;
8440
8441 /* reset main VSI's all TC information */
8442 vsi->all_enatc = 0;
8443 vsi->all_numtc = 0;
8444 }
8445
8446 /**
8447 * ice_rebuild_channels - rebuild channel
8448 * @pf: ptr to PF
8449 *
8450 * Recreate channel VSIs and replay filters
8451 */
8452 static int ice_rebuild_channels(struct ice_pf *pf)
8453 {
8454 struct device *dev = ice_pf_to_dev(pf);
8455 struct ice_vsi *main_vsi;
8456 bool rem_adv_fltr = true;
8457 struct ice_channel *ch;
8458 struct ice_vsi *vsi;
8459 int tc_idx = 1;
8460 int i, err;
8461
8462 main_vsi = ice_get_main_vsi(pf);
8463 if (!main_vsi)
8464 return 0;
8465
8466 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
8467 main_vsi->old_numtc == 1)
8468 return 0; /* nothing to be done */
8469
8470 /* reconfigure main VSI based on old value of TC and cached values
8471 * for MQPRIO opts
8472 */
8473 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
8474 if (err) {
8475 dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
8476 main_vsi->old_ena_tc, main_vsi->vsi_num);
8477 return err;
8478 }
8479
8480 /* rebuild ADQ VSIs */
8481 ice_for_each_vsi(pf, i) {
8482 enum ice_vsi_type type;
8483
8484 vsi = pf->vsi[i];
8485 if (!vsi || vsi->type != ICE_VSI_CHNL)
8486 continue;
8487
8488 type = vsi->type;
8489
8490 /* rebuild ADQ VSI */
8491 err = ice_vsi_rebuild(vsi, true);
8492 if (err) {
8493 dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
8494 ice_vsi_type_str(type), vsi->idx, err);
8495 goto cleanup;
8496 }
8497
8498 /* Re-map HW VSI number, using the VSI handle that will be
8499 * passed to the ice_replay_vsi() call below
8500 */
8501 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
8502
8503 /* replay filters for the VSI */
8504 err = ice_replay_vsi(&pf->hw, vsi->idx);
8505 if (err) {
8506 dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
8507 ice_vsi_type_str(type), err, vsi->idx);
8508 rem_adv_fltr = false;
8509 goto cleanup;
8510 }
8511 dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
8512 ice_vsi_type_str(type), vsi->idx);
8513
8514 /* store ADQ VSI at correct TC index in main VSI's
8515 * map of TC to VSI
8516 */
8517 main_vsi->tc_map_vsi[tc_idx++] = vsi;
8518 }
8519
8520 /* ADQ VSI(s) have been rebuilt successfully, so set up the
8521 * channel for the main VSI's Tx and Rx rings
8522 */
8523 list_for_each_entry(ch, &main_vsi->ch_list, list) {
8524 struct ice_vsi *ch_vsi;
8525
8526 ch_vsi = ch->ch_vsi;
8527 if (!ch_vsi)
8528 continue;
8529
8530 /* reconfig channel resources */
8531 ice_cfg_chnl_all_res(main_vsi, ch);
8532
8533 /* replay BW rate limit if it is non-zero */
8534 if (!ch->max_tx_rate && !ch->min_tx_rate)
8535 continue;
8536
8537 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
8538 ch->min_tx_rate);
8539 if (err)
8540 dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8541 err, ch->max_tx_rate, ch->min_tx_rate,
8542 ch_vsi->vsi_num);
8543 else
8544 dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
8545 ch->max_tx_rate, ch->min_tx_rate,
8546 ch_vsi->vsi_num);
8547 }
8548
8549 /* reconfig RSS for main VSI */
8550 if (main_vsi->ch_rss_size)
8551 ice_vsi_cfg_rss_lut_key(main_vsi);
8552
8553 return 0;
8554
8555 cleanup:
8556 ice_remove_q_channels(main_vsi, rem_adv_fltr);
8557 return err;
8558 }
8559
8560 /**
8561 * ice_create_q_channels - Add queue channel for the given TCs
8562 * @vsi: VSI to be configured
8563 *
8564 * Configures queue channel mapping to the given TCs
8565 */
8566 static int ice_create_q_channels(struct ice_vsi *vsi)
8567 {
8568 struct ice_pf *pf = vsi->back;
8569 struct ice_channel *ch;
8570 int ret = 0, i;
8571
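/* channel (ADQ) TCs start at TC 1; TC 0 remains on the main VSI */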
8572 ice_for_each_chnl_tc(i) {
8573 if (!(vsi->all_enatc & BIT(i)))
8574 continue;
8575
8576 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
8577 if (!ch) {
8578 ret = -ENOMEM;
8579 goto err_free;
8580 }
8581 INIT_LIST_HEAD(&ch->list);
8582 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
8583 ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
8584 ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
8585 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
8586 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
8587
8588 /* convert to Kbits/s */
8589 if (ch->max_tx_rate)
8590 ch->max_tx_rate = div_u64(ch->max_tx_rate,
8591 ICE_BW_KBPS_DIVISOR);
8592 if (ch->min_tx_rate)
8593 ch->min_tx_rate = div_u64(ch->min_tx_rate,
8594 ICE_BW_KBPS_DIVISOR);
8595
8596 ret = ice_create_q_channel(vsi, ch);
8597 if (ret) {
8598 dev_err(ice_pf_to_dev(pf),
8599 "failed creating channel TC:%d\n", i);
8600 kfree(ch);
8601 goto err_free;
8602 }
8603 list_add_tail(&ch->list, &vsi->ch_list);
8604 vsi->tc_map_vsi[i] = ch->ch_vsi;
8605 dev_dbg(ice_pf_to_dev(pf),
8606 "successfully created channel: VSI %pK\n", ch->ch_vsi);
8607 }
8608 return 0;
8609
8610 err_free:
8611 ice_remove_q_channels(vsi, false);
8612
8613 return ret;
8614 }
8615
8616 /**
8617 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
8618 * @netdev: net device to configure
8619 * @type_data: TC offload data
8620 */
8621 static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
8622 {
8623 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
8624 struct ice_netdev_priv *np = netdev_priv(netdev);
8625 struct ice_vsi *vsi = np->vsi;
8626 struct ice_pf *pf = vsi->back;
8627 u16 mode, ena_tc_qdisc = 0;
8628 int cur_txq, cur_rxq;
8629 u8 hw = 0, num_tcf;
8630 struct device *dev;
8631 int ret, i;
8632
8633 dev = ice_pf_to_dev(pf);
8634 num_tcf = mqprio_qopt->qopt.num_tc;
8635 hw = mqprio_qopt->qopt.hw;
8636 mode = mqprio_qopt->mode;
8637 if (!hw) {
8638 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8639 vsi->ch_rss_size = 0;
8640 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8641 goto config_tcf;
8642 }
8643
8644 /* Generate queue region map for number of TCF requested */
8645 for (i = 0; i < num_tcf; i++)
8646 ena_tc_qdisc |= BIT(i);
8647
8648 switch (mode) {
8649 case TC_MQPRIO_MODE_CHANNEL:
8650
8651 ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
8652 if (ret) {
8653 netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
8654 ret);
8655 return ret;
8656 }
8657 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
8658 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
8659 /* don't assume state of hw_tc_offload during driver load
8660 * and set the flag for TC flower filter if hw_tc_offload
8661 * already ON
8662 */
8663 if (vsi->netdev->features & NETIF_F_HW_TC)
8664 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
8665 break;
8666 default:
8667 return -EINVAL;
8668 }
8669
8670 config_tcf:
8671
8672 /* Requesting same TCF configuration as already enabled */
8673 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
8674 mode != TC_MQPRIO_MODE_CHANNEL)
8675 return 0;
8676
8677 /* Pause VSI queues */
8678 ice_dis_vsi(vsi, true);
8679
8680 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
8681 ice_remove_q_channels(vsi, true);
8682
8683 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8684 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
8685 num_online_cpus());
8686 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
8687 num_online_cpus());
8688 } else {
8689 /* logic to rebuild VSI, same like ethtool -L */
8690 u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
8691
8692 for (i = 0; i < num_tcf; i++) {
8693 if (!(ena_tc_qdisc & BIT(i)))
8694 continue;
8695
8696 offset = vsi->mqprio_qopt.qopt.offset[i];
8697 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
8698 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
8699 }
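/* after the loop, offset/qcount describe the last enabled TC, so
 * offset + count gives the total number of queues required
 */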
8700 vsi->req_txq = offset + qcount_tx;
8701 vsi->req_rxq = offset + qcount_rx;
8702
8703 /* store away the original rss_size info, so that it gets reused
8704 * from ice_vsi_rebuild during the tc-qdisc delete stage - to
8705 * determine what the rss_size for the main VSI should be
8706 */
8707 vsi->orig_rss_size = vsi->rss_size;
8708 }
8709
8710 /* save current values of Tx and Rx queues before calling VSI rebuild
8711 * for fallback option
8712 */
8713 cur_txq = vsi->num_txq;
8714 cur_rxq = vsi->num_rxq;
8715
8716 /* proceed with rebuild main VSI using correct number of queues */
8717 ret = ice_vsi_rebuild(vsi, false);
8718 if (ret) {
8719 /* fallback to current number of queues */
8720 dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
8721 vsi->req_txq = cur_txq;
8722 vsi->req_rxq = cur_rxq;
8723 clear_bit(ICE_RESET_FAILED, pf->state);
8724 if (ice_vsi_rebuild(vsi, false)) {
8725 dev_err(dev, "Rebuild of main VSI failed again\n");
8726 return ret;
8727 }
8728 }
8729
8730 vsi->all_numtc = num_tcf;
8731 vsi->all_enatc = ena_tc_qdisc;
8732 ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
8733 if (ret) {
8734 netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
8735 vsi->vsi_num);
8736 goto exit;
8737 }
8738
8739 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
8740 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
8741 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
8742
8743 /* set TC0 rate limit if specified */
8744 if (max_tx_rate || min_tx_rate) {
8745 /* convert to Kbits/s */
8746 if (max_tx_rate)
8747 max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
8748 if (min_tx_rate)
8749 min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
8750
8751 ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
8752 if (!ret) {
8753 dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
8754 max_tx_rate, min_tx_rate, vsi->vsi_num);
8755 } else {
8756 dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
8757 max_tx_rate, min_tx_rate, vsi->vsi_num);
8758 goto exit;
8759 }
8760 }
8761 ret = ice_create_q_channels(vsi);
8762 if (ret) {
8763 netdev_err(netdev, "failed configuring queue channels\n");
8764 goto exit;
8765 } else {
8766 netdev_dbg(netdev, "successfully configured channels\n");
8767 }
8768 }
8769
8770 if (vsi->ch_rss_size)
8771 ice_vsi_cfg_rss_lut_key(vsi);
8772
8773 exit:
8774 /* if error, reset the all_numtc and all_enatc */
8775 if (ret) {
8776 vsi->all_numtc = 0;
8777 vsi->all_enatc = 0;
8778 }
8779 /* resume VSI */
8780 ice_ena_vsi(vsi, true);
8781
8782 return ret;
8783 }
8784
8785 static LIST_HEAD(ice_block_cb_list);
8786
8787 static int
8788 ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8789 void *type_data)
8790 {
8791 struct ice_netdev_priv *np = netdev_priv(netdev);
8792 struct ice_pf *pf = np->vsi->back;
8793 bool locked = false;
8794 int err;
8795
8796 switch (type) {
8797 case TC_SETUP_BLOCK:
8798 return flow_block_cb_setup_simple(type_data,
8799 &ice_block_cb_list,
8800 ice_setup_tc_block_cb,
8801 np, np, true);
8802 case TC_SETUP_QDISC_MQPRIO:
8803 if (ice_is_eswitch_mode_switchdev(pf)) {
8804 netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
8805 return -EOPNOTSUPP;
8806 }
8807
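/* take the auxiliary device locks so the RDMA auxiliary driver cannot
 * bind or unbind while we check whether it is currently active
 */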
8808 if (pf->adev) {
8809 mutex_lock(&pf->adev_mutex);
8810 device_lock(&pf->adev->dev);
8811 locked = true;
8812 if (pf->adev->dev.driver) {
8813 netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
8814 err = -EBUSY;
8815 goto adev_unlock;
8816 }
8817 }
8818
8819 /* setup traffic classifier for receive side */
8820 mutex_lock(&pf->tc_mutex);
8821 err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
8822 mutex_unlock(&pf->tc_mutex);
8823
8824 adev_unlock:
8825 if (locked) {
8826 device_unlock(&pf->adev->dev);
8827 mutex_unlock(&pf->adev_mutex);
8828 }
8829 return err;
8830 default:
8831 return -EOPNOTSUPP;
8832 }
8833 return -EOPNOTSUPP;
8834 }
8835
8836 static struct ice_indr_block_priv *
8837 ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
8838 struct net_device *netdev)
8839 {
8840 struct ice_indr_block_priv *cb_priv;
8841
8842 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
8843 if (!cb_priv->netdev)
8844 return NULL;
8845 if (cb_priv->netdev == netdev)
8846 return cb_priv;
8847 }
8848 return NULL;
8849 }
8850
8851 static int
8852 ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
8853 void *indr_priv)
8854 {
8855 struct ice_indr_block_priv *priv = indr_priv;
8856 struct ice_netdev_priv *np = priv->np;
8857
8858 switch (type) {
8859 case TC_SETUP_CLSFLOWER:
8860 return ice_setup_tc_cls_flower(np, priv->netdev,
8861 (struct flow_cls_offload *)
8862 type_data);
8863 default:
8864 return -EOPNOTSUPP;
8865 }
8866 }
8867
8868 static int
8869 ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
8870 struct ice_netdev_priv *np,
8871 struct flow_block_offload *f, void *data,
8872 void (*cleanup)(struct flow_block_cb *block_cb))
8873 {
8874 struct ice_indr_block_priv *indr_priv;
8875 struct flow_block_cb *block_cb;
8876
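/* indirect TC blocks are only offered for tunnel devices and for VLAN
 * devices stacked on top of this PF's netdev
 */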
8877 if (!ice_is_tunnel_supported(netdev) &&
8878 !(is_vlan_dev(netdev) &&
8879 vlan_dev_real_dev(netdev) == np->vsi->netdev))
8880 return -EOPNOTSUPP;
8881
8882 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
8883 return -EOPNOTSUPP;
8884
8885 switch (f->command) {
8886 case FLOW_BLOCK_BIND:
8887 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8888 if (indr_priv)
8889 return -EEXIST;
8890
8891 indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
8892 if (!indr_priv)
8893 return -ENOMEM;
8894
8895 indr_priv->netdev = netdev;
8896 indr_priv->np = np;
8897 list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
8898
8899 block_cb =
8900 flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
8901 indr_priv, indr_priv,
8902 ice_rep_indr_tc_block_unbind,
8903 f, netdev, sch, data, np,
8904 cleanup);
8905
8906 if (IS_ERR(block_cb)) {
8907 list_del(&indr_priv->list);
8908 kfree(indr_priv);
8909 return PTR_ERR(block_cb);
8910 }
8911 flow_block_cb_add(block_cb, f);
8912 list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
8913 break;
8914 case FLOW_BLOCK_UNBIND:
8915 indr_priv = ice_indr_block_priv_lookup(np, netdev);
8916 if (!indr_priv)
8917 return -ENOENT;
8918
8919 block_cb = flow_block_cb_lookup(f->block,
8920 ice_indr_setup_block_cb,
8921 indr_priv);
8922 if (!block_cb)
8923 return -ENOENT;
8924
8925 flow_indr_block_cb_remove(block_cb, f);
8926
8927 list_del(&block_cb->driver_list);
8928 break;
8929 default:
8930 return -EOPNOTSUPP;
8931 }
8932 return 0;
8933 }

static int
ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
		     void *cb_priv, enum tc_setup_type type, void *type_data,
		     void *data,
		     void (*cleanup)(struct flow_block_cb *block_cb))
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
					       data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}

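/* ice_open()/ice_stop() below are wired up as the .ndo_open/.ndo_stop
 * callbacks, so they are invoked under RTNL by the core networking stack,
 * e.g. when an administrator runs "ip link set <ifname> up" or
 * "ip link set <ifname> down" (illustrative commands, not part of the driver).
 */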
/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal implementation of ice_open. Should not be called directly except
 * by ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	err = ice_update_link_info(pi);
	if (err) {
		netdev_err(netdev, "Failed to get link info, error %d\n", err);
		return err;
	}

	ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

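/* The ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA test in ice_stop() below corresponds to
 * the "link-down-on-close" private flag; assuming standard ethtool usage, it
 * is toggled with something like:
 *
 *	ethtool --set-priv-flags <ifname> link-down-on-close on
 *
 * in which case closing the netdev also forces the physical link down.
 */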
/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
		int link_err = ice_force_phys_link_state(vsi, false);

		if (link_err) {
			if (link_err == -ENOMEDIUM)
				netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n",
					    vsi->vsi_num);
			else
				netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
					   vsi->vsi_num, link_err);

			ice_vsi_close(vsi);
			return -EIO;
		}
	}

	ice_vsi_close(vsi);

	return 0;
}

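/* A note on the checks in ice_features_check() below (an interpretation; the
 * descriptor field units are not spelled out in this file): the Tx descriptors
 * report MAC/IP/L4 header sizes to the hardware in multi-byte units, so an odd
 * header length or offset (the "len & 0x1" tests) cannot be expressed and such
 * frames fall back to software checksum/GSO via out_rm_features.
 */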
/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
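
/* Two ndo tables are provided: ice_netdev_safe_mode_ops is the reduced set
 * used when the device comes up in safe mode (e.g. when the DDP package could
 * not be loaded), while ice_netdev_ops is the full-featured set used in
 * normal operation.
 */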

static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_select_queue = ice_select_queue,
	.ndo_features_check = ice_features_check,
	.ndo_fix_features = ice_fix_features,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_set_vf_rate = ice_set_vf_bw,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_setup_tc = ice_setup_tc,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
	.ndo_get_devlink_port = ice_get_devlink_port,
};
