// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
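
/* Note: the debug value is handed to netif_msg_init() when seeding
 * pf->msg_enable, so -1 keeps the driver's default message level, 0 silences
 * netif messages, and N in 1..16 enables the lowest N NETIF_MSG_* bits (e.g.
 * debug=2 enables NETIF_MSG_DRV and NETIF_MSG_PROBE). This summarizes the
 * core kernel helper's behavior and is assumed, not driver-specific.
 */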

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static int ice_vsi_open(struct ice_vsi *vsi);

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

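	/* Example: on a 64-entry ring with head = 60 and tail = 4 the ring
	 * has wrapped, so 4 + 64 - 60 = 8 descriptors are still pending.
	 */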
	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(__ICE_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
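			/* stats.pkts is a u64; masking with INT_MAX keeps the
			 * snapshot non-negative in the signed prev_pkt field,
			 * so it can never collide with the -1 "no pending
			 * work" marker used below.
			 */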
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
				ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, netdevice will be
 * unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	enum ice_status status;
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
	if (!status)
		return 0;

	/* We aren't useful with no MAC filters, so unregister if we
	 * had an error
	 */
	if (vsi->netdev->reg_state == NETREG_REGISTERED) {
		dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %s. Unregistering device\n",
			ice_stat_str(status));
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	return -EIO;
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags) ||
	       test_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
}

/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->vlan_ena) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

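	/* XOR leaves a bit set for every IFF_* flag that differs between the
	 * flags we last programmed and the netdev's current flags, so only
	 * the modes that actually changed are reconfigured below.
	 */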
	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
		clear_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, do not treat
	 * it as an error; continue processing the rest of the function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->vlan_ena)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
		clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				ice_cfg_vlan_pruning(vsi, false, false);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					ice_cfg_vlan_pruning(vsi, true, false);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
	set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
exit:
	clear_bit(__ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	/* already prepared for reset */
	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
		return;

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: reset type requested before this function was called
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(__ICE_RESET_FAILED, pf->state);
		clear_bit(__ICE_RESET_OICR_RECV, pf->state);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		clear_bit(__ICE_CORER_REQ, pf->state);
		clear_bit(__ICE_GLOBR_REQ, pf->state);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(__ICE_PFR_REQ, pf->state);
		ice_reset_all_vfs(pf, true);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set,
	 * prepare for the pending reset if not already prepared (for PF
	 * software-initiated global resets the software should already be
	 * prepared, as indicated by __ICE_PREPARED_FOR_RESET; for global
	 * resets initiated by firmware or by software on other PFs, that bit
	 * is not set, so prepare for the reset now), poll for reset done,
	 * rebuild and return.
	 */
	if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(__ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(__ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(__ICE_RESET_OICR_RECV, pf->state);
			clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(__ICE_PFR_REQ, pf->state);
			clear_bit(__ICE_CORER_REQ, pf->state);
			clear_bit(__ICE_GLOBR_REQ, pf->state);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(__ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(__ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(__ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(__ICE_DOWN, pf->state) &&
	    !test_bit(__ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Possible mis-configuration of the Ethernet port detected, please use the Intel(R) Ethernet Port Configuration Tool application to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed, might be false due to not supported */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct device *dev;
	struct ice_hw *hw;
	u32 ouisubtype;

	/* check pf before ice_pf_to_dev(), which dereferences it */
	if (!pf)
		return;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n", __func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
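	/* An LLDP TLV header packs a 7-bit type and a 9-bit length into one
	 * u16, so an org-specific TLV (type 127) of length 25 encodes as
	 * (127 << 9) | 25; ICE_LLDP_TLV_TYPE_S is assumed to be that 9-bit
	 * shift.
	 */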
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Point buf at this TLV's payload; without this the write below
	 * would land in the previous (ETS REC) TLV.
	 */
	buf = tlv->tlvinfo;

	/* Octet 0 carries the PFC capability; octet 1 is left as all
	 * zeros - PFC disabled
	 */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;
	int result;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events,
	 * don't bail on failure due to other book keeping needed
	 */
	result = ice_update_link_info(pi);
	if (result)
		dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
			pi->lport);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);

		result = ice_aq_set_link_restart_an(pi, false, NULL);
		if (result) {
			dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
				vsi->vsi_num, result);
			return result;
		}
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return result;

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return result;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__ICE_DOWN, pf->state) ||
	    test_bit(__ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

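	/* Bits left set in the event mask suppress the corresponding link
	 * events (assumed mask semantics), so clearing these three bits
	 * enables link up/down, media-not-available, and module
	 * qualification failure notifications.
	 */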
	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
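	/* wait_event_interruptible_timeout() returns a negative value if the
	 * wait was interrupted by a signal, 0 if the timeout elapsed with the
	 * condition still false, and otherwise the remaining jiffies; the
	 * switch below folds that into an error code based on task state.
	 */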
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		task_ev = task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(__ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
				ice_stat_str(ret));
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

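	/* Report nonzero only when the work budget was exhausted with events
	 * still pending, so the caller knows to leave its EVENT_PENDING bit
	 * set and come back for another pass.
	 */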
	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

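	/* Read the hardware's current head; if it differs from our
	 * next_to_clean, firmware has posted descriptors we have not yet
	 * processed.
	 */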
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(__ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(__ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(__ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(__ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(__ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume works (e.g WoL scenario)
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(__ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}

/**
 * ice_handle_mdd_event - handle malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from service task. OICR interrupt handler indicates MDD event.
 * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
 * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
 * disable the queue, the PF can be configured to reset the VF using ethtool
 * private flag mdd-auto-reset-vf.
 */
static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	u32 reg;

	if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state)) {
		/* Since the VF MDD event logging is rate limited, check if
		 * there are pending MDD events.
		 */
		ice_print_vfs_mdd_events(pf);
		return;
	}

	/* find what triggered an MDD event */
	reg = rd32(hw, GL_MDET_TX_PQM);
	if (reg & GL_MDET_TX_PQM_VALID_M) {
		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
				GL_MDET_TX_PQM_PF_NUM_S;
		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
				GL_MDET_TX_PQM_VF_NUM_S;
		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
				GL_MDET_TX_PQM_MAL_TYPE_S;
		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
				GL_MDET_TX_PQM_QNUM_S);

		if (netif_msg_tx_err(pf))
			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
				 event, queue, pf_num, vf_num);
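		/* write all 1s to clear the latched event so it can trigger
		 * again (write-1-to-clear semantics assumed for the
		 * GL_MDET_* registers)
		 */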
1494 wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1495 }
1496
1497 reg = rd32(hw, GL_MDET_TX_TCLAN);
1498 if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1499 u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1500 GL_MDET_TX_TCLAN_PF_NUM_S;
1501 u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1502 GL_MDET_TX_TCLAN_VF_NUM_S;
1503 u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1504 GL_MDET_TX_TCLAN_MAL_TYPE_S;
1505 u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1506 GL_MDET_TX_TCLAN_QNUM_S);
1507
1508 if (netif_msg_tx_err(pf))
1509 dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1510 event, queue, pf_num, vf_num);
1511 wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1512 }
1513
1514 reg = rd32(hw, GL_MDET_RX);
1515 if (reg & GL_MDET_RX_VALID_M) {
1516 u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1517 GL_MDET_RX_PF_NUM_S;
1518 u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1519 GL_MDET_RX_VF_NUM_S;
1520 u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1521 GL_MDET_RX_MAL_TYPE_S;
1522 u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1523 GL_MDET_RX_QNUM_S);
1524
1525 if (netif_msg_rx_err(pf))
1526 dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1527 event, queue, pf_num, vf_num);
1528 wr32(hw, GL_MDET_RX, 0xffffffff);
1529 }
1530
1531 /* check to see if this PF caused an MDD event */
1532 reg = rd32(hw, PF_MDET_TX_PQM);
1533 if (reg & PF_MDET_TX_PQM_VALID_M) {
1534 wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1535 if (netif_msg_tx_err(pf))
1536 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1537 }
1538
1539 reg = rd32(hw, PF_MDET_TX_TCLAN);
1540 if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1541 wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1542 if (netif_msg_tx_err(pf))
1543 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1544 }
1545
1546 reg = rd32(hw, PF_MDET_RX);
1547 if (reg & PF_MDET_RX_VALID_M) {
1548 wr32(hw, PF_MDET_RX, 0xFFFF);
1549 if (netif_msg_rx_err(pf))
1550 dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1551 }
1552
1553 /* Check to see if one of the VFs caused an MDD event, and then
1554 * increment counters and set print pending
1555 */
1556 ice_for_each_vf(pf, i) {
1557 struct ice_vf *vf = &pf->vf[i];
1558
1559 reg = rd32(hw, VP_MDET_TX_PQM(i));
1560 if (reg & VP_MDET_TX_PQM_VALID_M) {
1561 wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1562 vf->mdd_tx_events.count++;
1563 set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1564 if (netif_msg_tx_err(pf))
1565 dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1566 i);
1567 }
1568
1569 reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1570 if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1571 wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1572 vf->mdd_tx_events.count++;
1573 set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1574 if (netif_msg_tx_err(pf))
1575 dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1576 i);
1577 }
1578
1579 reg = rd32(hw, VP_MDET_TX_TDPU(i));
1580 if (reg & VP_MDET_TX_TDPU_VALID_M) {
1581 wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1582 vf->mdd_tx_events.count++;
1583 set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1584 if (netif_msg_tx_err(pf))
1585 dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1586 i);
1587 }
1588
1589 reg = rd32(hw, VP_MDET_RX(i));
1590 if (reg & VP_MDET_RX_VALID_M) {
1591 wr32(hw, VP_MDET_RX(i), 0xFFFF);
1592 vf->mdd_rx_events.count++;
1593 set_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state);
1594 if (netif_msg_rx_err(pf))
1595 dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1596 i);
1597
1598 /* Since the queue is disabled on VF Rx MDD events, the
1599 * PF can be configured to reset the VF through ethtool
1600 * private flag mdd-auto-reset-vf.
1601 */
1602 if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1603 /* VF MDD event counters will be cleared by
1604 * reset, so print the event prior to reset.
1605 */
1606 ice_print_vf_rx_mdd_event(vf);
1607 mutex_lock(&pf->vf[i].cfg_lock);
1608 ice_reset_vf(&pf->vf[i], false);
1609 mutex_unlock(&pf->vf[i].cfg_lock);
1610 }
1611 }
1612 }
1613
1614 ice_print_vfs_mdd_events(pf);
1615 }
1616
1617 /**
1618 * ice_force_phys_link_state - Force the physical link state
1619 * @vsi: VSI to force the physical link state to up/down
1620 * @link_up: true/false indicates to set the physical link to up/down
1621 *
1622 * Force the physical link state by getting the current PHY capabilities from
1623 * hardware and setting the PHY config based on the determined capabilities. If
1624 * link changes a link event will be triggered because both the Enable Automatic
1625 * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1626 *
1627 * Returns 0 on success, negative on failure
1628 */
ice_force_phys_link_state(struct ice_vsi * vsi,bool link_up)1629 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1630 {
1631 struct ice_aqc_get_phy_caps_data *pcaps;
1632 struct ice_aqc_set_phy_cfg_data *cfg;
1633 struct ice_port_info *pi;
1634 struct device *dev;
1635 int retcode;
1636
1637 if (!vsi || !vsi->port_info || !vsi->back)
1638 return -EINVAL;
1639 if (vsi->type != ICE_VSI_PF)
1640 return 0;
1641
1642 dev = ice_pf_to_dev(vsi->back);
1643
1644 pi = vsi->port_info;
1645
1646 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1647 if (!pcaps)
1648 return -ENOMEM;
1649
1650 retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1651 NULL);
1652 if (retcode) {
1653 dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1654 vsi->vsi_num, retcode);
1655 retcode = -EIO;
1656 goto out;
1657 }
1658
1659 /* No change in link */
1660 if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1661 link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1662 goto out;
1663
1664 /* Use the current user PHY configuration. The current user PHY
1665 * configuration is initialized during probe from PHY capabilities
1666 * software mode, and updated on set PHY configuration.
1667 */
1668 cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1669 if (!cfg) {
1670 retcode = -ENOMEM;
1671 goto out;
1672 }
1673
1674 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1675 if (link_up)
1676 cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1677 else
1678 cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1679
1680 retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1681 if (retcode) {
1682 dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1683 vsi->vsi_num, retcode);
1684 retcode = -EIO;
1685 }
1686
1687 kfree(cfg);
1688 out:
1689 kfree(pcaps);
1690 return retcode;
1691 }
1692
1693 /**
1694 * ice_init_nvm_phy_type - Initialize the NVM PHY type
1695 * @pi: port info structure
1696 *
1697 * Initialize nvm_phy_type_[low|high] for link lenient mode support
1698 */
ice_init_nvm_phy_type(struct ice_port_info * pi)1699 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1700 {
1701 struct ice_aqc_get_phy_caps_data *pcaps;
1702 struct ice_pf *pf = pi->hw->back;
1703 enum ice_status status;
1704 int err = 0;
1705
1706 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1707 if (!pcaps)
1708 return -ENOMEM;
1709
1710 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1711 NULL);
1712
1713 if (status) {
1714 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1715 err = -EIO;
1716 goto out;
1717 }
1718
1719 pf->nvm_phy_type_hi = pcaps->phy_type_high;
1720 pf->nvm_phy_type_lo = pcaps->phy_type_low;
1721
1722 out:
1723 kfree(pcaps);
1724 return err;
1725 }
1726
1727 /**
1728 * ice_init_link_dflt_override - Initialize link default override
1729 * @pi: port info structure
1730 *
1731 * Initialize link default override and PHY total port shutdown during probe
1732 */
ice_init_link_dflt_override(struct ice_port_info * pi)1733 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1734 {
1735 struct ice_link_default_override_tlv *ldo;
1736 struct ice_pf *pf = pi->hw->back;
1737
1738 ldo = &pf->link_dflt_override;
1739 if (ice_get_link_default_override(ldo, pi))
1740 return;
1741
1742 if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1743 return;
1744
1745 /* Enable Total Port Shutdown (override/replace link-down-on-close
1746 * ethtool private flag) for ports with Port Disable bit set.
1747 */
1748 set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1749 set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1750 }
1751
1752 /**
1753 * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1754 * @pi: port info structure
1755 *
1756 * If default override is enabled, initialized the user PHY cfg speed and FEC
1757 * settings using the default override mask from the NVM.
1758 *
1759 * The PHY should only be configured with the default override settings the
1760 * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1761 * is used to indicate that the user PHY cfg default override is initialized
1762 * and the PHY has not been configured with the default override settings. The
1763 * state is set here, and cleared in ice_configure_phy the first time the PHY is
1764 * configured.
1765 */
ice_init_phy_cfg_dflt_override(struct ice_port_info * pi)1766 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1767 {
1768 struct ice_link_default_override_tlv *ldo;
1769 struct ice_aqc_set_phy_cfg_data *cfg;
1770 struct ice_phy_info *phy = &pi->phy;
1771 struct ice_pf *pf = pi->hw->back;
1772
1773 ldo = &pf->link_dflt_override;
1774
1775 /* If link default override is enabled, use it to mask NVM PHY capabilities
1776 * for speed and FEC default configuration.
1777 */
1778 cfg = &phy->curr_user_phy_cfg;
1779
1780 if (ldo->phy_type_low || ldo->phy_type_high) {
1781 cfg->phy_type_low = pf->nvm_phy_type_lo &
1782 cpu_to_le64(ldo->phy_type_low);
1783 cfg->phy_type_high = pf->nvm_phy_type_hi &
1784 cpu_to_le64(ldo->phy_type_high);
1785 }
1786 cfg->link_fec_opt = ldo->fec_options;
1787 phy->curr_user_fec_req = ICE_FEC_AUTO;
1788
1789 set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1790 }
1791
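/* Annotation (not part of the upstream file): worked example of the override
 * masking in ice_init_phy_cfg_dflt_override() above, with illustrative
 * values. If the NVM reports nvm_phy_type_lo = 0x00F0 and the override TLV
 * carries phy_type_low = 0x0030, then
 *
 *	cfg->phy_type_low = 0x00F0 & 0x0030 = 0x0030
 *
 * i.e. only the PHY types named by the override survive as the default.
 */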
1792 /**
1793 * ice_init_phy_user_cfg - Initialize the PHY user configuration
1794 * @pi: port info structure
1795 *
1796 * Initialize the current user PHY configuration, speed, FEC, and FC requested
1797 * mode to default. The PHY defaults are from get PHY capabilities topology
1798 * with media so call when media is first available. An error is returned if
1799 * called when media is not available. The PHY initialization completed state is
1800 * set here.
1801 *
1802 * These values are used when setting the PHY configuration, and the user
1803 * PHY configuration is refreshed on every set PHY configuration call.
1804 * Returns 0 on success, negative on failure.
1805 */
1806 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1807 {
1808 struct ice_aqc_get_phy_caps_data *pcaps;
1809 struct ice_phy_info *phy = &pi->phy;
1810 struct ice_pf *pf = pi->hw->back;
1811 enum ice_status status;
1812 struct ice_vsi *vsi;
1813 int err = 0;
1814
1815 if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1816 return -EIO;
1817
1818 vsi = ice_get_main_vsi(pf);
1819 if (!vsi)
1820 return -EINVAL;
1821
1822 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1823 if (!pcaps)
1824 return -ENOMEM;
1825
1826 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
1827 NULL);
1828 if (status) {
1829 dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1830 err = -EIO;
1831 goto err_out;
1832 }
1833
1834 ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1835
1836 /* check if lenient mode is supported and enabled */
1837 if (ice_fw_supports_link_override(&vsi->back->hw) &&
1838 !(pcaps->module_compliance_enforcement &
1839 ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1840 set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1841
1842 /* if link default override is enabled, initialize user PHY
1843 * configuration with link default override values
1844 */
1845 if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
1846 ice_init_phy_cfg_dflt_override(pi);
1847 goto out;
1848 }
1849 }
1850
1851 /* if link default override is not enabled, initialize PHY using
1852 * topology with media
1853 */
1854 phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1855 pcaps->link_fec_options);
1856 phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1857
1858 out:
1859 phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1860 set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
1861 err_out:
1862 kfree(pcaps);
1863 return err;
1864 }
1865
1866 /**
1867 * ice_configure_phy - configure PHY
1868 * @vsi: VSI of PHY
1869 *
1870 * Set the PHY configuration. If the current PHY configuration is the same as
1871 * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise,
1872 * configure the PHY based on the get PHY capabilities for topology with media.
1873 */
1874 static int ice_configure_phy(struct ice_vsi *vsi)
1875 {
1876 struct device *dev = ice_pf_to_dev(vsi->back);
1877 struct ice_aqc_get_phy_caps_data *pcaps;
1878 struct ice_aqc_set_phy_cfg_data *cfg;
1879 struct ice_port_info *pi;
1880 enum ice_status status;
1881 int err = 0;
1882
1883 pi = vsi->port_info;
1884 if (!pi)
1885 return -EINVAL;
1886
1887 /* Ensure we have media as we cannot configure a medialess port */
1888 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1889 return -EPERM;
1890
1891 ice_print_topo_conflict(vsi);
1892
1893 if (vsi->port_info->phy.link_info.topo_media_conflict ==
1894 ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1895 return -EPERM;
1896
1897 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
1898 return ice_force_phys_link_state(vsi, true);
1899
1900 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1901 if (!pcaps)
1902 return -ENOMEM;
1903
1904 /* Get current PHY config */
1905 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1906 NULL);
1907 if (status) {
1908 dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1909 vsi->vsi_num, ice_stat_str(status));
1910 err = -EIO;
1911 goto done;
1912 }
1913
1914 /* If PHY enable link is configured and configuration has not changed,
1915 * there's nothing to do
1916 */
1917 if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1918 ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
1919 goto done;
1920
1921 /* Use PHY topology as baseline for configuration */
1922 memset(pcaps, 0, sizeof(*pcaps));
1923 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
1924 NULL);
1925 if (status) {
1926 dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
1927 vsi->vsi_num, ice_stat_str(status));
1928 err = -EIO;
1929 goto done;
1930 }
1931
1932 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1933 if (!cfg) {
1934 err = -ENOMEM;
1935 goto done;
1936 }
1937
1938 ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
1939
1940 /* Speed - If default override pending, use curr_user_phy_cfg set in
1941 * ice_init_phy_cfg_dflt_override.
1942 */
1943 if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
1944 vsi->back->state)) {
1945 cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
1946 cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
1947 } else {
1948 u64 phy_low = 0, phy_high = 0;
1949
1950 ice_update_phy_type(&phy_low, &phy_high,
1951 pi->phy.curr_user_speed_req);
1952 cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
1953 cfg->phy_type_high = pcaps->phy_type_high &
1954 cpu_to_le64(phy_high);
1955 }
1956
1957 /* Can't provide what was requested; use PHY capabilities */
1958 if (!cfg->phy_type_low && !cfg->phy_type_high) {
1959 cfg->phy_type_low = pcaps->phy_type_low;
1960 cfg->phy_type_high = pcaps->phy_type_high;
1961 }
1962
1963 /* FEC */
1964 ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
1965
1966 /* Can't provide what was requested; use PHY capabilities */
1967 if (cfg->link_fec_opt !=
1968 (cfg->link_fec_opt & pcaps->link_fec_options)) {
1969 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
1970 cfg->link_fec_opt = pcaps->link_fec_options;
1971 }
1972
1973 /* Flow Control - always supported; no need to check against
1974 * capabilities
1975 */
1976 ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
1977
1978 /* Enable link and link update */
1979 cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
1980
1981 status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1982 if (status) {
1983 dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
1984 vsi->vsi_num, ice_stat_str(status));
1985 err = -EIO;
1986 }
1987
1988 kfree(cfg);
1989 done:
1990 kfree(pcaps);
1991 return err;
1992 }
1993
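/* Annotation (not part of the upstream file): the FEC fallback test in
 * ice_configure_phy() above is a subset check on option bitmasks. A worked
 * example with illustrative bit values (the real bits are the
 * ICE_AQC_PHY_FEC_* masks):
 *
 *	requested = 0x0A;              // two FEC option bits requested
 *	supported = 0x02;              // media supports only one of them
 *	requested & supported == 0x02, which != requested
 *
 * so the request is not a subset of what the media supports, and the driver
 * falls back to pcaps->link_fec_options instead.
 */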
1994 /**
1995 * ice_check_media_subtask - Check for media
1996 * @pf: pointer to PF struct
1997 *
1998 * If media is available, then initialize the PHY user configuration if it has
1999 * not been done yet, and configure the PHY if the interface is up.
2000 */
2001 static void ice_check_media_subtask(struct ice_pf *pf)
2002 {
2003 struct ice_port_info *pi;
2004 struct ice_vsi *vsi;
2005 int err;
2006
2007 /* No need to check for media if it's already present */
2008 if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2009 return;
2010
2011 vsi = ice_get_main_vsi(pf);
2012 if (!vsi)
2013 return;
2014
2015 /* Refresh link info and check if media is present */
2016 pi = vsi->port_info;
2017 err = ice_update_link_info(pi);
2018 if (err)
2019 return;
2020
2021 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2022 if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
2023 ice_init_phy_user_cfg(pi);
2024
2025 /* PHY settings are reset on media insertion, reconfigure
2026 * PHY to preserve settings.
2027 */
2028 if (test_bit(__ICE_DOWN, vsi->state) &&
2029 test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2030 return;
2031
2032 err = ice_configure_phy(vsi);
2033 if (!err)
2034 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2035
2036 /* A Link Status Event will be generated; the event handler
2037 * will complete bringing the interface up
2038 */
2039 }
2040 }
2041
2042 /**
2043 * ice_service_task - manage and run subtasks
2044 * @work: pointer to work_struct contained by the PF struct
2045 */
2046 static void ice_service_task(struct work_struct *work)
2047 {
2048 struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2049 unsigned long start_time = jiffies;
2050
2051 /* subtasks */
2052
2053 /* process reset requests first */
2054 ice_reset_subtask(pf);
2055
2056 /* bail if a reset/recovery cycle is pending or rebuild failed */
2057 if (ice_is_reset_in_progress(pf->state) ||
2058 test_bit(__ICE_SUSPENDED, pf->state) ||
2059 test_bit(__ICE_NEEDS_RESTART, pf->state)) {
2060 ice_service_task_complete(pf);
2061 return;
2062 }
2063
2064 ice_clean_adminq_subtask(pf);
2065 ice_check_media_subtask(pf);
2066 ice_check_for_hang_subtask(pf);
2067 ice_sync_fltr_subtask(pf);
2068 ice_handle_mdd_event(pf);
2069 ice_watchdog_subtask(pf);
2070
2071 if (ice_is_safe_mode(pf)) {
2072 ice_service_task_complete(pf);
2073 return;
2074 }
2075
2076 ice_process_vflr_event(pf);
2077 ice_clean_mailboxq_subtask(pf);
2078 ice_sync_arfs_fltrs(pf);
2079 /* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
2080 ice_service_task_complete(pf);
2081
2082 /* If the tasks have taken longer than one service timer period
2083 * or there is more work to be done, reset the service timer to
2084 * schedule the service task now.
2085 */
2086 if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2087 test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
2088 test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
2089 test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2090 test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
2091 mod_timer(&pf->serv_tmr, jiffies);
2092 }
2093
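/* Annotation (not part of the upstream file): mod_timer(&pf->serv_tmr,
 * jiffies) at the end of ice_service_task() arms the service timer to expire
 * immediately, so the timer callback (ice_service_timer(), registered via
 * timer_setup() in ice_init_pf(), and assumed here to call
 * ice_service_task_schedule()) re-queues the work on the very next tick
 * whenever one of the *_EVENT_PENDING bits is still set:
 *
 *	if (test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
 *		mod_timer(&pf->serv_tmr, jiffies);   // fire immediately
 */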
2094 /**
2095 * ice_set_ctrlq_len - helper function to set controlq length
2096 * @hw: pointer to the HW instance
2097 */
2098 static void ice_set_ctrlq_len(struct ice_hw *hw)
2099 {
2100 hw->adminq.num_rq_entries = ICE_AQ_LEN;
2101 hw->adminq.num_sq_entries = ICE_AQ_LEN;
2102 hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2103 hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2104 hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2105 hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2106 hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2107 hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2108 }
2109
2110 /**
2111 * ice_schedule_reset - schedule a reset
2112 * @pf: board private structure
2113 * @reset: reset being requested
2114 */
2115 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2116 {
2117 struct device *dev = ice_pf_to_dev(pf);
2118
2119 /* bail out if earlier reset has failed */
2120 if (test_bit(__ICE_RESET_FAILED, pf->state)) {
2121 dev_dbg(dev, "earlier reset has failed\n");
2122 return -EIO;
2123 }
2124 /* bail if reset/recovery already in progress */
2125 if (ice_is_reset_in_progress(pf->state)) {
2126 dev_dbg(dev, "Reset already in progress\n");
2127 return -EBUSY;
2128 }
2129
2130 switch (reset) {
2131 case ICE_RESET_PFR:
2132 set_bit(__ICE_PFR_REQ, pf->state);
2133 break;
2134 case ICE_RESET_CORER:
2135 set_bit(__ICE_CORER_REQ, pf->state);
2136 break;
2137 case ICE_RESET_GLOBR:
2138 set_bit(__ICE_GLOBR_REQ, pf->state);
2139 break;
2140 default:
2141 return -EINVAL;
2142 }
2143
2144 ice_service_task_schedule(pf);
2145 return 0;
2146 }
2147
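/* Annotation (not part of the upstream file): a minimal sketch of how a
 * caller might use ice_schedule_reset(); anything beyond logging the result
 * is elided.
 *
 *	int err = ice_schedule_reset(pf, ICE_RESET_PFR);
 *
 *	if (err == -EBUSY)
 *		dev_dbg(ice_pf_to_dev(pf), "reset already in progress\n");
 *	else if (err)
 *		dev_dbg(ice_pf_to_dev(pf), "reset not scheduled: %d\n", err);
 */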
2148 /**
2149 * ice_irq_affinity_notify - Callback for affinity changes
2150 * @notify: context as to what irq was changed
2151 * @mask: the new affinity mask
2152 *
2153 * This is a callback function used by the irq_set_affinity_notifier function
2154 * so that we may register to receive changes to the irq affinity masks.
2155 */
2156 static void
2157 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2158 const cpumask_t *mask)
2159 {
2160 struct ice_q_vector *q_vector =
2161 container_of(notify, struct ice_q_vector, affinity_notify);
2162
2163 cpumask_copy(&q_vector->affinity_mask, mask);
2164 }
2165
2166 /**
2167 * ice_irq_affinity_release - Callback for affinity notifier release
2168 * @ref: internal core kernel usage
2169 *
2170 * This is a callback function used by the irq_set_affinity_notifier function
2171 * to inform the current notification subscriber that they will no longer
2172 * receive notifications.
2173 */
2174 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2175
2176 /**
2177 * ice_vsi_ena_irq - Enable IRQ for the given VSI
2178 * @vsi: the VSI being configured
2179 */
2180 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2181 {
2182 struct ice_hw *hw = &vsi->back->hw;
2183 int i;
2184
2185 ice_for_each_q_vector(vsi, i)
2186 ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2187
2188 ice_flush(hw);
2189 return 0;
2190 }
2191
2192 /**
2193 * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2194 * @vsi: the VSI being configured
2195 * @basename: name for the vector
2196 */
2197 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2198 {
2199 int q_vectors = vsi->num_q_vectors;
2200 struct ice_pf *pf = vsi->back;
2201 int base = vsi->base_vector;
2202 struct device *dev;
2203 int rx_int_idx = 0;
2204 int tx_int_idx = 0;
2205 int vector, err;
2206 int irq_num;
2207
2208 dev = ice_pf_to_dev(pf);
2209 for (vector = 0; vector < q_vectors; vector++) {
2210 struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2211
2212 irq_num = pf->msix_entries[base + vector].vector;
2213
2214 if (q_vector->tx.ring && q_vector->rx.ring) {
2215 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2216 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2217 tx_int_idx++;
2218 } else if (q_vector->rx.ring) {
2219 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2220 "%s-%s-%d", basename, "rx", rx_int_idx++);
2221 } else if (q_vector->tx.ring) {
2222 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2223 "%s-%s-%d", basename, "tx", tx_int_idx++);
2224 } else {
2225 /* skip this unused q_vector */
2226 continue;
2227 }
2228 err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
2229 q_vector->name, q_vector);
2230 if (err) {
2231 netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2232 err);
2233 goto free_q_irqs;
2234 }
2235
2236 /* register for affinity change notifications */
2237 if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2238 struct irq_affinity_notify *affinity_notify;
2239
2240 affinity_notify = &q_vector->affinity_notify;
2241 affinity_notify->notify = ice_irq_affinity_notify;
2242 affinity_notify->release = ice_irq_affinity_release;
2243 irq_set_affinity_notifier(irq_num, affinity_notify);
2244 }
2245
2246 /* assign the mask for this irq */
2247 irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2248 }
2249
2250 vsi->irqs_ready = true;
2251 return 0;
2252
2253 free_q_irqs:
2254 while (vector) {
2255 vector--;
2256 irq_num = pf->msix_entries[base + vector].vector;
2257 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2258 irq_set_affinity_notifier(irq_num, NULL);
2259 irq_set_affinity_hint(irq_num, NULL);
2260 devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2261 }
2262 return err;
2263 }
2264
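/* Annotation (not part of the upstream file): with a basename such as
 * "ice-eth0" (illustrative; the actual basename is built by the caller from
 * the driver and netdev names), the snprintf() calls above yield the vector
 * names visible in /proc/interrupts:
 *
 *	ice-eth0-TxRx-0
 *	ice-eth0-TxRx-1
 *	...
 *
 * Rx-only or Tx-only vectors would be named "...-rx-N" / "...-tx-N" instead.
 */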
2265 /**
2266 * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2267 * @vsi: VSI to setup Tx rings used by XDP
2268 *
2269 * Return 0 on success and negative value on error
2270 */
2271 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2272 {
2273 struct device *dev = ice_pf_to_dev(vsi->back);
2274 int i;
2275
2276 for (i = 0; i < vsi->num_xdp_txq; i++) {
2277 u16 xdp_q_idx = vsi->alloc_txq + i;
2278 struct ice_ring *xdp_ring;
2279
2280 xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2281
2282 if (!xdp_ring)
2283 goto free_xdp_rings;
2284
2285 xdp_ring->q_index = xdp_q_idx;
2286 xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2287 xdp_ring->ring_active = false;
2288 xdp_ring->vsi = vsi;
2289 xdp_ring->netdev = NULL;
2290 xdp_ring->dev = dev;
2291 xdp_ring->count = vsi->num_tx_desc;
2292 WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2293 if (ice_setup_tx_ring(xdp_ring))
2294 goto free_xdp_rings;
2295 ice_set_ring_xdp(xdp_ring);
2296 xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2297 }
2298
2299 return 0;
2300
2301 free_xdp_rings:
2302 for (; i >= 0; i--)
2303 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2304 ice_free_tx_ring(vsi->xdp_rings[i]);
2305 return -ENOMEM;
2306 }
2307
2308 /**
2309 * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2310 * @vsi: VSI to set the bpf prog on
2311 * @prog: the bpf prog pointer
2312 */
2313 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2314 {
2315 struct bpf_prog *old_prog;
2316 int i;
2317
2318 old_prog = xchg(&vsi->xdp_prog, prog);
2319 if (old_prog)
2320 bpf_prog_put(old_prog);
2321
2322 ice_for_each_rxq(vsi, i)
2323 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2324 }
2325
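/* Annotation (not part of the upstream file): the xchg() above swaps the
 * program pointer atomically, so a datapath reader sees either the old or
 * the new program, never a torn pointer. A minimal sketch of the reader
 * side (the exact hot path lives in ice_txrx.c and is assumed here):
 *
 *	struct bpf_prog *xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 *
 *	if (xdp_prog)
 *		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 */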
2326 /**
2327 * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2328 * @vsi: VSI to bring up Tx rings used by XDP
2329 * @prog: bpf program that will be assigned to VSI
2330 *
2331 * Return 0 on success and negative value on error
2332 */
2333 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2334 {
2335 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2336 int xdp_rings_rem = vsi->num_xdp_txq;
2337 struct ice_pf *pf = vsi->back;
2338 struct ice_qs_cfg xdp_qs_cfg = {
2339 .qs_mutex = &pf->avail_q_mutex,
2340 .pf_map = pf->avail_txqs,
2341 .pf_map_size = pf->max_pf_txqs,
2342 .q_count = vsi->num_xdp_txq,
2343 .scatter_count = ICE_MAX_SCATTER_TXQS,
2344 .vsi_map = vsi->txq_map,
2345 .vsi_map_offset = vsi->alloc_txq,
2346 .mapping_mode = ICE_VSI_MAP_CONTIG
2347 };
2348 enum ice_status status;
2349 struct device *dev;
2350 int i, v_idx;
2351
2352 dev = ice_pf_to_dev(pf);
2353 vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2354 sizeof(*vsi->xdp_rings), GFP_KERNEL);
2355 if (!vsi->xdp_rings)
2356 return -ENOMEM;
2357
2358 vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2359 if (__ice_vsi_get_qs(&xdp_qs_cfg))
2360 goto err_map_xdp;
2361
2362 if (ice_xdp_alloc_setup_rings(vsi))
2363 goto clear_xdp_rings;
2364
2365 /* follow the logic from ice_vsi_map_rings_to_vectors */
2366 ice_for_each_q_vector(vsi, v_idx) {
2367 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2368 int xdp_rings_per_v, q_id, q_base;
2369
2370 xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2371 vsi->num_q_vectors - v_idx);
2372 q_base = vsi->num_xdp_txq - xdp_rings_rem;
2373
2374 for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2375 struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2376
2377 xdp_ring->q_vector = q_vector;
2378 xdp_ring->next = q_vector->tx.ring;
2379 q_vector->tx.ring = xdp_ring;
2380 }
2381 xdp_rings_rem -= xdp_rings_per_v;
2382 }
2383
2384 /* omit the scheduler update if in reset path; XDP queues will be
2385 * taken into account at the end of ice_vsi_rebuild, where
2386 * ice_cfg_vsi_lan is being called
2387 */
2388 if (ice_is_reset_in_progress(pf->state))
2389 return 0;
2390
2391 /* tell the Tx scheduler that right now we have
2392 * additional queues
2393 */
2394 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2395 max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2396
2397 status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2398 max_txqs);
2399 if (status) {
2400 dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2401 ice_stat_str(status));
2402 goto clear_xdp_rings;
2403 }
2404
2405 /* assign the prog only when it's not already present on VSI;
2406 * this flow is exercised by both the ethtool -L and ndo_bpf paths;
2407 * VSI rebuild that happens under ethtool -L can expose us to
2408 * the bpf_prog refcount issues as we would be swapping same
2409 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2410 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2411 * this is not harmful as dev_xdp_install bumps the refcount
2412 * before calling the op exposed by the driver;
2413 */
2414 if (!ice_is_xdp_ena_vsi(vsi))
2415 ice_vsi_assign_bpf_prog(vsi, prog);
2416
2417 return 0;
2418 clear_xdp_rings:
2419 for (i = 0; i < vsi->num_xdp_txq; i++)
2420 if (vsi->xdp_rings[i]) {
2421 kfree_rcu(vsi->xdp_rings[i], rcu);
2422 vsi->xdp_rings[i] = NULL;
2423 }
2424
2425 err_map_xdp:
2426 mutex_lock(&pf->avail_q_mutex);
2427 for (i = 0; i < vsi->num_xdp_txq; i++) {
2428 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2429 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2430 }
2431 mutex_unlock(&pf->avail_q_mutex);
2432
2433 devm_kfree(dev, vsi->xdp_rings);
2434 return -ENOMEM;
2435 }
2436
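/* Annotation (not part of the upstream file): the DIV_ROUND_UP() loop in
 * ice_prepare_xdp_rings() spreads XDP rings as evenly as possible across the
 * q_vectors. Worked example with 8 XDP rings and 3 q_vectors:
 *
 *	v_idx 0: DIV_ROUND_UP(8, 3) = 3 rings (q_id 0..2), 5 remain
 *	v_idx 1: DIV_ROUND_UP(5, 2) = 3 rings (q_id 3..5), 2 remain
 *	v_idx 2: DIV_ROUND_UP(2, 1) = 2 rings (q_id 6..7), 0 remain
 */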
2437 /**
2438 * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2439 * @vsi: VSI to remove XDP rings
2440 *
2441 * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2442 * resources
2443 */
2444 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2445 {
2446 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2447 struct ice_pf *pf = vsi->back;
2448 int i, v_idx;
2449
2450 /* q_vectors are freed in reset path so there's no point in detaching
2451 * rings; if the rebuild was triggered by something other than a reset,
2452 * the reset bits in pf->state won't be set, so additionally check the
2453 * first q_vector against NULL
2454 */
2455 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2456 goto free_qmap;
2457
2458 ice_for_each_q_vector(vsi, v_idx) {
2459 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2460 struct ice_ring *ring;
2461
2462 ice_for_each_ring(ring, q_vector->tx)
2463 if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2464 break;
2465
2466 /* restore the value of last node prior to XDP setup */
2467 q_vector->tx.ring = ring;
2468 }
2469
2470 free_qmap:
2471 mutex_lock(&pf->avail_q_mutex);
2472 for (i = 0; i < vsi->num_xdp_txq; i++) {
2473 clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2474 vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2475 }
2476 mutex_unlock(&pf->avail_q_mutex);
2477
2478 for (i = 0; i < vsi->num_xdp_txq; i++)
2479 if (vsi->xdp_rings[i]) {
2480 if (vsi->xdp_rings[i]->desc) {
2481 synchronize_rcu();
2482 ice_free_tx_ring(vsi->xdp_rings[i]);
2483 }
2484 kfree_rcu(vsi->xdp_rings[i], rcu);
2485 vsi->xdp_rings[i] = NULL;
2486 }
2487
2488 devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2489 vsi->xdp_rings = NULL;
2490
2491 if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2492 return 0;
2493
2494 ice_vsi_assign_bpf_prog(vsi, NULL);
2495
2496 /* notify Tx scheduler that we destroyed XDP queues and bring
2497 * back the old number of child nodes
2498 */
2499 for (i = 0; i < vsi->tc_cfg.numtc; i++)
2500 max_txqs[i] = vsi->num_txq;
2501
2502 /* change number of XDP Tx queues to 0 */
2503 vsi->num_xdp_txq = 0;
2504
2505 return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2506 max_txqs);
2507 }
2508
2509 /**
2510 * ice_xdp_setup_prog - Add or remove XDP eBPF program
2511 * @vsi: VSI to setup XDP for
2512 * @prog: XDP program
2513 * @extack: netlink extended ack
2514 */
2515 static int
2516 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2517 struct netlink_ext_ack *extack)
2518 {
2519 int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2520 bool if_running = netif_running(vsi->netdev);
2521 int ret = 0, xdp_ring_err = 0;
2522
2523 if (frame_size > vsi->rx_buf_len) {
2524 NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2525 return -EOPNOTSUPP;
2526 }
2527
2528 /* need to stop netdev while setting up the program for Rx rings */
2529 if (if_running && !test_and_set_bit(__ICE_DOWN, vsi->state)) {
2530 ret = ice_down(vsi);
2531 if (ret) {
2532 NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2533 return ret;
2534 }
2535 }
2536
2537 if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2538 vsi->num_xdp_txq = vsi->alloc_rxq;
2539 xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2540 if (xdp_ring_err)
2541 NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2542 } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2543 xdp_ring_err = ice_destroy_xdp_rings(vsi);
2544 if (xdp_ring_err)
2545 NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2546 } else {
2547 /* safe to call even when prog == vsi->xdp_prog as
2548 * dev_xdp_install in net/core/dev.c incremented prog's
2549 * refcount so corresponding bpf_prog_put won't cause
2550 * underflow
2551 */
2552 ice_vsi_assign_bpf_prog(vsi, prog);
2553 }
2554
2555 if (if_running)
2556 ret = ice_up(vsi);
2557
2558 if (!ret && prog && vsi->xsk_pools) {
2559 int i;
2560
2561 ice_for_each_rxq(vsi, i) {
2562 struct ice_ring *rx_ring = vsi->rx_rings[i];
2563
2564 if (rx_ring->xsk_pool)
2565 napi_schedule(&rx_ring->q_vector->napi);
2566 }
2567 }
2568
2569 return (ret || xdp_ring_err) ? -ENOMEM : 0;
2570 }
2571
2572 /**
2573 * ice_xdp_safe_mode - XDP handler for safe mode
2574 * @dev: netdevice
2575 * @xdp: XDP command
2576 */
2577 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2578 struct netdev_bpf *xdp)
2579 {
2580 NL_SET_ERR_MSG_MOD(xdp->extack,
2581 "Please provide working DDP firmware package in order to use XDP\n"
2582 "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2583 return -EOPNOTSUPP;
2584 }
2585
2586 /**
2587 * ice_xdp - implements XDP handler
2588 * @dev: netdevice
2589 * @xdp: XDP command
2590 */
2591 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2592 {
2593 struct ice_netdev_priv *np = netdev_priv(dev);
2594 struct ice_vsi *vsi = np->vsi;
2595
2596 if (vsi->type != ICE_VSI_PF) {
2597 NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2598 return -EINVAL;
2599 }
2600
2601 switch (xdp->command) {
2602 case XDP_SETUP_PROG:
2603 return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2604 case XDP_SETUP_XSK_POOL:
2605 return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2606 xdp->xsk.queue_id);
2607 default:
2608 return -EINVAL;
2609 }
2610 }
2611
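/* Annotation (not part of the upstream file): ice_xdp() is reached from
 * userspace through dev_xdp_install(). A typical way to exercise
 * XDP_SETUP_PROG in native (driver) mode, assuming an interface eth0 and an
 * object file xdp_prog.o (both illustrative):
 *
 *	ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
 *	ip link set dev eth0 xdpdrv off
 *
 * In safe mode the same request lands in ice_xdp_safe_mode() and fails with
 * -EOPNOTSUPP.
 */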
2612 /**
2613 * ice_ena_misc_vector - enable the non-queue interrupts
2614 * @pf: board private structure
2615 */
2616 static void ice_ena_misc_vector(struct ice_pf *pf)
2617 {
2618 struct ice_hw *hw = &pf->hw;
2619 u32 val;
2620
2621 /* Disable anti-spoof detection interrupt to prevent spurious event
2622 * interrupts during a function reset. Anti-spoof functionality is
2623 * still supported.
2624 */
2625 val = rd32(hw, GL_MDCK_TX_TDPU);
2626 val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2627 wr32(hw, GL_MDCK_TX_TDPU, val);
2628
2629 /* clear things first */
2630 wr32(hw, PFINT_OICR_ENA, 0); /* disable all */
2631 rd32(hw, PFINT_OICR); /* read to clear */
2632
2633 val = (PFINT_OICR_ECC_ERR_M |
2634 PFINT_OICR_MAL_DETECT_M |
2635 PFINT_OICR_GRST_M |
2636 PFINT_OICR_PCI_EXCEPTION_M |
2637 PFINT_OICR_VFLR_M |
2638 PFINT_OICR_HMC_ERR_M |
2639 PFINT_OICR_PE_CRITERR_M);
2640
2641 wr32(hw, PFINT_OICR_ENA, val);
2642
2643 /* SW_ITR_IDX = 0, but don't change INTENA */
2644 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2645 GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2646 }
2647
2648 /**
2649 * ice_misc_intr - misc interrupt handler
2650 * @irq: interrupt number
2651 * @data: pointer to a q_vector
2652 */
2653 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2654 {
2655 struct ice_pf *pf = (struct ice_pf *)data;
2656 struct ice_hw *hw = &pf->hw;
2657 irqreturn_t ret = IRQ_NONE;
2658 struct device *dev;
2659 u32 oicr, ena_mask;
2660
2661 dev = ice_pf_to_dev(pf);
2662 set_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
2663 set_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2664
2665 oicr = rd32(hw, PFINT_OICR);
2666 ena_mask = rd32(hw, PFINT_OICR_ENA);
2667
2668 if (oicr & PFINT_OICR_SWINT_M) {
2669 ena_mask &= ~PFINT_OICR_SWINT_M;
2670 pf->sw_int_count++;
2671 }
2672
2673 if (oicr & PFINT_OICR_MAL_DETECT_M) {
2674 ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2675 set_bit(__ICE_MDD_EVENT_PENDING, pf->state);
2676 }
2677 if (oicr & PFINT_OICR_VFLR_M) {
2678 /* disable any further VFLR event notifications */
2679 if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
2680 u32 reg = rd32(hw, PFINT_OICR_ENA);
2681
2682 reg &= ~PFINT_OICR_VFLR_M;
2683 wr32(hw, PFINT_OICR_ENA, reg);
2684 } else {
2685 ena_mask &= ~PFINT_OICR_VFLR_M;
2686 set_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
2687 }
2688 }
2689
2690 if (oicr & PFINT_OICR_GRST_M) {
2691 u32 reset;
2692
2693 /* we have a reset warning */
2694 ena_mask &= ~PFINT_OICR_GRST_M;
2695 reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2696 GLGEN_RSTAT_RESET_TYPE_S;
2697
2698 if (reset == ICE_RESET_CORER)
2699 pf->corer_count++;
2700 else if (reset == ICE_RESET_GLOBR)
2701 pf->globr_count++;
2702 else if (reset == ICE_RESET_EMPR)
2703 pf->empr_count++;
2704 else
2705 dev_dbg(dev, "Invalid reset type %d\n", reset);
2706
2707 /* If a reset cycle isn't already in progress, we set a bit in
2708 * pf->state so that the service task can start a reset/rebuild.
2709 * We also make note of which reset happened so that peer
2710 * devices/drivers can be informed.
2711 */
2712 if (!test_and_set_bit(__ICE_RESET_OICR_RECV, pf->state)) {
2713 if (reset == ICE_RESET_CORER)
2714 set_bit(__ICE_CORER_RECV, pf->state);
2715 else if (reset == ICE_RESET_GLOBR)
2716 set_bit(__ICE_GLOBR_RECV, pf->state);
2717 else
2718 set_bit(__ICE_EMPR_RECV, pf->state);
2719
2720 /* There are a couple of different bits at play here.
2721 * hw->reset_ongoing indicates whether the hardware is
2722 * in reset. This is set to true when a reset interrupt
2723 * is received and set back to false after the driver
2724 * has determined that the hardware is out of reset.
2725 *
2726 * __ICE_RESET_OICR_RECV in pf->state indicates
2727 * that a post reset rebuild is required before the
2728 * driver is operational again. This is set above.
2729 *
2730 * As this is the start of the reset/rebuild cycle, set
2731 * both to indicate that.
2732 */
2733 hw->reset_ongoing = true;
2734 }
2735 }
2736
2737 if (oicr & PFINT_OICR_HMC_ERR_M) {
2738 ena_mask &= ~PFINT_OICR_HMC_ERR_M;
2739 dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
2740 rd32(hw, PFHMC_ERRORINFO),
2741 rd32(hw, PFHMC_ERRORDATA));
2742 }
2743
2744 /* Report any remaining unexpected interrupts */
2745 oicr &= ena_mask;
2746 if (oicr) {
2747 dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2748 /* If a critical error is pending there is no choice but to
2749 * reset the device.
2750 */
2751 if (oicr & (PFINT_OICR_PE_CRITERR_M |
2752 PFINT_OICR_PCI_EXCEPTION_M |
2753 PFINT_OICR_ECC_ERR_M)) {
2754 set_bit(__ICE_PFR_REQ, pf->state);
2755 ice_service_task_schedule(pf);
2756 }
2757 }
2758 ret = IRQ_HANDLED;
2759
2760 ice_service_task_schedule(pf);
2761 ice_irq_dynamic_ena(hw, NULL, NULL);
2762
2763 return ret;
2764 }
2765
2766 /**
2767 * ice_dis_ctrlq_interrupts - disable control queue interrupts
2768 * @hw: pointer to HW structure
2769 */
2770 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2771 {
2772 /* disable Admin queue Interrupt causes */
2773 wr32(hw, PFINT_FW_CTL,
2774 rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2775
2776 /* disable Mailbox queue Interrupt causes */
2777 wr32(hw, PFINT_MBX_CTL,
2778 rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2779
2780 /* disable Control queue Interrupt causes */
2781 wr32(hw, PFINT_OICR_CTL,
2782 rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2783
2784 ice_flush(hw);
2785 }
2786
2787 /**
2788 * ice_free_irq_msix_misc - Unroll misc vector setup
2789 * @pf: board private structure
2790 */
2791 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2792 {
2793 struct ice_hw *hw = &pf->hw;
2794
2795 ice_dis_ctrlq_interrupts(hw);
2796
2797 /* disable OICR interrupt */
2798 wr32(hw, PFINT_OICR_ENA, 0);
2799 ice_flush(hw);
2800
2801 if (pf->msix_entries) {
2802 synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2803 devm_free_irq(ice_pf_to_dev(pf),
2804 pf->msix_entries[pf->oicr_idx].vector, pf);
2805 }
2806
2807 pf->num_avail_sw_msix += 1;
2808 ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2809 }
2810
2811 /**
2812 * ice_ena_ctrlq_interrupts - enable control queue interrupts
2813 * @hw: pointer to HW structure
2814 * @reg_idx: HW vector index to associate the control queue interrupts with
2815 */
2816 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2817 {
2818 u32 val;
2819
2820 val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2821 PFINT_OICR_CTL_CAUSE_ENA_M);
2822 wr32(hw, PFINT_OICR_CTL, val);
2823
2824 /* enable Admin queue Interrupt causes */
2825 val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2826 PFINT_FW_CTL_CAUSE_ENA_M);
2827 wr32(hw, PFINT_FW_CTL, val);
2828
2829 /* enable Mailbox queue Interrupt causes */
2830 val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2831 PFINT_MBX_CTL_CAUSE_ENA_M);
2832 wr32(hw, PFINT_MBX_CTL, val);
2833
2834 ice_flush(hw);
2835 }
2836
2837 /**
2838 * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
2839 * @pf: board private structure
2840 *
2841 * This sets up the handler for MSIX 0, which is used to manage the
2842 * non-queue interrupts, e.g. AdminQ and errors. This is not used
2843 * when in MSI or Legacy interrupt mode.
2844 */
2845 static int ice_req_irq_msix_misc(struct ice_pf *pf)
2846 {
2847 struct device *dev = ice_pf_to_dev(pf);
2848 struct ice_hw *hw = &pf->hw;
2849 int oicr_idx, err = 0;
2850
2851 if (!pf->int_name[0])
2852 snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
2853 dev_driver_string(dev), dev_name(dev));
2854
2855 /* Do not request IRQ but do enable OICR interrupt since settings are
2856 * lost during reset. Note that this function is called only during
2857 * rebuild path and not while reset is in progress.
2858 */
2859 if (ice_is_reset_in_progress(pf->state))
2860 goto skip_req_irq;
2861
2862 /* reserve one vector in irq_tracker for misc interrupts */
2863 oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2864 if (oicr_idx < 0)
2865 return oicr_idx;
2866
2867 pf->num_avail_sw_msix -= 1;
2868 pf->oicr_idx = (u16)oicr_idx;
2869
2870 err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
2871 ice_misc_intr, 0, pf->int_name, pf);
2872 if (err) {
2873 dev_err(dev, "devm_request_irq for %s failed: %d\n",
2874 pf->int_name, err);
2875 ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
2876 pf->num_avail_sw_msix += 1;
2877 return err;
2878 }
2879
2880 skip_req_irq:
2881 ice_ena_misc_vector(pf);
2882
2883 ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
2884 wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
2885 ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
2886
2887 ice_flush(hw);
2888 ice_irq_dynamic_ena(hw, NULL, NULL);
2889
2890 return 0;
2891 }
2892
2893 /**
2894 * ice_napi_add - register NAPI handler for the VSI
2895 * @vsi: VSI for which NAPI handler is to be registered
2896 *
2897 * This function is only called in the driver's load path. Registering the NAPI
2898 * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
2899 * reset/rebuild, etc.)
2900 */
2901 static void ice_napi_add(struct ice_vsi *vsi)
2902 {
2903 int v_idx;
2904
2905 if (!vsi->netdev)
2906 return;
2907
2908 ice_for_each_q_vector(vsi, v_idx)
2909 netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
2910 ice_napi_poll, NAPI_POLL_WEIGHT);
2911 }
2912
2913 /**
2914 * ice_set_ops - set netdev and ethtool ops for the given netdev
2915 * @netdev: netdev instance
2916 */
2917 static void ice_set_ops(struct net_device *netdev)
2918 {
2919 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2920
2921 if (ice_is_safe_mode(pf)) {
2922 netdev->netdev_ops = &ice_netdev_safe_mode_ops;
2923 ice_set_ethtool_safe_mode_ops(netdev);
2924 return;
2925 }
2926
2927 netdev->netdev_ops = &ice_netdev_ops;
2928 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
2929 ice_set_ethtool_ops(netdev);
2930 }
2931
2932 /**
2933 * ice_set_netdev_features - set features for the given netdev
2934 * @netdev: netdev instance
2935 */
2936 static void ice_set_netdev_features(struct net_device *netdev)
2937 {
2938 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2939 netdev_features_t csumo_features;
2940 netdev_features_t vlano_features;
2941 netdev_features_t dflt_features;
2942 netdev_features_t tso_features;
2943
2944 if (ice_is_safe_mode(pf)) {
2945 /* safe mode */
2946 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
2947 netdev->hw_features = netdev->features;
2948 return;
2949 }
2950
2951 dflt_features = NETIF_F_SG |
2952 NETIF_F_HIGHDMA |
2953 NETIF_F_NTUPLE |
2954 NETIF_F_RXHASH;
2955
2956 csumo_features = NETIF_F_RXCSUM |
2957 NETIF_F_IP_CSUM |
2958 NETIF_F_SCTP_CRC |
2959 NETIF_F_IPV6_CSUM;
2960
2961 vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
2962 NETIF_F_HW_VLAN_CTAG_TX |
2963 NETIF_F_HW_VLAN_CTAG_RX;
2964
2965 tso_features = NETIF_F_TSO |
2966 NETIF_F_TSO_ECN |
2967 NETIF_F_TSO6 |
2968 NETIF_F_GSO_GRE |
2969 NETIF_F_GSO_UDP_TUNNEL |
2970 NETIF_F_GSO_GRE_CSUM |
2971 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2972 NETIF_F_GSO_PARTIAL |
2973 NETIF_F_GSO_IPXIP4 |
2974 NETIF_F_GSO_IPXIP6 |
2975 NETIF_F_GSO_UDP_L4;
2976
2977 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
2978 NETIF_F_GSO_GRE_CSUM;
2979 /* set features that user can change */
2980 netdev->hw_features = dflt_features | csumo_features |
2981 vlano_features | tso_features;
2982
2983 /* add support for HW_CSUM on packets with MPLS header */
2984 netdev->mpls_features = NETIF_F_HW_CSUM;
2985
2986 /* enable features */
2987 netdev->features |= netdev->hw_features;
2988 /* encap and VLAN devices inherit default, csumo and tso features */
2989 netdev->hw_enc_features |= dflt_features | csumo_features |
2990 tso_features;
2991 netdev->vlan_features |= dflt_features | csumo_features |
2992 tso_features;
2993 }
2994
2995 /**
2996 * ice_cfg_netdev - Allocate, configure and register a netdev
2997 * @vsi: the VSI associated with the new netdev
2998 *
2999 * Returns 0 on success, negative value on failure
3000 */
3001 static int ice_cfg_netdev(struct ice_vsi *vsi)
3002 {
3003 struct ice_pf *pf = vsi->back;
3004 struct ice_netdev_priv *np;
3005 struct net_device *netdev;
3006 u8 mac_addr[ETH_ALEN];
3007 int err;
3008
3009 err = ice_devlink_create_port(vsi);
3010 if (err)
3011 return err;
3012
3013 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3014 vsi->alloc_rxq);
3015 if (!netdev) {
3016 err = -ENOMEM;
3017 goto err_destroy_devlink_port;
3018 }
3019
3020 vsi->netdev = netdev;
3021 np = netdev_priv(netdev);
3022 np->vsi = vsi;
3023
3024 ice_set_netdev_features(netdev);
3025
3026 ice_set_ops(netdev);
3027
3028 if (vsi->type == ICE_VSI_PF) {
3029 SET_NETDEV_DEV(netdev, ice_pf_to_dev(pf));
3030 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3031 ether_addr_copy(netdev->dev_addr, mac_addr);
3032 ether_addr_copy(netdev->perm_addr, mac_addr);
3033 }
3034
3035 netdev->priv_flags |= IFF_UNICAST_FLT;
3036
3037 /* Setup netdev TC information */
3038 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3039
3040 /* setup watchdog timeout value to be 5 seconds */
3041 netdev->watchdog_timeo = 5 * HZ;
3042
3043 netdev->min_mtu = ETH_MIN_MTU;
3044 netdev->max_mtu = ICE_MAX_MTU;
3045
3046 err = register_netdev(vsi->netdev);
3047 if (err)
3048 goto err_free_netdev;
3049
3050 devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
3051
3052 netif_carrier_off(vsi->netdev);
3053
3054 /* make sure transmit queues start off as stopped */
3055 netif_tx_stop_all_queues(vsi->netdev);
3056
3057 return 0;
3058
3059 err_free_netdev:
3060 free_netdev(vsi->netdev);
3061 vsi->netdev = NULL;
3062 err_destroy_devlink_port:
3063 ice_devlink_destroy_port(vsi);
3064 return err;
3065 }
3066
3067 /**
3068 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3069 * @lut: Lookup table
3070 * @rss_table_size: Lookup table size
3071 * @rss_size: Range of queue number for hashing
3072 */
3073 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3074 {
3075 u16 i;
3076
3077 for (i = 0; i < rss_table_size; i++)
3078 lut[i] = i % rss_size;
3079 }
3080
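/* Annotation (not part of the upstream file): worked example of the default
 * LUT fill above. With rss_table_size = 512 and rss_size = 16, the table
 * becomes
 *
 *	lut[0..15]  = 0, 1, ..., 15
 *	lut[16..31] = 0, 1, ..., 15
 *	...
 *
 * i.e. hash buckets are striped round-robin across the 16 Rx queues.
 */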
3081 /**
3082 * ice_pf_vsi_setup - Set up a PF VSI
3083 * @pf: board private structure
3084 * @pi: pointer to the port_info instance
3085 *
3086 * Returns pointer to the successfully allocated VSI software struct
3087 * on success, otherwise returns NULL on failure.
3088 */
3089 static struct ice_vsi *
3090 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3091 {
3092 return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3093 }
3094
3095 /**
3096 * ice_ctrl_vsi_setup - Set up a control VSI
3097 * @pf: board private structure
3098 * @pi: pointer to the port_info instance
3099 *
3100 * Returns pointer to the successfully allocated VSI software struct
3101 * on success, otherwise returns NULL on failure.
3102 */
3103 static struct ice_vsi *
3104 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3105 {
3106 return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3107 }
3108
3109 /**
3110 * ice_lb_vsi_setup - Set up a loopback VSI
3111 * @pf: board private structure
3112 * @pi: pointer to the port_info instance
3113 *
3114 * Returns pointer to the successfully allocated VSI software struct
3115 * on success, otherwise returns NULL on failure.
3116 */
3117 struct ice_vsi *
3118 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3119 {
3120 return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3121 }
3122
3123 /**
3124 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3125 * @netdev: network interface to be adjusted
3126 * @proto: unused protocol
3127 * @vid: VLAN ID to be added
3128 *
3129 * net_device_ops implementation for adding VLAN IDs
3130 */
3131 static int
3132 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3133 u16 vid)
3134 {
3135 struct ice_netdev_priv *np = netdev_priv(netdev);
3136 struct ice_vsi *vsi = np->vsi;
3137 int ret;
3138
3139 if (vid >= VLAN_N_VID) {
3140 netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
3141 vid, VLAN_N_VID);
3142 return -EINVAL;
3143 }
3144
3145 if (vsi->info.pvid)
3146 return -EINVAL;
3147
3148 /* VLAN 0 is added by default during load/reset */
3149 if (!vid)
3150 return 0;
3151
3152 /* Enable VLAN pruning when a VLAN other than 0 is added */
3153 if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3154 ret = ice_cfg_vlan_pruning(vsi, true, false);
3155 if (ret)
3156 return ret;
3157 }
3158
3159 /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3160 * packets aren't pruned by the device's internal switch on Rx
3161 */
3162 ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3163 if (!ret) {
3164 vsi->vlan_ena = true;
3165 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3166 }
3167
3168 return ret;
3169 }
3170
3171 /**
3172 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3173 * @netdev: network interface to be adjusted
3174 * @proto: unused protocol
3175 * @vid: VLAN ID to be removed
3176 *
3177 * net_device_ops implementation for removing VLAN IDs
3178 */
3179 static int
3180 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3181 u16 vid)
3182 {
3183 struct ice_netdev_priv *np = netdev_priv(netdev);
3184 struct ice_vsi *vsi = np->vsi;
3185 int ret;
3186
3187 if (vsi->info.pvid)
3188 return -EINVAL;
3189
3190 /* don't allow removal of VLAN 0 */
3191 if (!vid)
3192 return 0;
3193
3194 /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3195 * information
3196 */
3197 ret = ice_vsi_kill_vlan(vsi, vid);
3198 if (ret)
3199 return ret;
3200
3201 /* Disable pruning when VLAN 0 is the only VLAN rule */
3202 if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3203 ret = ice_cfg_vlan_pruning(vsi, false, false);
3204
3205 vsi->vlan_ena = false;
3206 set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
3207 return ret;
3208 }
3209
3210 /**
3211 * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3212 * @pf: board private structure
3213 *
3214 * Returns 0 on success, negative value on failure
3215 */
3216 static int ice_setup_pf_sw(struct ice_pf *pf)
3217 {
3218 struct ice_vsi *vsi;
3219 int status = 0;
3220
3221 if (ice_is_reset_in_progress(pf->state))
3222 return -EBUSY;
3223
3224 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3225 if (!vsi)
3226 return -ENOMEM;
3227
3228 status = ice_cfg_netdev(vsi);
3229 if (status) {
3230 status = -ENODEV;
3231 goto unroll_vsi_setup;
3232 }
3233 /* netdev has to be configured before setting frame size */
3234 ice_vsi_cfg_frame_size(vsi);
3235
3236 /* Setup DCB netlink interface */
3237 ice_dcbnl_setup(vsi);
3238
3239 /* registering the NAPI handler requires both the queues and
3240 * netdev to be created, which are done in ice_pf_vsi_setup()
3241 * and ice_cfg_netdev() respectively
3242 */
3243 ice_napi_add(vsi);
3244
3245 status = ice_set_cpu_rx_rmap(vsi);
3246 if (status) {
3247 dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3248 vsi->vsi_num, status);
3249 status = -EINVAL;
3250 goto unroll_napi_add;
3251 }
3252 status = ice_init_mac_fltr(pf);
3253 if (status)
3254 goto free_cpu_rx_map;
3255
3256 return status;
3257
3258 free_cpu_rx_map:
3259 ice_free_cpu_rx_rmap(vsi);
3260
3261 unroll_napi_add:
3262 if (vsi) {
3263 ice_napi_del(vsi);
3264 if (vsi->netdev) {
3265 if (vsi->netdev->reg_state == NETREG_REGISTERED)
3266 unregister_netdev(vsi->netdev);
3267 free_netdev(vsi->netdev);
3268 vsi->netdev = NULL;
3269 }
3270 }
3271
3272 unroll_vsi_setup:
3273 ice_vsi_release(vsi);
3274 return status;
3275 }
3276
3277 /**
3278 * ice_get_avail_q_count - Get count of available (unused) queues
3279 * @pf_qmap: bitmap to count available queues from
3280 * @lock: pointer to a mutex that protects access to pf_qmap
3281 * @size: size of the bitmap
3282 */
3283 static u16
3284 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3285 {
3286 unsigned long bit;
3287 u16 count = 0;
3288
3289 mutex_lock(lock);
3290 for_each_clear_bit(bit, pf_qmap, size)
3291 count++;
3292 mutex_unlock(lock);
3293
3294 return count;
3295 }
3296
3297 /**
3298 * ice_get_avail_txq_count - Get count of available Tx queues
3299 * @pf: pointer to an ice_pf instance
3300 */
3301 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3302 {
3303 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3304 pf->max_pf_txqs);
3305 }
3306
3307 /**
3308 * ice_get_avail_rxq_count - Get count of available Rx queues
3309 * @pf: pointer to an ice_pf instance
3310 */
3311 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3312 {
3313 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3314 pf->max_pf_rxqs);
3315 }
3316
3317 /**
3318 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3319 * @pf: board private structure to initialize
3320 */
3321 static void ice_deinit_pf(struct ice_pf *pf)
3322 {
3323 ice_service_task_stop(pf);
3324 mutex_destroy(&pf->sw_mutex);
3325 mutex_destroy(&pf->tc_mutex);
3326 mutex_destroy(&pf->avail_q_mutex);
3327
3328 if (pf->avail_txqs) {
3329 bitmap_free(pf->avail_txqs);
3330 pf->avail_txqs = NULL;
3331 }
3332
3333 if (pf->avail_rxqs) {
3334 bitmap_free(pf->avail_rxqs);
3335 pf->avail_rxqs = NULL;
3336 }
3337 }
3338
3339 /**
3340 * ice_set_pf_caps - set PFs capability flags
3341 * @pf: pointer to the PF instance
3342 */
3343 static void ice_set_pf_caps(struct ice_pf *pf)
3344 {
3345 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3346
3347 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3348 if (func_caps->common_cap.dcb)
3349 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3350 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3351 if (func_caps->common_cap.sr_iov_1_1) {
3352 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3353 pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3354 ICE_MAX_VF_COUNT);
3355 }
3356 clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3357 if (func_caps->common_cap.rss_table_size)
3358 set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3359
3360 clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3361 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3362 u16 unused;
3363
3364 /* ctrl_vsi_idx will be set to a valid value when flow director
3365 * is setup by ice_init_fdir
3366 */
3367 pf->ctrl_vsi_idx = ICE_NO_VSI;
3368 set_bit(ICE_FLAG_FD_ENA, pf->flags);
3369 /* force guaranteed filter pool for PF */
3370 ice_alloc_fd_guar_item(&pf->hw, &unused,
3371 func_caps->fd_fltr_guar);
3372 /* force shared filter pool for PF */
3373 ice_alloc_fd_shrd_item(&pf->hw, &unused,
3374 func_caps->fd_fltr_best_effort);
3375 }
3376
3377 pf->max_pf_txqs = func_caps->common_cap.num_txq;
3378 pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3379 }
3380
3381 /**
3382 * ice_init_pf - Initialize general software structures (struct ice_pf)
3383 * @pf: board private structure to initialize
3384 */
3385 static int ice_init_pf(struct ice_pf *pf)
3386 {
3387 ice_set_pf_caps(pf);
3388
3389 mutex_init(&pf->sw_mutex);
3390 mutex_init(&pf->tc_mutex);
3391
3392 INIT_HLIST_HEAD(&pf->aq_wait_list);
3393 spin_lock_init(&pf->aq_wait_lock);
3394 init_waitqueue_head(&pf->aq_wait_queue);
3395
3396 /* setup service timer and periodic service task */
3397 timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3398 pf->serv_tmr_period = HZ;
3399 INIT_WORK(&pf->serv_task, ice_service_task);
3400 clear_bit(__ICE_SERVICE_SCHED, pf->state);
3401
3402 mutex_init(&pf->avail_q_mutex);
3403 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3404 if (!pf->avail_txqs)
3405 return -ENOMEM;
3406
3407 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3408 if (!pf->avail_rxqs) {
3409 bitmap_free(pf->avail_txqs);
3410 pf->avail_txqs = NULL;
3411 return -ENOMEM;
3412 }
3413
3414 return 0;
3415 }
3416
3417 /**
3418 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3419 * @pf: board private structure
3420 *
3421 * compute the number of MSIX vectors required (v_budget) and request from
3422 * the OS. Return the number of vectors reserved or negative on failure
3423 */
3424 static int ice_ena_msix_range(struct ice_pf *pf)
3425 {
3426 struct device *dev = ice_pf_to_dev(pf);
3427 int v_left, v_actual, v_budget = 0;
3428 int needed, err, i;
3429
3430 v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3431
3432 /* reserve one vector for miscellaneous handler */
3433 needed = 1;
3434 if (v_left < needed)
3435 goto no_hw_vecs_left_err;
3436 v_budget += needed;
3437 v_left -= needed;
3438
3439 /* reserve vectors for LAN traffic */
3440 needed = min_t(int, num_online_cpus(), v_left);
3441 if (v_left < needed)
3442 goto no_hw_vecs_left_err;
3443 pf->num_lan_msix = needed;
3444 v_budget += needed;
3445 v_left -= needed;
3446
3447 /* reserve one vector for flow director */
3448 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3449 needed = ICE_FDIR_MSIX;
3450 if (v_left < needed)
3451 goto no_hw_vecs_left_err;
3452 v_budget += needed;
3453 v_left -= needed;
3454 }
3455
3456 pf->msix_entries = devm_kcalloc(dev, v_budget,
3457 sizeof(*pf->msix_entries), GFP_KERNEL);
3458
3459 if (!pf->msix_entries) {
3460 err = -ENOMEM;
3461 goto exit_err;
3462 }
3463
3464 for (i = 0; i < v_budget; i++)
3465 pf->msix_entries[i].entry = i;
3466
3467 /* actually reserve the vectors */
3468 v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3469 ICE_MIN_MSIX, v_budget);
3470
3471 if (v_actual < 0) {
3472 dev_err(dev, "unable to reserve MSI-X vectors\n");
3473 err = v_actual;
3474 goto msix_err;
3475 }
3476
3477 if (v_actual < v_budget) {
3478 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3479 v_budget, v_actual);
3480
3481 if (v_actual < ICE_MIN_MSIX) {
3482 /* error if we can't get minimum vectors */
3483 pci_disable_msix(pf->pdev);
3484 err = -ERANGE;
3485 goto msix_err;
3486 } else {
3487 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3488 }
3489 }
3490
3491 return v_actual;
3492
3493 msix_err:
3494 devm_kfree(dev, pf->msix_entries);
3495 goto exit_err;
3496
3497 no_hw_vecs_left_err:
3498 dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3499 needed, v_left);
3500 err = -ERANGE;
3501 exit_err:
3502 pf->num_lan_msix = 0;
3503 return err;
3504 }
3505
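/* Annotation (not part of the upstream file): worked example of the v_budget
 * arithmetic in ice_ena_msix_range() on a 16-CPU system with flow director
 * enabled, assuming ICE_FDIR_MSIX is 2:
 *
 *	misc/OICR vector:  1
 *	LAN traffic:      16   (min(num_online_cpus(), v_left))
 *	flow director:     2
 *	v_budget:         19
 *
 * If the OS grants fewer than 19 vectors but at least ICE_MIN_MSIX, the
 * driver drops to ICE_MIN_LAN_TXRX_MSIX LAN vectors instead of failing.
 */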
3506 /**
3507 * ice_dis_msix - Disable MSI-X interrupt setup in OS
3508 * @pf: board private structure
3509 */
3510 static void ice_dis_msix(struct ice_pf *pf)
3511 {
3512 pci_disable_msix(pf->pdev);
3513 devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3514 pf->msix_entries = NULL;
3515 }
3516
3517 /**
3518 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3519 * @pf: board private structure
3520 */
3521 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3522 {
3523 ice_dis_msix(pf);
3524
3525 if (pf->irq_tracker) {
3526 devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3527 pf->irq_tracker = NULL;
3528 }
3529 }
3530
3531 /**
3532 * ice_init_interrupt_scheme - Determine proper interrupt scheme
3533 * @pf: board private structure to initialize
3534 */
3535 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3536 {
3537 int vectors;
3538
3539 vectors = ice_ena_msix_range(pf);
3540
3541 if (vectors < 0)
3542 return vectors;
3543
3544 /* set up vector assignment tracking */
3545 pf->irq_tracker =
3546 devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
3547 (sizeof(u16) * vectors), GFP_KERNEL);
3548 if (!pf->irq_tracker) {
3549 ice_dis_msix(pf);
3550 return -ENOMEM;
3551 }
3552
3553 /* populate SW interrupts pool with number of OS granted IRQs. */
3554 pf->num_avail_sw_msix = (u16)vectors;
3555 pf->irq_tracker->num_entries = (u16)vectors;
3556 pf->irq_tracker->end = pf->irq_tracker->num_entries;
3557
3558 return 0;
3559 }
3560
3561 /**
3562 * ice_is_wol_supported - check if WoL is supported
3563 * @hw: pointer to hardware info
3564 *
3565 * Check if WoL is supported based on the HW configuration.
3566 * Returns true if NVM supports and enables WoL for this port, false otherwise
3567 */
3568 bool ice_is_wol_supported(struct ice_hw *hw)
3569 {
3570 u16 wol_ctrl;
3571
3572 /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3573 * word) indicates WoL is not supported on the corresponding PF ID.
3574 */
3575 if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3576 return false;
3577
3578 return !(BIT(hw->port_info->lport) & wol_ctrl);
3579 }
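
/* Worked example with a hypothetical NVM value: if the WoL control word
 * reads 0x0004, only BIT(2) is set, so WoL is disabled on lport 2 and
 * enabled on every other port:
 *
 *	wol_ctrl = 0x0004;
 *	BIT(2) & wol_ctrl	// nonzero -> returns false for lport 2
 *	BIT(0) & wol_ctrl	// zero    -> returns true for lport 0
 */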
3580
3581 /**
3582 * ice_vsi_recfg_qs - Change the number of queues on a VSI
3583 * @vsi: VSI being changed
3584 * @new_rx: new number of Rx queues
3585 * @new_tx: new number of Tx queues
3586 *
3587 * Only change the number of queues if new_tx or new_rx is non-zero.
3588 *
3589 * Returns 0 on success.
3590 */
3591 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
3592 {
3593 struct ice_pf *pf = vsi->back;
3594 int err = 0, timeout = 50;
3595
3596 if (!new_rx && !new_tx)
3597 return -EINVAL;
3598
3599 while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
3600 timeout--;
3601 if (!timeout)
3602 return -EBUSY;
3603 usleep_range(1000, 2000);
3604 }
3605
3606 if (new_tx)
3607 vsi->req_txq = (u16)new_tx;
3608 if (new_rx)
3609 vsi->req_rxq = (u16)new_rx;
3610
3611 /* set for the next time the netdev is started */
3612 if (!netif_running(vsi->netdev)) {
3613 ice_vsi_rebuild(vsi, false);
3614 dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3615 goto done;
3616 }
3617
3618 ice_vsi_close(vsi);
3619 ice_vsi_rebuild(vsi, false);
3620 ice_pf_dcb_recfg(pf);
3621 ice_vsi_open(vsi);
3622 done:
3623 clear_bit(__ICE_CFG_BUSY, pf->state);
3624 return err;
3625 }
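
/* Usage note: this is the backend for ethtool channel reconfiguration,
 * e.g. "ethtool -L <iface> combined 8" on a PF netdev (interface name
 * hypothetical); the ethtool handler validates the request and calls here
 * with the new counts, passing 0 for a direction that should not change.
 */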
3626
3627 /**
3628 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3629 * @pf: PF to configure
3630 *
3631 * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3632 * VSI can still Tx/Rx VLAN tagged packets.
3633 */
3634 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3635 {
3636 struct ice_vsi *vsi = ice_get_main_vsi(pf);
3637 struct ice_vsi_ctx *ctxt;
3638 enum ice_status status;
3639 struct ice_hw *hw;
3640
3641 if (!vsi)
3642 return;
3643
3644 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3645 if (!ctxt)
3646 return;
3647
3648 hw = &pf->hw;
3649 ctxt->info = vsi->info;
3650
3651 ctxt->info.valid_sections =
3652 cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3653 ICE_AQ_VSI_PROP_SECURITY_VALID |
3654 ICE_AQ_VSI_PROP_SW_VALID);
3655
3656 /* disable VLAN anti-spoof */
3657 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3658 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3659
3660 /* disable VLAN pruning and keep all other settings */
3661 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3662
3663 /* allow all VLANs on Tx and don't strip on Rx */
3664 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3665 ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3666
3667 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3668 if (status) {
3669 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3670 ice_stat_str(status),
3671 ice_aq_str(hw->adminq.sq_last_status));
3672 } else {
3673 vsi->info.sec_flags = ctxt->info.sec_flags;
3674 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3675 vsi->info.vlan_flags = ctxt->info.vlan_flags;
3676 }
3677
3678 kfree(ctxt);
3679 }
3680
3681 /**
3682 * ice_log_pkg_init - log result of DDP package load
3683 * @hw: pointer to hardware info
3684 * @status: status of package load
3685 */
3686 static void
3687 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3688 {
3689 struct ice_pf *pf = (struct ice_pf *)hw->back;
3690 struct device *dev = ice_pf_to_dev(pf);
3691
3692 switch (*status) {
3693 case ICE_SUCCESS:
3694 /* The package download AdminQ command returned success either
3695 * because this download succeeded or because a package is
3696 * already loaded on the device (ICE_ERR_AQ_NO_WORK).
3697 */
3698 if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3699 hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3700 hw->pkg_ver.update == hw->active_pkg_ver.update &&
3701 hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3702 !memcmp(hw->pkg_name, hw->active_pkg_name,
3703 sizeof(hw->pkg_name))) {
3704 if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3705 dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3706 hw->active_pkg_name,
3707 hw->active_pkg_ver.major,
3708 hw->active_pkg_ver.minor,
3709 hw->active_pkg_ver.update,
3710 hw->active_pkg_ver.draft);
3711 else
3712 dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3713 hw->active_pkg_name,
3714 hw->active_pkg_ver.major,
3715 hw->active_pkg_ver.minor,
3716 hw->active_pkg_ver.update,
3717 hw->active_pkg_ver.draft);
3718 } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3719 hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3720 dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
3721 hw->active_pkg_name,
3722 hw->active_pkg_ver.major,
3723 hw->active_pkg_ver.minor,
3724 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3725 *status = ICE_ERR_NOT_SUPPORTED;
3726 } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3727 hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3728 dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3729 hw->active_pkg_name,
3730 hw->active_pkg_ver.major,
3731 hw->active_pkg_ver.minor,
3732 hw->active_pkg_ver.update,
3733 hw->active_pkg_ver.draft,
3734 hw->pkg_name,
3735 hw->pkg_ver.major,
3736 hw->pkg_ver.minor,
3737 hw->pkg_ver.update,
3738 hw->pkg_ver.draft);
3739 } else {
3740 dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
3741 *status = ICE_ERR_NOT_SUPPORTED;
3742 }
3743 break;
3744 case ICE_ERR_FW_DDP_MISMATCH:
3745 dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
3746 break;
3747 case ICE_ERR_BUF_TOO_SHORT:
3748 case ICE_ERR_CFG:
3749 dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3750 break;
3751 case ICE_ERR_NOT_SUPPORTED:
3752 /* Package File version not supported */
3753 if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3754 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3755 hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3756 dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
3757 else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3758 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3759 hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3760 dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
3761 ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3762 break;
3763 case ICE_ERR_AQ_ERROR:
3764 switch (hw->pkg_dwnld_status) {
3765 case ICE_AQ_RC_ENOSEC:
3766 case ICE_AQ_RC_EBADSIG:
3767 dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
3768 return;
3769 case ICE_AQ_RC_ESVN:
3770 dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
3771 return;
3772 case ICE_AQ_RC_EBADMAN:
3773 case ICE_AQ_RC_EBADBUF:
3774 dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
3775 /* poll for reset to complete */
3776 if (ice_check_reset(hw))
3777 dev_err(dev, "Error resetting device. Please reload the driver\n");
3778 return;
3779 default:
3780 break;
3781 }
3782 fallthrough;
3783 default:
3784 dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
3785 *status);
3786 break;
3787 }
3788 }
3789
3790 /**
3791 * ice_load_pkg - load/reload the DDP Package file
3792 * @firmware: firmware structure when firmware requested or NULL for reload
3793 * @pf: pointer to the PF instance
3794 *
3795 * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3796 * initialize HW tables.
3797 */
3798 static void
3799 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3800 {
3801 enum ice_status status = ICE_ERR_PARAM;
3802 struct device *dev = ice_pf_to_dev(pf);
3803 struct ice_hw *hw = &pf->hw;
3804
3805 /* Load DDP Package */
3806 if (firmware && !hw->pkg_copy) {
3807 status = ice_copy_and_init_pkg(hw, firmware->data,
3808 firmware->size);
3809 ice_log_pkg_init(hw, &status);
3810 } else if (!firmware && hw->pkg_copy) {
3811 /* Reload package during rebuild after CORER/GLOBR reset */
3812 status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
3813 ice_log_pkg_init(hw, &status);
3814 } else {
3815 dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
3816 }
3817
3818 if (status) {
3819 /* Safe Mode */
3820 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3821 return;
3822 }
3823
3824 /* A successful package download is the precondition for advanced
3825 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
3826 */
3827 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
3828 }
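
/* Call-path note: on probe, ice_request_fw() passes the image obtained via
 * request_firmware(), so the copy-and-init branch runs and caches the
 * package in hw->pkg_copy; on rebuild after a CORER/GLOBR reset this is
 * called with firmware == NULL and the cached copy is replayed instead.
 */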
3829
3830 /**
3831 * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
3832 * @pf: pointer to the PF structure
3833 *
3834 * There is no error returned here because the driver should be able to handle
3835 * 128 Byte cache lines, so we only print a warning in case issues are seen,
3836 * specifically with Tx.
3837 */
3838 static void ice_verify_cacheline_size(struct ice_pf *pf)
3839 {
3840 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
3841 dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
3842 ICE_CACHE_LINE_BYTES);
3843 }
3844
3845 /**
3846 * ice_send_version - update firmware with driver version
3847 * @pf: PF struct
3848 *
3849 * Returns ICE_SUCCESS on success, else error code
3850 */
3851 static enum ice_status ice_send_version(struct ice_pf *pf)
3852 {
3853 struct ice_driver_ver dv;
3854
3855 dv.major_ver = 0xff;
3856 dv.minor_ver = 0xff;
3857 dv.build_ver = 0xff;
3858 dv.subbuild_ver = 0;
3859 strscpy((char *)dv.driver_string, UTS_RELEASE,
3860 sizeof(dv.driver_string));
3861 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
3862 }
3863
3864 /**
3865 * ice_init_fdir - Initialize flow director VSI and configuration
3866 * @pf: pointer to the PF instance
3867 *
3868 * returns 0 on success, negative on error
3869 */
3870 static int ice_init_fdir(struct ice_pf *pf)
3871 {
3872 struct device *dev = ice_pf_to_dev(pf);
3873 struct ice_vsi *ctrl_vsi;
3874 int err;
3875
3876 /* Side Band Flow Director needs to have a control VSI.
3877 * Allocate it and store it in the PF.
3878 */
3879 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
3880 if (!ctrl_vsi) {
3881 dev_dbg(dev, "could not create control VSI\n");
3882 return -ENOMEM;
3883 }
3884
3885 err = ice_vsi_open_ctrl(ctrl_vsi);
3886 if (err) {
3887 dev_dbg(dev, "could not open control VSI\n");
3888 goto err_vsi_open;
3889 }
3890
3891 mutex_init(&pf->hw.fdir_fltr_lock);
3892
3893 err = ice_fdir_create_dflt_rules(pf);
3894 if (err)
3895 goto err_fdir_rule;
3896
3897 return 0;
3898
3899 err_fdir_rule:
3900 ice_fdir_release_flows(&pf->hw);
3901 ice_vsi_close(ctrl_vsi);
3902 err_vsi_open:
3903 ice_vsi_release(ctrl_vsi);
3904 if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
3905 pf->vsi[pf->ctrl_vsi_idx] = NULL;
3906 pf->ctrl_vsi_idx = ICE_NO_VSI;
3907 }
3908 return err;
3909 }
3910
3911 /**
3912 * ice_get_opt_fw_name - return optional firmware file name or NULL
3913 * @pf: pointer to the PF instance
3914 */
3915 static char *ice_get_opt_fw_name(struct ice_pf *pf)
3916 {
3917 /* The optional firmware name is the default name with an additional
3918 * dash followed by an EUI-64 identifier (PCIe Device Serial Number)
3919 */
3920 struct pci_dev *pdev = pf->pdev;
3921 char *opt_fw_filename;
3922 u64 dsn;
3923
3924 /* Determine the name of the optional file using the DSN (two
3925 * dwords following the start of the DSN Capability).
3926 */
3927 dsn = pci_get_dsn(pdev);
3928 if (!dsn)
3929 return NULL;
3930
3931 opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
3932 if (!opt_fw_filename)
3933 return NULL;
3934
3935 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
3936 ICE_DDP_PKG_PATH, dsn);
3937
3938 return opt_fw_filename;
3939 }
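
/* Worked example with a hypothetical DSN: for a device whose PCIe Device
 * Serial Number reads 0x1122334455667788, the snprintf() above yields
 *
 *	"intel/ice/ddp/ice-1122334455667788.pkg"
 *
 * which firmware_request_nowarn() resolves against the firmware search
 * paths, e.g. /lib/firmware/intel/ice/ddp/ice-1122334455667788.pkg.
 */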
3940
3941 /**
3942 * ice_request_fw - Device initialization routine
3943 * @pf: pointer to the PF instance
3944 */
3945 static void ice_request_fw(struct ice_pf *pf)
3946 {
3947 char *opt_fw_filename = ice_get_opt_fw_name(pf);
3948 const struct firmware *firmware = NULL;
3949 struct device *dev = ice_pf_to_dev(pf);
3950 int err = 0;
3951
3952 /* An optional device-specific DDP file (if present) overrides the
3953 * default DDP package file. The kernel logs a debug message if the
3954 * file doesn't exist and warning messages for other errors.
3955 */
3956 if (opt_fw_filename) {
3957 err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
3958 if (err) {
3959 kfree(opt_fw_filename);
3960 goto dflt_pkg_load;
3961 }
3962
3963 /* request for firmware was successful. Download to device */
3964 ice_load_pkg(firmware, pf);
3965 kfree(opt_fw_filename);
3966 release_firmware(firmware);
3967 return;
3968 }
3969
3970 dflt_pkg_load:
3971 err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
3972 if (err) {
3973 dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
3974 return;
3975 }
3976
3977 /* request for firmware was successful. Download to device */
3978 ice_load_pkg(firmware, pf);
3979 release_firmware(firmware);
3980 }
3981
3982 /**
3983 * ice_print_wake_reason - show the wake up cause in the log
3984 * @pf: pointer to the PF struct
3985 */
3986 static void ice_print_wake_reason(struct ice_pf *pf)
3987 {
3988 u32 wus = pf->wakeup_reason;
3989 const char *wake_str;
3990
3991 /* if no wake event, nothing to print */
3992 if (!wus)
3993 return;
3994
3995 if (wus & PFPM_WUS_LNKC_M)
3996 wake_str = "Link\n";
3997 else if (wus & PFPM_WUS_MAG_M)
3998 wake_str = "Magic Packet\n";
3999 else if (wus & PFPM_WUS_MNG_M)
4000 wake_str = "Management\n";
4001 else if (wus & PFPM_WUS_FW_RST_WK_M)
4002 wake_str = "Firmware Reset\n";
4003 else
4004 wake_str = "Unknown\n";
4005
4006 dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4007 }
4008
4009 /**
4010 * ice_probe - Device initialization routine
4011 * @pdev: PCI device information struct
4012 * @ent: entry in ice_pci_tbl
4013 *
4014 * Returns 0 on success, negative on failure
4015 */
4016 static int
4017 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4018 {
4019 struct device *dev = &pdev->dev;
4020 struct ice_pf *pf;
4021 struct ice_hw *hw;
4022 int i, err;
4023
4024 if (pdev->is_virtfn) {
4025 dev_err(dev, "can't probe a virtual function\n");
4026 return -EINVAL;
4027 }
4028
4029 /* When running under a kdump kernel, initiate a reset before enabling
4030 * the device in order to clear out any pending DMA transactions. These
4031 * transactions can cause some systems to machine check when doing
4032 * the pcim_enable_device() below.
4033 */
4034 if (is_kdump_kernel()) {
4035 pci_save_state(pdev);
4036 pci_clear_master(pdev);
4037 err = pcie_flr(pdev);
4038 if (err)
4039 return err;
4040 pci_restore_state(pdev);
4041 }
4042
4043 /* this driver uses devres, see
4044 * Documentation/driver-api/driver-model/devres.rst
4045 */
4046 err = pcim_enable_device(pdev);
4047 if (err)
4048 return err;
4049
4050 err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
4051 if (err) {
4052 dev_err(dev, "BAR0 I/O map error %d\n", err);
4053 return err;
4054 }
4055
4056 pf = ice_allocate_pf(dev);
4057 if (!pf)
4058 return -ENOMEM;
4059
4060 /* set up for high or low DMA */
4061 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4062 if (err)
4063 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4064 if (err) {
4065 dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4066 return err;
4067 }
4068
4069 pci_enable_pcie_error_reporting(pdev);
4070 pci_set_master(pdev);
4071
4072 pf->pdev = pdev;
4073 pci_set_drvdata(pdev, pf);
4074 set_bit(__ICE_DOWN, pf->state);
4075 /* Disable service task until DOWN bit is cleared */
4076 set_bit(__ICE_SERVICE_DIS, pf->state);
4077
4078 hw = &pf->hw;
4079 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4080 pci_save_state(pdev);
4081
4082 hw->back = pf;
4083 hw->vendor_id = pdev->vendor;
4084 hw->device_id = pdev->device;
4085 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4086 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4087 hw->subsystem_device_id = pdev->subsystem_device;
4088 hw->bus.device = PCI_SLOT(pdev->devfn);
4089 hw->bus.func = PCI_FUNC(pdev->devfn);
4090 ice_set_ctrlq_len(hw);
4091
4092 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4093
4094 err = ice_devlink_register(pf);
4095 if (err) {
4096 dev_err(dev, "ice_devlink_register failed: %d\n", err);
4097 goto err_exit_unroll;
4098 }
4099
4100 #ifndef CONFIG_DYNAMIC_DEBUG
4101 if (debug < -1)
4102 hw->debug_mask = debug;
4103 #endif
4104
4105 err = ice_init_hw(hw);
4106 if (err) {
4107 dev_err(dev, "ice_init_hw failed: %d\n", err);
4108 err = -EIO;
4109 goto err_exit_unroll;
4110 }
4111
4112 ice_request_fw(pf);
4113
4114 /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
4115 * set in pf->flags, which will cause ice_is_safe_mode to return
4116 * true
4117 */
4118 if (ice_is_safe_mode(pf)) {
4119 dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4120 /* we already got function/device capabilities but these don't
4121 * reflect what the driver needs to do in safe mode. Instead of
4122 * adding conditional logic everywhere to ignore these
4123 * device/function capabilities, override them.
4124 */
4125 ice_set_safe_mode_caps(hw);
4126 }
4127
4128 err = ice_init_pf(pf);
4129 if (err) {
4130 dev_err(dev, "ice_init_pf failed: %d\n", err);
4131 goto err_init_pf_unroll;
4132 }
4133
4134 ice_devlink_init_regions(pf);
4135
4136 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4137 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4138 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4139 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
4140 i = 0;
4141 if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4142 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4143 pf->hw.tnl.valid_count[TNL_VXLAN];
4144 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4145 UDP_TUNNEL_TYPE_VXLAN;
4146 i++;
4147 }
4148 if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4149 pf->hw.udp_tunnel_nic.tables[i].n_entries =
4150 pf->hw.tnl.valid_count[TNL_GENEVE];
4151 pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4152 UDP_TUNNEL_TYPE_GENEVE;
4153 i++;
4154 }
4155
4156 pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4157 if (!pf->num_alloc_vsi) {
4158 err = -EIO;
4159 goto err_init_pf_unroll;
4160 }
4161 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4162 dev_warn(&pf->pdev->dev,
4163 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4164 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4165 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4166 }
4167
4168 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4169 GFP_KERNEL);
4170 if (!pf->vsi) {
4171 err = -ENOMEM;
4172 goto err_init_pf_unroll;
4173 }
4174
4175 err = ice_init_interrupt_scheme(pf);
4176 if (err) {
4177 dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4178 err = -EIO;
4179 goto err_init_vsi_unroll;
4180 }
4181
4182 /* In case of MSI-X we are going to set up the misc vector right here
4183 * to handle admin queue events etc. In case of legacy and MSI
4184 * the misc functionality and queue processing are combined in
4185 * the same vector and that gets set up at open.
4186 */
4187 err = ice_req_irq_msix_misc(pf);
4188 if (err) {
4189 dev_err(dev, "setup of misc vector failed: %d\n", err);
4190 goto err_init_interrupt_unroll;
4191 }
4192
4193 /* create switch struct for the switch element created by FW on boot */
4194 pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4195 if (!pf->first_sw) {
4196 err = -ENOMEM;
4197 goto err_msix_misc_unroll;
4198 }
4199
4200 if (hw->evb_veb)
4201 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4202 else
4203 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4204
4205 pf->first_sw->pf = pf;
4206
4207 /* record the sw_id available for later use */
4208 pf->first_sw->sw_id = hw->port_info->sw_id;
4209
4210 err = ice_setup_pf_sw(pf);
4211 if (err) {
4212 dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4213 goto err_alloc_sw_unroll;
4214 }
4215
4216 clear_bit(__ICE_SERVICE_DIS, pf->state);
4217
4218 /* tell the firmware we are up */
4219 err = ice_send_version(pf);
4220 if (err) {
4221 dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4222 UTS_RELEASE, err);
4223 goto err_send_version_unroll;
4224 }
4225
4226 /* since everything is good, start the service timer */
4227 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4228
4229 err = ice_init_link_events(pf->hw.port_info);
4230 if (err) {
4231 dev_err(dev, "ice_init_link_events failed: %d\n", err);
4232 goto err_send_version_unroll;
4233 }
4234
4235 /* not a fatal error if this fails */
4236 err = ice_init_nvm_phy_type(pf->hw.port_info);
4237 if (err)
4238 dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4239
4240 /* not a fatal error if this fails */
4241 err = ice_update_link_info(pf->hw.port_info);
4242 if (err)
4243 dev_err(dev, "ice_update_link_info failed: %d\n", err);
4244
4245 ice_init_link_dflt_override(pf->hw.port_info);
4246
4247 /* if media available, initialize PHY settings */
4248 if (pf->hw.port_info->phy.link_info.link_info &
4249 ICE_AQ_MEDIA_AVAILABLE) {
4250 /* not a fatal error if this fails */
4251 err = ice_init_phy_user_cfg(pf->hw.port_info);
4252 if (err)
4253 dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4254
4255 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4256 struct ice_vsi *vsi = ice_get_main_vsi(pf);
4257
4258 if (vsi)
4259 ice_configure_phy(vsi);
4260 }
4261 } else {
4262 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4263 }
4264
4265 ice_verify_cacheline_size(pf);
4266
4267 /* Save wakeup reason register for later use */
4268 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4269
4270 /* check for a power management event */
4271 ice_print_wake_reason(pf);
4272
4273 /* clear wake status, all bits */
4274 wr32(hw, PFPM_WUS, U32_MAX);
4275
4276 /* Disable WoL at init, wait for user to enable */
4277 device_set_wakeup_enable(dev, false);
4278
4279 if (ice_is_safe_mode(pf)) {
4280 ice_set_safe_mode_vlan_cfg(pf);
4281 goto probe_done;
4282 }
4283
4284 /* initialize DDP driven features */
4285
4286 /* Note: Flow director init failure is non-fatal to load */
4287 if (ice_init_fdir(pf))
4288 dev_err(dev, "could not initialize flow director\n");
4289
4290 /* Note: DCB init failure is non-fatal to load */
4291 if (ice_init_pf_dcb(pf, false)) {
4292 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4293 clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4294 } else {
4295 ice_cfg_lldp_mib_change(&pf->hw, true);
4296 }
4297
4298 /* print PCI link speed and width */
4299 pcie_print_link_status(pf->pdev);
4300
4301 probe_done:
4302 /* ready to go, so clear down state bit */
4303 clear_bit(__ICE_DOWN, pf->state);
4304 return 0;
4305
4306 err_send_version_unroll:
4307 ice_vsi_release_all(pf);
4308 err_alloc_sw_unroll:
4309 set_bit(__ICE_SERVICE_DIS, pf->state);
4310 set_bit(__ICE_DOWN, pf->state);
4311 devm_kfree(dev, pf->first_sw);
4312 err_msix_misc_unroll:
4313 ice_free_irq_msix_misc(pf);
4314 err_init_interrupt_unroll:
4315 ice_clear_interrupt_scheme(pf);
4316 err_init_vsi_unroll:
4317 devm_kfree(dev, pf->vsi);
4318 err_init_pf_unroll:
4319 ice_deinit_pf(pf);
4320 ice_devlink_destroy_regions(pf);
4321 ice_deinit_hw(hw);
4322 err_exit_unroll:
4323 ice_devlink_unregister(pf);
4324 pci_disable_pcie_error_reporting(pdev);
4325 pci_disable_device(pdev);
4326 return err;
4327 }
4328
4329 /**
4330 * ice_set_wake - enable or disable Wake on LAN
4331 * @pf: pointer to the PF struct
4332 *
4333 * Simple helper for WoL control
4334 */
4335 static void ice_set_wake(struct ice_pf *pf)
4336 {
4337 struct ice_hw *hw = &pf->hw;
4338 bool wol = pf->wol_ena;
4339
4340 /* clear wake state, otherwise new wake events won't fire */
4341 wr32(hw, PFPM_WUS, U32_MAX);
4342
4343 /* enable / disable APM wake up, no RMW needed */
4344 wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4345
4346 /* set magic packet filter enabled */
4347 wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4348 }
4349
4350 /**
4351 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4352 * @pf: pointer to the PF struct
4353 *
4354 * Issue firmware command to enable multicast magic wake, making
4355 * sure that any locally administered address (LAA) is used for
4356 * wake, and that PF reset doesn't undo the LAA.
4357 */
4358 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4359 {
4360 struct device *dev = ice_pf_to_dev(pf);
4361 struct ice_hw *hw = &pf->hw;
4362 enum ice_status status;
4363 u8 mac_addr[ETH_ALEN];
4364 struct ice_vsi *vsi;
4365 u8 flags;
4366
4367 if (!pf->wol_ena)
4368 return;
4369
4370 vsi = ice_get_main_vsi(pf);
4371 if (!vsi)
4372 return;
4373
4374 /* Get current MAC address in case it's an LAA */
4375 if (vsi->netdev)
4376 ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4377 else
4378 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4379
4380 flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4381 ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4382 ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4383
4384 status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4385 if (status)
4386 dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4387 ice_stat_str(status),
4388 ice_aq_str(hw->adminq.sq_last_status));
4389 }
4390
4391 /**
4392 * ice_remove - Device removal routine
4393 * @pdev: PCI device information struct
4394 */
4395 static void ice_remove(struct pci_dev *pdev)
4396 {
4397 struct ice_pf *pf = pci_get_drvdata(pdev);
4398 int i;
4399
4400 for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4401 if (!ice_is_reset_in_progress(pf->state))
4402 break;
4403 msleep(100);
4404 }
4405
4406 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4407 set_bit(__ICE_VF_RESETS_DISABLED, pf->state);
4408 ice_free_vfs(pf);
4409 }
4410
4411 set_bit(__ICE_DOWN, pf->state);
4412 ice_service_task_stop(pf);
4413
4414 ice_aq_cancel_waiting_tasks(pf);
4415
4416 mutex_destroy(&pf->hw.fdir_fltr_lock);
4417 if (!ice_is_safe_mode(pf))
4418 ice_remove_arfs(pf);
4419 ice_setup_mc_magic_wake(pf);
4420 ice_vsi_release_all(pf);
4421 ice_set_wake(pf);
4422 ice_free_irq_msix_misc(pf);
4423 ice_for_each_vsi(pf, i) {
4424 if (!pf->vsi[i])
4425 continue;
4426 ice_vsi_free_q_vectors(pf->vsi[i]);
4427 }
4428 ice_deinit_pf(pf);
4429 ice_devlink_destroy_regions(pf);
4430 ice_deinit_hw(&pf->hw);
4431 ice_devlink_unregister(pf);
4432
4433 /* Issue a PFR as part of the prescribed driver unload flow. Do not
4434 * do it via ice_schedule_reset() since there is no need to rebuild
4435 * and the service task is already stopped.
4436 */
4437 ice_reset(&pf->hw, ICE_RESET_PFR);
4438 pci_wait_for_pending_transaction(pdev);
4439 ice_clear_interrupt_scheme(pf);
4440 pci_disable_pcie_error_reporting(pdev);
4441 pci_disable_device(pdev);
4442 }
4443
4444 /**
4445 * ice_shutdown - PCI callback for shutting down device
4446 * @pdev: PCI device information struct
4447 */
4448 static void ice_shutdown(struct pci_dev *pdev)
4449 {
4450 struct ice_pf *pf = pci_get_drvdata(pdev);
4451
4452 ice_remove(pdev);
4453
4454 if (system_state == SYSTEM_POWER_OFF) {
4455 pci_wake_from_d3(pdev, pf->wol_ena);
4456 pci_set_power_state(pdev, PCI_D3hot);
4457 }
4458 }
4459
4460 #ifdef CONFIG_PM
4461 /**
4462 * ice_prepare_for_shutdown - prep for PCI shutdown
4463 * @pf: board private structure
4464 *
4465 * Inform or close all dependent features in prep for PCI device shutdown
4466 */
4467 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4468 {
4469 struct ice_hw *hw = &pf->hw;
4470 u32 v;
4471
4472 /* Notify VFs of impending reset */
4473 if (ice_check_sq_alive(hw, &hw->mailboxq))
4474 ice_vc_notify_reset(pf);
4475
4476 dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4477
4478 /* disable the VSIs and their queues that are not already DOWN */
4479 ice_pf_dis_all_vsi(pf, false);
4480
4481 ice_for_each_vsi(pf, v)
4482 if (pf->vsi[v])
4483 pf->vsi[v]->vsi_num = 0;
4484
4485 ice_shutdown_all_ctrlq(hw);
4486 }
4487
4488 /**
4489 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4490 * @pf: board private structure to reinitialize
4491 *
4492 * This routine reinitializes the interrupt scheme that was cleared during
4493 * the power management suspend callback.
4494 *
4495 * This should be called during resume routine to re-allocate the q_vectors
4496 * and reacquire interrupts.
4497 */
4498 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4499 {
4500 struct device *dev = ice_pf_to_dev(pf);
4501 int ret, v;
4502
4503 /* Since we cleared the MSI-X configuration during suspend, we need
4504 * to set it back up during resume
4505 */
4506
4507 ret = ice_init_interrupt_scheme(pf);
4508 if (ret) {
4509 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4510 return ret;
4511 }
4512
4513 /* Remap vectors and rings, after successful re-init interrupts */
4514 ice_for_each_vsi(pf, v) {
4515 if (!pf->vsi[v])
4516 continue;
4517
4518 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4519 if (ret)
4520 goto err_reinit;
4521 ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4522 }
4523
4524 ret = ice_req_irq_msix_misc(pf);
4525 if (ret) {
4526 dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4527 ret);
4528 goto err_reinit;
4529 }
4530
4531 return 0;
4532
4533 err_reinit:
4534 while (v--)
4535 if (pf->vsi[v])
4536 ice_vsi_free_q_vectors(pf->vsi[v]);
4537
4538 return ret;
4539 }
4540
4541 /**
4542 * ice_suspend
4543 * @dev: generic device information structure
4544 *
4545 * Power Management callback to quiesce the device and prepare
4546 * for D3 transition.
4547 */
4548 static int __maybe_unused ice_suspend(struct device *dev)
4549 {
4550 struct pci_dev *pdev = to_pci_dev(dev);
4551 struct ice_pf *pf;
4552 int disabled, v;
4553
4554 pf = pci_get_drvdata(pdev);
4555
4556 if (!ice_pf_state_is_nominal(pf)) {
4557 dev_err(dev, "Device is not ready, no need to suspend it\n");
4558 return -EBUSY;
4559 }
4560
4561 /* Stop watchdog tasks until resume completion.
4562 * Even though it is most likely that the service task is
4563 * disabled if the device is suspended or down, the service task's
4564 * state is controlled by a different state bit, and we should
4565 * store and honor whatever state that bit is in at this point.
4566 */
4567 disabled = ice_service_task_stop(pf);
4568
4569 /* If already suspended, there is nothing to do */
4570 if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
4571 if (!disabled)
4572 ice_service_task_restart(pf);
4573 return 0;
4574 }
4575
4576 if (test_bit(__ICE_DOWN, pf->state) ||
4577 ice_is_reset_in_progress(pf->state)) {
4578 dev_err(dev, "can't suspend device in reset or already down\n");
4579 if (!disabled)
4580 ice_service_task_restart(pf);
4581 return 0;
4582 }
4583
4584 ice_setup_mc_magic_wake(pf);
4585
4586 ice_prepare_for_shutdown(pf);
4587
4588 ice_set_wake(pf);
4589
4590 /* Free vectors, clear the interrupt scheme and release IRQs
4591 * for proper hibernation, especially with large number of CPUs.
4592 * Otherwise hibernation might fail when mapping all the vectors back
4593 * to CPU0.
4594 */
4595 ice_free_irq_msix_misc(pf);
4596 ice_for_each_vsi(pf, v) {
4597 if (!pf->vsi[v])
4598 continue;
4599 ice_vsi_free_q_vectors(pf->vsi[v]);
4600 }
4601 ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4602 ice_clear_interrupt_scheme(pf);
4603
4604 pci_save_state(pdev);
4605 pci_wake_from_d3(pdev, pf->wol_ena);
4606 pci_set_power_state(pdev, PCI_D3hot);
4607 return 0;
4608 }
4609
4610 /**
4611 * ice_resume - PM callback for waking up from D3
4612 * @dev: generic device information structure
4613 */
4614 static int __maybe_unused ice_resume(struct device *dev)
4615 {
4616 struct pci_dev *pdev = to_pci_dev(dev);
4617 enum ice_reset_req reset_type;
4618 struct ice_pf *pf;
4619 struct ice_hw *hw;
4620 int ret;
4621
4622 pci_set_power_state(pdev, PCI_D0);
4623 pci_restore_state(pdev);
4624 pci_save_state(pdev);
4625
4626 if (!pci_device_is_present(pdev))
4627 return -ENODEV;
4628
4629 ret = pci_enable_device_mem(pdev);
4630 if (ret) {
4631 dev_err(dev, "Cannot enable device after suspend\n");
4632 return ret;
4633 }
4634
4635 pf = pci_get_drvdata(pdev);
4636 hw = &pf->hw;
4637
4638 pf->wakeup_reason = rd32(hw, PFPM_WUS);
4639 ice_print_wake_reason(pf);
4640
4641 /* We cleared the interrupt scheme when we suspended, so we need to
4642 * restore it now to resume device functionality.
4643 */
4644 ret = ice_reinit_interrupt_scheme(pf);
4645 if (ret)
4646 dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4647
4648 clear_bit(__ICE_DOWN, pf->state);
4649 /* Now perform PF reset and rebuild */
4650 reset_type = ICE_RESET_PFR;
4651 /* re-enable service task for reset, but allow reset to schedule it */
4652 clear_bit(__ICE_SERVICE_DIS, pf->state);
4653
4654 if (ice_schedule_reset(pf, reset_type))
4655 dev_err(dev, "Reset during resume failed.\n");
4656
4657 clear_bit(__ICE_SUSPENDED, pf->state);
4658 ice_service_task_restart(pf);
4659
4660 /* Restart the service task */
4661 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4662
4663 return 0;
4664 }
4665 #endif /* CONFIG_PM */
4666
4667 /**
4668 * ice_pci_err_detected - warning that PCI error has been detected
4669 * @pdev: PCI device information struct
4670 * @err: the type of PCI error
4671 *
4672 * Called to warn that something happened on the PCI bus and the error handling
4673 * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
4674 */
4675 static pci_ers_result_t
4676 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4677 {
4678 struct ice_pf *pf = pci_get_drvdata(pdev);
4679
4680 if (!pf) {
4681 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4682 __func__, err);
4683 return PCI_ERS_RESULT_DISCONNECT;
4684 }
4685
4686 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4687 ice_service_task_stop(pf);
4688
4689 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4690 set_bit(__ICE_PFR_REQ, pf->state);
4691 ice_prepare_for_reset(pf);
4692 }
4693 }
4694
4695 return PCI_ERS_RESULT_NEED_RESET;
4696 }
4697
4698 /**
4699 * ice_pci_err_slot_reset - a PCI slot reset has just happened
4700 * @pdev: PCI device information struct
4701 *
4702 * Called to determine if the driver can recover from the PCI slot reset by
4703 * using a register read to determine if the device is recoverable.
4704 */
4705 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4706 {
4707 struct ice_pf *pf = pci_get_drvdata(pdev);
4708 pci_ers_result_t result;
4709 int err;
4710 u32 reg;
4711
4712 err = pci_enable_device_mem(pdev);
4713 if (err) {
4714 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4715 err);
4716 result = PCI_ERS_RESULT_DISCONNECT;
4717 } else {
4718 pci_set_master(pdev);
4719 pci_restore_state(pdev);
4720 pci_save_state(pdev);
4721 pci_wake_from_d3(pdev, false);
4722
4723 /* Check for life */
4724 reg = rd32(&pf->hw, GLGEN_RTRIG);
4725 if (!reg)
4726 result = PCI_ERS_RESULT_RECOVERED;
4727 else
4728 result = PCI_ERS_RESULT_DISCONNECT;
4729 }
4730
4731 err = pci_aer_clear_nonfatal_status(pdev);
4732 if (err)
4733 dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
4734 err);
4735 /* non-fatal, continue */
4736
4737 return result;
4738 }
4739
4740 /**
4741 * ice_pci_err_resume - restart operations after PCI error recovery
4742 * @pdev: PCI device information struct
4743 *
4744 * Called to allow the driver to bring things back up after PCI error and/or
4745 * reset recovery have finished
4746 */
4747 static void ice_pci_err_resume(struct pci_dev *pdev)
4748 {
4749 struct ice_pf *pf = pci_get_drvdata(pdev);
4750
4751 if (!pf) {
4752 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
4753 __func__);
4754 return;
4755 }
4756
4757 if (test_bit(__ICE_SUSPENDED, pf->state)) {
4758 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
4759 __func__);
4760 return;
4761 }
4762
4763 ice_restore_all_vfs_msi_state(pdev);
4764
4765 ice_do_reset(pf, ICE_RESET_PFR);
4766 ice_service_task_restart(pf);
4767 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4768 }
4769
4770 /**
4771 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
4772 * @pdev: PCI device information struct
4773 */
4774 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
4775 {
4776 struct ice_pf *pf = pci_get_drvdata(pdev);
4777
4778 if (!test_bit(__ICE_SUSPENDED, pf->state)) {
4779 ice_service_task_stop(pf);
4780
4781 if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
4782 set_bit(__ICE_PFR_REQ, pf->state);
4783 ice_prepare_for_reset(pf);
4784 }
4785 }
4786 }
4787
4788 /**
4789 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
4790 * @pdev: PCI device information struct
4791 */
4792 static void ice_pci_err_reset_done(struct pci_dev *pdev)
4793 {
4794 ice_pci_err_resume(pdev);
4795 }
4796
4797 /* ice_pci_tbl - PCI Device ID Table
4798 *
4799 * Wildcard entries (PCI_ANY_ID) should come last
4800 * Last entry must be all 0s
4801 *
4802 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
4803 * Class, Class Mask, private data (not used) }
4804 */
4805 static const struct pci_device_id ice_pci_tbl[] = {
4806 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
4807 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
4808 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
4809 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
4810 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
4811 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
4812 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
4813 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
4814 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
4815 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
4816 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
4817 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
4818 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
4819 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
4820 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
4821 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
4822 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
4823 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
4824 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
4825 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
4826 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
4827 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
4828 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
4829 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
4830 { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
4831 /* required last entry */
4832 { 0, }
4833 };
4834 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
4835
4836 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
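
/* Note: SIMPLE_DEV_PM_OPS() wires the same pair of callbacks into all of
 * the system sleep hooks, so suspend and hibernate share one
 * implementation; roughly (a sketch of the macro's effect, not its exact
 * expansion):
 *
 *	.suspend = ice_suspend, .freeze = ice_suspend, .poweroff = ice_suspend,
 *	.resume = ice_resume, .thaw = ice_resume, .restore = ice_resume
 */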
4837
4838 static const struct pci_error_handlers ice_pci_err_handler = {
4839 .error_detected = ice_pci_err_detected,
4840 .slot_reset = ice_pci_err_slot_reset,
4841 .reset_prepare = ice_pci_err_reset_prepare,
4842 .reset_done = ice_pci_err_reset_done,
4843 .resume = ice_pci_err_resume
4844 };
4845
4846 static struct pci_driver ice_driver = {
4847 .name = KBUILD_MODNAME,
4848 .id_table = ice_pci_tbl,
4849 .probe = ice_probe,
4850 .remove = ice_remove,
4851 #ifdef CONFIG_PM
4852 .driver.pm = &ice_pm_ops,
4853 #endif /* CONFIG_PM */
4854 .shutdown = ice_shutdown,
4855 .sriov_configure = ice_sriov_configure,
4856 .err_handler = &ice_pci_err_handler
4857 };
4858
4859 /**
4860 * ice_module_init - Driver registration routine
4861 *
4862 * ice_module_init is the first routine called when the driver is
4863 * loaded. All it does is register with the PCI subsystem.
4864 */
4865 static int __init ice_module_init(void)
4866 {
4867 int status;
4868
4869 pr_info("%s\n", ice_driver_string);
4870 pr_info("%s\n", ice_copyright);
4871
4872 ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
4873 if (!ice_wq) {
4874 pr_err("Failed to create workqueue\n");
4875 return -ENOMEM;
4876 }
4877
4878 status = pci_register_driver(&ice_driver);
4879 if (status) {
4880 pr_err("failed to register PCI driver, err %d\n", status);
4881 destroy_workqueue(ice_wq);
4882 }
4883
4884 return status;
4885 }
4886 module_init(ice_module_init);
4887
4888 /**
4889 * ice_module_exit - Driver exit cleanup routine
4890 *
4891 * ice_module_exit is called just before the driver is removed
4892 * from memory.
4893 */
4894 static void __exit ice_module_exit(void)
4895 {
4896 pci_unregister_driver(&ice_driver);
4897 destroy_workqueue(ice_wq);
4898 pr_info("module unloaded\n");
4899 }
4900 module_exit(ice_module_exit);
4901
4902 /**
4903 * ice_set_mac_address - NDO callback to set MAC address
4904 * @netdev: network interface device structure
4905 * @pi: pointer to an address structure
4906 *
4907 * Returns 0 on success, negative on failure
4908 */
4909 static int ice_set_mac_address(struct net_device *netdev, void *pi)
4910 {
4911 struct ice_netdev_priv *np = netdev_priv(netdev);
4912 struct ice_vsi *vsi = np->vsi;
4913 struct ice_pf *pf = vsi->back;
4914 struct ice_hw *hw = &pf->hw;
4915 struct sockaddr *addr = pi;
4916 enum ice_status status;
4917 u8 old_mac[ETH_ALEN];
4918 u8 flags = 0;
4919 int err = 0;
4920 u8 *mac;
4921
4922 mac = (u8 *)addr->sa_data;
4923
4924 if (!is_valid_ether_addr(mac))
4925 return -EADDRNOTAVAIL;
4926
4927 if (ether_addr_equal(netdev->dev_addr, mac)) {
4928 netdev_dbg(netdev, "already using mac %pM\n", mac);
4929 return 0;
4930 }
4931
4932 if (test_bit(__ICE_DOWN, pf->state) ||
4933 ice_is_reset_in_progress(pf->state)) {
4934 netdev_err(netdev, "can't set mac %pM. device not ready\n",
4935 mac);
4936 return -EBUSY;
4937 }
4938
4939 netif_addr_lock_bh(netdev);
4940 ether_addr_copy(old_mac, netdev->dev_addr);
4941 /* change the netdev's MAC address */
4942 memcpy(netdev->dev_addr, mac, netdev->addr_len);
4943 netif_addr_unlock_bh(netdev);
4944
4945 /* Clean up old MAC filter. Not an error if old filter doesn't exist */
4946 status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
4947 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
4948 err = -EADDRNOTAVAIL;
4949 goto err_update_filters;
4950 }
4951
4952 /* Add filter for new MAC. If filter exists, return success */
4953 status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
4954 if (status == ICE_ERR_ALREADY_EXISTS)
4955 /* Although this MAC filter is already present in hardware it's
4956 * possible in some cases (e.g. bonding) that dev_addr was
4957 * modified outside of the driver and needs to be restored back
4958 * to this value.
4959 */
4960 netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
4961 else if (status)
4962 /* error if the new filter addition failed */
4963 err = -EADDRNOTAVAIL;
4964
4965 err_update_filters:
4966 if (err) {
4967 netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
4968 mac);
4969 netif_addr_lock_bh(netdev);
4970 ether_addr_copy(netdev->dev_addr, old_mac);
4971 netif_addr_unlock_bh(netdev);
4972 return err;
4973 }
4974
4975 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
4976 netdev->dev_addr);
4977
4978 /* write new MAC address to the firmware */
4979 flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
4980 status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
4981 if (status) {
4982 netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
4983 mac, ice_stat_str(status));
4984 }
4985 return 0;
4986 }
4987
4988 /**
4989 * ice_set_rx_mode - NDO callback to set the netdev filters
4990 * @netdev: network interface device structure
4991 */
4992 static void ice_set_rx_mode(struct net_device *netdev)
4993 {
4994 struct ice_netdev_priv *np = netdev_priv(netdev);
4995 struct ice_vsi *vsi = np->vsi;
4996
4997 if (!vsi)
4998 return;
4999
5000 /* Set the flags to synchronize filters
5001 * ndo_set_rx_mode may be triggered even without a change in netdev
5002 * flags
5003 */
5004 set_bit(ICE_VSI_FLAG_UMAC_FLTR_CHANGED, vsi->flags);
5005 set_bit(ICE_VSI_FLAG_MMAC_FLTR_CHANGED, vsi->flags);
5006 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5007
5008 /* schedule our worker thread which will take care of
5009 * applying the new filter changes
5010 */
5011 ice_service_task_schedule(vsi->back);
5012 }
5013
5014 /**
5015 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5016 * @netdev: network interface device structure
5017 * @queue_index: Queue ID
5018 * @maxrate: maximum bandwidth in Mbps
5019 */
5020 static int
5021 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5022 {
5023 struct ice_netdev_priv *np = netdev_priv(netdev);
5024 struct ice_vsi *vsi = np->vsi;
5025 enum ice_status status;
5026 u16 q_handle;
5027 u8 tc;
5028
5029 /* Validate maxrate requested is within permitted range */
5030 if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5031 netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5032 maxrate, queue_index);
5033 return -EINVAL;
5034 }
5035
5036 q_handle = vsi->tx_rings[queue_index]->q_handle;
5037 tc = ice_dcb_get_tc(vsi, queue_index);
5038
5039 /* Set BW back to default, when user set maxrate to 0 */
5040 if (!maxrate)
5041 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5042 q_handle, ICE_MAX_BW);
5043 else
5044 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5045 q_handle, ICE_MAX_BW, maxrate * 1000);
5046 if (status) {
5047 netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5048 ice_stat_str(status));
5049 return -EIO;
5050 }
5051
5052 return 0;
5053 }
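
/* Usage note: ndo_set_tx_maxrate is typically driven from sysfs by writing
 * a rate in Mbps to /sys/class/net/<iface>/queues/tx-<n>/tx_maxrate
 * (interface and queue index hypothetical); writing 0 takes the
 * ICE_MAX_BW default-limit path above and removes the per-queue cap.
 */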
5054
5055 /**
5056 * ice_fdb_add - add an entry to the hardware database
5057 * @ndm: the input from the stack
5058 * @tb: pointer to array of nladdr (unused)
5059 * @dev: the net device pointer
5060 * @addr: the MAC address entry being added
5061 * @vid: VLAN ID
5062 * @flags: instructions from stack about fdb operation
5063 * @extack: netlink extended ack
5064 */
5065 static int
5066 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5067 struct net_device *dev, const unsigned char *addr, u16 vid,
5068 u16 flags, struct netlink_ext_ack __always_unused *extack)
5069 {
5070 int err;
5071
5072 if (vid) {
5073 netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5074 return -EINVAL;
5075 }
5076 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5077 netdev_err(dev, "FDB only supports static addresses\n");
5078 return -EINVAL;
5079 }
5080
5081 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5082 err = dev_uc_add_excl(dev, addr);
5083 else if (is_multicast_ether_addr(addr))
5084 err = dev_mc_add_excl(dev, addr);
5085 else
5086 err = -EINVAL;
5087
5088 /* Only return duplicate errors if NLM_F_EXCL is set */
5089 if (err == -EEXIST && !(flags & NLM_F_EXCL))
5090 err = 0;
5091
5092 return err;
5093 }
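
/* Usage note: this callback is reached via the RTM_NEWNEIGH netlink path,
 * e.g. "bridge fdb add 01:00:5e:00:00:42 dev <iface> self permanent"
 * (address and interface hypothetical); non-static entries and
 * VLAN-qualified entries are rejected above.
 */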
5094
5095 /**
5096 * ice_fdb_del - delete an entry from the hardware database
5097 * @ndm: the input from the stack
5098 * @tb: pointer to array of nladdr (unused)
5099 * @dev: the net device pointer
5100 * @addr: the MAC address entry being removed
5101 * @vid: VLAN ID
5102 */
5103 static int
5104 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5105 struct net_device *dev, const unsigned char *addr,
5106 __always_unused u16 vid)
5107 {
5108 int err;
5109
5110 if (ndm->ndm_state & NUD_PERMANENT) {
5111 netdev_err(dev, "FDB only supports static addresses\n");
5112 return -EINVAL;
5113 }
5114
5115 if (is_unicast_ether_addr(addr))
5116 err = dev_uc_del(dev, addr);
5117 else if (is_multicast_ether_addr(addr))
5118 err = dev_mc_del(dev, addr);
5119 else
5120 err = -EINVAL;
5121
5122 return err;
5123 }
5124
5125 /**
5126 * ice_set_features - set the netdev feature flags
5127 * @netdev: ptr to the netdev being adjusted
5128 * @features: the feature set that the stack is suggesting
5129 */
5130 static int
5131 ice_set_features(struct net_device *netdev, netdev_features_t features)
5132 {
5133 struct ice_netdev_priv *np = netdev_priv(netdev);
5134 struct ice_vsi *vsi = np->vsi;
5135 struct ice_pf *pf = vsi->back;
5136 int ret = 0;
5137
5138 /* Don't set any netdev advanced features with device in Safe Mode */
5139 if (ice_is_safe_mode(vsi->back)) {
5140 dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5141 return ret;
5142 }
5143
5144 /* Do not change setting during reset */
5145 if (ice_is_reset_in_progress(pf->state)) {
5146 dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
5147 return -EBUSY;
5148 }
5149
5150 /* Multiple features can be changed in one call so keep features in
5151 * separate if/else statements to guarantee each feature is checked
5152 */
5153 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5154 ret = ice_vsi_manage_rss_lut(vsi, true);
5155 else if (!(features & NETIF_F_RXHASH) &&
5156 netdev->features & NETIF_F_RXHASH)
5157 ret = ice_vsi_manage_rss_lut(vsi, false);
5158
5159 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5160 !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5161 ret = ice_vsi_manage_vlan_stripping(vsi, true);
5162 else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5163 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5164 ret = ice_vsi_manage_vlan_stripping(vsi, false);
5165
5166 if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5167 !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5168 ret = ice_vsi_manage_vlan_insertion(vsi);
5169 else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5170 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5171 ret = ice_vsi_manage_vlan_insertion(vsi);
5172
5173 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5174 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5175 ret = ice_cfg_vlan_pruning(vsi, true, false);
5176 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5177 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5178 ret = ice_cfg_vlan_pruning(vsi, false, false);
5179
5180 if ((features & NETIF_F_NTUPLE) &&
5181 !(netdev->features & NETIF_F_NTUPLE)) {
5182 ice_vsi_manage_fdir(vsi, true);
5183 ice_init_arfs(vsi);
5184 } else if (!(features & NETIF_F_NTUPLE) &&
5185 (netdev->features & NETIF_F_NTUPLE)) {
5186 ice_vsi_manage_fdir(vsi, false);
5187 ice_clear_arfs(vsi);
5188 }
5189
5190 return ret;
5191 }
5192
5193 /**
5194 * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5195 * @vsi: VSI to setup VLAN properties for
5196 */
5197 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5198 {
5199 int ret = 0;
5200
5201 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5202 ret = ice_vsi_manage_vlan_stripping(vsi, true);
5203 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5204 ret = ice_vsi_manage_vlan_insertion(vsi);
5205
5206 return ret;
5207 }
5208
5209 /**
5210 * ice_vsi_cfg - Setup the VSI
5211 * @vsi: the VSI being configured
5212 *
5213 * Return 0 on success and negative value on error
5214 */
5215 int ice_vsi_cfg(struct ice_vsi *vsi)
5216 {
5217 int err;
5218
5219 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
5220 ice_set_rx_mode(vsi->netdev);
5221
5222 err = ice_vsi_vlan_setup(vsi);
5223 if (err)
5224 return err;
5225 }
5226 ice_vsi_cfg_dcb_rings(vsi);
5227
5228 err = ice_vsi_cfg_lan_txqs(vsi);
5229 if (!err && ice_is_xdp_ena_vsi(vsi))
5230 err = ice_vsi_cfg_xdp_txqs(vsi);
5231 if (!err)
5232 err = ice_vsi_cfg_rxqs(vsi);
5233
5234 return err;
5235 }
5236
5237 /**
5238 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5239 * @vsi: the VSI being configured
5240 */
5241 static void ice_napi_enable_all(struct ice_vsi *vsi)
5242 {
5243 int q_idx;
5244
5245 if (!vsi->netdev)
5246 return;
5247
5248 ice_for_each_q_vector(vsi, q_idx) {
5249 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5250
5251 if (q_vector->rx.ring || q_vector->tx.ring)
5252 napi_enable(&q_vector->napi);
5253 }
5254 }
5255
5256 /**
5257 * ice_up_complete - Finish the last steps of bringing up a connection
5258 * @vsi: The VSI being configured
5259 *
5260 * Return 0 on success and negative value on error
5261 */
5262 static int ice_up_complete(struct ice_vsi *vsi)
5263 {
5264 struct ice_pf *pf = vsi->back;
5265 int err;
5266
5267 ice_vsi_cfg_msix(vsi);
5268
5269 /* Enable only Rx rings, Tx rings were enabled by the FW when the
5270 * Tx queue group list was configured and the context bits were
5271 * programmed using ice_vsi_cfg_txqs
5272 */
5273 err = ice_vsi_start_all_rx_rings(vsi);
5274 if (err)
5275 return err;
5276
5277 clear_bit(__ICE_DOWN, vsi->state);
5278 ice_napi_enable_all(vsi);
5279 ice_vsi_ena_irq(vsi);
5280
5281 if (vsi->port_info &&
5282 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5283 vsi->netdev && vsi->type == ICE_VSI_PF) {
5284 ice_print_link_msg(vsi, true);
5285 netif_tx_start_all_queues(vsi->netdev);
5286 netif_carrier_on(vsi->netdev);
5287 }
5288
5289 /* Perform an initial read of the statistics registers now to
5290 * set the baseline so counters are ready when interface is up
5291 */
5292 ice_update_eth_stats(vsi);
5293
5294 if (vsi->type == ICE_VSI_PF)
5295 ice_service_task_schedule(pf);
5296
5297 return 0;
5298 }
5299
5300 /**
5301 * ice_up - Bring the connection back up after being down
5302 * @vsi: VSI being configured
5303 */
5304 int ice_up(struct ice_vsi *vsi)
5305 {
5306 int err;
5307
5308 err = ice_vsi_cfg(vsi);
5309 if (!err)
5310 err = ice_up_complete(vsi);
5311
5312 return err;
5313 }
5314
5315 /**
5316 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5317 * @ring: Tx or Rx ring to read stats from
5318 * @pkts: packets stats counter
5319 * @bytes: bytes stats counter
5320 *
5321 * This function fetches stats from the ring, using the seqcount retry
5322 * loop needed to read u64 values atomically on 32 bit machines.
5323 */
5324 static void
5325 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5326 {
5327 unsigned int start;
5328 *pkts = 0;
5329 *bytes = 0;
5330
5331 if (!ring)
5332 return;
5333 do {
5334 start = u64_stats_fetch_begin_irq(&ring->syncp);
5335 *pkts = ring->stats.pkts;
5336 *bytes = ring->stats.bytes;
5337 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5338 }
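
/* For reference, the matching writer side in the ring hot path brackets
 * its counter updates with the same seqcount, which is what makes the
 * fetch/retry loop above safe on 32 bit; a minimal sketch:
 *
 *	u64_stats_update_begin(&ring->syncp);
 *	ring->stats.pkts += pkts;
 *	ring->stats.bytes += bytes;
 *	u64_stats_update_end(&ring->syncp);
 */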
5339
5340 /**
5341 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5342 * @vsi: the VSI to be updated
5343 * @rings: rings to work on
5344 * @count: number of rings
5345 */
5346 static void
5347 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5348 u16 count)
5349 {
5350 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5351 u16 i;
5352
5353 for (i = 0; i < count; i++) {
5354 struct ice_ring *ring;
5355 u64 pkts, bytes;
5356
5357 ring = READ_ONCE(rings[i]);
if (!ring)
continue;
5358 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5359 vsi_stats->tx_packets += pkts;
5360 vsi_stats->tx_bytes += bytes;
5361 vsi->tx_restart += ring->tx_stats.restart_q;
5362 vsi->tx_busy += ring->tx_stats.tx_busy;
5363 vsi->tx_linearize += ring->tx_stats.tx_linearize;
5364 }
5365 }
5366
5367 /**
5368 * ice_update_vsi_ring_stats - Update VSI stats counters
5369 * @vsi: the VSI to be updated
5370 */
5371 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5372 {
5373 struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5374 struct ice_ring *ring;
5375 u64 pkts, bytes;
5376 int i;
5377
5378 /* reset netdev stats */
5379 vsi_stats->tx_packets = 0;
5380 vsi_stats->tx_bytes = 0;
5381 vsi_stats->rx_packets = 0;
5382 vsi_stats->rx_bytes = 0;
5383
5384 /* reset non-netdev (extended) stats */
5385 vsi->tx_restart = 0;
5386 vsi->tx_busy = 0;
5387 vsi->tx_linearize = 0;
5388 vsi->rx_buf_failed = 0;
5389 vsi->rx_page_failed = 0;
5390 vsi->rx_gro_dropped = 0;
5391
5392 rcu_read_lock();
5393
5394 /* update Tx rings counters */
5395 ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5396
5397 /* update Rx rings counters */
5398 ice_for_each_rxq(vsi, i) {
5399 ring = READ_ONCE(vsi->rx_rings[i]);
if (!ring)
continue;
5400 ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5401 vsi_stats->rx_packets += pkts;
5402 vsi_stats->rx_bytes += bytes;
5403 vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5404 vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5405 vsi->rx_gro_dropped += ring->rx_stats.gro_dropped;
5406 }
5407
5408 /* update XDP Tx rings counters */
5409 if (ice_is_xdp_ena_vsi(vsi))
5410 ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5411 vsi->num_xdp_txq);
5412
5413 rcu_read_unlock();
5414 }
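
/* Note (descriptive, inferred from the code above): the ring pointers are
 * fetched with READ_ONCE() under rcu_read_lock() because the Tx/XDP ring
 * arrays can be replaced while stats are being read, e.g. when an XDP
 * program is attached or detached. The per-ring seqcount sampled in
 * ice_fetch_u64_stats_per_ring() then protects the counters themselves.
 */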
5415
5416 /**
5417 * ice_update_vsi_stats - Update VSI stats counters
5418 * @vsi: the VSI to be updated
5419 */
5420 void ice_update_vsi_stats(struct ice_vsi *vsi)
5421 {
5422 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5423 struct ice_eth_stats *cur_es = &vsi->eth_stats;
5424 struct ice_pf *pf = vsi->back;
5425
5426 if (test_bit(__ICE_DOWN, vsi->state) ||
5427 test_bit(__ICE_CFG_BUSY, pf->state))
5428 return;
5429
5430 /* get stats as recorded by Tx/Rx rings */
5431 ice_update_vsi_ring_stats(vsi);
5432
5433 /* get VSI stats as recorded by the hardware */
5434 ice_update_eth_stats(vsi);
5435
5436 cur_ns->tx_errors = cur_es->tx_errors;
5437 cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped;
5438 cur_ns->tx_dropped = cur_es->tx_discards;
5439 cur_ns->multicast = cur_es->rx_multicast;
5440
5441 /* update some more netdev stats if this is main VSI */
5442 if (vsi->type == ICE_VSI_PF) {
5443 cur_ns->rx_crc_errors = pf->stats.crc_errors;
5444 cur_ns->rx_errors = pf->stats.crc_errors +
5445 pf->stats.illegal_bytes +
5446 pf->stats.rx_len_errors +
5447 pf->stats.rx_undersize +
5448 pf->hw_csum_rx_error +
5449 pf->stats.rx_jabber +
5450 pf->stats.rx_fragments +
5451 pf->stats.rx_oversize;
5452 cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5453 /* record drops from the port level */
5454 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5455 }
5456 }
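
/* Stats flow through three levels that feed one another: per-ring software
 * counters (summed by ice_update_vsi_ring_stats()), per-VSI hardware
 * counters (read by ice_update_eth_stats()), and per-port counters (read
 * by ice_update_pf_stats() below). For the main VSI, the port-level error
 * counters are folded into its rtnl_link_stats64 here.
 */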
5457
5458 /**
5459 * ice_update_pf_stats - Update PF port stats counters
5460 * @pf: PF whose stats need to be updated
5461 */
5462 void ice_update_pf_stats(struct ice_pf *pf)
5463 {
5464 struct ice_hw_port_stats *prev_ps, *cur_ps;
5465 struct ice_hw *hw = &pf->hw;
5466 u16 fd_ctr_base;
5467 u8 port;
5468
5469 port = hw->port_info->lport;
5470 prev_ps = &pf->stats_prev;
5471 cur_ps = &pf->stats;
5472
5473 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5474 &prev_ps->eth.rx_bytes,
5475 &cur_ps->eth.rx_bytes);
5476
5477 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5478 &prev_ps->eth.rx_unicast,
5479 &cur_ps->eth.rx_unicast);
5480
5481 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5482 &prev_ps->eth.rx_multicast,
5483 &cur_ps->eth.rx_multicast);
5484
5485 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5486 &prev_ps->eth.rx_broadcast,
5487 &cur_ps->eth.rx_broadcast);
5488
5489 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5490 &prev_ps->eth.rx_discards,
5491 &cur_ps->eth.rx_discards);
5492
5493 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5494 &prev_ps->eth.tx_bytes,
5495 &cur_ps->eth.tx_bytes);
5496
5497 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5498 &prev_ps->eth.tx_unicast,
5499 &cur_ps->eth.tx_unicast);
5500
5501 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5502 &prev_ps->eth.tx_multicast,
5503 &cur_ps->eth.tx_multicast);
5504
5505 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5506 &prev_ps->eth.tx_broadcast,
5507 &cur_ps->eth.tx_broadcast);
5508
5509 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5510 &prev_ps->tx_dropped_link_down,
5511 &cur_ps->tx_dropped_link_down);
5512
5513 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5514 &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5515
5516 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5517 &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5518
5519 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5520 &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5521
5522 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5523 &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5524
5525 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5526 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5527
5528 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5529 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5530
5531 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5532 &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5533
5534 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5535 &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5536
5537 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5538 &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5539
5540 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5541 &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5542
5543 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5544 &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5545
5546 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5547 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5548
5549 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5550 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5551
5552 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5553 &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5554
5555 fd_ctr_base = hw->fd_ctr_base;
5556
5557 ice_stat_update40(hw,
5558 GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5559 pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5560 &cur_ps->fd_sb_match);
5561 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5562 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5563
5564 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5565 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5566
5567 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5568 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5569
5570 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5571 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5572
5573 ice_update_dcb_stats(pf);
5574
5575 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5576 &prev_ps->crc_errors, &cur_ps->crc_errors);
5577
5578 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5579 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5580
5581 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5582 &prev_ps->mac_local_faults,
5583 &cur_ps->mac_local_faults);
5584
5585 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5586 &prev_ps->mac_remote_faults,
5587 &cur_ps->mac_remote_faults);
5588
5589 ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5590 &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5591
5592 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5593 &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5594
5595 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5596 &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5597
5598 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5599 &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5600
5601 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5602 &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5603
5604 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5605
5606 pf->stat_prev_loaded = true;
5607 }
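
/* A minimal sketch of the delta accumulation that the ice_stat_update40()
 * and ice_stat_update32() helpers are assumed to perform (their bodies
 * live elsewhere in the driver): read the free-running hardware counter,
 * then add the wrap-corrected delta since the previous snapshot:
 *
 *	new = rd64(hw, reg) & (BIT_ULL(40) - 1);
 *	if (prev_loaded) {
 *		if (new >= *prev)
 *			*cur += new - *prev;
 *		else
 *			*cur += (new + BIT_ULL(40)) - *prev;
 *	}
 *	*prev = new;
 *
 * This is also why pf->stat_prev_loaded is set to true only after the
 * first full pass through this function.
 */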
5608
5609 /**
5610 * ice_get_stats64 - get statistics for network device structure
5611 * @netdev: network interface device structure
5612 * @stats: main device statistics structure
5613 */
5614 static
5615 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5616 {
5617 struct ice_netdev_priv *np = netdev_priv(netdev);
5618 struct rtnl_link_stats64 *vsi_stats;
5619 struct ice_vsi *vsi = np->vsi;
5620
5621 vsi_stats = &vsi->net_stats;
5622
5623 if (!vsi->num_txq || !vsi->num_rxq)
5624 return;
5625
5626 /* netdev packet/byte stats come from per-ring counters, obtained by
5627 * summing up the ring counters (done by ice_update_vsi_ring_stats).
5628 * Only call the update routine and read the counters if the VSI is
5629 * not down.
5630 */
5631 if (!test_bit(__ICE_DOWN, vsi->state))
5632 ice_update_vsi_ring_stats(vsi);
5633 stats->tx_packets = vsi_stats->tx_packets;
5634 stats->tx_bytes = vsi_stats->tx_bytes;
5635 stats->rx_packets = vsi_stats->rx_packets;
5636 stats->rx_bytes = vsi_stats->rx_bytes;
5637
5638 /* The rest of the stats can be read from the hardware but instead we
5639 * just return values that the watchdog task has already obtained from
5640 * the hardware.
5641 */
5642 stats->multicast = vsi_stats->multicast;
5643 stats->tx_errors = vsi_stats->tx_errors;
5644 stats->tx_dropped = vsi_stats->tx_dropped;
5645 stats->rx_errors = vsi_stats->rx_errors;
5646 stats->rx_dropped = vsi_stats->rx_dropped;
5647 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
5648 stats->rx_length_errors = vsi_stats->rx_length_errors;
5649 }
5650
5651 /**
5652 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5653 * @vsi: VSI having NAPI disabled
5654 */
5655 static void ice_napi_disable_all(struct ice_vsi *vsi)
5656 {
5657 int q_idx;
5658
5659 if (!vsi->netdev)
5660 return;
5661
5662 ice_for_each_q_vector(vsi, q_idx) {
5663 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5664
5665 if (q_vector->rx.ring || q_vector->tx.ring)
5666 napi_disable(&q_vector->napi);
5667 }
5668 }
5669
5670 /**
5671 * ice_down - Shutdown the connection
5672 * @vsi: The VSI being stopped
5673 */
5674 int ice_down(struct ice_vsi *vsi)
5675 {
5676 int i, tx_err, rx_err, link_err = 0;
5677
5678 /* Caller of this function is expected to set the
5679 * vsi->state __ICE_DOWN bit
5680 */
5681 if (vsi->netdev) {
5682 netif_carrier_off(vsi->netdev);
5683 netif_tx_disable(vsi->netdev);
5684 }
5685
5686 ice_vsi_dis_irq(vsi);
5687
5688 tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
5689 if (tx_err)
5690 netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
5691 vsi->vsi_num, tx_err);
5692 if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
5693 tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
5694 if (tx_err)
5695 netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
5696 vsi->vsi_num, tx_err);
5697 }
5698
5699 rx_err = ice_vsi_stop_all_rx_rings(vsi);
5700 if (rx_err)
5701 netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
5702 vsi->vsi_num, rx_err);
5703
5704 ice_napi_disable_all(vsi);
5705
5706 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
5707 link_err = ice_force_phys_link_state(vsi, false);
5708 if (link_err)
5709 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
5710 vsi->vsi_num, link_err);
5711 }
5712
5713 ice_for_each_txq(vsi, i)
5714 ice_clean_tx_ring(vsi->tx_rings[i]);
5715
5716 ice_for_each_rxq(vsi, i)
5717 ice_clean_rx_ring(vsi->rx_rings[i]);
5718
5719 if (tx_err || rx_err || link_err) {
5720 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
5721 vsi->vsi_num, vsi->vsw->sw_id);
5722 return -EIO;
5723 }
5724
5725 return 0;
5726 }
5727
5728 /**
5729 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
5730 * @vsi: VSI having resources allocated
5731 *
5732 * Return 0 on success, negative on failure
5733 */
5734 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
5735 {
5736 int i, err = 0;
5737
5738 if (!vsi->num_txq) {
5739 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
5740 vsi->vsi_num);
5741 return -EINVAL;
5742 }
5743
5744 ice_for_each_txq(vsi, i) {
5745 struct ice_ring *ring = vsi->tx_rings[i];
5746
5747 if (!ring)
5748 return -EINVAL;
5749
5750 ring->netdev = vsi->netdev;
5751 err = ice_setup_tx_ring(ring);
5752 if (err)
5753 break;
5754 }
5755
5756 return err;
5757 }
5758
5759 /**
5760 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
5761 * @vsi: VSI having resources allocated
5762 *
5763 * Return 0 on success, negative on failure
5764 */
5765 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
5766 {
5767 int i, err = 0;
5768
5769 if (!vsi->num_rxq) {
5770 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
5771 vsi->vsi_num);
5772 return -EINVAL;
5773 }
5774
5775 ice_for_each_rxq(vsi, i) {
5776 struct ice_ring *ring = vsi->rx_rings[i];
5777
5778 if (!ring)
5779 return -EINVAL;
5780
5781 ring->netdev = vsi->netdev;
5782 err = ice_setup_rx_ring(ring);
5783 if (err)
5784 break;
5785 }
5786
5787 return err;
5788 }
5789
5790 /**
5791 * ice_vsi_open_ctrl - open control VSI for use
5792 * @vsi: the VSI to open
5793 *
5794 * Initialization of the Control VSI
5795 *
5796 * Returns 0 on success, negative value on error
5797 */
5798 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
5799 {
5800 char int_name[ICE_INT_NAME_STR_LEN];
5801 struct ice_pf *pf = vsi->back;
5802 struct device *dev;
5803 int err;
5804
5805 dev = ice_pf_to_dev(pf);
5806 /* allocate descriptors */
5807 err = ice_vsi_setup_tx_rings(vsi);
5808 if (err)
5809 goto err_setup_tx;
5810
5811 err = ice_vsi_setup_rx_rings(vsi);
5812 if (err)
5813 goto err_setup_rx;
5814
5815 err = ice_vsi_cfg(vsi);
5816 if (err)
5817 goto err_setup_rx;
5818
5819 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
5820 dev_driver_string(dev), dev_name(dev));
5821 err = ice_vsi_req_irq_msix(vsi, int_name);
5822 if (err)
5823 goto err_setup_rx;
5824
5825 ice_vsi_cfg_msix(vsi);
5826
5827 err = ice_vsi_start_all_rx_rings(vsi);
5828 if (err)
5829 goto err_up_complete;
5830
5831 clear_bit(__ICE_DOWN, vsi->state);
5832 ice_vsi_ena_irq(vsi);
5833
5834 return 0;
5835
5836 err_up_complete:
5837 ice_down(vsi);
5838 err_setup_rx:
5839 ice_vsi_free_rx_rings(vsi);
5840 err_setup_tx:
5841 ice_vsi_free_tx_rings(vsi);
5842
5843 return err;
5844 }
5845
5846 /**
5847 * ice_vsi_open - Called when a network interface is made active
5848 * @vsi: the VSI to open
5849 *
5850 * Initialization of the VSI
5851 *
5852 * Returns 0 on success, negative value on error
5853 */
5854 static int ice_vsi_open(struct ice_vsi *vsi)
5855 {
5856 char int_name[ICE_INT_NAME_STR_LEN];
5857 struct ice_pf *pf = vsi->back;
5858 int err;
5859
5860 /* allocate descriptors */
5861 err = ice_vsi_setup_tx_rings(vsi);
5862 if (err)
5863 goto err_setup_tx;
5864
5865 err = ice_vsi_setup_rx_rings(vsi);
5866 if (err)
5867 goto err_setup_rx;
5868
5869 err = ice_vsi_cfg(vsi);
5870 if (err)
5871 goto err_setup_rx;
5872
5873 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
5874 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
5875 err = ice_vsi_req_irq_msix(vsi, int_name);
5876 if (err)
5877 goto err_setup_rx;
5878
5879 /* Notify the stack of the actual queue counts. */
5880 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
5881 if (err)
5882 goto err_set_qs;
5883
5884 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
5885 if (err)
5886 goto err_set_qs;
5887
5888 err = ice_up_complete(vsi);
5889 if (err)
5890 goto err_up_complete;
5891
5892 return 0;
5893
5894 err_up_complete:
5895 ice_down(vsi);
5896 err_set_qs:
5897 ice_vsi_free_irq(vsi);
5898 err_setup_rx:
5899 ice_vsi_free_rx_rings(vsi);
5900 err_setup_tx:
5901 ice_vsi_free_tx_rings(vsi);
5902
5903 return err;
5904 }
5905
5906 /**
5907 * ice_vsi_release_all - Delete all VSIs
5908 * @pf: PF from which all VSIs are being removed
5909 */
5910 static void ice_vsi_release_all(struct ice_pf *pf)
5911 {
5912 int err, i;
5913
5914 if (!pf->vsi)
5915 return;
5916
5917 ice_for_each_vsi(pf, i) {
5918 if (!pf->vsi[i])
5919 continue;
5920
5921 err = ice_vsi_release(pf->vsi[i]);
5922 if (err)
5923 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
5924 i, err, pf->vsi[i]->vsi_num);
5925 }
5926 }
5927
5928 /**
5929 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
5930 * @pf: pointer to the PF instance
5931 * @type: VSI type to rebuild
5932 *
5933 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
5934 */
5935 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
5936 {
5937 struct device *dev = ice_pf_to_dev(pf);
5938 enum ice_status status;
5939 int i, err;
5940
5941 ice_for_each_vsi(pf, i) {
5942 struct ice_vsi *vsi = pf->vsi[i];
5943
5944 if (!vsi || vsi->type != type)
5945 continue;
5946
5947 /* rebuild the VSI */
5948 err = ice_vsi_rebuild(vsi, true);
5949 if (err) {
5950 dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
5951 err, vsi->idx, ice_vsi_type_str(type));
5952 return err;
5953 }
5954
5955 /* replay filters for the VSI */
5956 status = ice_replay_vsi(&pf->hw, vsi->idx);
5957 if (status) {
5958 dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
5959 ice_stat_str(status), vsi->idx,
5960 ice_vsi_type_str(type));
5961 return -EIO;
5962 }
5963
5964 /* Re-map HW VSI number, using the VSI handle that has been
5965 * previously validated in the ice_replay_vsi() call above
5966 */
5967 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
5968
5969 /* enable the VSI */
5970 err = ice_ena_vsi(vsi, false);
5971 if (err) {
5972 dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
5973 err, vsi->idx, ice_vsi_type_str(type));
5974 return err;
5975 }
5976
5977 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
5978 ice_vsi_type_str(type));
5979 }
5980
5981 return 0;
5982 }
5983
5984 /**
5985 * ice_update_pf_netdev_link - Update PF netdev link status
5986 * @pf: pointer to the PF instance
5987 */
5988 static void ice_update_pf_netdev_link(struct ice_pf *pf)
5989 {
5990 bool link_up;
5991 int i;
5992
5993 ice_for_each_vsi(pf, i) {
5994 struct ice_vsi *vsi = pf->vsi[i];
5995
5996 if (!vsi || vsi->type != ICE_VSI_PF)
5997 return;
5998
5999 ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6000 if (link_up) {
6001 netif_carrier_on(pf->vsi[i]->netdev);
6002 netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6003 } else {
6004 netif_carrier_off(pf->vsi[i]->netdev);
6005 netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6006 }
6007 }
6008 }
6009
6010 /**
6011 * ice_rebuild - rebuild after reset
6012 * @pf: PF to rebuild
6013 * @reset_type: type of reset
6014 *
6015 * Do not rebuild VF VSIs in this flow because that is already handled via
6016 * ice_reset_all_vfs(). This is because the requirements for resetting a VF
6017 * after a PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we
6018 * don't want to reset/rebuild all the VF VSIs twice.
6019 */
6020 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6021 {
6022 struct device *dev = ice_pf_to_dev(pf);
6023 struct ice_hw *hw = &pf->hw;
6024 enum ice_status ret;
6025 int err;
6026
6027 if (test_bit(__ICE_DOWN, pf->state))
6028 goto clear_recovery;
6029
6030 dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6031
6032 ret = ice_init_all_ctrlq(hw);
6033 if (ret) {
6034 dev_err(dev, "control queues init failed %s\n",
6035 ice_stat_str(ret));
6036 goto err_init_ctrlq;
6037 }
6038
6039 /* if DDP was previously loaded successfully */
6040 if (!ice_is_safe_mode(pf)) {
6041 /* reload the SW DB of filter tables */
6042 if (reset_type == ICE_RESET_PFR)
6043 ice_fill_blk_tbls(hw);
6044 else
6045 /* Reload DDP Package after CORER/GLOBR reset */
6046 ice_load_pkg(NULL, pf);
6047 }
6048
6049 ret = ice_clear_pf_cfg(hw);
6050 if (ret) {
6051 dev_err(dev, "clear PF configuration failed %s\n",
6052 ice_stat_str(ret));
6053 goto err_init_ctrlq;
6054 }
6055
6056 if (pf->first_sw->dflt_vsi_ena)
6057 dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6058 /* clear the default VSI configuration if it exists */
6059 pf->first_sw->dflt_vsi = NULL;
6060 pf->first_sw->dflt_vsi_ena = false;
6061
6062 ice_clear_pxe_mode(hw);
6063
6064 ret = ice_get_caps(hw);
6065 if (ret) {
6066 dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6067 goto err_init_ctrlq;
6068 }
6069
6070 ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6071 if (ret) {
6072 dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6073 goto err_init_ctrlq;
6074 }
6075
6076 err = ice_sched_init_port(hw->port_info);
6077 if (err)
6078 goto err_sched_init_port;
6079
6080 /* start misc vector */
6081 err = ice_req_irq_msix_misc(pf);
6082 if (err) {
6083 dev_err(dev, "misc vector setup failed: %d\n", err);
6084 goto err_sched_init_port;
6085 }
6086
6087 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6088 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6089 if (!rd32(hw, PFQF_FD_SIZE)) {
6090 u16 unused, guar, b_effort;
6091
6092 guar = hw->func_caps.fd_fltr_guar;
6093 b_effort = hw->func_caps.fd_fltr_best_effort;
6094
6095 /* force guaranteed filter pool for PF */
6096 ice_alloc_fd_guar_item(hw, &unused, guar);
6097 /* force shared filter pool for PF */
6098 ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6099 }
6100 }
6101
6102 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6103 ice_dcb_rebuild(pf);
6104
6105 /* rebuild PF VSI */
6106 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6107 if (err) {
6108 dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6109 goto err_vsi_rebuild;
6110 }
6111
6112 /* If Flow Director is active */
6113 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6114 err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6115 if (err) {
6116 dev_err(dev, "control VSI rebuild failed: %d\n", err);
6117 goto err_vsi_rebuild;
6118 }
6119
6120 /* replay HW Flow Director recipes */
6121 if (hw->fdir_prof)
6122 ice_fdir_replay_flows(hw);
6123
6124 /* replay Flow Director filters */
6125 ice_fdir_replay_fltrs(pf);
6126
6127 ice_rebuild_arfs(pf);
6128 }
6129
6130 ice_update_pf_netdev_link(pf);
6131
6132 /* tell the firmware we are up */
6133 ret = ice_send_version(pf);
6134 if (ret) {
6135 dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6136 ice_stat_str(ret));
6137 goto err_vsi_rebuild;
6138 }
6139
6140 ice_replay_post(hw);
6141
6142 /* if we get here, reset flow is successful */
6143 clear_bit(__ICE_RESET_FAILED, pf->state);
6144 return;
6145
6146 err_vsi_rebuild:
6147 err_sched_init_port:
6148 ice_sched_cleanup_all(hw);
6149 err_init_ctrlq:
6150 ice_shutdown_all_ctrlq(hw);
6151 set_bit(__ICE_RESET_FAILED, pf->state);
6152 clear_recovery:
6153 /* set this bit in PF state to control service task scheduling */
6154 set_bit(__ICE_NEEDS_RESTART, pf->state);
6155 dev_err(dev, "Rebuild failed, unload and reload driver\n");
6156 }
6157
6158 /**
6159 * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6160 * @vsi: Pointer to VSI structure
6161 */
6162 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6163 {
6164 if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6165 return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6166 else
6167 return ICE_RXBUF_3072;
6168 }
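
/* Worked example: with 4K pages and legacy-rx disabled, the Rx buffer is
 * ICE_RXBUF_3072 bytes, so the largest MTU that still fits one XDP buffer
 * is 3072 - ICE_ETH_PKT_HDR_PAD. With 8K+ pages or legacy-rx enabled, the
 * 2048-byte buffer must additionally reserve XDP_PACKET_HEADROOM.
 * ice_change_mtu() below enforces exactly this bound.
 */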
6169
6170 /**
6171 * ice_change_mtu - NDO callback to change the MTU
6172 * @netdev: network interface device structure
6173 * @new_mtu: new value for maximum frame size
6174 *
6175 * Returns 0 on success, negative on failure
6176 */
6177 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6178 {
6179 struct ice_netdev_priv *np = netdev_priv(netdev);
6180 struct ice_vsi *vsi = np->vsi;
6181 struct ice_pf *pf = vsi->back;
6182 u8 count = 0;
6183
6184 if (new_mtu == (int)netdev->mtu) {
6185 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6186 return 0;
6187 }
6188
6189 if (ice_is_xdp_ena_vsi(vsi)) {
6190 int frame_size = ice_max_xdp_frame_size(vsi);
6191
6192 if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6193 netdev_err(netdev, "max MTU for XDP usage is %d\n",
6194 frame_size - ICE_ETH_PKT_HDR_PAD);
6195 return -EINVAL;
6196 }
6197 }
6198
6199 if (new_mtu < (int)netdev->min_mtu) {
6200 netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
6201 netdev->min_mtu);
6202 return -EINVAL;
6203 } else if (new_mtu > (int)netdev->max_mtu) {
6204 netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
6205 netdev->min_mtu);
6206 return -EINVAL;
6207 }
6208 /* if a reset is in progress, wait for some time for it to complete */
6209 do {
6210 if (ice_is_reset_in_progress(pf->state)) {
6211 count++;
6212 usleep_range(1000, 2000);
6213 } else {
6214 break;
6215 }
6216
6217 } while (count < 100);
6218
6219 if (count == 100) {
6220 netdev_err(netdev, "can't change MTU. Device is busy\n");
6221 return -EBUSY;
6222 }
6223
6224 netdev->mtu = (unsigned int)new_mtu;
6225
6226 /* if VSI is up, bring it down and then back up */
6227 if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
6228 int err;
6229
6230 err = ice_down(vsi);
6231 if (err) {
6232 netdev_err(netdev, "change MTU if_up err %d\n", err);
6233 return err;
6234 }
6235
6236 err = ice_up(vsi);
6237 if (err) {
6238 netdev_err(netdev, "change MTU if_up err %d\n", err);
6239 return err;
6240 }
6241 }
6242
6243 netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6244 return 0;
6245 }
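
/* From user space this path is typically exercised with:
 *
 *	ip link set dev <ifname> mtu 3000
 *
 * which lands here via ndo_change_mtu. If the interface is up, the VSI is
 * bounced (ice_down()/ice_up()) so the rings are re-sized for the new
 * frame size.
 */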
6246
6247 /**
6248 * ice_aq_str - convert AQ err code to a string
6249 * @aq_err: the AQ error code to convert
6250 */
6251 const char *ice_aq_str(enum ice_aq_err aq_err)
6252 {
6253 switch (aq_err) {
6254 case ICE_AQ_RC_OK:
6255 return "OK";
6256 case ICE_AQ_RC_EPERM:
6257 return "ICE_AQ_RC_EPERM";
6258 case ICE_AQ_RC_ENOENT:
6259 return "ICE_AQ_RC_ENOENT";
6260 case ICE_AQ_RC_ENOMEM:
6261 return "ICE_AQ_RC_ENOMEM";
6262 case ICE_AQ_RC_EBUSY:
6263 return "ICE_AQ_RC_EBUSY";
6264 case ICE_AQ_RC_EEXIST:
6265 return "ICE_AQ_RC_EEXIST";
6266 case ICE_AQ_RC_EINVAL:
6267 return "ICE_AQ_RC_EINVAL";
6268 case ICE_AQ_RC_ENOSPC:
6269 return "ICE_AQ_RC_ENOSPC";
6270 case ICE_AQ_RC_ENOSYS:
6271 return "ICE_AQ_RC_ENOSYS";
6272 case ICE_AQ_RC_EMODE:
6273 return "ICE_AQ_RC_EMODE";
6274 case ICE_AQ_RC_ENOSEC:
6275 return "ICE_AQ_RC_ENOSEC";
6276 case ICE_AQ_RC_EBADSIG:
6277 return "ICE_AQ_RC_EBADSIG";
6278 case ICE_AQ_RC_ESVN:
6279 return "ICE_AQ_RC_ESVN";
6280 case ICE_AQ_RC_EBADMAN:
6281 return "ICE_AQ_RC_EBADMAN";
6282 case ICE_AQ_RC_EBADBUF:
6283 return "ICE_AQ_RC_EBADBUF";
6284 }
6285
6286 return "ICE_AQ_RC_UNKNOWN";
6287 }
6288
6289 /**
6290 * ice_stat_str - convert status err code to a string
6291 * @stat_err: the status error code to convert
6292 */
6293 const char *ice_stat_str(enum ice_status stat_err)
6294 {
6295 switch (stat_err) {
6296 case ICE_SUCCESS:
6297 return "OK";
6298 case ICE_ERR_PARAM:
6299 return "ICE_ERR_PARAM";
6300 case ICE_ERR_NOT_IMPL:
6301 return "ICE_ERR_NOT_IMPL";
6302 case ICE_ERR_NOT_READY:
6303 return "ICE_ERR_NOT_READY";
6304 case ICE_ERR_NOT_SUPPORTED:
6305 return "ICE_ERR_NOT_SUPPORTED";
6306 case ICE_ERR_BAD_PTR:
6307 return "ICE_ERR_BAD_PTR";
6308 case ICE_ERR_INVAL_SIZE:
6309 return "ICE_ERR_INVAL_SIZE";
6310 case ICE_ERR_DEVICE_NOT_SUPPORTED:
6311 return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6312 case ICE_ERR_RESET_FAILED:
6313 return "ICE_ERR_RESET_FAILED";
6314 case ICE_ERR_FW_API_VER:
6315 return "ICE_ERR_FW_API_VER";
6316 case ICE_ERR_NO_MEMORY:
6317 return "ICE_ERR_NO_MEMORY";
6318 case ICE_ERR_CFG:
6319 return "ICE_ERR_CFG";
6320 case ICE_ERR_OUT_OF_RANGE:
6321 return "ICE_ERR_OUT_OF_RANGE";
6322 case ICE_ERR_ALREADY_EXISTS:
6323 return "ICE_ERR_ALREADY_EXISTS";
6324 case ICE_ERR_NVM_CHECKSUM:
6325 return "ICE_ERR_NVM_CHECKSUM";
6326 case ICE_ERR_BUF_TOO_SHORT:
6327 return "ICE_ERR_BUF_TOO_SHORT";
6328 case ICE_ERR_NVM_BLANK_MODE:
6329 return "ICE_ERR_NVM_BLANK_MODE";
6330 case ICE_ERR_IN_USE:
6331 return "ICE_ERR_IN_USE";
6332 case ICE_ERR_MAX_LIMIT:
6333 return "ICE_ERR_MAX_LIMIT";
6334 case ICE_ERR_RESET_ONGOING:
6335 return "ICE_ERR_RESET_ONGOING";
6336 case ICE_ERR_HW_TABLE:
6337 return "ICE_ERR_HW_TABLE";
6338 case ICE_ERR_DOES_NOT_EXIST:
6339 return "ICE_ERR_DOES_NOT_EXIST";
6340 case ICE_ERR_FW_DDP_MISMATCH:
6341 return "ICE_ERR_FW_DDP_MISMATCH";
6342 case ICE_ERR_AQ_ERROR:
6343 return "ICE_ERR_AQ_ERROR";
6344 case ICE_ERR_AQ_TIMEOUT:
6345 return "ICE_ERR_AQ_TIMEOUT";
6346 case ICE_ERR_AQ_FULL:
6347 return "ICE_ERR_AQ_FULL";
6348 case ICE_ERR_AQ_NO_WORK:
6349 return "ICE_ERR_AQ_NO_WORK";
6350 case ICE_ERR_AQ_EMPTY:
6351 return "ICE_ERR_AQ_EMPTY";
6352 case ICE_ERR_AQ_FW_CRITICAL:
6353 return "ICE_ERR_AQ_FW_CRITICAL";
6354 }
6355
6356 return "ICE_ERR_UNKNOWN";
6357 }
6358
6359 /**
6360 * ice_set_rss - Set RSS keys and lut
6361 * @vsi: Pointer to VSI structure
6362 * @seed: RSS hash seed
6363 * @lut: Lookup table
6364 * @lut_size: Lookup table size
6365 *
6366 * Returns 0 on success, negative on failure
6367 */
6368 int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
6369 {
6370 struct ice_pf *pf = vsi->back;
6371 struct ice_hw *hw = &pf->hw;
6372 enum ice_status status;
6373 struct device *dev;
6374
6375 dev = ice_pf_to_dev(pf);
6376 if (seed) {
6377 struct ice_aqc_get_set_rss_keys *buf =
6378 (struct ice_aqc_get_set_rss_keys *)seed;
6379
6380 status = ice_aq_set_rss_key(hw, vsi->idx, buf);
6381
6382 if (status) {
6383 dev_err(dev, "Cannot set RSS key, err %s aq_err %s\n",
6384 ice_stat_str(status),
6385 ice_aq_str(hw->adminq.sq_last_status));
6386 return -EIO;
6387 }
6388 }
6389
6390 if (lut) {
6391 status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
6392 lut, lut_size);
6393 if (status) {
6394 dev_err(dev, "Cannot set RSS lut, err %s aq_err %s\n",
6395 ice_stat_str(status),
6396 ice_aq_str(hw->adminq.sq_last_status));
6397 return -EIO;
6398 }
6399 }
6400
6401 return 0;
6402 }
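
/* Hypothetical caller sketch (modeled on an ethtool-style RSS configure
 * path): either argument may be NULL to leave that half of the RSS
 * configuration untouched.
 *
 *	err = ice_set_rss(vsi, user_key, NULL, 0);	// seed/key only
 *	if (!err)
 *		err = ice_set_rss(vsi, NULL, user_lut, lut_size);
 */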
6403
6404 /**
6405 * ice_get_rss - Get RSS keys and lut
6406 * @vsi: Pointer to VSI structure
6407 * @seed: Buffer to store the keys
6408 * @lut: Buffer to store the lookup table entries
6409 * @lut_size: Size of buffer to store the lookup table entries
6410 *
6411 * Returns 0 on success, negative on failure
6412 */
6413 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
6414 {
6415 struct ice_pf *pf = vsi->back;
6416 struct ice_hw *hw = &pf->hw;
6417 enum ice_status status;
6418 struct device *dev;
6419
6420 dev = ice_pf_to_dev(pf);
6421 if (seed) {
6422 struct ice_aqc_get_set_rss_keys *buf =
6423 (struct ice_aqc_get_set_rss_keys *)seed;
6424
6425 status = ice_aq_get_rss_key(hw, vsi->idx, buf);
6426 if (status) {
6427 dev_err(dev, "Cannot get RSS key, err %s aq_err %s\n",
6428 ice_stat_str(status),
6429 ice_aq_str(hw->adminq.sq_last_status));
6430 return -EIO;
6431 }
6432 }
6433
6434 if (lut) {
6435 status = ice_aq_get_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
6436 lut, lut_size);
6437 if (status) {
6438 dev_err(dev, "Cannot get RSS lut, err %s aq_err %s\n",
6439 ice_stat_str(status),
6440 ice_aq_str(hw->adminq.sq_last_status));
6441 return -EIO;
6442 }
6443 }
6444
6445 return 0;
6446 }
6447
6448 /**
6449 * ice_bridge_getlink - Get the hardware bridge mode
6450 * @skb: skb buff
6451 * @pid: process ID
6452 * @seq: RTNL message seq
6453 * @dev: the netdev being configured
6454 * @filter_mask: filter mask passed in
6455 * @nlflags: netlink flags passed in
6456 *
6457 * Return the bridge mode (VEB/VEPA)
6458 */
6459 static int
6460 ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
6461 struct net_device *dev, u32 filter_mask, int nlflags)
6462 {
6463 struct ice_netdev_priv *np = netdev_priv(dev);
6464 struct ice_vsi *vsi = np->vsi;
6465 struct ice_pf *pf = vsi->back;
6466 u16 bmode;
6467
6468 bmode = pf->first_sw->bridge_mode;
6469
6470 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
6471 filter_mask, NULL);
6472 }
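
/* The mode reported here is what user space sees from, e.g.:
 *
 *	bridge link show dev <ifname>
 *
 * ndo_dflt_bridge_getlink() formats the netlink reply with the current
 * VEB/VEPA mode of the PF's first switch.
 */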
6473
6474 /**
6475 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
6476 * @vsi: Pointer to VSI structure
6477 * @bmode: Hardware bridge mode (VEB/VEPA)
6478 *
6479 * Returns 0 on success, negative on failure
6480 */
6481 static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
6482 {
6483 struct ice_aqc_vsi_props *vsi_props;
6484 struct ice_hw *hw = &vsi->back->hw;
6485 struct ice_vsi_ctx *ctxt;
6486 enum ice_status status;
6487 int ret = 0;
6488
6489 vsi_props = &vsi->info;
6490
6491 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
6492 if (!ctxt)
6493 return -ENOMEM;
6494
6495 ctxt->info = vsi->info;
6496
6497 if (bmode == BRIDGE_MODE_VEB)
6498 /* change from VEPA to VEB mode */
6499 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6500 else
6501 /* change from VEB to VEPA mode */
6502 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
6503 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
6504
6505 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
6506 if (status) {
6507 dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
6508 bmode, ice_stat_str(status),
6509 ice_aq_str(hw->adminq.sq_last_status));
6510 ret = -EIO;
6511 goto out;
6512 }
6513 /* Update sw flags for book keeping */
6514 vsi_props->sw_flags = ctxt->info.sw_flags;
6515
6516 out:
6517 kfree(ctxt);
6518 return ret;
6519 }
6520
6521 /**
6522 * ice_bridge_setlink - Set the hardware bridge mode
6523 * @dev: the netdev being configured
6524 * @nlh: RTNL message
6525 * @flags: bridge setlink flags
6526 * @extack: netlink extended ack
6527 *
6528 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
6529 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
6530 * not already set) for all VSIs connected to this switch, and also updates the
6531 * unicast switch filter rules for the corresponding switch of the netdev.
6532 */
6533 static int
6534 ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
6535 u16 __always_unused flags,
6536 struct netlink_ext_ack __always_unused *extack)
6537 {
6538 struct ice_netdev_priv *np = netdev_priv(dev);
6539 struct ice_pf *pf = np->vsi->back;
6540 struct nlattr *attr, *br_spec;
6541 struct ice_hw *hw = &pf->hw;
6542 enum ice_status status;
6543 struct ice_sw *pf_sw;
6544 int rem, v, err = 0;
6545
6546 pf_sw = pf->first_sw;
6547 /* find the attribute in the netlink message */
6548 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
6549 if (!br_spec)
6550 return -EINVAL;
6551
6552 nla_for_each_nested(attr, br_spec, rem) {
6553 __u16 mode;
6554
6555 if (nla_type(attr) != IFLA_BRIDGE_MODE)
6556 continue;
6557 mode = nla_get_u16(attr);
6558 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
6559 return -EINVAL;
6560 /* Continue if bridge mode is not being flipped */
6561 if (mode == pf_sw->bridge_mode)
6562 continue;
6563 /* Iterates through the PF VSI list and update the loopback
6564 * mode of the VSI
6565 */
6566 ice_for_each_vsi(pf, v) {
6567 if (!pf->vsi[v])
6568 continue;
6569 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
6570 if (err)
6571 return err;
6572 }
6573
6574 hw->evb_veb = (mode == BRIDGE_MODE_VEB);
6575 /* Update the unicast switch filter rules for the corresponding
6576 * switch of the netdev
6577 */
6578 status = ice_update_sw_rule_bridge_mode(hw);
6579 if (status) {
6580 netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
6581 mode, ice_stat_str(status),
6582 ice_aq_str(hw->adminq.sq_last_status));
6583 /* revert hw->evb_veb */
6584 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
6585 return -EIO;
6586 }
6587
6588 pf_sw->bridge_mode = mode;
6589 }
6590
6591 return 0;
6592 }
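
/* User space flips the mode with iproute2, e.g.:
 *
 *	bridge link set dev <ifname> hwmode veb
 *
 * which delivers the IFLA_BRIDGE_MODE attribute parsed above as
 * BRIDGE_MODE_VEB (or "vepa" for BRIDGE_MODE_VEPA).
 */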
6593
6594 /**
6595 * ice_tx_timeout - Respond to a Tx Hang
6596 * @netdev: network interface device structure
6597 * @txqueue: Tx queue
6598 */
6599 static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
6600 {
6601 struct ice_netdev_priv *np = netdev_priv(netdev);
6602 struct ice_ring *tx_ring = NULL;
6603 struct ice_vsi *vsi = np->vsi;
6604 struct ice_pf *pf = vsi->back;
6605 u32 i;
6606
6607 pf->tx_timeout_count++;
6608
6609 /* Check if PFC is enabled for the TC to which the queue belongs.
6610 * If yes, the Tx timeout is not caused by a hung queue and there is
6611 * no need to reset and rebuild
6612 */
6613 if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
6614 dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
6615 txqueue);
6616 return;
6617 }
6618
6619 /* now that we have an index, find the tx_ring struct */
6620 for (i = 0; i < vsi->num_txq; i++)
6621 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
6622 if (txqueue == vsi->tx_rings[i]->q_index) {
6623 tx_ring = vsi->tx_rings[i];
6624 break;
6625 }
6626
6627 /* Reset recovery level if enough time has elapsed after last timeout.
6628 * Also ensure no new reset action happens before next timeout period.
6629 */
6630 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
6631 pf->tx_timeout_recovery_level = 1;
6632 else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
6633 netdev->watchdog_timeo)))
6634 return;
6635
6636 if (tx_ring) {
6637 struct ice_hw *hw = &pf->hw;
6638 u32 head, val = 0;
6639
6640 head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
6641 QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
6642 /* Read interrupt register */
6643 val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
6644
6645 netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
6646 vsi->vsi_num, txqueue, tx_ring->next_to_clean,
6647 head, tx_ring->next_to_use, val);
6648 }
6649
6650 pf->tx_timeout_last_recovery = jiffies;
6651 netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
6652 pf->tx_timeout_recovery_level, txqueue);
6653
6654 switch (pf->tx_timeout_recovery_level) {
6655 case 1:
6656 set_bit(__ICE_PFR_REQ, pf->state);
6657 break;
6658 case 2:
6659 set_bit(__ICE_CORER_REQ, pf->state);
6660 break;
6661 case 3:
6662 set_bit(__ICE_GLOBR_REQ, pf->state);
6663 break;
6664 default:
6665 netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
6666 set_bit(__ICE_DOWN, pf->state);
6667 set_bit(__ICE_NEEDS_RESTART, vsi->state);
6668 set_bit(__ICE_SERVICE_DIS, pf->state);
6669 break;
6670 }
6671
6672 ice_service_task_schedule(pf);
6673 pf->tx_timeout_recovery_level++;
6674 }
6675
6676 /**
6677 * ice_open - Called when a network interface becomes active
6678 * @netdev: network interface device structure
6679 *
6680 * The open entry point is called when a network interface is made
6681 * active by the system (IFF_UP). At this point all resources needed
6682 * for transmit and receive operations are allocated, the interrupt
6683 * handler is registered with the OS, the netdev watchdog is enabled,
6684 * and the stack is notified that the interface is ready.
6685 *
6686 * Returns 0 on success, negative value on failure
6687 */
6688 int ice_open(struct net_device *netdev)
6689 {
6690 struct ice_netdev_priv *np = netdev_priv(netdev);
6691 struct ice_pf *pf = np->vsi->back;
6692
6693 if (ice_is_reset_in_progress(pf->state)) {
6694 netdev_err(netdev, "can't open net device while reset is in progress");
6695 return -EBUSY;
6696 }
6697
6698 return ice_open_internal(netdev);
6699 }
6700
6701 /**
6702 * ice_open_internal - Called when a network interface becomes active
6703 * @netdev: network interface device structure
6704 *
6705 * Internal ice_open implementation. Should not be called directly except by
6706 * ice_open and the reset handling routine
6707 *
6708 * Returns 0 on success, negative value on failure
6709 */
6710 int ice_open_internal(struct net_device *netdev)
6711 {
6712 struct ice_netdev_priv *np = netdev_priv(netdev);
6713 struct ice_vsi *vsi = np->vsi;
6714 struct ice_pf *pf = vsi->back;
6715 struct ice_port_info *pi;
6716 int err;
6717
6718 if (test_bit(__ICE_NEEDS_RESTART, pf->state)) {
6719 netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
6720 return -EIO;
6721 }
6722
6723 if (test_bit(__ICE_DOWN, pf->state)) {
6724 netdev_err(netdev, "device is not ready yet\n");
6725 return -EBUSY;
6726 }
6727
6728 netif_carrier_off(netdev);
6729
6730 pi = vsi->port_info;
6731 err = ice_update_link_info(pi);
6732 if (err) {
6733 netdev_err(netdev, "Failed to get link info, error %d\n",
6734 err);
6735 return err;
6736 }
6737
6738 /* Set PHY if there is media, otherwise, turn off PHY */
6739 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
6740 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6741 if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
6742 err = ice_init_phy_user_cfg(pi);
6743 if (err) {
6744 netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
6745 err);
6746 return err;
6747 }
6748 }
6749
6750 err = ice_configure_phy(vsi);
6751 if (err) {
6752 netdev_err(netdev, "Failed to set physical link up, error %d\n",
6753 err);
6754 return err;
6755 }
6756 } else {
6757 set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
6758 err = ice_aq_set_link_restart_an(pi, false, NULL);
6759 if (err) {
6760 netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
6761 vsi->vsi_num, err);
6762 return err;
6763 }
6764 }
6765
6766 err = ice_vsi_open(vsi);
6767 if (err)
6768 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
6769 vsi->vsi_num, vsi->vsw->sw_id);
6770
6771 /* Update existing tunnels information */
6772 udp_tunnel_get_rx_info(netdev);
6773
6774 return err;
6775 }
6776
6777 /**
6778 * ice_stop - Disables a network interface
6779 * @netdev: network interface device structure
6780 *
6781 * The stop entry point is called when an interface is de-activated by the OS,
6782 * and the netdevice enters the DOWN state. The hardware is still under the
6783 * driver's control, but the netdev interface is disabled.
6784 *
6785 * Returns success only - not allowed to fail
6786 */
6787 int ice_stop(struct net_device *netdev)
6788 {
6789 struct ice_netdev_priv *np = netdev_priv(netdev);
6790 struct ice_vsi *vsi = np->vsi;
6791 struct ice_pf *pf = vsi->back;
6792
6793 if (ice_is_reset_in_progress(pf->state)) {
6794 netdev_err(netdev, "can't stop net device while reset is in progress");
6795 return -EBUSY;
6796 }
6797
6798 ice_vsi_close(vsi);
6799
6800 return 0;
6801 }
6802
6803 /**
6804 * ice_features_check - Validate encapsulated packet conforms to limits
6805 * @skb: skb buffer
6806 * @netdev: This port's netdev
6807 * @features: Offload features that the stack believes apply
6808 */
6809 static netdev_features_t
6810 ice_features_check(struct sk_buff *skb,
6811 struct net_device __always_unused *netdev,
6812 netdev_features_t features)
6813 {
6814 bool gso = skb_is_gso(skb);
6815 size_t len;
6816
6817 /* No point in doing any of this if neither checksum nor GSO are
6818 * being requested for this frame. We can rule out both by just
6819 * checking for CHECKSUM_PARTIAL
6820 */
6821 if (skb->ip_summed != CHECKSUM_PARTIAL)
6822 return features;
6823
6824 /* We cannot support GSO if the MSS is going to be less than
6825 * 64 bytes. If it is then we need to drop support for GSO.
6826 */
6827 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
6828 features &= ~NETIF_F_GSO_MASK;
6829
6830 len = skb_network_offset(skb);
6831 if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
6832 goto out_rm_features;
6833
6834 len = skb_network_header_len(skb);
6835 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6836 goto out_rm_features;
6837
6838 if (skb->encapsulation) {
6839 /* this must work for VXLAN frames AND IPIP/SIT frames, and in
6840 * the case of IPIP frames, the transport header pointer is
6841 * after the inner header! So check to make sure that this
6842 * is a GRE or UDP_TUNNEL frame before doing that math.
6843 */
6844 if (gso && (skb_shinfo(skb)->gso_type &
6845 (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
6846 len = skb_inner_network_header(skb) -
6847 skb_transport_header(skb);
6848 if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
6849 goto out_rm_features;
6850 }
6851
6852 len = skb_inner_network_header_len(skb);
6853 if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
6854 goto out_rm_features;
6855 }
6856
6857 return features;
6858 out_rm_features:
6859 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
6860 }
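
/* Example of the alignment rule above: an skb whose network header length
 * is odd (len & 0x1) fails the check, presumably because the hardware
 * programs header lengths in 2-byte units; both checksum and GSO offloads
 * are then cleared and the stack falls back to software for that frame.
 */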
6861
6862 static const struct net_device_ops ice_netdev_safe_mode_ops = {
6863 .ndo_open = ice_open,
6864 .ndo_stop = ice_stop,
6865 .ndo_start_xmit = ice_start_xmit,
6866 .ndo_set_mac_address = ice_set_mac_address,
6867 .ndo_validate_addr = eth_validate_addr,
6868 .ndo_change_mtu = ice_change_mtu,
6869 .ndo_get_stats64 = ice_get_stats64,
6870 .ndo_tx_timeout = ice_tx_timeout,
6871 .ndo_bpf = ice_xdp_safe_mode,
6872 };
6873
6874 static const struct net_device_ops ice_netdev_ops = {
6875 .ndo_open = ice_open,
6876 .ndo_stop = ice_stop,
6877 .ndo_start_xmit = ice_start_xmit,
6878 .ndo_features_check = ice_features_check,
6879 .ndo_set_rx_mode = ice_set_rx_mode,
6880 .ndo_set_mac_address = ice_set_mac_address,
6881 .ndo_validate_addr = eth_validate_addr,
6882 .ndo_change_mtu = ice_change_mtu,
6883 .ndo_get_stats64 = ice_get_stats64,
6884 .ndo_set_tx_maxrate = ice_set_tx_maxrate,
6885 .ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
6886 .ndo_set_vf_mac = ice_set_vf_mac,
6887 .ndo_get_vf_config = ice_get_vf_cfg,
6888 .ndo_set_vf_trust = ice_set_vf_trust,
6889 .ndo_set_vf_vlan = ice_set_vf_port_vlan,
6890 .ndo_set_vf_link_state = ice_set_vf_link_state,
6891 .ndo_get_vf_stats = ice_get_vf_stats,
6892 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
6893 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
6894 .ndo_set_features = ice_set_features,
6895 .ndo_bridge_getlink = ice_bridge_getlink,
6896 .ndo_bridge_setlink = ice_bridge_setlink,
6897 .ndo_fdb_add = ice_fdb_add,
6898 .ndo_fdb_del = ice_fdb_del,
6899 #ifdef CONFIG_RFS_ACCEL
6900 .ndo_rx_flow_steer = ice_rx_flow_steer,
6901 #endif
6902 .ndo_tx_timeout = ice_tx_timeout,
6903 .ndo_bpf = ice_xdp,
6904 .ndo_xdp_xmit = ice_xdp_xmit,
6905 .ndo_xsk_wakeup = ice_xsk_wakeup,
6906 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
6907 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
6908 };
6909