// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* Intel(R) Ethernet Connection E800 Series Linux Driver */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "ice_devlink.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
 * ice tracepoint functions. This must be done exactly once across the
 * ice driver.
 */
#define CREATE_TRACE_POINTS
#include "ice_trace.h"

#define DRV_SUMMARY	"Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";

/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
#define ICE_DDP_PKG_PATH	"intel/ice/ddp/"
#define ICE_DDP_PKG_FILE	ICE_DDP_PKG_PATH "ice.pkg"

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);

static int debug = -1;
module_param(debug, int, 0644);
#ifndef CONFIG_DYNAMIC_DEBUG
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXXXXX)");
#else
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
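
/* Usage sketch (editorial illustration, assuming the driver feeds this
 * parameter through the standard netif_msg_init() helper, which treats it
 * as a message *level* rather than a bitmap):
 *
 *   modprobe ice debug=3    # enable the first three message categories
 *                           # (drv, probe, link)
 *   modprobe ice debug=16   # enable all categories, per "16=all" above
 *   modprobe ice debug=-1   # default: use the driver's built-in level
 *
 * Without CONFIG_DYNAMIC_DEBUG, values of the form 0x8XXXXXXX are instead
 * interpreted as a hardware debug_mask, per the parameter description.
 */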

static DEFINE_IDA(ice_aux_ida);

static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
static int ice_vsi_open(struct ice_vsi *vsi);

static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);

static void ice_vsi_release_all(struct ice_pf *pf);

bool netif_is_ice(struct net_device *dev)
{
	return dev && (dev->netdev_ops == &ice_netdev_ops);
}
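
/* Usage sketch (illustrative, not part of the original source): code outside
 * this file can use netif_is_ice() to verify that a netdev really belongs to
 * the ice driver before touching its private data:
 *
 *	if (netif_is_ice(netdev)) {
 *		struct ice_netdev_priv *np = netdev_priv(netdev);
 *
 *		// np->vsi is now safe to dereference
 *	}
 */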

/**
 * ice_get_tx_pending - returns number of Tx descriptors not processed
 * @ring: the ring of descriptors
 */
static u16 ice_get_tx_pending(struct ice_ring *ring)
{
	u16 head, tail;

	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);
	return 0;
}
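
/* Worked example (illustrative): on a ring with count = 256, if cleaning has
 * reached head = 250 while the driver has posted descriptors up to tail = 10,
 * the computation wraps around the ring:
 *
 *	pending = tail + count - head = 10 + 256 - 250 = 16
 *
 * i.e. 16 descriptors are still awaiting completion.
 */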

/**
 * ice_check_for_hang_subtask - check for and recover hung queues
 * @pf: pointer to PF struct
 */
static void ice_check_for_hang_subtask(struct ice_pf *pf)
{
	struct ice_vsi *vsi = NULL;
	struct ice_hw *hw;
	unsigned int i;
	int packets;
	u32 v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
			vsi = pf->vsi[v];
			break;
		}

	if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))
		return;

	if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))
		return;

	hw = &vsi->back->hw;

	for (i = 0; i < vsi->num_txq; i++) {
		struct ice_ring *tx_ring = vsi->tx_rings[i];

		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.pkts & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt == packets) {
				/* Trigger sw interrupt to revive the queue */
				ice_trigger_sw_intr(hw, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to ice_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt =
			    ice_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}
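
/* Hang-detection timeline (editorial illustration of the logic above): on one
 * service-task pass the queue has pending descriptors, so prev_pkt is latched
 * to the current packet count; if by the next pass the count is unchanged,
 * the queue is assumed stalled and a software interrupt is fired. A prev_pkt
 * of -1 means the queue had no pending work on the previous pass; since the
 * masked packet count is always non-negative, the comparison can never match
 * and no spurious interrupt fires for an idle queue.
 */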

/**
 * ice_init_mac_fltr - Set initial MAC filters
 * @pf: board private structure
 *
 * Set initial set of MAC filters for PF VSI; configure filters for permanent
 * address and broadcast address. If an error is encountered, the netdevice
 * will be unregistered.
 */
static int ice_init_mac_fltr(struct ice_pf *pf)
{
	enum ice_status status;
	struct ice_vsi *vsi;
	u8 *perm_addr;

	vsi = ice_get_main_vsi(pf);
	if (!vsi)
		return -EINVAL;

	perm_addr = vsi->port_info->mac.perm_addr;
	status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
	if (status)
		return -EIO;

	return 0;
}

/**
 * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
 * @netdev: the net device on which the sync is happening
 * @addr: MAC address to sync
 *
 * This is a callback function which is called by the in-kernel device sync
 * functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
 * populates the tmp_sync_list, which is later used by ice_add_mac to add the
 * MAC filters to the hardware.
 */
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
 * @netdev: the net device on which the unsync is happening
 * @addr: MAC address to unsync
 *
 * This is a callback function which is called by the in-kernel device unsync
 * functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
 * populates the tmp_unsync_list, which is later used by ice_remove_mac to
 * delete the MAC filters from the hardware.
 */
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete our
	 * own device address from our uc list. Because we store the device
	 * address in the VSI's MAC filter list, we need to ignore such
	 * requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
				     ICE_FWD_TO_VSI))
		return -EINVAL;

	return 0;
}

/**
 * ice_vsi_fltr_changed - check if filter state changed
 * @vsi: VSI to be checked
 *
 * returns true if filter state has changed, false otherwise.
 */
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
	return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
	       test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
}

/**
 * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
 * @vsi: the VSI being configured
 * @promisc_m: mask of promiscuous config bits
 * @set_promisc: enable or disable promisc flag request
 */
static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status = 0;

	if (vsi->type != ICE_VSI_PF)
		return 0;

	if (vsi->num_vlan > 1) {
		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
						  set_promisc);
	} else {
		if (set_promisc)
			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
						     0);
		else
			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
						       0);
	}

	if (status)
		return -EIO;

	return 0;
}

/**
 * ice_vsi_sync_fltr - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 */
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct net_device *netdev = vsi->netdev;
	bool promisc_forced_on = false;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status = 0;
	u32 changed_flags = 0;
	u8 promisc_m;
	int err = 0;

	if (!vsi->netdev)
		return -EINVAL;

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);

	changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
	vsi->current_netdev_flags = vsi->netdev->flags;

	INIT_LIST_HEAD(&vsi->tmp_sync_list);
	INIT_LIST_HEAD(&vsi->tmp_unsync_list);

	if (ice_vsi_fltr_changed(vsi)) {
		clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
		clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);

		/* grab the netdev's addr_list_lock */
		netif_addr_lock_bh(netdev);
		__dev_uc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		__dev_mc_sync(netdev, ice_add_mac_to_sync_list,
			      ice_add_mac_to_unsync_list);
		/* our temp lists are populated. release lock */
		netif_addr_unlock_bh(netdev);
	}

	/* Remove MAC addresses in the unsync list */
	status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
	ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
	if (status) {
		netdev_err(netdev, "Failed to delete MAC filters\n");
		/* if we failed because of alloc failures, just bail */
		if (status == ICE_ERR_NO_MEMORY) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* Add MAC addresses in the sync list */
	status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
	ice_fltr_free_list(dev, &vsi->tmp_sync_list);
	/* If a filter was added successfully or already exists, don't report
	 * it as an error; continue processing the rest of the function.
	 */
	if (status && status != ICE_ERR_ALREADY_EXISTS) {
		netdev_err(netdev, "Failed to add MAC filters\n");
		/* If there is no more space for new umac filters, VSI
		 * should go into promiscuous mode. There should be some
		 * space reserved for promiscuous filters.
		 */
		if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
		    !test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
				      vsi->state)) {
			promisc_forced_on = true;
			netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
				    vsi->vsi_num);
		} else {
			err = -EIO;
			goto out;
		}
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		if (vsi->current_netdev_flags & IFF_ALLMULTI) {
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, true);
			if (err) {
				netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags &= ~IFF_ALLMULTI;
				goto out_promisc;
			}
		} else {
			/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
			if (vsi->num_vlan > 1)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
			else
				promisc_m = ICE_MCAST_PROMISC_BITS;

			err = ice_cfg_promisc(vsi, promisc_m, false);
			if (err) {
				netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
					   vsi->vsi_num);
				vsi->current_netdev_flags |= IFF_ALLMULTI;
				goto out_promisc;
			}
		}
	}

	if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
	    test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
		clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
		if (vsi->current_netdev_flags & IFF_PROMISC) {
			/* Apply Rx filter rule to get traffic from wire */
			if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
				err = ice_set_dflt_vsi(pf->first_sw, vsi);
				if (err && err != -EEXIST) {
					netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags &=
						~IFF_PROMISC;
					goto out_promisc;
				}
				ice_cfg_vlan_pruning(vsi, false, false);
			}
		} else {
			/* Clear Rx filter to remove traffic from wire */
			if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
				err = ice_clear_dflt_vsi(pf->first_sw);
				if (err) {
					netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
						   err, vsi->vsi_num);
					vsi->current_netdev_flags |=
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->num_vlan > 1)
					ice_cfg_vlan_pruning(vsi, true, false);
			}
		}
	}
	goto exit;

out_promisc:
	set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
	goto exit;
out:
	/* if something went wrong then set the changed flag so we try again */
	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
exit:
	clear_bit(ICE_CFG_BUSY, vsi->state);
	return err;
}

/**
 * ice_sync_fltr_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 */
static void ice_sync_fltr_subtask(struct ice_pf *pf)
{
	int v;

	if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
		return;

	clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
		    ice_vsi_sync_fltr(pf->vsi[v])) {
			/* come back and try again later */
			set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
			break;
		}
}

/**
 * ice_pf_dis_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 * @locked: is the rtnl_lock already held
 */
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
	int node;
	int v;

	ice_for_each_vsi(pf, v)
		if (pf->vsi[v])
			ice_dis_vsi(pf->vsi[v], locked);

	for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
		pf->pf_agg_node[node].num_vsis = 0;

	for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
		pf->vf_agg_node[node].num_vsis = 0;
}

/**
 * ice_prepare_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Inform or close all dependent features in prep for reset.
 */
static void
ice_prepare_for_reset(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	/* already prepared for reset */
	if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
		return;

	ice_unplug_aux_dev(pf);

	/* Notify VFs of impending reset */
	if (ice_check_sq_alive(hw, &hw->mailboxq))
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);

	/* clear SW filtering DB */
	ice_clear_hw_tbls(hw);
	/* disable the VSIs and their queues that are not already DOWN */
	ice_pf_dis_all_vsi(pf, false);

	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
		ice_ptp_release(pf);

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	ice_shutdown_all_ctrlq(hw);

	set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}

/**
 * ice_do_reset - Initiate one of many types of resets
 * @pf: board private structure
 * @reset_type: the reset type requested before this function was called
 */
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);

	ice_prepare_for_reset(pf);

	/* trigger the reset */
	if (ice_reset(hw, reset_type)) {
		dev_err(dev, "reset %d failed\n", reset_type);
		set_bit(ICE_RESET_FAILED, pf->state);
		clear_bit(ICE_RESET_OICR_RECV, pf->state);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		clear_bit(ICE_CORER_REQ, pf->state);
		clear_bit(ICE_GLOBR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		return;
	}

	/* PFR is a bit of a special case because it doesn't result in an OICR
	 * interrupt. So for PFR, rebuild after the reset and clear the reset-
	 * associated state bits.
	 */
	if (reset_type == ICE_RESET_PFR) {
		pf->pfr_count++;
		ice_rebuild(pf, reset_type);
		clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
		clear_bit(ICE_PFR_REQ, pf->state);
		wake_up(&pf->reset_wait_queue);
		ice_reset_all_vfs(pf, true);
	}
}

/**
 * ice_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 */
static void ice_reset_subtask(struct ice_pf *pf)
{
	enum ice_reset_req reset_type = ICE_RESET_INVAL;

	/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
	 * OICR interrupt. The OICR handler (ice_misc_intr) determines what type
	 * of reset is pending and sets bits in pf->state indicating the reset
	 * type and ICE_RESET_OICR_RECV. So, if the latter bit is set, prepare
	 * for the pending reset if not already prepared (for PF software-
	 * initiated global resets the software should already be prepared, as
	 * indicated by ICE_PREPARED_FOR_RESET; for global resets initiated by
	 * firmware or by software on other PFs that bit is not set, so prepare
	 * for the reset now), then poll for reset done, rebuild and return.
	 */
	if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {
		/* Perform the largest reset requested */
		if (test_and_clear_bit(ICE_CORER_RECV, pf->state))
			reset_type = ICE_RESET_CORER;
		if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))
			reset_type = ICE_RESET_GLOBR;
		if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))
			reset_type = ICE_RESET_EMPR;
		/* return if no valid reset type requested */
		if (reset_type == ICE_RESET_INVAL)
			return;
		ice_prepare_for_reset(pf);

		/* make sure we are ready to rebuild */
		if (ice_check_reset(&pf->hw)) {
			set_bit(ICE_RESET_FAILED, pf->state);
		} else {
			/* done with reset. start rebuild */
			pf->hw.reset_ongoing = false;
			ice_rebuild(pf, reset_type);
			/* clear bit to resume normal operations, but
			 * ICE_NEEDS_RESTART bit is set in case rebuild failed
			 */
			clear_bit(ICE_RESET_OICR_RECV, pf->state);
			clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
			clear_bit(ICE_PFR_REQ, pf->state);
			clear_bit(ICE_CORER_REQ, pf->state);
			clear_bit(ICE_GLOBR_REQ, pf->state);
			wake_up(&pf->reset_wait_queue);
			ice_reset_all_vfs(pf, true);
		}

		return;
	}

	/* No pending resets to finish processing. Check for new resets */
	if (test_bit(ICE_PFR_REQ, pf->state))
		reset_type = ICE_RESET_PFR;
	if (test_bit(ICE_CORER_REQ, pf->state))
		reset_type = ICE_RESET_CORER;
	if (test_bit(ICE_GLOBR_REQ, pf->state))
		reset_type = ICE_RESET_GLOBR;
	/* If no valid reset type requested just return */
	if (reset_type == ICE_RESET_INVAL)
		return;

	/* reset if not already down or busy */
	if (!test_bit(ICE_DOWN, pf->state) &&
	    !test_bit(ICE_CFG_BUSY, pf->state)) {
		ice_do_reset(pf, reset_type);
	}
}

/**
 * ice_print_topo_conflict - print topology conflict message
 * @vsi: the VSI whose topology status is being checked
 */
static void ice_print_topo_conflict(struct ice_vsi *vsi)
{
	switch (vsi->port_info->phy.link_info.topo_media_conflict) {
	case ICE_AQ_LINK_TOPO_CONFLICT:
	case ICE_AQ_LINK_MEDIA_CONFLICT:
	case ICE_AQ_LINK_TOPO_UNREACH_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_PRT:
	case ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA:
		netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
		break;
	case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
		netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		break;
	default:
		break;
	}
}

/**
 * ice_print_link_msg - print link up or down message
 * @vsi: the VSI whose link status is being queried
 * @isup: boolean for if the link is now up or down
 */
void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
	struct ice_aqc_get_phy_caps_data *caps;
	const char *an_advertised;
	enum ice_status status;
	const char *fec_req;
	const char *speed;
	const char *fec;
	const char *fc;
	const char *an;

	if (!vsi)
		return;

	if (vsi->current_isup == isup)
		return;

	vsi->current_isup = isup;

	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	switch (vsi->port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		speed = "100 G";
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		speed = "50 G";
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case ICE_AQ_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case ICE_AQ_LINK_SPEED_2500MB:
		speed = "2.5 G";
		break;
	case ICE_AQ_LINK_SPEED_1000MB:
		speed = "1 G";
		break;
	case ICE_AQ_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		speed = "Unknown ";
		break;
	}

	switch (vsi->port_info->fc.current_mode) {
	case ICE_FC_FULL:
		fc = "Rx/Tx";
		break;
	case ICE_FC_TX_PAUSE:
		fc = "Tx";
		break;
	case ICE_FC_RX_PAUSE:
		fc = "Rx";
		break;
	case ICE_FC_NONE:
		fc = "None";
		break;
	default:
		fc = "Unknown";
		break;
	}

	/* Get FEC mode based on negotiated link info */
	switch (vsi->port_info->phy.link_info.fec_info) {
	case ICE_AQ_LINK_25G_RS_528_FEC_EN:
	case ICE_AQ_LINK_25G_RS_544_FEC_EN:
		fec = "RS-FEC";
		break;
	case ICE_AQ_LINK_25G_KR_FEC_EN:
		fec = "FC-FEC/BASE-R";
		break;
	default:
		fec = "NONE";
		break;
	}

	/* check if autoneg completed; this may be false if autoneg is not
	 * supported
	 */
	if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
		an = "True";
	else
		an = "False";

	/* Get FEC mode requested based on PHY caps last SW configuration */
	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps) {
		fec_req = "Unknown";
		an_advertised = "Unknown";
		goto done;
	}

	status = ice_aq_get_phy_caps(vsi->port_info, false,
				     ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
	if (status)
		netdev_info(vsi->netdev, "Get phy capability failed.\n");

	an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";

	if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
	    caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
		fec_req = "RS-FEC";
	else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
		 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
		fec_req = "FC-FEC/BASE-R";
	else
		fec_req = "NONE";

	kfree(caps);

done:
	netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
		    speed, fec_req, fec, an_advertised, an, fc);
	ice_print_topo_conflict(vsi);
}

/**
 * ice_vsi_link_event - update the VSI's netdev
 * @vsi: the VSI on which the link event occurred
 * @link_up: whether or not the VSI needs to be set up or down
 */
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
	if (!vsi)
		return;

	if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
		return;

	if (vsi->type == ICE_VSI_PF) {
		if (link_up == netif_carrier_ok(vsi->netdev))
			return;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
	}
}

/**
 * ice_set_dflt_mib - send a default config MIB to the FW
 * @pf: private PF struct
 *
 * This function sends a default configuration MIB to the FW.
 *
 * If this function errors out at any point, the driver is still able to
 * function. The main impact is that LFC may not operate as expected.
 * Therefore an error state in this function should be treated with a DBG
 * message and continue on with driver rebuild/reenable.
 */
static void ice_set_dflt_mib(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	u8 mib_type, *buf, *lldpmib = NULL;
	u16 len, typelen, offset = 0;
	struct ice_lldp_org_tlv *tlv;
	struct ice_hw *hw = &pf->hw;
	u32 ouisubtype;

	mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
	lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
	if (!lldpmib) {
		dev_dbg(dev, "%s Failed to allocate MIB memory\n",
			__func__);
		return;
	}

	/* Add ETS CFG TLV */
	tlv = (struct ice_lldp_org_tlv *)lldpmib;
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_ETS_TLV_LEN);
	tlv->typelen = htons(typelen);
	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	buf = tlv->tlvinfo;
	buf[0] = 0;

	/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
	 * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
	 * Octets 13 - 20 are TSA values - leave as zeros
	 */
	buf[5] = 0x64;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add ETS REC TLV */
	buf = tlv->tlvinfo;
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_ETS_REC);
	tlv->ouisubtype = htonl(ouisubtype);

	/* First octet of buf is reserved
	 * Octets 1 - 4 map UP to TC - all UPs map to zero
	 * Octets 5 - 12 are BW values - set TC 0 to 100%.
	 * Octets 13 - 20 are TSA value - leave as zeros
	 */
	buf[5] = 0x64;
	offset += len + 2;
	tlv = (struct ice_lldp_org_tlv *)
		((char *)tlv + sizeof(tlv->typelen) + len);

	/* Add PFC CFG TLV */
	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_IEEE_PFC_TLV_LEN);
	tlv->typelen = htons(typelen);

	ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
		      ICE_IEEE_SUBTYPE_PFC_CFG);
	tlv->ouisubtype = htonl(ouisubtype);

	/* Octet 1 left as all zeros - PFC disabled */
	buf[0] = 0x08;
	len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
	offset += len + 2;

	if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
		dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);

	kfree(lldpmib);
}
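
/* Worked example (editorial illustration; the exact constant values are
 * defined in the DCB headers and assumed here): an LLDP TLV header packs a
 * 7-bit type and a 9-bit length. For an organizationally specific TLV
 * (type 127) carrying the 25-byte IEEE ETS payload:
 *
 *	typelen = (127 << 9) | 25 = 0xFE19
 *
 * and the TLV occupies len + 2 = 27 bytes on the wire, which is what the
 * "offset += len + 2" accumulation above accounts for when sizing the MIB.
 */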

/**
 * ice_check_module_power
 * @pf: pointer to PF struct
 * @link_cfg_err: bitmap from the link info structure
 *
 * check module power level returned by a previous call to aq_get_link_info
 * and print error messages if module power level is not supported
 */
static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
{
	/* if module power level is supported, clear the flag */
	if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
			      ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
		clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
		return;
	}

	/* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
	 * above block didn't clear this bit, there's nothing to do
	 */
	if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
		return;

	if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
		dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	} else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
		dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
		set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
	}
}

/**
 * ice_link_event - process the link event
 * @pf: PF that the link event is associated with
 * @pi: port_info for the port that the link event is associated with
 * @link_up: true if the physical link is up and false if it is down
 * @link_speed: current link speed received from the link event
 *
 * Returns 0 on success and negative on failure
 */
static int
ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
	       u16 link_speed)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_phy_info *phy_info;
	enum ice_status status;
	struct ice_vsi *vsi;
	u16 old_link_speed;
	bool old_link;

	phy_info = &pi->phy;
	phy_info->link_info_old = phy_info->link_info;

	old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
	old_link_speed = phy_info->link_info_old.link_speed;

	/* update the link info structures and re-enable link events;
	 * don't bail on failure, due to other bookkeeping needed
	 */
	status = ice_update_link_info(pi);
	if (status)
		dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
			pi->lport, ice_stat_str(status),
			ice_aq_str(pi->hw->adminq.sq_last_status));

	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);

	/* Check if the link state is up after updating link info, and treat
	 * this event as an UP event since the link is actually UP now.
	 */
	if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
		link_up = true;

	vsi = ice_get_main_vsi(pf);
	if (!vsi || !vsi->port_info)
		return -EINVAL;

	/* turn off PHY if media was removed */
	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
	    !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	/* if the old link up/down and speed is the same as the new */
	if (link_up == old_link && link_speed == old_link_speed)
		return 0;

	if (ice_is_dcb_active(pf)) {
		if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
			ice_dcb_rebuild(pf);
	} else {
		if (link_up)
			ice_set_dflt_mib(pf);
	}
	ice_vsi_link_event(vsi, link_up);
	ice_print_link_msg(vsi, link_up);

	ice_vc_notify_link_state(pf);

	return 0;
}

/**
 * ice_watchdog_subtask - periodic tasks not using event driven scheduling
 * @pf: board private structure
 */
static void ice_watchdog_subtask(struct ice_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(ICE_DOWN, pf->state) ||
	    test_bit(ICE_CFG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies,
			pf->serv_tmr_prev + pf->serv_tmr_period))
		return;

	pf->serv_tmr_prev = jiffies;

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	ice_update_pf_stats(pf);
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			ice_update_vsi_stats(pf->vsi[i]);
}

/**
 * ice_init_link_events - enable/initialize link events
 * @pi: pointer to the port_info instance
 *
 * Returns -EIO on failure, 0 on success
 */
static int ice_init_link_events(struct ice_port_info *pi)
{
	u16 mask;

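	/* Judging by the complement below, a set bit in the AQ event mask
	 * suppresses the corresponding event; the complement therefore leaves
	 * only the events we care about (link up/down, media not available,
	 * module qualification failure) unmasked.
	 */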
	mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
		       ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

	if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
			pi->lport);
		return -EIO;
	}

	if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
		dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
			pi->lport);
		return -EIO;
	}

	return 0;
}

/**
 * ice_handle_link_event - handle link event via ARQ
 * @pf: PF that the link event is associated with
 * @event: event structure containing link status info
 */
static int
ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	struct ice_aqc_get_link_status_data *link_data;
	struct ice_port_info *port_info;
	int status;

	link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
	port_info = pf->hw.port_info;
	if (!port_info)
		return -EINVAL;

	status = ice_link_event(pf, port_info,
				!!(link_data->link_info & ICE_AQ_LINK_UP),
				le16_to_cpu(link_data->link_speed));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
			status);

	return status;
}

enum ice_aq_task_state {
	ICE_AQ_TASK_WAITING = 0,
	ICE_AQ_TASK_COMPLETE,
	ICE_AQ_TASK_CANCELED,
};

struct ice_aq_task {
	struct hlist_node entry;

	u16 opcode;
	struct ice_rq_event_info *event;
	enum ice_aq_task_state state;
};

/**
 * ice_aq_wait_for_event - Wait for an AdminQ event from firmware
 * @pf: pointer to the PF private structure
 * @opcode: the opcode to wait for
 * @timeout: how long to wait, in jiffies
 * @event: storage for the event info
 *
 * Waits for a specific AdminQ completion event on the ARQ for a given PF. The
 * current thread will be put to sleep until the specified event occurs or
 * until the given timeout is reached.
 *
 * To obtain only the descriptor contents, pass an event without an allocated
 * msg_buf. If the complete data buffer is desired, allocate the
 * event->msg_buf with enough space ahead of time.
 *
 * Returns: zero on success, or a negative error code on failure.
 */
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
			  struct ice_rq_event_info *event)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_aq_task *task;
	unsigned long start;
	long ret;
	int err;

	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	INIT_HLIST_NODE(&task->entry);
	task->opcode = opcode;
	task->event = event;
	task->state = ICE_AQ_TASK_WAITING;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_add_head(&task->entry, &pf->aq_wait_list);
	spin_unlock_bh(&pf->aq_wait_lock);

	start = jiffies;

	ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
					       timeout);
	switch (task->state) {
	case ICE_AQ_TASK_WAITING:
		err = ret < 0 ? ret : -ETIMEDOUT;
		break;
	case ICE_AQ_TASK_CANCELED:
		err = ret < 0 ? ret : -ECANCELED;
		break;
	case ICE_AQ_TASK_COMPLETE:
		err = ret < 0 ? ret : 0;
		break;
	default:
		WARN(1, "Unexpected AdminQ wait task state %u", task->state);
		err = -EINVAL;
		break;
	}

	dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
		jiffies_to_msecs(jiffies - start),
		jiffies_to_msecs(timeout),
		opcode);

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_del(&task->entry);
	spin_unlock_bh(&pf->aq_wait_lock);
	kfree(task);

	return err;
}
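
/* Usage sketch (editorial illustration; the opcode and timeout are chosen
 * arbitrarily): a caller that has just issued an AdminQ command can block
 * until firmware posts the matching completion on the ARQ:
 *
 *	struct ice_rq_event_info event = { };
 *	int err;
 *
 *	// Descriptor only; leave event.msg_buf NULL if the payload is
 *	// not needed.
 *	err = ice_aq_wait_for_event(pf, ice_aqc_opc_get_link_status,
 *				    HZ, &event);
 *	if (err == -ETIMEDOUT)
 *		; // firmware never responded within one second
 */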

/**
 * ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 * @pf: pointer to the PF private structure
 * @opcode: the opcode of the event
 * @event: the event to check
 *
 * Loops over the current list of pending threads waiting for an AdminQ event.
 * For each matching task, copy the contents of the event into the task
 * structure and wake up the thread.
 *
 * If multiple threads wait for the same opcode, they will all be woken up.
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * with enough space already allocated. Otherwise, only the descriptor and
 * message length will be copied.
 */
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
				struct ice_rq_event_info *event)
{
	struct ice_rq_event_info *task_ev;
	struct ice_aq_task *task;
	bool found = false;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
		if (task->state || task->opcode != opcode)
			continue;

		task_ev = task->event;
		memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
		task_ev->msg_len = event->msg_len;

		/* Only copy the data buffer if a destination was set */
		if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
			memcpy(task_ev->msg_buf, event->msg_buf,
			       event->buf_len);
			task_ev->buf_len = event->buf_len;
		}

		task->state = ICE_AQ_TASK_COMPLETE;
		found = true;
	}
	spin_unlock_bh(&pf->aq_wait_lock);

	if (found)
		wake_up(&pf->aq_wait_queue);
}

/**
 * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 * @pf: the PF private structure
 *
 * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads.
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
{
	struct ice_aq_task *task;

	spin_lock_bh(&pf->aq_wait_lock);
	hlist_for_each_entry(task, &pf->aq_wait_list, entry)
		task->state = ICE_AQ_TASK_CANCELED;
	spin_unlock_bh(&pf->aq_wait_lock);

	wake_up(&pf->aq_wait_queue);
}

/**
 * __ice_clean_ctrlq - helper function to clean controlq rings
 * @pf: ptr to struct ice_pf
 * @q_type: specific Control queue type
 */
static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_rq_event_info event;
	struct ice_hw *hw = &pf->hw;
	struct ice_ctl_q_info *cq;
	u16 pending, i = 0;
	const char *qtype;
	u32 oldval, val;

	/* Do not clean control queue if/when PF reset fails */
	if (test_bit(ICE_RESET_FAILED, pf->state))
		return 0;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		qtype = "Admin";
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		qtype = "Sideband";
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		qtype = "Mailbox";
		/* we are going to try to detect a malicious VF, so set the
		 * state to begin detection
		 */
		hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
		break;
	default:
		dev_warn(dev, "Unknown control queue type 0x%x\n", q_type);
		return 0;
	}

	/* check for error indications - PF_xx_AxQLEN register layout for
	 * FW/MBX/SB are identical so just use defines for PF_FW_AxQLEN.
	 */
	val = rd32(hw, cq->rq.len);
	if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
		   PF_FW_ARQLEN_ARQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ARQLEN_ARQVFE_M)
			dev_dbg(dev, "%s Receive Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ARQLEN_ARQOVFL_M) {
			dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ARQLEN_ARQCRIT_M)
			dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
			 PF_FW_ARQLEN_ARQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->rq.len, val);
	}

	val = rd32(hw, cq->sq.len);
	if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
		   PF_FW_ATQLEN_ATQCRIT_M)) {
		oldval = val;
		if (val & PF_FW_ATQLEN_ATQVFE_M)
			dev_dbg(dev, "%s Send Queue VF Error detected\n",
				qtype);
		if (val & PF_FW_ATQLEN_ATQOVFL_M) {
			dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
				qtype);
		}
		if (val & PF_FW_ATQLEN_ATQCRIT_M)
			dev_dbg(dev, "%s Send Queue Critical Error detected\n",
				qtype);
		val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
			 PF_FW_ATQLEN_ATQCRIT_M);
		if (oldval != val)
			wr32(hw, cq->sq.len, val);
	}

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return 0;

	do {
		enum ice_status ret;
		u16 opcode;

		ret = ice_clean_rq_elem(hw, cq, &event, &pending);
		if (ret == ICE_ERR_AQ_NO_WORK)
			break;
		if (ret) {
			dev_err(dev, "%s Receive Queue event error %s\n", qtype,
				ice_stat_str(ret));
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);

		/* Notify any thread that might be waiting for this event */
		ice_aq_check_events(pf, opcode, &event);

		switch (opcode) {
		case ice_aqc_opc_get_link_status:
			if (ice_handle_link_event(pf, &event))
				dev_err(dev, "Could not handle link event\n");
			break;
		case ice_aqc_opc_event_lan_overflow:
			ice_vf_lan_overflow_event(pf, &event);
			break;
		case ice_mbx_opc_send_msg_to_pf:
			if (!ice_is_malicious_vf(pf, &event, i, pending))
				ice_vc_process_vf_msg(pf, &event);
			break;
		case ice_aqc_opc_fw_logging:
			ice_output_fw_log(hw, &event.desc, event.msg_buf);
			break;
		case ice_aqc_opc_lldp_set_mib_change:
			ice_dcb_process_lldp_set_mib_change(pf, &event);
			break;
		default:
			dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
				qtype, opcode);
			break;
		}
	} while (pending && (i++ < ICE_DFLT_IRQ_WORK));

	kfree(event.msg_buf);

	return pending && (i == ICE_DFLT_IRQ_WORK);
}

/**
 * ice_ctrlq_pending - check if there is a difference between ntc and ntu
 * @hw: pointer to hardware info
 * @cq: control queue information
 *
 * returns true if there are pending messages in a queue, false if there aren't
 */
static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u16 ntu;

	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
	return cq->rq.next_to_clean != ntu;
}

/**
 * ice_clean_adminq_subtask - clean the AdminQ rings
 * @pf: board private structure
 */
static void ice_clean_adminq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN))
		return;

	clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

	/* There might be a situation where new messages arrive to a control
	 * queue between processing the last message and clearing the
	 * EVENT_PENDING bit. So before exiting, check queue head again (using
	 * ice_ctrlq_pending) and process new messages if any.
	 */
	if (ice_ctrlq_pending(hw, &hw->adminq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);

	ice_flush(hw);
}

/**
 * ice_clean_mailboxq_subtask - clean the MailboxQ rings
 * @pf: board private structure
 */
static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX))
		return;

	clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->mailboxq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_MAILBOX);

	ice_flush(hw);
}

/**
 * ice_clean_sbq_subtask - clean the Sideband Queue rings
 * @pf: board private structure
 */
static void ice_clean_sbq_subtask(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do here if sideband queue is not supported */
	if (!ice_is_sbq_supported(hw)) {
		clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
		return;
	}

	if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
		return;

	if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
		return;

	clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

	if (ice_ctrlq_pending(hw, &hw->sbq))
		__ice_clean_ctrlq(pf, ICE_CTL_Q_SB);

	ice_flush(hw);
}

/**
 * ice_service_task_schedule - schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue.
 */
void ice_service_task_schedule(struct ice_pf *pf)
{
	if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
	    !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
	    !test_bit(ICE_NEEDS_RESTART, pf->state))
		queue_work(ice_wq, &pf->serv_task);
}

/**
 * ice_service_task_complete - finish up the service task
 * @pf: board private structure
 */
static void ice_service_task_complete(struct ice_pf *pf)
{
	WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

	/* force memory (pf->state) to sync before next service task */
	smp_mb__before_atomic();
	clear_bit(ICE_SERVICE_SCHED, pf->state);
}

/**
 * ice_service_task_stop - stop service task and cancel works
 * @pf: board private structure
 *
 * Return 0 if the ICE_SERVICE_DIS bit was not already set,
 * 1 otherwise.
 */
static int ice_service_task_stop(struct ice_pf *pf)
{
	int ret;

	ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

	if (pf->serv_tmr.function)
		del_timer_sync(&pf->serv_tmr);
	if (pf->serv_task.func)
		cancel_work_sync(&pf->serv_task);

	clear_bit(ICE_SERVICE_SCHED, pf->state);
	return ret;
}

/**
 * ice_service_task_restart - restart service task and schedule works
 * @pf: board private structure
 *
 * This function is needed for suspend and resume flows (e.g. the WoL
 * scenario).
 */
static void ice_service_task_restart(struct ice_pf *pf)
{
	clear_bit(ICE_SERVICE_DIS, pf->state);
	ice_service_task_schedule(pf);
}

/**
 * ice_service_timer - timer callback to schedule service task
 * @t: pointer to timer_list
 */
static void ice_service_timer(struct timer_list *t)
{
	struct ice_pf *pf = from_timer(pf, t, serv_tmr);

	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
	ice_service_task_schedule(pf);
}
1530 
1531 /**
1532  * ice_handle_mdd_event - handle malicious driver detect event
1533  * @pf: pointer to the PF structure
1534  *
1535  * Called from service task. OICR interrupt handler indicates MDD event.
1536  * VF MDD logging is guarded by net_ratelimit. Additional PF and VF log
1537  * messages are wrapped by netif_msg_[rx|tx]_err. Since VF Rx MDD events
1538  * disable the queue, the PF can be configured to reset the VF using ethtool
1539  * private flag mdd-auto-reset-vf.
1540  */
ice_handle_mdd_event(struct ice_pf * pf)1541 static void ice_handle_mdd_event(struct ice_pf *pf)
1542 {
1543 	struct device *dev = ice_pf_to_dev(pf);
1544 	struct ice_hw *hw = &pf->hw;
1545 	unsigned int i;
1546 	u32 reg;
1547 
1548 	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
1549 		/* Since the VF MDD event logging is rate limited, check if
1550 		 * there are pending MDD events.
1551 		 */
1552 		ice_print_vfs_mdd_events(pf);
1553 		return;
1554 	}
1555 
1556 	/* find what triggered an MDD event */
1557 	reg = rd32(hw, GL_MDET_TX_PQM);
1558 	if (reg & GL_MDET_TX_PQM_VALID_M) {
1559 		u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >>
1560 				GL_MDET_TX_PQM_PF_NUM_S;
1561 		u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >>
1562 				GL_MDET_TX_PQM_VF_NUM_S;
1563 		u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >>
1564 				GL_MDET_TX_PQM_MAL_TYPE_S;
1565 		u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >>
1566 				GL_MDET_TX_PQM_QNUM_S);
1567 
1568 		if (netif_msg_tx_err(pf))
1569 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1570 				 event, queue, pf_num, vf_num);
1571 		wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
1572 	}
1573 
1574 	reg = rd32(hw, GL_MDET_TX_TCLAN);
1575 	if (reg & GL_MDET_TX_TCLAN_VALID_M) {
1576 		u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
1577 				GL_MDET_TX_TCLAN_PF_NUM_S;
1578 		u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >>
1579 				GL_MDET_TX_TCLAN_VF_NUM_S;
1580 		u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >>
1581 				GL_MDET_TX_TCLAN_MAL_TYPE_S;
1582 		u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
1583 				GL_MDET_TX_TCLAN_QNUM_S);
1584 
1585 		if (netif_msg_tx_err(pf))
1586 			dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
1587 				 event, queue, pf_num, vf_num);
1588 		wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
1589 	}
1590 
1591 	reg = rd32(hw, GL_MDET_RX);
1592 	if (reg & GL_MDET_RX_VALID_M) {
1593 		u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >>
1594 				GL_MDET_RX_PF_NUM_S;
1595 		u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >>
1596 				GL_MDET_RX_VF_NUM_S;
1597 		u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >>
1598 				GL_MDET_RX_MAL_TYPE_S;
1599 		u16 queue = ((reg & GL_MDET_RX_QNUM_M) >>
1600 				GL_MDET_RX_QNUM_S);
1601 
1602 		if (netif_msg_rx_err(pf))
1603 			dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
1604 				 event, queue, pf_num, vf_num);
1605 		wr32(hw, GL_MDET_RX, 0xffffffff);
1606 	}
1607 
1608 	/* check to see if this PF caused an MDD event */
1609 	reg = rd32(hw, PF_MDET_TX_PQM);
1610 	if (reg & PF_MDET_TX_PQM_VALID_M) {
1611 		wr32(hw, PF_MDET_TX_PQM, 0xFFFF);
1612 		if (netif_msg_tx_err(pf))
1613 			dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
1614 	}
1615 
1616 	reg = rd32(hw, PF_MDET_TX_TCLAN);
1617 	if (reg & PF_MDET_TX_TCLAN_VALID_M) {
1618 		wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
1619 		if (netif_msg_tx_err(pf))
1620 			dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
1621 	}
1622 
1623 	reg = rd32(hw, PF_MDET_RX);
1624 	if (reg & PF_MDET_RX_VALID_M) {
1625 		wr32(hw, PF_MDET_RX, 0xFFFF);
1626 		if (netif_msg_rx_err(pf))
1627 			dev_info(dev, "Malicious Driver Detection event RX detected on PF\n");
1628 	}
1629 
1630 	/* Check to see if one of the VFs caused an MDD event, and then
1631 	 * increment counters and set print pending
1632 	 */
1633 	ice_for_each_vf(pf, i) {
1634 		struct ice_vf *vf = &pf->vf[i];
1635 
1636 		reg = rd32(hw, VP_MDET_TX_PQM(i));
1637 		if (reg & VP_MDET_TX_PQM_VALID_M) {
1638 			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
1639 			vf->mdd_tx_events.count++;
1640 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1641 			if (netif_msg_tx_err(pf))
1642 				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
1643 					 i);
1644 		}
1645 
1646 		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
1647 		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
1648 			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
1649 			vf->mdd_tx_events.count++;
1650 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1651 			if (netif_msg_tx_err(pf))
1652 				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
1653 					 i);
1654 		}
1655 
1656 		reg = rd32(hw, VP_MDET_TX_TDPU(i));
1657 		if (reg & VP_MDET_TX_TDPU_VALID_M) {
1658 			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
1659 			vf->mdd_tx_events.count++;
1660 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1661 			if (netif_msg_tx_err(pf))
1662 				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
1663 					 i);
1664 		}
1665 
1666 		reg = rd32(hw, VP_MDET_RX(i));
1667 		if (reg & VP_MDET_RX_VALID_M) {
1668 			wr32(hw, VP_MDET_RX(i), 0xFFFF);
1669 			vf->mdd_rx_events.count++;
1670 			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
1671 			if (netif_msg_rx_err(pf))
1672 				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
1673 					 i);
1674 
1675 			/* Since the queue is disabled on VF Rx MDD events, the
1676 			 * PF can be configured to reset the VF through ethtool
1677 			 * private flag mdd-auto-reset-vf.
1678 			 */
1679 			if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
1680 				/* VF MDD event counters will be cleared by
1681 				 * reset, so print the event prior to reset.
1682 				 */
1683 				ice_print_vf_rx_mdd_event(vf);
1684 				mutex_lock(&pf->vf[i].cfg_lock);
1685 				ice_reset_vf(&pf->vf[i], false);
1686 				mutex_unlock(&pf->vf[i].cfg_lock);
1687 			}
1688 		}
1689 	}
1690 
1691 	ice_print_vfs_mdd_events(pf);
1692 }
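/* Editorial note (hardware-behavior assumption, not stated in this file):
 * the MDET status registers above are treated as write-to-clear; writing
 * all ones (0xffffffff to the global GL_MDET registers, 0xFFFF to the
 * per-function PF_MDET and per-VF VP_MDET registers) acknowledges the
 * latched event and re-arms detection for the next occurrence.
 */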
1693 
1694 /**
1695  * ice_force_phys_link_state - Force the physical link state
1696  * @vsi: VSI to force the physical link state to up/down
1697  * @link_up: true/false indicates to set the physical link to up/down
1698  *
1699  * Force the physical link state by getting the current PHY capabilities from
1700  * hardware and setting the PHY config based on the determined capabilities. If
1701  * the link changes, a link event will be triggered because both the Enable Automatic
1702  * Link Update and LESM Enable bits are set when setting the PHY capabilities.
1703  *
1704  * Returns 0 on success, negative on failure
1705  */
1706 static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
1707 {
1708 	struct ice_aqc_get_phy_caps_data *pcaps;
1709 	struct ice_aqc_set_phy_cfg_data *cfg;
1710 	struct ice_port_info *pi;
1711 	struct device *dev;
1712 	int retcode;
1713 
1714 	if (!vsi || !vsi->port_info || !vsi->back)
1715 		return -EINVAL;
1716 	if (vsi->type != ICE_VSI_PF)
1717 		return 0;
1718 
1719 	dev = ice_pf_to_dev(vsi->back);
1720 
1721 	pi = vsi->port_info;
1722 
1723 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1724 	if (!pcaps)
1725 		return -ENOMEM;
1726 
1727 	retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1728 				      NULL);
1729 	if (retcode) {
1730 		dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
1731 			vsi->vsi_num, retcode);
1732 		retcode = -EIO;
1733 		goto out;
1734 	}
1735 
1736 	/* No change in link */
1737 	if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
1738 	    link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
1739 		goto out;
1740 
1741 	/* Use the current user PHY configuration. The current user PHY
1742 	 * configuration is initialized during probe from the PHY capabilities
1743 	 * reported in software mode, and is updated on set PHY configuration.
1744 	 */
1745 	cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
1746 	if (!cfg) {
1747 		retcode = -ENOMEM;
1748 		goto out;
1749 	}
1750 
1751 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
1752 	if (link_up)
1753 		cfg->caps |= ICE_AQ_PHY_ENA_LINK;
1754 	else
1755 		cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
1756 
1757 	retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
1758 	if (retcode) {
1759 		dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
1760 			vsi->vsi_num, retcode);
1761 		retcode = -EIO;
1762 	}
1763 
1764 	kfree(cfg);
1765 out:
1766 	kfree(pcaps);
1767 	return retcode;
1768 }
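/* Example (editorial sketch of a typical caller): the link-down-on-close
 * flow uses this helper to drop the physical link when the interface is
 * brought down, e.g.
 *
 *	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
 *		err = ice_force_phys_link_state(vsi, false);
 *
 * The resulting Link Status Event then completes the state change via the
 * normal link event handler.
 */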
1769 
1770 /**
1771  * ice_init_nvm_phy_type - Initialize the NVM PHY type
1772  * @pi: port info structure
1773  *
1774  * Initialize nvm_phy_type_[low|high] for link lenient mode support
1775  */
1776 static int ice_init_nvm_phy_type(struct ice_port_info *pi)
1777 {
1778 	struct ice_aqc_get_phy_caps_data *pcaps;
1779 	struct ice_pf *pf = pi->hw->back;
1780 	enum ice_status status;
1781 	int err = 0;
1782 
1783 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1784 	if (!pcaps)
1785 		return -ENOMEM;
1786 
1787 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
1788 				     NULL);
1789 
1790 	if (status) {
1791 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1792 		err = -EIO;
1793 		goto out;
1794 	}
1795 
1796 	pf->nvm_phy_type_hi = pcaps->phy_type_high;
1797 	pf->nvm_phy_type_lo = pcaps->phy_type_low;
1798 
1799 out:
1800 	kfree(pcaps);
1801 	return err;
1802 }
1803 
1804 /**
1805  * ice_init_link_dflt_override - Initialize link default override
1806  * @pi: port info structure
1807  *
1808  * Initialize link default override and PHY total port shutdown during probe
1809  */
1810 static void ice_init_link_dflt_override(struct ice_port_info *pi)
1811 {
1812 	struct ice_link_default_override_tlv *ldo;
1813 	struct ice_pf *pf = pi->hw->back;
1814 
1815 	ldo = &pf->link_dflt_override;
1816 	if (ice_get_link_default_override(ldo, pi))
1817 		return;
1818 
1819 	if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
1820 		return;
1821 
1822 	/* Enable Total Port Shutdown (override/replace link-down-on-close
1823 	 * ethtool private flag) for ports with Port Disable bit set.
1824 	 */
1825 	set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
1826 	set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
1827 }
1828 
1829 /**
1830  * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
1831  * @pi: port info structure
1832  *
1833  * If default override is enabled, initialize the user PHY cfg speed and FEC
1834  * settings using the default override mask from the NVM.
1835  *
1836  * The PHY should only be configured with the default override settings the
1837  * first time media is available. The ICE_LINK_DEFAULT_OVERRIDE_PENDING state
1838  * is used to indicate that the user PHY cfg default override is initialized
1839  * and the PHY has not been configured with the default override settings. The
1840  * state is set here, and cleared in ice_configure_phy the first time the PHY is
1841  * configured.
1842  *
1843  * This function should be called only if the FW doesn't support default
1844  * configuration mode, as reported by ice_fw_supports_report_dflt_cfg.
1845  */
1846 static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
1847 {
1848 	struct ice_link_default_override_tlv *ldo;
1849 	struct ice_aqc_set_phy_cfg_data *cfg;
1850 	struct ice_phy_info *phy = &pi->phy;
1851 	struct ice_pf *pf = pi->hw->back;
1852 
1853 	ldo = &pf->link_dflt_override;
1854 
1855 	/* If link default override is enabled, use it to mask NVM PHY capabilities
1856 	 * for speed and FEC default configuration.
1857 	 */
1858 	cfg = &phy->curr_user_phy_cfg;
1859 
1860 	if (ldo->phy_type_low || ldo->phy_type_high) {
1861 		cfg->phy_type_low = pf->nvm_phy_type_lo &
1862 				    cpu_to_le64(ldo->phy_type_low);
1863 		cfg->phy_type_high = pf->nvm_phy_type_hi &
1864 				     cpu_to_le64(ldo->phy_type_high);
1865 	}
1866 	cfg->link_fec_opt = ldo->fec_options;
1867 	phy->curr_user_fec_req = ICE_FEC_AUTO;
1868 
1869 	set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
1870 }
1871 
1872 /**
1873  * ice_init_phy_user_cfg - Initialize the PHY user configuration
1874  * @pi: port info structure
1875  *
1876  * Initialize the current user PHY configuration, speed, FEC, and FC requested
1877  * mode to default. The PHY defaults are from get PHY capabilities topology
1878  * with media so call when media is first available. An error is returned if
1879  * called when media is not available. The PHY initialization completed state is
1880  * set here.
1881  *
1882  * These configurations are used when setting PHY
1883  * configuration. The user PHY configuration is updated on set PHY
1884  * configuration. Returns 0 on success, negative on failure
1885  */
1886 static int ice_init_phy_user_cfg(struct ice_port_info *pi)
1887 {
1888 	struct ice_aqc_get_phy_caps_data *pcaps;
1889 	struct ice_phy_info *phy = &pi->phy;
1890 	struct ice_pf *pf = pi->hw->back;
1891 	enum ice_status status;
1892 	int err = 0;
1893 
1894 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1895 		return -EIO;
1896 
1897 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1898 	if (!pcaps)
1899 		return -ENOMEM;
1900 
1901 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
1902 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
1903 					     pcaps, NULL);
1904 	else
1905 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
1906 					     pcaps, NULL);
1907 	if (status) {
1908 		dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
1909 		err = -EIO;
1910 		goto err_out;
1911 	}
1912 
1913 	ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
1914 
1915 	/* check if lenient mode is supported and enabled */
1916 	if (ice_fw_supports_link_override(pi->hw) &&
1917 	    !(pcaps->module_compliance_enforcement &
1918 	      ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
1919 		set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
1920 
1921 		/* if the FW supports default PHY configuration mode, then the driver
1922 		 * does not have to apply link override settings. If not,
1923 		 * initialize user PHY configuration with link override values
1924 		 */
1925 		if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
1926 		    (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {
1927 			ice_init_phy_cfg_dflt_override(pi);
1928 			goto out;
1929 		}
1930 	}
1931 
1932 	/* if link default override is not enabled, set user flow control and
1933 	 * FEC settings based on what get_phy_caps returned
1934 	 */
1935 	phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
1936 						      pcaps->link_fec_options);
1937 	phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
1938 
1939 out:
1940 	phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
1941 	set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
1942 err_out:
1943 	kfree(pcaps);
1944 	return err;
1945 }
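/* Editorial note: curr_user_speed_req is initialized above to
 * ICE_AQ_LINK_SPEED_M, i.e. the full speed mask, so the first PHY
 * configuration advertises every speed the reported capabilities allow
 * rather than narrowing to one user-requested speed.
 */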
1946 
1947 /**
1948  * ice_configure_phy - configure PHY
1949  * @vsi: VSI of PHY
1950  *
1951  * Set the PHY configuration. If the current PHY configuration is the same as
1952  * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
1953  * configure the based get PHY capabilities for topology with media.
1954  */
1955 static int ice_configure_phy(struct ice_vsi *vsi)
1956 {
1957 	struct device *dev = ice_pf_to_dev(vsi->back);
1958 	struct ice_port_info *pi = vsi->port_info;
1959 	struct ice_aqc_get_phy_caps_data *pcaps;
1960 	struct ice_aqc_set_phy_cfg_data *cfg;
1961 	struct ice_phy_info *phy = &pi->phy;
1962 	struct ice_pf *pf = vsi->back;
1963 	enum ice_status status;
1964 	int err = 0;
1965 
1966 	/* Ensure we have media as we cannot configure a medialess port */
1967 	if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
1968 		return -EPERM;
1969 
1970 	ice_print_topo_conflict(vsi);
1971 
1972 	if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
1973 		return -EPERM;
1974 
1975 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
1976 		return ice_force_phys_link_state(vsi, true);
1977 
1978 	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1979 	if (!pcaps)
1980 		return -ENOMEM;
1981 
1982 	/* Get current PHY config */
1983 	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
1984 				     NULL);
1985 	if (status) {
1986 		dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
1987 			vsi->vsi_num, ice_stat_str(status));
1988 		err = -EIO;
1989 		goto done;
1990 	}
1991 
1992 	/* If PHY enable link is configured and configuration has not changed,
1993 	 * there's nothing to do
1994 	 */
1995 	if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
1996 	    ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))
1997 		goto done;
1998 
1999 	/* Use PHY topology as baseline for configuration */
2000 	memset(pcaps, 0, sizeof(*pcaps));
2001 	if (ice_fw_supports_report_dflt_cfg(pi->hw))
2002 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
2003 					     pcaps, NULL);
2004 	else
2005 		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
2006 					     pcaps, NULL);
2007 	if (status) {
2008 		dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
2009 			vsi->vsi_num, ice_stat_str(status));
2010 		err = -EIO;
2011 		goto done;
2012 	}
2013 
2014 	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
2015 	if (!cfg) {
2016 		err = -ENOMEM;
2017 		goto done;
2018 	}
2019 
2020 	ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
2021 
2022 	/* Speed - If default override pending, use curr_user_phy_cfg set in
2023 	 * ice_init_phy_cfg_dflt_override.
2024 	 */
2025 	if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
2026 			       vsi->back->state)) {
2027 		cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
2028 		cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;
2029 	} else {
2030 		u64 phy_low = 0, phy_high = 0;
2031 
2032 		ice_update_phy_type(&phy_low, &phy_high,
2033 				    pi->phy.curr_user_speed_req);
2034 		cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
2035 		cfg->phy_type_high = pcaps->phy_type_high &
2036 				     cpu_to_le64(phy_high);
2037 	}
2038 
2039 	/* Can't provide what was requested; use PHY capabilities */
2040 	if (!cfg->phy_type_low && !cfg->phy_type_high) {
2041 		cfg->phy_type_low = pcaps->phy_type_low;
2042 		cfg->phy_type_high = pcaps->phy_type_high;
2043 	}
2044 
2045 	/* FEC */
2046 	ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);
2047 
2048 	/* Can't provide what was requested; use PHY capabilities */
2049 	if (cfg->link_fec_opt !=
2050 	    (cfg->link_fec_opt & pcaps->link_fec_options)) {
2051 		cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
2052 		cfg->link_fec_opt = pcaps->link_fec_options;
2053 	}
2054 
2055 	/* Flow Control - always supported; no need to check against
2056 	 * capabilities
2057 	 */
2058 	ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);
2059 
2060 	/* Enable link and link update */
2061 	cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
2062 
2063 	status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
2064 	if (status) {
2065 		dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
2066 			vsi->vsi_num, ice_stat_str(status));
2067 		err = -EIO;
2068 	}
2069 
2070 	kfree(cfg);
2071 done:
2072 	kfree(pcaps);
2073 	return err;
2074 }
2075 
2076 /**
2077  * ice_check_media_subtask - Check for media
2078  * @pf: pointer to PF struct
2079  *
2080  * If media is available, then initialize the PHY user configuration if it has
2081  * not been done yet, and configure the PHY if the interface is up.
2082  */
2083 static void ice_check_media_subtask(struct ice_pf *pf)
2084 {
2085 	struct ice_port_info *pi;
2086 	struct ice_vsi *vsi;
2087 	int err;
2088 
2089 	/* No need to check for media if it's already present */
2090 	if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
2091 		return;
2092 
2093 	vsi = ice_get_main_vsi(pf);
2094 	if (!vsi)
2095 		return;
2096 
2097 	/* Refresh link info and check if media is present */
2098 	pi = vsi->port_info;
2099 	err = ice_update_link_info(pi);
2100 	if (err)
2101 		return;
2102 
2103 	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
2104 
2105 	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
2106 		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
2107 			ice_init_phy_user_cfg(pi);
2108 
2109 		/* PHY settings are reset on media insertion, reconfigure
2110 		 * PHY to preserve settings.
2111 		 */
2112 		if (test_bit(ICE_VSI_DOWN, vsi->state) &&
2113 		    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
2114 			return;
2115 
2116 		err = ice_configure_phy(vsi);
2117 		if (!err)
2118 			clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
2119 
2120 		/* A Link Status Event will be generated; the event handler
2121 		 * will complete bringing the interface up
2122 		 */
2123 	}
2124 }
2125 
2126 /**
2127  * ice_service_task - manage and run subtasks
2128  * @work: pointer to work_struct contained by the PF struct
2129  */
2130 static void ice_service_task(struct work_struct *work)
2131 {
2132 	struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
2133 	unsigned long start_time = jiffies;
2134 
2135 	/* subtasks */
2136 
2137 	/* process reset requests first */
2138 	ice_reset_subtask(pf);
2139 
2140 	/* bail if a reset/recovery cycle is pending or rebuild failed */
2141 	if (ice_is_reset_in_progress(pf->state) ||
2142 	    test_bit(ICE_SUSPENDED, pf->state) ||
2143 	    test_bit(ICE_NEEDS_RESTART, pf->state)) {
2144 		ice_service_task_complete(pf);
2145 		return;
2146 	}
2147 
2148 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
2149 		struct iidc_event *event;
2150 
2151 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2152 		if (event) {
2153 			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
2154 			/* report the entire OICR value to AUX driver */
2155 			swap(event->reg, pf->oicr_err_reg);
2156 			ice_send_event_to_aux(pf, event);
2157 			kfree(event);
2158 		}
2159 	}
2160 
2161 	/* unplug aux dev per request; if an unplug request came in
2162 	 * while processing a plug request, this will handle it
2163 	 */
2164 	if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
2165 		ice_unplug_aux_dev(pf);
2166 
2167 	/* Plug aux device per request */
2168 	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
2169 		ice_plug_aux_dev(pf);
2170 
2171 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
2172 		struct iidc_event *event;
2173 
2174 		event = kzalloc(sizeof(*event), GFP_KERNEL);
2175 		if (event) {
2176 			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
2177 			ice_send_event_to_aux(pf, event);
2178 			kfree(event);
2179 		}
2180 	}
2181 
2182 	ice_clean_adminq_subtask(pf);
2183 	ice_check_media_subtask(pf);
2184 	ice_check_for_hang_subtask(pf);
2185 	ice_sync_fltr_subtask(pf);
2186 	ice_handle_mdd_event(pf);
2187 	ice_watchdog_subtask(pf);
2188 
2189 	if (ice_is_safe_mode(pf)) {
2190 		ice_service_task_complete(pf);
2191 		return;
2192 	}
2193 
2194 	ice_process_vflr_event(pf);
2195 	ice_clean_mailboxq_subtask(pf);
2196 	ice_clean_sbq_subtask(pf);
2197 	ice_sync_arfs_fltrs(pf);
2198 	ice_flush_fdir_ctx(pf);
2199 
2200 	/* Clear ICE_SERVICE_SCHED flag to allow scheduling next event */
2201 	ice_service_task_complete(pf);
2202 
2203 	/* If the tasks have taken longer than one service timer period
2204 	 * or there is more work to be done, reset the service timer to
2205 	 * schedule the service task now.
2206 	 */
2207 	if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
2208 	    test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
2209 	    test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2210 	    test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
2211 	    test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
2212 	    test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
2213 	    test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
2214 		mod_timer(&pf->serv_tmr, jiffies);
2215 }
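/* Editorial worked example for the rescheduling logic above: assuming
 * serv_tmr_period == HZ (one second), a subtask pass taking 1.2 s makes
 * time_after(jiffies, start_time + pf->serv_tmr_period) true, and
 * mod_timer(&pf->serv_tmr, jiffies) expires the timer immediately, so the
 * service task runs again on the next tick instead of waiting out a full
 * period.
 */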
2216 
2217 /**
2218  * ice_set_ctrlq_len - helper function to set controlq length
2219  * @hw: pointer to the HW instance
2220  */
2221 static void ice_set_ctrlq_len(struct ice_hw *hw)
2222 {
2223 	hw->adminq.num_rq_entries = ICE_AQ_LEN;
2224 	hw->adminq.num_sq_entries = ICE_AQ_LEN;
2225 	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
2226 	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
2227 	hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
2228 	hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
2229 	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2230 	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
2231 	hw->sbq.num_rq_entries = ICE_SBQ_LEN;
2232 	hw->sbq.num_sq_entries = ICE_SBQ_LEN;
2233 	hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2234 	hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
2235 }
2236 
2237 /**
2238  * ice_schedule_reset - schedule a reset
2239  * @pf: board private structure
2240  * @reset: reset being requested
2241  */
2242 int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
2243 {
2244 	struct device *dev = ice_pf_to_dev(pf);
2245 
2246 	/* bail out if earlier reset has failed */
2247 	if (test_bit(ICE_RESET_FAILED, pf->state)) {
2248 		dev_dbg(dev, "earlier reset has failed\n");
2249 		return -EIO;
2250 	}
2251 	/* bail if reset/recovery already in progress */
2252 	if (ice_is_reset_in_progress(pf->state)) {
2253 		dev_dbg(dev, "Reset already in progress\n");
2254 		return -EBUSY;
2255 	}
2256 
2257 	switch (reset) {
2258 	case ICE_RESET_PFR:
2259 		set_bit(ICE_PFR_REQ, pf->state);
2260 		break;
2261 	case ICE_RESET_CORER:
2262 		set_bit(ICE_CORER_REQ, pf->state);
2263 		break;
2264 	case ICE_RESET_GLOBR:
2265 		set_bit(ICE_GLOBR_REQ, pf->state);
2266 		break;
2267 	default:
2268 		return -EINVAL;
2269 	}
2270 
2271 	ice_service_task_schedule(pf);
2272 	return 0;
2273 }
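/* Example (editorial sketch): callers request a reset from process
 * context and let the service task perform the actual reset/rebuild
 * asynchronously, e.g.
 *
 *	err = ice_schedule_reset(pf, ICE_RESET_PFR);
 *	if (err)
 *		dev_dbg(ice_pf_to_dev(pf), "PFR not scheduled: %d\n", err);
 */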
2274 
2275 /**
2276  * ice_irq_affinity_notify - Callback for affinity changes
2277  * @notify: context as to what irq was changed
2278  * @mask: the new affinity mask
2279  *
2280  * This is a callback function used by the irq_set_affinity_notifier function
2281  * so that we may register to receive changes to the irq affinity masks.
2282  */
2283 static void
2284 ice_irq_affinity_notify(struct irq_affinity_notify *notify,
2285 			const cpumask_t *mask)
2286 {
2287 	struct ice_q_vector *q_vector =
2288 		container_of(notify, struct ice_q_vector, affinity_notify);
2289 
2290 	cpumask_copy(&q_vector->affinity_mask, mask);
2291 }
2292 
2293 /**
2294  * ice_irq_affinity_release - Callback for affinity notifier release
2295  * @ref: internal core kernel usage
2296  *
2297  * This is a callback function used by the irq_set_affinity_notifier function
2298  * to inform the current notification subscriber that they will no longer
2299  * receive notifications.
2300  */
2301 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
2302 
2303 /**
2304  * ice_vsi_ena_irq - Enable IRQ for the given VSI
2305  * @vsi: the VSI being configured
2306  */
2307 static int ice_vsi_ena_irq(struct ice_vsi *vsi)
2308 {
2309 	struct ice_hw *hw = &vsi->back->hw;
2310 	int i;
2311 
2312 	ice_for_each_q_vector(vsi, i)
2313 		ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
2314 
2315 	ice_flush(hw);
2316 	return 0;
2317 }
2318 
2319 /**
2320  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
2321  * @vsi: the VSI being configured
2322  * @basename: name for the vector
2323  */
2324 static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
2325 {
2326 	int q_vectors = vsi->num_q_vectors;
2327 	struct ice_pf *pf = vsi->back;
2328 	int base = vsi->base_vector;
2329 	struct device *dev;
2330 	int rx_int_idx = 0;
2331 	int tx_int_idx = 0;
2332 	int vector, err;
2333 	int irq_num;
2334 
2335 	dev = ice_pf_to_dev(pf);
2336 	for (vector = 0; vector < q_vectors; vector++) {
2337 		struct ice_q_vector *q_vector = vsi->q_vectors[vector];
2338 
2339 		irq_num = pf->msix_entries[base + vector].vector;
2340 
2341 		if (q_vector->tx.ring && q_vector->rx.ring) {
2342 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2343 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2344 			tx_int_idx++;
2345 		} else if (q_vector->rx.ring) {
2346 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2347 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
2348 		} else if (q_vector->tx.ring) {
2349 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2350 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
2351 		} else {
2352 			/* skip this unused q_vector */
2353 			continue;
2354 		}
2355 		if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
2356 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2357 					       IRQF_SHARED, q_vector->name,
2358 					       q_vector);
2359 		else
2360 			err = devm_request_irq(dev, irq_num, vsi->irq_handler,
2361 					       0, q_vector->name, q_vector);
2362 		if (err) {
2363 			netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
2364 				   err);
2365 			goto free_q_irqs;
2366 		}
2367 
2368 		/* register for affinity change notifications */
2369 		if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
2370 			struct irq_affinity_notify *affinity_notify;
2371 
2372 			affinity_notify = &q_vector->affinity_notify;
2373 			affinity_notify->notify = ice_irq_affinity_notify;
2374 			affinity_notify->release = ice_irq_affinity_release;
2375 			irq_set_affinity_notifier(irq_num, affinity_notify);
2376 		}
2377 
2378 		/* assign the mask for this irq */
2379 		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
2380 	}
2381 
2382 	vsi->irqs_ready = true;
2383 	return 0;
2384 
2385 free_q_irqs:
2386 	while (vector) {
2387 		vector--;
2388 		irq_num = pf->msix_entries[base + vector].vector;
2389 		if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2390 			irq_set_affinity_notifier(irq_num, NULL);
2391 		irq_set_affinity_hint(irq_num, NULL);
2392 		devm_free_irq(dev, irq_num, vsi->q_vectors[vector]);
2393 	}
2394 	return err;
2395 }
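/* Editorial note: the vector names built above are what show up in
 * /proc/interrupts, typically "<basename>-TxRx-<n>" (e.g.
 * "ice-eth0-TxRx-0"), which makes it straightforward to pin a queue pair
 * to a CPU through /proc/irq/<irq>/smp_affinity.
 */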
2396 
2397 /**
2398  * ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP
2399  * @vsi: VSI to setup Tx rings used by XDP
2400  *
2401  * Return 0 on success and negative value on error
2402  */
2403 static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
2404 {
2405 	struct device *dev = ice_pf_to_dev(vsi->back);
2406 	int i;
2407 
2408 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2409 		u16 xdp_q_idx = vsi->alloc_txq + i;
2410 		struct ice_ring *xdp_ring;
2411 
2412 		xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
2413 
2414 		if (!xdp_ring)
2415 			goto free_xdp_rings;
2416 
2417 		xdp_ring->q_index = xdp_q_idx;
2418 		xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
2419 		xdp_ring->ring_active = false;
2420 		xdp_ring->vsi = vsi;
2421 		xdp_ring->netdev = NULL;
2422 		xdp_ring->dev = dev;
2423 		xdp_ring->count = vsi->num_tx_desc;
2424 		WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
2425 		if (ice_setup_tx_ring(xdp_ring))
2426 			goto free_xdp_rings;
2427 		ice_set_ring_xdp(xdp_ring);
2428 		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
2429 	}
2430 
2431 	return 0;
2432 
2433 free_xdp_rings:
2434 	for (; i >= 0; i--)
2435 		if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
2436 			ice_free_tx_ring(vsi->xdp_rings[i]);
2437 	return -ENOMEM;
2438 }
2439 
2440 /**
2441  * ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI
2442  * @vsi: VSI to set the bpf prog on
2443  * @prog: the bpf prog pointer
2444  */
2445 static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
2446 {
2447 	struct bpf_prog *old_prog;
2448 	int i;
2449 
2450 	old_prog = xchg(&vsi->xdp_prog, prog);
2451 	if (old_prog)
2452 		bpf_prog_put(old_prog);
2453 
2454 	ice_for_each_rxq(vsi, i)
2455 		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
2456 }
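/* Editorial note: xchg() publishes the new program pointer atomically, so
 * the data path always observes either the old or the new bpf_prog, never
 * a torn value; the reference on the old program is dropped only after
 * the swap, in the usual publish-then-release order.
 */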
2457 
2458 /**
2459  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
2460  * @vsi: VSI to bring up Tx rings used by XDP
2461  * @prog: bpf program that will be assigned to VSI
2462  *
2463  * Return 0 on success and negative value on error
2464  */
2465 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
2466 {
2467 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2468 	int xdp_rings_rem = vsi->num_xdp_txq;
2469 	struct ice_pf *pf = vsi->back;
2470 	struct ice_qs_cfg xdp_qs_cfg = {
2471 		.qs_mutex = &pf->avail_q_mutex,
2472 		.pf_map = pf->avail_txqs,
2473 		.pf_map_size = pf->max_pf_txqs,
2474 		.q_count = vsi->num_xdp_txq,
2475 		.scatter_count = ICE_MAX_SCATTER_TXQS,
2476 		.vsi_map = vsi->txq_map,
2477 		.vsi_map_offset = vsi->alloc_txq,
2478 		.mapping_mode = ICE_VSI_MAP_CONTIG
2479 	};
2480 	enum ice_status status;
2481 	struct device *dev;
2482 	int i, v_idx;
2483 
2484 	dev = ice_pf_to_dev(pf);
2485 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
2486 				      sizeof(*vsi->xdp_rings), GFP_KERNEL);
2487 	if (!vsi->xdp_rings)
2488 		return -ENOMEM;
2489 
2490 	vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;
2491 	if (__ice_vsi_get_qs(&xdp_qs_cfg))
2492 		goto err_map_xdp;
2493 
2494 	if (ice_xdp_alloc_setup_rings(vsi))
2495 		goto clear_xdp_rings;
2496 
2497 	/* follow the logic from ice_vsi_map_rings_to_vectors */
2498 	ice_for_each_q_vector(vsi, v_idx) {
2499 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2500 		int xdp_rings_per_v, q_id, q_base;
2501 
2502 		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
2503 					       vsi->num_q_vectors - v_idx);
2504 		q_base = vsi->num_xdp_txq - xdp_rings_rem;
2505 
2506 		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
2507 			struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
2508 
2509 			xdp_ring->q_vector = q_vector;
2510 			xdp_ring->next = q_vector->tx.ring;
2511 			q_vector->tx.ring = xdp_ring;
2512 		}
2513 		xdp_rings_rem -= xdp_rings_per_v;
2514 	}
2515 
2516 	/* omit the scheduler update if in reset path; XDP queues will be
2517 	 * taken into account at the end of ice_vsi_rebuild, where
2518 	 * ice_cfg_vsi_lan is being called
2519 	 */
2520 	if (ice_is_reset_in_progress(pf->state))
2521 		return 0;
2522 
2523 	/* tell the Tx scheduler that right now we have
2524 	 * additional queues
2525 	 */
2526 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2527 		max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;
2528 
2529 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2530 				 max_txqs);
2531 	if (status) {
2532 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
2533 			ice_stat_str(status));
2534 		goto clear_xdp_rings;
2535 	}
2536 
2537 	/* assign the prog only when it's not already present on VSI;
2538 	 * this flow is reached by both the ethtool -L and ndo_bpf flows;
2539 	 * VSI rebuild that happens under ethtool -L can expose us to
2540 	 * the bpf_prog refcount issues as we would be swapping same
2541 	 * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
2542 	 * on it as it would be treated as an 'old_prog'; for ndo_bpf
2543 	 * this is not harmful as dev_xdp_install bumps the refcount
2544 	 * before calling the op exposed by the driver;
2545 	 */
2546 	if (!ice_is_xdp_ena_vsi(vsi))
2547 		ice_vsi_assign_bpf_prog(vsi, prog);
2548 
2549 	return 0;
2550 clear_xdp_rings:
2551 	for (i = 0; i < vsi->num_xdp_txq; i++)
2552 		if (vsi->xdp_rings[i]) {
2553 			kfree_rcu(vsi->xdp_rings[i], rcu);
2554 			vsi->xdp_rings[i] = NULL;
2555 		}
2556 
2557 err_map_xdp:
2558 	mutex_lock(&pf->avail_q_mutex);
2559 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2560 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2561 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2562 	}
2563 	mutex_unlock(&pf->avail_q_mutex);
2564 
2565 	devm_kfree(dev, vsi->xdp_rings);
2566 	return -ENOMEM;
2567 }
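/* Editorial worked example for the DIV_ROUND_UP() distribution above:
 * with num_xdp_txq = 10 and num_q_vectors = 4, the per-vector ring counts
 * come out 3, 3, 2, 2:
 *
 *	v0: DIV_ROUND_UP(10, 4) = 3	(7 rings remain)
 *	v1: DIV_ROUND_UP(7, 3)  = 3	(4 rings remain)
 *	v2: DIV_ROUND_UP(4, 2)  = 2	(2 rings remain)
 *	v3: DIV_ROUND_UP(2, 1)  = 2	(0 rings remain)
 */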
2568 
2569 /**
2570  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
2571  * @vsi: VSI to remove XDP rings
2572  *
2573  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
2574  * resources
2575  */
2576 int ice_destroy_xdp_rings(struct ice_vsi *vsi)
2577 {
2578 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2579 	struct ice_pf *pf = vsi->back;
2580 	int i, v_idx;
2581 
2582 	/* q_vectors are freed in reset path so there's no point in detaching
2583 	 * rings; if the rebuild was not triggered by a reset, the reset bits
2584 	 * in pf->state won't be set, so additionally check the first q_vector
2585 	 * against NULL
2586 	 */
2587 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2588 		goto free_qmap;
2589 
2590 	ice_for_each_q_vector(vsi, v_idx) {
2591 		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
2592 		struct ice_ring *ring;
2593 
2594 		ice_for_each_ring(ring, q_vector->tx)
2595 			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
2596 				break;
2597 
2598 		/* restore the value of last node prior to XDP setup */
2599 		q_vector->tx.ring = ring;
2600 	}
2601 
2602 free_qmap:
2603 	mutex_lock(&pf->avail_q_mutex);
2604 	for (i = 0; i < vsi->num_xdp_txq; i++) {
2605 		clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
2606 		vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
2607 	}
2608 	mutex_unlock(&pf->avail_q_mutex);
2609 
2610 	for (i = 0; i < vsi->num_xdp_txq; i++)
2611 		if (vsi->xdp_rings[i]) {
2612 			if (vsi->xdp_rings[i]->desc) {
2613 				synchronize_rcu();
2614 				ice_free_tx_ring(vsi->xdp_rings[i]);
2615 			}
2616 			kfree_rcu(vsi->xdp_rings[i], rcu);
2617 			vsi->xdp_rings[i] = NULL;
2618 		}
2619 
2620 	devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
2621 	vsi->xdp_rings = NULL;
2622 
2623 	if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
2624 		return 0;
2625 
2626 	ice_vsi_assign_bpf_prog(vsi, NULL);
2627 
2628 	/* notify Tx scheduler that we destroyed XDP queues and bring
2629 	 * back the old number of child nodes
2630 	 */
2631 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2632 		max_txqs[i] = vsi->num_txq;
2633 
2634 	/* change number of XDP Tx queues to 0 */
2635 	vsi->num_xdp_txq = 0;
2636 
2637 	return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2638 			       max_txqs);
2639 }
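/* Editorial note (assumption): the synchronize_rcu() before
 * ice_free_tx_ring() above waits out RCU readers (e.g. an in-flight
 * .ndo_xdp_xmit) that may still dereference the ring, so descriptor
 * memory is not reclaimed underneath them; the ring struct itself is
 * freed via kfree_rcu() for the same reason.
 */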
2640 
2641 /**
2642  * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
2643  * @vsi: VSI to schedule napi on
2644  */
2645 static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
2646 {
2647 	int i;
2648 
2649 	ice_for_each_rxq(vsi, i) {
2650 		struct ice_ring *rx_ring = vsi->rx_rings[i];
2651 
2652 		if (rx_ring->xsk_pool)
2653 			napi_schedule(&rx_ring->q_vector->napi);
2654 	}
2655 }
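/* Editorial note (assumption): kicking NAPI on queues that have an AF_XDP
 * buffer pool lets them drain fill-queue buffers that userspace posted
 * while the queues were quiesced, instead of stalling until the next
 * interrupt.
 */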
2656 
2657 /**
2658  * ice_xdp_setup_prog - Add or remove XDP eBPF program
2659  * @vsi: VSI to setup XDP for
2660  * @prog: XDP program
2661  * @extack: netlink extended ack
2662  */
2663 static int
2664 ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
2665 		   struct netlink_ext_ack *extack)
2666 {
2667 	int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
2668 	bool if_running = netif_running(vsi->netdev);
2669 	int ret = 0, xdp_ring_err = 0;
2670 
2671 	if (frame_size > vsi->rx_buf_len) {
2672 		NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
2673 		return -EOPNOTSUPP;
2674 	}
2675 
2676 	/* need to stop netdev while setting up the program for Rx rings */
2677 	if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
2678 		ret = ice_down(vsi);
2679 		if (ret) {
2680 			NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed");
2681 			return ret;
2682 		}
2683 	}
2684 
2685 	if (!ice_is_xdp_ena_vsi(vsi) && prog) {
2686 		vsi->num_xdp_txq = vsi->alloc_rxq;
2687 		xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
2688 		if (xdp_ring_err)
2689 			NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
2690 	} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
2691 		xdp_ring_err = ice_destroy_xdp_rings(vsi);
2692 		if (xdp_ring_err)
2693 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
2694 	} else {
2695 		/* safe to call even when prog == vsi->xdp_prog as
2696 		 * dev_xdp_install in net/core/dev.c incremented prog's
2697 		 * refcount so corresponding bpf_prog_put won't cause
2698 		 * underflow
2699 		 */
2700 		ice_vsi_assign_bpf_prog(vsi, prog);
2701 	}
2702 
2703 	if (if_running)
2704 		ret = ice_up(vsi);
2705 
2706 	if (!ret && prog)
2707 		ice_vsi_rx_napi_schedule(vsi);
2708 
2709 	return (ret || xdp_ring_err) ? -ENOMEM : 0;
2710 }
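/* Example (editorial): this path is reached through ndo_bpf when
 * userspace attaches a program in native mode, e.g. with iproute2:
 *
 *	# ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
 *
 * Since num_xdp_txq is set to alloc_rxq above, every Rx queue gets a
 * dedicated XDP Tx ring for XDP_TX forwarding.
 */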
2711 
2712 /**
2713  * ice_xdp_safe_mode - XDP handler for safe mode
2714  * @dev: netdevice
2715  * @xdp: XDP command
2716  */
2717 static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
2718 			     struct netdev_bpf *xdp)
2719 {
2720 	NL_SET_ERR_MSG_MOD(xdp->extack,
2721 			   "Please provide working DDP firmware package in order to use XDP\n"
2722 			   "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
2723 	return -EOPNOTSUPP;
2724 }
2725 
2726 /**
2727  * ice_xdp - implements XDP handler
2728  * @dev: netdevice
2729  * @xdp: XDP command
2730  */
2731 static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2732 {
2733 	struct ice_netdev_priv *np = netdev_priv(dev);
2734 	struct ice_vsi *vsi = np->vsi;
2735 
2736 	if (vsi->type != ICE_VSI_PF) {
2737 		NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI");
2738 		return -EINVAL;
2739 	}
2740 
2741 	switch (xdp->command) {
2742 	case XDP_SETUP_PROG:
2743 		return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
2744 	case XDP_SETUP_XSK_POOL:
2745 		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
2746 					  xdp->xsk.queue_id);
2747 	default:
2748 		return -EINVAL;
2749 	}
2750 }
2751 
2752 /**
2753  * ice_ena_misc_vector - enable the non-queue interrupts
2754  * @pf: board private structure
2755  */
2756 static void ice_ena_misc_vector(struct ice_pf *pf)
2757 {
2758 	struct ice_hw *hw = &pf->hw;
2759 	u32 val;
2760 
2761 	/* Disable anti-spoof detection interrupt to prevent spurious event
2762 	 * interrupts during a function reset. Anti-spoof functionality is
2763 	 * still supported.
2764 	 */
2765 	val = rd32(hw, GL_MDCK_TX_TDPU);
2766 	val |= GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M;
2767 	wr32(hw, GL_MDCK_TX_TDPU, val);
2768 
2769 	/* clear things first */
2770 	wr32(hw, PFINT_OICR_ENA, 0);	/* disable all */
2771 	rd32(hw, PFINT_OICR);		/* read to clear */
2772 
2773 	val = (PFINT_OICR_ECC_ERR_M |
2774 	       PFINT_OICR_MAL_DETECT_M |
2775 	       PFINT_OICR_GRST_M |
2776 	       PFINT_OICR_PCI_EXCEPTION_M |
2777 	       PFINT_OICR_VFLR_M |
2778 	       PFINT_OICR_HMC_ERR_M |
2779 	       PFINT_OICR_PE_PUSH_M |
2780 	       PFINT_OICR_PE_CRITERR_M);
2781 
2782 	wr32(hw, PFINT_OICR_ENA, val);
2783 
2784 	/* SW_ITR_IDX = 0, but don't change INTENA */
2785 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
2786 	     GLINT_DYN_CTL_SW_ITR_INDX_M | GLINT_DYN_CTL_INTENA_MSK_M);
2787 }
2788 
2789 /**
2790  * ice_misc_intr - misc interrupt handler
2791  * @irq: interrupt number
2792  * @data: pointer to a q_vector
2793  */
2794 static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
2795 {
2796 	struct ice_pf *pf = (struct ice_pf *)data;
2797 	struct ice_hw *hw = &pf->hw;
2798 	irqreturn_t ret = IRQ_NONE;
2799 	struct device *dev;
2800 	u32 oicr, ena_mask;
2801 
2802 	dev = ice_pf_to_dev(pf);
2803 	set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
2804 	set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
2805 	set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
2806 
2807 	oicr = rd32(hw, PFINT_OICR);
2808 	ena_mask = rd32(hw, PFINT_OICR_ENA);
2809 
2810 	if (oicr & PFINT_OICR_SWINT_M) {
2811 		ena_mask &= ~PFINT_OICR_SWINT_M;
2812 		pf->sw_int_count++;
2813 	}
2814 
2815 	if (oicr & PFINT_OICR_MAL_DETECT_M) {
2816 		ena_mask &= ~PFINT_OICR_MAL_DETECT_M;
2817 		set_bit(ICE_MDD_EVENT_PENDING, pf->state);
2818 	}
2819 	if (oicr & PFINT_OICR_VFLR_M) {
2820 		/* disable any further VFLR event notifications */
2821 		if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
2822 			u32 reg = rd32(hw, PFINT_OICR_ENA);
2823 
2824 			reg &= ~PFINT_OICR_VFLR_M;
2825 			wr32(hw, PFINT_OICR_ENA, reg);
2826 		} else {
2827 			ena_mask &= ~PFINT_OICR_VFLR_M;
2828 			set_bit(ICE_VFLR_EVENT_PENDING, pf->state);
2829 		}
2830 	}
2831 
2832 	if (oicr & PFINT_OICR_GRST_M) {
2833 		u32 reset;
2834 
2835 		/* we have a reset warning */
2836 		ena_mask &= ~PFINT_OICR_GRST_M;
2837 		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
2838 			GLGEN_RSTAT_RESET_TYPE_S;
2839 
2840 		if (reset == ICE_RESET_CORER)
2841 			pf->corer_count++;
2842 		else if (reset == ICE_RESET_GLOBR)
2843 			pf->globr_count++;
2844 		else if (reset == ICE_RESET_EMPR)
2845 			pf->empr_count++;
2846 		else
2847 			dev_dbg(dev, "Invalid reset type %d\n", reset);
2848 
2849 		/* If a reset cycle isn't already in progress, we set a bit in
2850 		 * pf->state so that the service task can start a reset/rebuild.
2851 		 */
2852 		if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
2853 			if (reset == ICE_RESET_CORER)
2854 				set_bit(ICE_CORER_RECV, pf->state);
2855 			else if (reset == ICE_RESET_GLOBR)
2856 				set_bit(ICE_GLOBR_RECV, pf->state);
2857 			else
2858 				set_bit(ICE_EMPR_RECV, pf->state);
2859 
2860 			/* There are a couple of different bits at play here.
2861 			 * hw->reset_ongoing indicates whether the hardware is
2862 			 * in reset. This is set to true when a reset interrupt
2863 			 * is received and set back to false after the driver
2864 			 * has determined that the hardware is out of reset.
2865 			 *
2866 			 * ICE_RESET_OICR_RECV in pf->state indicates
2867 			 * that a post reset rebuild is required before the
2868 			 * driver is operational again. This is set above.
2869 			 *
2870 			 * As this is the start of the reset/rebuild cycle, set
2871 			 * both to indicate that.
2872 			 */
2873 			hw->reset_ongoing = true;
2874 		}
2875 	}
2876 
2877 	if (oicr & PFINT_OICR_TSYN_TX_M) {
2878 		ena_mask &= ~PFINT_OICR_TSYN_TX_M;
2879 		ice_ptp_process_ts(pf);
2880 	}
2881 
2882 	if (oicr & PFINT_OICR_TSYN_EVNT_M) {
2883 		u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
2884 		u32 gltsyn_stat = rd32(hw, GLTSYN_STAT(tmr_idx));
2885 
2886 		/* Save EVENTs from GLTSYN register */
2887 		pf->ptp.ext_ts_irq |= gltsyn_stat & (GLTSYN_STAT_EVENT0_M |
2888 						     GLTSYN_STAT_EVENT1_M |
2889 						     GLTSYN_STAT_EVENT2_M);
2890 		ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
2891 		kthread_queue_work(pf->ptp.kworker, &pf->ptp.extts_work);
2892 	}
2893 
2894 #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
2895 	if (oicr & ICE_AUX_CRIT_ERR) {
2896 		pf->oicr_err_reg |= oicr;
2897 		set_bit(ICE_AUX_ERR_PENDING, pf->state);
2898 		ena_mask &= ~ICE_AUX_CRIT_ERR;
2899 	}
2900 
2901 	/* Report any remaining unexpected interrupts */
2902 	oicr &= ena_mask;
2903 	if (oicr) {
2904 		dev_dbg(dev, "unhandled interrupt oicr=0x%08x\n", oicr);
2905 		/* If a critical error is pending there is no choice but to
2906 		 * reset the device.
2907 		 */
2908 		if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
2909 			    PFINT_OICR_ECC_ERR_M)) {
2910 			set_bit(ICE_PFR_REQ, pf->state);
2911 			ice_service_task_schedule(pf);
2912 		}
2913 	}
2914 	ret = IRQ_HANDLED;
2915 
2916 	ice_service_task_schedule(pf);
2917 	ice_irq_dynamic_ena(hw, NULL, NULL);
2918 
2919 	return ret;
2920 }
2921 
2922 /**
2923  * ice_dis_ctrlq_interrupts - disable control queue interrupts
2924  * @hw: pointer to HW structure
2925  */
2926 static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
2927 {
2928 	/* disable Admin queue Interrupt causes */
2929 	wr32(hw, PFINT_FW_CTL,
2930 	     rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M);
2931 
2932 	/* disable Mailbox queue Interrupt causes */
2933 	wr32(hw, PFINT_MBX_CTL,
2934 	     rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
2935 
2936 	wr32(hw, PFINT_SB_CTL,
2937 	     rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
2938 
2939 	/* disable Control queue Interrupt causes */
2940 	wr32(hw, PFINT_OICR_CTL,
2941 	     rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
2942 
2943 	ice_flush(hw);
2944 }
2945 
2946 /**
2947  * ice_free_irq_msix_misc - Unroll misc vector setup
2948  * @pf: board private structure
2949  */
2950 static void ice_free_irq_msix_misc(struct ice_pf *pf)
2951 {
2952 	struct ice_hw *hw = &pf->hw;
2953 
2954 	ice_dis_ctrlq_interrupts(hw);
2955 
2956 	/* disable OICR interrupt */
2957 	wr32(hw, PFINT_OICR_ENA, 0);
2958 	ice_flush(hw);
2959 
2960 	if (pf->msix_entries) {
2961 		synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
2962 		devm_free_irq(ice_pf_to_dev(pf),
2963 			      pf->msix_entries[pf->oicr_idx].vector, pf);
2964 	}
2965 
2966 	pf->num_avail_sw_msix += 1;
2967 	ice_free_res(pf->irq_tracker, pf->oicr_idx, ICE_RES_MISC_VEC_ID);
2968 }
2969 
2970 /**
2971  * ice_ena_ctrlq_interrupts - enable control queue interrupts
2972  * @hw: pointer to HW structure
2973  * @reg_idx: HW vector index to associate the control queue interrupts with
2974  */
2975 static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
2976 {
2977 	u32 val;
2978 
2979 	val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
2980 	       PFINT_OICR_CTL_CAUSE_ENA_M);
2981 	wr32(hw, PFINT_OICR_CTL, val);
2982 
2983 	/* enable Admin queue Interrupt causes */
2984 	val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
2985 	       PFINT_FW_CTL_CAUSE_ENA_M);
2986 	wr32(hw, PFINT_FW_CTL, val);
2987 
2988 	/* enable Mailbox queue Interrupt causes */
2989 	val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
2990 	       PFINT_MBX_CTL_CAUSE_ENA_M);
2991 	wr32(hw, PFINT_MBX_CTL, val);
2992 
2993 	/* enable Sideband queue Interrupt causes */
2994 	val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
2995 	       PFINT_SB_CTL_CAUSE_ENA_M);
2996 	wr32(hw, PFINT_SB_CTL, val);
2997 
2998 	ice_flush(hw);
2999 }
3000 
3001 /**
3002  * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
3003  * @pf: board private structure
3004  *
3005  * This sets up the handler for MSIX 0, which is used to manage the
3006  * non-queue interrupts, e.g. AdminQ and errors. This is not used
3007  * when in MSI or Legacy interrupt mode.
3008  */
3009 static int ice_req_irq_msix_misc(struct ice_pf *pf)
3010 {
3011 	struct device *dev = ice_pf_to_dev(pf);
3012 	struct ice_hw *hw = &pf->hw;
3013 	int oicr_idx, err = 0;
3014 
3015 	if (!pf->int_name[0])
3016 		snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
3017 			 dev_driver_string(dev), dev_name(dev));
3018 
3019 	/* Do not request IRQ but do enable OICR interrupt since settings are
3020 	 * lost during reset. Note that this function is called only during
3021 	 * rebuild path and not while reset is in progress.
3022 	 */
3023 	if (ice_is_reset_in_progress(pf->state))
3024 		goto skip_req_irq;
3025 
3026 	/* reserve one vector in irq_tracker for misc interrupts */
3027 	oicr_idx = ice_get_res(pf, pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3028 	if (oicr_idx < 0)
3029 		return oicr_idx;
3030 
3031 	pf->num_avail_sw_msix -= 1;
3032 	pf->oicr_idx = (u16)oicr_idx;
3033 
3034 	err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
3035 			       ice_misc_intr, 0, pf->int_name, pf);
3036 	if (err) {
3037 		dev_err(dev, "devm_request_irq for %s failed: %d\n",
3038 			pf->int_name, err);
3039 		ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
3040 		pf->num_avail_sw_msix += 1;
3041 		return err;
3042 	}
3043 
3044 skip_req_irq:
3045 	ice_ena_misc_vector(pf);
3046 
3047 	ice_ena_ctrlq_interrupts(hw, pf->oicr_idx);
3048 	wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_idx),
3049 	     ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S);
3050 
3051 	ice_flush(hw);
3052 	ice_irq_dynamic_ena(hw, NULL, NULL);
3053 
3054 	return 0;
3055 }
3056 
3057 /**
3058  * ice_napi_add - register NAPI handler for the VSI
3059  * @vsi: VSI for which NAPI handler is to be registered
3060  *
3061  * This function is only called in the driver's load path. Registering the NAPI
3062  * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
3063  * reset/rebuild, etc.)
3064  */
3065 static void ice_napi_add(struct ice_vsi *vsi)
3066 {
3067 	int v_idx;
3068 
3069 	if (!vsi->netdev)
3070 		return;
3071 
3072 	ice_for_each_q_vector(vsi, v_idx)
3073 		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
3074 			       ice_napi_poll, NAPI_POLL_WEIGHT);
3075 }
3076 
3077 /**
3078  * ice_set_ops - set netdev and ethtool ops for the given netdev
3079  * @netdev: netdev instance
3080  */
3081 static void ice_set_ops(struct net_device *netdev)
3082 {
3083 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3084 
3085 	if (ice_is_safe_mode(pf)) {
3086 		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
3087 		ice_set_ethtool_safe_mode_ops(netdev);
3088 		return;
3089 	}
3090 
3091 	netdev->netdev_ops = &ice_netdev_ops;
3092 	netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
3093 	ice_set_ethtool_ops(netdev);
3094 }
3095 
3096 /**
3097  * ice_set_netdev_features - set features for the given netdev
3098  * @netdev: netdev instance
3099  */
3100 static void ice_set_netdev_features(struct net_device *netdev)
3101 {
3102 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3103 	netdev_features_t csumo_features;
3104 	netdev_features_t vlano_features;
3105 	netdev_features_t dflt_features;
3106 	netdev_features_t tso_features;
3107 
3108 	if (ice_is_safe_mode(pf)) {
3109 		/* safe mode */
3110 		netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
3111 		netdev->hw_features = netdev->features;
3112 		return;
3113 	}
3114 
3115 	dflt_features = NETIF_F_SG	|
3116 			NETIF_F_HIGHDMA	|
3117 			NETIF_F_NTUPLE	|
3118 			NETIF_F_RXHASH;
3119 
3120 	csumo_features = NETIF_F_RXCSUM	  |
3121 			 NETIF_F_IP_CSUM  |
3122 			 NETIF_F_SCTP_CRC |
3123 			 NETIF_F_IPV6_CSUM;
3124 
3125 	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
3126 			 NETIF_F_HW_VLAN_CTAG_TX     |
3127 			 NETIF_F_HW_VLAN_CTAG_RX;
3128 
3129 	tso_features = NETIF_F_TSO			|
3130 		       NETIF_F_TSO_ECN			|
3131 		       NETIF_F_TSO6			|
3132 		       NETIF_F_GSO_GRE			|
3133 		       NETIF_F_GSO_UDP_TUNNEL		|
3134 		       NETIF_F_GSO_GRE_CSUM		|
3135 		       NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3136 		       NETIF_F_GSO_PARTIAL		|
3137 		       NETIF_F_GSO_IPXIP4		|
3138 		       NETIF_F_GSO_IPXIP6		|
3139 		       NETIF_F_GSO_UDP_L4;
3140 
3141 	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
3142 					NETIF_F_GSO_GRE_CSUM;
3143 	/* set features that user can change */
3144 	netdev->hw_features = dflt_features | csumo_features |
3145 			      vlano_features | tso_features;
3146 
3147 	/* add support for HW_CSUM on packets with MPLS header */
3148 	netdev->mpls_features = NETIF_F_HW_CSUM;
3149 
3150 	/* enable features */
3151 	netdev->features |= netdev->hw_features;
3152 	/* encap and VLAN devices inherit default, csumo and tso features */
3153 	netdev->hw_enc_features |= dflt_features | csumo_features |
3154 				   tso_features;
3155 	netdev->vlan_features |= dflt_features | csumo_features |
3156 				 tso_features;
3157 }
3158 
3159 /**
3160  * ice_cfg_netdev - Allocate, configure and register a netdev
3161  * @vsi: the VSI associated with the new netdev
3162  *
3163  * Returns 0 on success, negative value on failure
3164  */
3165 static int ice_cfg_netdev(struct ice_vsi *vsi)
3166 {
3167 	struct ice_netdev_priv *np;
3168 	struct net_device *netdev;
3169 	u8 mac_addr[ETH_ALEN];
3170 
3171 	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
3172 				    vsi->alloc_rxq);
3173 	if (!netdev)
3174 		return -ENOMEM;
3175 
3176 	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3177 	vsi->netdev = netdev;
3178 	np = netdev_priv(netdev);
3179 	np->vsi = vsi;
3180 
3181 	ice_set_netdev_features(netdev);
3182 
3183 	ice_set_ops(netdev);
3184 
3185 	if (vsi->type == ICE_VSI_PF) {
3186 		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
3187 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
3188 		eth_hw_addr_set(netdev, mac_addr);
3189 		ether_addr_copy(netdev->perm_addr, mac_addr);
3190 	}
3191 
3192 	netdev->priv_flags |= IFF_UNICAST_FLT;
3193 
3194 	/* Setup netdev TC information */
3195 	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
3196 
3197 	/* set up the watchdog timeout value to be 5 seconds */
3198 	netdev->watchdog_timeo = 5 * HZ;
3199 
3200 	netdev->min_mtu = ETH_MIN_MTU;
3201 	netdev->max_mtu = ICE_MAX_MTU;
3202 
3203 	return 0;
3204 }
3205 
3206 /**
3207  * ice_fill_rss_lut - Fill the RSS lookup table with default values
3208  * @lut: Lookup table
3209  * @rss_table_size: Lookup table size
3210  * @rss_size: Range of queue number for hashing
3211  */
3212 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
3213 {
3214 	u16 i;
3215 
3216 	for (i = 0; i < rss_table_size; i++)
3217 		lut[i] = i % rss_size;
3218 }
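/* Editorial worked example: with rss_table_size = 8 and rss_size = 3 the
 * LUT becomes { 0, 1, 2, 0, 1, 2, 0, 1 }, i.e. hash buckets are mapped
 * round-robin onto queues 0 through rss_size - 1.
 */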
3219 
3220 /**
3221  * ice_pf_vsi_setup - Set up a PF VSI
3222  * @pf: board private structure
3223  * @pi: pointer to the port_info instance
3224  *
3225  * Returns pointer to the successfully allocated VSI software struct
3226  * on success, otherwise returns NULL on failure.
3227  */
3228 static struct ice_vsi *
3229 ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3230 {
3231 	return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
3232 }
3233 
3234 /**
3235  * ice_ctrl_vsi_setup - Set up a control VSI
3236  * @pf: board private structure
3237  * @pi: pointer to the port_info instance
3238  *
3239  * Returns pointer to the successfully allocated VSI software struct
3240  * on success, otherwise returns NULL on failure.
3241  */
3242 static struct ice_vsi *
3243 ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3244 {
3245 	return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
3246 }
3247 
3248 /**
3249  * ice_lb_vsi_setup - Set up a loopback VSI
3250  * @pf: board private structure
3251  * @pi: pointer to the port_info instance
3252  *
3253  * Returns pointer to the successfully allocated VSI software struct
3254  * on success, otherwise returns NULL on failure.
3255  */
3256 struct ice_vsi *
3257 ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
3258 {
3259 	return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
3260 }
3261 
3262 /**
3263  * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3264  * @netdev: network interface to be adjusted
3265  * @proto: unused protocol
3266  * @vid: VLAN ID to be added
3267  *
3268  * net_device_ops implementation for adding VLAN IDs
3269  */
3270 static int
3271 ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
3272 		    u16 vid)
3273 {
3274 	struct ice_netdev_priv *np = netdev_priv(netdev);
3275 	struct ice_vsi *vsi = np->vsi;
3276 	int ret;
3277 
3278 	/* VLAN 0 is added by default during load/reset */
3279 	if (!vid)
3280 		return 0;
3281 
3282 	/* Enable VLAN pruning when a VLAN other than 0 is added */
3283 	if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
3284 		ret = ice_cfg_vlan_pruning(vsi, true, false);
3285 		if (ret)
3286 			return ret;
3287 	}
3288 
3289 	/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
3290 	 * packets aren't pruned by the device's internal switch on Rx
3291 	 */
3292 	ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3293 	if (!ret)
3294 		set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3295 
3296 	return ret;
3297 }
3298 
3299 /**
3300  * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3301  * @netdev: network interface to be adjusted
3302  * @proto: unused protocol
3303  * @vid: VLAN ID to be removed
3304  *
3305  * net_device_ops implementation for removing VLAN IDs
3306  */
3307 static int
3308 ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
3309 		     u16 vid)
3310 {
3311 	struct ice_netdev_priv *np = netdev_priv(netdev);
3312 	struct ice_vsi *vsi = np->vsi;
3313 	int ret;
3314 
3315 	/* don't allow removal of VLAN 0 */
3316 	if (!vid)
3317 		return 0;
3318 
3319 	/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
3320 	 * information
3321 	 */
3322 	ret = ice_vsi_kill_vlan(vsi, vid);
3323 	if (ret)
3324 		return ret;
3325 
3326 	/* Disable pruning when VLAN 0 is the only VLAN rule */
3327 	if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
3328 		ret = ice_cfg_vlan_pruning(vsi, false, false);
3329 
3330 	set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
3331 	return ret;
3332 }
3333 
3334 /**
3335  * ice_setup_pf_sw - Setup the HW switch on startup or after reset
3336  * @pf: board private structure
3337  *
3338  * Returns 0 on success, negative value on failure
3339  */
3340 static int ice_setup_pf_sw(struct ice_pf *pf)
3341 {
3342 	struct ice_vsi *vsi;
3343 	int status = 0;
3344 
3345 	if (ice_is_reset_in_progress(pf->state))
3346 		return -EBUSY;
3347 
3348 	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
3349 	if (!vsi)
3350 		return -ENOMEM;
3351 
3352 	status = ice_cfg_netdev(vsi);
3353 	if (status) {
3354 		status = -ENODEV;
3355 		goto unroll_vsi_setup;
3356 	}
3357 	/* netdev has to be configured before setting frame size */
3358 	ice_vsi_cfg_frame_size(vsi);
3359 
3360 	/* Setup DCB netlink interface */
3361 	ice_dcbnl_setup(vsi);
3362 
3363 	/* registering the NAPI handler requires both the queues and
3364 	 * netdev to be created, which are done in ice_pf_vsi_setup()
3365 	 * and ice_cfg_netdev() respectively
3366 	 */
3367 	ice_napi_add(vsi);
3368 
3369 	status = ice_set_cpu_rx_rmap(vsi);
3370 	if (status) {
3371 		dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
3372 			vsi->vsi_num, status);
3373 		status = -EINVAL;
3374 		goto unroll_napi_add;
3375 	}
3376 	status = ice_init_mac_fltr(pf);
3377 	if (status)
3378 		goto free_cpu_rx_map;
3379 
3380 	return status;
3381 
3382 free_cpu_rx_map:
3383 	ice_free_cpu_rx_rmap(vsi);
3384 
3385 unroll_napi_add:
3386 	if (vsi) {
3387 		ice_napi_del(vsi);
3388 		if (vsi->netdev) {
3389 			clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
3390 			free_netdev(vsi->netdev);
3391 			vsi->netdev = NULL;
3392 		}
3393 	}
3394 
3395 unroll_vsi_setup:
3396 	ice_vsi_release(vsi);
3397 	return status;
3398 }
3399 
3400 /**
3401  * ice_get_avail_q_count - Get count of available (unallocated) queues
3402  * @pf_qmap: bitmap to get queue use count from
3403  * @lock: pointer to a mutex that protects access to pf_qmap
3404  * @size: size of the bitmap
3405  */
3406 static u16
3407 ice_get_avail_q_count(unsigned long *pf_qmap, struct mutex *lock, u16 size)
3408 {
3409 	unsigned long bit;
3410 	u16 count = 0;
3411 
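	/* each clear bit in pf_qmap is a queue index that has not been
	 * handed out yet, so counting clear bits gives the free queue count
	 */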
3412 	mutex_lock(lock);
3413 	for_each_clear_bit(bit, pf_qmap, size)
3414 		count++;
3415 	mutex_unlock(lock);
3416 
3417 	return count;
3418 }
3419 
3420 /**
3421  * ice_get_avail_txq_count - Get count of available Tx queues
3422  * @pf: pointer to an ice_pf instance
3423  */
3424 u16 ice_get_avail_txq_count(struct ice_pf *pf)
3425 {
3426 	return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex,
3427 				     pf->max_pf_txqs);
3428 }
3429 
3430 /**
3431  * ice_get_avail_rxq_count - Get count of available Rx queues
3432  * @pf: pointer to an ice_pf instance
3433  */
3434 u16 ice_get_avail_rxq_count(struct ice_pf *pf)
3435 {
3436 	return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex,
3437 				     pf->max_pf_rxqs);
3438 }
3439 
3440 /**
3441  * ice_deinit_pf - Unrolls initializations done by ice_init_pf
3442  * @pf: board private structure to deinitialize
3443  */
3444 static void ice_deinit_pf(struct ice_pf *pf)
3445 {
3446 	ice_service_task_stop(pf);
3447 	mutex_destroy(&pf->adev_mutex);
3448 	mutex_destroy(&pf->sw_mutex);
3449 	mutex_destroy(&pf->tc_mutex);
3450 	mutex_destroy(&pf->avail_q_mutex);
3451 
3452 	if (pf->avail_txqs) {
3453 		bitmap_free(pf->avail_txqs);
3454 		pf->avail_txqs = NULL;
3455 	}
3456 
3457 	if (pf->avail_rxqs) {
3458 		bitmap_free(pf->avail_rxqs);
3459 		pf->avail_rxqs = NULL;
3460 	}
3461 
3462 	if (pf->ptp.clock)
3463 		ptp_clock_unregister(pf->ptp.clock);
3464 }
3465 
3466 /**
3467  * ice_set_pf_caps - set PFs capability flags
3468  * @pf: pointer to the PF instance
3469  */
3470 static void ice_set_pf_caps(struct ice_pf *pf)
3471 {
3472 	struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
3473 
3474 	clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3475 	clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
3476 	if (func_caps->common_cap.rdma) {
3477 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3478 		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
3479 	}
3480 	clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3481 	if (func_caps->common_cap.dcb)
3482 		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
3483 	clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3484 	if (func_caps->common_cap.sr_iov_1_1) {
3485 		set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
3486 		pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
3487 					      ICE_MAX_VF_COUNT);
3488 	}
3489 	clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
3490 	if (func_caps->common_cap.rss_table_size)
3491 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
3492 
3493 	clear_bit(ICE_FLAG_FD_ENA, pf->flags);
3494 	if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) {
3495 		u16 unused;
3496 
3497 		/* ctrl_vsi_idx will be set to a valid value when flow director
3498 		 * is setup by ice_init_fdir
3499 		 */
3500 		pf->ctrl_vsi_idx = ICE_NO_VSI;
3501 		set_bit(ICE_FLAG_FD_ENA, pf->flags);
3502 		/* force guaranteed filter pool for PF */
3503 		ice_alloc_fd_guar_item(&pf->hw, &unused,
3504 				       func_caps->fd_fltr_guar);
3505 		/* force shared filter pool for PF */
3506 		ice_alloc_fd_shrd_item(&pf->hw, &unused,
3507 				       func_caps->fd_fltr_best_effort);
3508 	}
3509 
3510 	clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3511 	if (func_caps->common_cap.ieee_1588)
3512 		set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
3513 
3514 	pf->max_pf_txqs = func_caps->common_cap.num_txq;
3515 	pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
3516 }
3517 
3518 /**
3519  * ice_init_pf - Initialize general software structures (struct ice_pf)
3520  * @pf: board private structure to initialize
3521  */
3522 static int ice_init_pf(struct ice_pf *pf)
3523 {
3524 	ice_set_pf_caps(pf);
3525 
3526 	mutex_init(&pf->sw_mutex);
3527 	mutex_init(&pf->tc_mutex);
3528 	mutex_init(&pf->adev_mutex);
3529 
3530 	INIT_HLIST_HEAD(&pf->aq_wait_list);
3531 	spin_lock_init(&pf->aq_wait_lock);
3532 	init_waitqueue_head(&pf->aq_wait_queue);
3533 
3534 	init_waitqueue_head(&pf->reset_wait_queue);
3535 
3536 	/* setup service timer and periodic service task */
3537 	timer_setup(&pf->serv_tmr, ice_service_timer, 0);
3538 	pf->serv_tmr_period = HZ;
3539 	INIT_WORK(&pf->serv_task, ice_service_task);
3540 	clear_bit(ICE_SERVICE_SCHED, pf->state);
3541 
3542 	mutex_init(&pf->avail_q_mutex);
3543 	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
3544 	if (!pf->avail_txqs)
3545 		return -ENOMEM;
3546 
3547 	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
3548 	if (!pf->avail_rxqs) {
3549 		bitmap_free(pf->avail_txqs);
3550 		pf->avail_txqs = NULL;
3551 		return -ENOMEM;
3552 	}
3553 
3554 	return 0;
3555 }
3556 
3557 /**
3558  * ice_ena_msix_range - Request a range of MSIX vectors from the OS
3559  * @pf: board private structure
3560  *
3561  * Compute the number of MSI-X vectors required (v_budget) and request them
3562  * from the OS. Returns the number of vectors reserved, or negative on failure
3563  */
3564 static int ice_ena_msix_range(struct ice_pf *pf)
3565 {
3566 	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
3567 	struct device *dev = ice_pf_to_dev(pf);
3568 	int needed, err, i;
3569 
3570 	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
3571 	num_cpus = num_online_cpus();
3572 
3573 	/* reserve for LAN miscellaneous handler */
3574 	needed = ICE_MIN_LAN_OICR_MSIX;
3575 	if (v_left < needed)
3576 		goto no_hw_vecs_left_err;
3577 	v_budget += needed;
3578 	v_left -= needed;
3579 
3580 	/* reserve for flow director */
3581 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
3582 		needed = ICE_FDIR_MSIX;
3583 		if (v_left < needed)
3584 			goto no_hw_vecs_left_err;
3585 		v_budget += needed;
3586 		v_left -= needed;
3587 	}
3588 
3589 	/* total used for non-traffic vectors */
3590 	v_other = v_budget;
3591 
3592 	/* reserve vectors for LAN traffic */
3593 	needed = num_cpus;
3594 	if (v_left < needed)
3595 		goto no_hw_vecs_left_err;
3596 	pf->num_lan_msix = needed;
3597 	v_budget += needed;
3598 	v_left -= needed;
3599 
3600 	/* reserve vectors for RDMA auxiliary driver */
3601 	if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3602 		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
3603 		if (v_left < needed)
3604 			goto no_hw_vecs_left_err;
3605 		pf->num_rdma_msix = needed;
3606 		v_budget += needed;
3607 		v_left -= needed;
3608 	}
3609 
3610 	pf->msix_entries = devm_kcalloc(dev, v_budget,
3611 					sizeof(*pf->msix_entries), GFP_KERNEL);
3612 	if (!pf->msix_entries) {
3613 		err = -ENOMEM;
3614 		goto exit_err;
3615 	}
3616 
3617 	for (i = 0; i < v_budget; i++)
3618 		pf->msix_entries[i].entry = i;
3619 
3620 	/* actually reserve the vectors */
3621 	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
3622 					 ICE_MIN_MSIX, v_budget);
3623 	if (v_actual < 0) {
3624 		dev_err(dev, "unable to reserve MSI-X vectors\n");
3625 		err = v_actual;
3626 		goto msix_err;
3627 	}
3628 
3629 	if (v_actual < v_budget) {
3630 		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
3631 			 v_budget, v_actual);
3632 
3633 		if (v_actual < ICE_MIN_MSIX) {
3634 			/* error if we can't get minimum vectors */
3635 			pci_disable_msix(pf->pdev);
3636 			err = -ERANGE;
3637 			goto msix_err;
3638 		} else {
3639 			int v_remain = v_actual - v_other;
3640 			int v_rdma = 0, v_min_rdma = 0;
3641 
3642 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
3643 				/* Need at least 1 interrupt in addition to
3644 				 * AEQ MSIX
3645 				 */
3646 				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
3647 				v_min_rdma = ICE_MIN_RDMA_MSIX;
3648 			}
3649 
3650 			if (v_actual == ICE_MIN_MSIX ||
3651 			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
3652 				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
3653 				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
3654 
3655 				pf->num_rdma_msix = 0;
3656 				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
3657 			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
3658 				   (v_remain - v_rdma < v_rdma)) {
3659 				/* Support minimum RDMA and give remaining
3660 				 * vectors to LAN MSIX
3661 				 */
3662 				pf->num_rdma_msix = v_min_rdma;
3663 				pf->num_lan_msix = v_remain - v_min_rdma;
3664 			} else {
3665 				/* Split remaining MSIX with RDMA after
3666 				 * accounting for AEQ MSIX
3667 				 */
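			/* worked example, assuming ICE_RDMA_NUM_AEQ_MSIX is 4:
			 * with v_remain = 16, RDMA gets (16 - 4) / 2 + 4 = 10
			 * vectors and LAN gets the remaining 6
			 */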
3668 				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
3669 						    ICE_RDMA_NUM_AEQ_MSIX;
3670 				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
3671 			}
3672 
3673 			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
3674 				   pf->num_lan_msix);
3675 
3676 			if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
3677 				dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
3678 					   pf->num_rdma_msix);
3679 		}
3680 	}
3681 
3682 	return v_actual;
3683 
3684 msix_err:
3685 	devm_kfree(dev, pf->msix_entries);
3686 	goto exit_err;
3687 
3688 no_hw_vecs_left_err:
3689 	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
3690 		needed, v_left);
3691 	err = -ERANGE;
3692 exit_err:
3693 	pf->num_rdma_msix = 0;
3694 	pf->num_lan_msix = 0;
3695 	return err;
3696 }
3697 
3698 /**
3699  * ice_dis_msix - Disable MSI-X interrupt setup in OS
3700  * @pf: board private structure
3701  */
3702 static void ice_dis_msix(struct ice_pf *pf)
3703 {
3704 	pci_disable_msix(pf->pdev);
3705 	devm_kfree(ice_pf_to_dev(pf), pf->msix_entries);
3706 	pf->msix_entries = NULL;
3707 }
3708 
3709 /**
3710  * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
3711  * @pf: board private structure
3712  */
3713 static void ice_clear_interrupt_scheme(struct ice_pf *pf)
3714 {
3715 	ice_dis_msix(pf);
3716 
3717 	if (pf->irq_tracker) {
3718 		devm_kfree(ice_pf_to_dev(pf), pf->irq_tracker);
3719 		pf->irq_tracker = NULL;
3720 	}
3721 }
3722 
3723 /**
3724  * ice_init_interrupt_scheme - Determine proper interrupt scheme
3725  * @pf: board private structure to initialize
3726  */
3727 static int ice_init_interrupt_scheme(struct ice_pf *pf)
3728 {
3729 	int vectors;
3730 
3731 	vectors = ice_ena_msix_range(pf);
3732 
3733 	if (vectors < 0)
3734 		return vectors;
3735 
3736 	/* set up vector assignment tracking */
3737 	pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
3738 				       struct_size(pf->irq_tracker, list, vectors),
3739 				       GFP_KERNEL);
3740 	if (!pf->irq_tracker) {
3741 		ice_dis_msix(pf);
3742 		return -ENOMEM;
3743 	}
3744 
3745 	/* populate the SW interrupt pool with the number of OS-granted IRQs */
3746 	pf->num_avail_sw_msix = (u16)vectors;
3747 	pf->irq_tracker->num_entries = (u16)vectors;
3748 	pf->irq_tracker->end = pf->irq_tracker->num_entries;
3749 
3750 	return 0;
3751 }
3752 
3753 /**
3754  * ice_is_wol_supported - check if WoL is supported
3755  * @hw: pointer to hardware info
3756  *
3757  * Check if WoL is supported based on the HW configuration.
3758  * Returns true if NVM supports and enables WoL for this port, false otherwise
3759  */
3760 bool ice_is_wol_supported(struct ice_hw *hw)
3761 {
3762 	u16 wol_ctrl;
3763 
3764 	/* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
3765 	 * word) indicates WoL is not supported on the corresponding PF ID.
3766 	 */
3767 	if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
3768 		return false;
3769 
3770 	return !(BIT(hw->port_info->lport) & wol_ctrl);
3771 }
3772 
3773 /**
3774  * ice_vsi_recfg_qs - Change the number of queues on a VSI
3775  * @vsi: VSI being changed
3776  * @new_rx: new number of Rx queues
3777  * @new_tx: new number of Tx queues
3778  * @locked: is adev device_lock held
3779  *
3780  * Only change the number of queues if new_tx or new_rx is non-zero.
3781  *
3782  * Returns 0 on success.
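 *
 * Reached e.g. from the ethtool set_channels (ethtool -L) path.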
3783  */
3784 int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
3785 {
3786 	struct ice_pf *pf = vsi->back;
3787 	int err = 0, timeout = 50;
3788 
3789 	if (!new_rx && !new_tx)
3790 		return -EINVAL;
3791 
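	/* wait up to ~50-100 ms (50 iterations of 1-2 ms sleeps) for any
	 * concurrent reconfiguration to release ICE_CFG_BUSY
	 */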
3792 	while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
3793 		timeout--;
3794 		if (!timeout)
3795 			return -EBUSY;
3796 		usleep_range(1000, 2000);
3797 	}
3798 
3799 	if (new_tx)
3800 		vsi->req_txq = (u16)new_tx;
3801 	if (new_rx)
3802 		vsi->req_rxq = (u16)new_rx;
3803 
3804 	/* set for the next time the netdev is started */
3805 	if (!netif_running(vsi->netdev)) {
3806 		ice_vsi_rebuild(vsi, false);
3807 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
3808 		goto done;
3809 	}
3810 
3811 	ice_vsi_close(vsi);
3812 	ice_vsi_rebuild(vsi, false);
3813 	ice_pf_dcb_recfg(pf, locked);
3814 	ice_vsi_open(vsi);
3815 done:
3816 	clear_bit(ICE_CFG_BUSY, pf->state);
3817 	return err;
3818 }
3819 
3820 /**
3821  * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
3822  * @pf: PF to configure
3823  *
3824  * No VLAN offloads/filtering are advertised in safe mode so make sure the PF
3825  * VSI can still Tx/Rx VLAN tagged packets.
3826  */
3827 static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
3828 {
3829 	struct ice_vsi *vsi = ice_get_main_vsi(pf);
3830 	struct ice_vsi_ctx *ctxt;
3831 	enum ice_status status;
3832 	struct ice_hw *hw;
3833 
3834 	if (!vsi)
3835 		return;
3836 
3837 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
3838 	if (!ctxt)
3839 		return;
3840 
3841 	hw = &pf->hw;
3842 	ctxt->info = vsi->info;
3843 
3844 	ctxt->info.valid_sections =
3845 		cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
3846 			    ICE_AQ_VSI_PROP_SECURITY_VALID |
3847 			    ICE_AQ_VSI_PROP_SW_VALID);
3848 
3849 	/* disable VLAN anti-spoof */
3850 	ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
3851 				  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
3852 
3853 	/* disable VLAN pruning and keep all other settings */
3854 	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
3855 
3856 	/* allow all VLANs on Tx and don't strip on Rx */
3857 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
3858 		ICE_AQ_VSI_VLAN_EMOD_NOTHING;
3859 
3860 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
3861 	if (status) {
3862 		dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
3863 			ice_stat_str(status),
3864 			ice_aq_str(hw->adminq.sq_last_status));
3865 	} else {
3866 		vsi->info.sec_flags = ctxt->info.sec_flags;
3867 		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
3868 		vsi->info.vlan_flags = ctxt->info.vlan_flags;
3869 	}
3870 
3871 	kfree(ctxt);
3872 }
3873 
3874 /**
3875  * ice_log_pkg_init - log result of DDP package load
3876  * @hw: pointer to hardware info
3877  * @status: status of package load
3878  */
3879 static void
3880 ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
3881 {
3882 	struct ice_pf *pf = (struct ice_pf *)hw->back;
3883 	struct device *dev = ice_pf_to_dev(pf);
3884 
3885 	switch (*status) {
3886 	case ICE_SUCCESS:
3887 		/* The package download AdminQ command returned success either
3888 		 * because this download succeeded or because ICE_ERR_AQ_NO_WORK
3889 		 * indicated a package is already loaded on the device.
3890 		 */
3891 		if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
3892 		    hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
3893 		    hw->pkg_ver.update == hw->active_pkg_ver.update &&
3894 		    hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
3895 		    !memcmp(hw->pkg_name, hw->active_pkg_name,
3896 			    sizeof(hw->pkg_name))) {
3897 			if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
3898 				dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
3899 					 hw->active_pkg_name,
3900 					 hw->active_pkg_ver.major,
3901 					 hw->active_pkg_ver.minor,
3902 					 hw->active_pkg_ver.update,
3903 					 hw->active_pkg_ver.draft);
3904 			else
3905 				dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
3906 					 hw->active_pkg_name,
3907 					 hw->active_pkg_ver.major,
3908 					 hw->active_pkg_ver.minor,
3909 					 hw->active_pkg_ver.update,
3910 					 hw->active_pkg_ver.draft);
3911 		} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
3912 			   hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
3913 			dev_err(dev, "The device has a DDP package that is not supported by the driver.  The device has package '%s' version %d.%d.x.x.  The driver requires version %d.%d.x.x.  Entering Safe Mode.\n",
3914 				hw->active_pkg_name,
3915 				hw->active_pkg_ver.major,
3916 				hw->active_pkg_ver.minor,
3917 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3918 			*status = ICE_ERR_NOT_SUPPORTED;
3919 		} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3920 			   hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
3921 			dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device.  The device has package '%s' version %d.%d.%d.%d.  The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
3922 				 hw->active_pkg_name,
3923 				 hw->active_pkg_ver.major,
3924 				 hw->active_pkg_ver.minor,
3925 				 hw->active_pkg_ver.update,
3926 				 hw->active_pkg_ver.draft,
3927 				 hw->pkg_name,
3928 				 hw->pkg_ver.major,
3929 				 hw->pkg_ver.minor,
3930 				 hw->pkg_ver.update,
3931 				 hw->pkg_ver.draft);
3932 		} else {
3933 			dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system.  If the problem persists, update the NVM.  Entering Safe Mode.\n");
3934 			*status = ICE_ERR_NOT_SUPPORTED;
3935 		}
3936 		break;
3937 	case ICE_ERR_FW_DDP_MISMATCH:
3938 		dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package.  Please update the device's NVM.  Entering safe mode.\n");
3939 		break;
3940 	case ICE_ERR_BUF_TOO_SHORT:
3941 	case ICE_ERR_CFG:
3942 		dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
3943 		break;
3944 	case ICE_ERR_NOT_SUPPORTED:
3945 		/* Package File version not supported */
3946 		if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
3947 		    (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3948 		     hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
3949 			dev_err(dev, "The DDP package file version is higher than the driver supports.  Please use an updated driver.  Entering Safe Mode.\n");
3950 		else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
3951 			 (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
3952 			  hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
3953 			dev_err(dev, "The DDP package file version is lower than the driver supports.  The driver requires version %d.%d.x.x.  Please use an updated DDP Package file.  Entering Safe Mode.\n",
3954 				ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
3955 		break;
3956 	case ICE_ERR_AQ_ERROR:
3957 		switch (hw->pkg_dwnld_status) {
3958 		case ICE_AQ_RC_ENOSEC:
3959 		case ICE_AQ_RC_EBADSIG:
3960 			dev_err(dev, "The DDP package could not be loaded because its signature is not valid.  Please use a valid DDP Package.  Entering Safe Mode.\n");
3961 			return;
3962 		case ICE_AQ_RC_ESVN:
3963 			dev_err(dev, "The DDP Package could not be loaded because its security revision is too low.  Please use an updated DDP Package.  Entering Safe Mode.\n");
3964 			return;
3965 		case ICE_AQ_RC_EBADMAN:
3966 		case ICE_AQ_RC_EBADBUF:
3967 			dev_err(dev, "An error occurred on the device while loading the DDP package.  The device will be reset.\n");
3968 			/* poll for reset to complete */
3969 			if (ice_check_reset(hw))
3970 				dev_err(dev, "Error resetting device. Please reload the driver\n");
3971 			return;
3972 		default:
3973 			break;
3974 		}
3975 		fallthrough;
3976 	default:
3977 		dev_err(dev, "An unknown error (%d) occurred when loading the DDP package.  Entering Safe Mode.\n",
3978 			*status);
3979 		break;
3980 	}
3981 }
3982 
3983 /**
3984  * ice_load_pkg - load/reload the DDP Package file
3985  * @firmware: firmware structure when firmware requested or NULL for reload
3986  * @pf: pointer to the PF instance
3987  *
3988  * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
3989  * initialize HW tables.
3990  */
3991 static void
3992 ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
3993 {
3994 	enum ice_status status = ICE_ERR_PARAM;
3995 	struct device *dev = ice_pf_to_dev(pf);
3996 	struct ice_hw *hw = &pf->hw;
3997 
3998 	/* Load DDP Package */
3999 	if (firmware && !hw->pkg_copy) {
4000 		status = ice_copy_and_init_pkg(hw, firmware->data,
4001 					       firmware->size);
4002 		ice_log_pkg_init(hw, &status);
4003 	} else if (!firmware && hw->pkg_copy) {
4004 		/* Reload package during rebuild after CORER/GLOBR reset */
4005 		status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
4006 		ice_log_pkg_init(hw, &status);
4007 	} else {
4008 		dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
4009 	}
4010 
4011 	if (status) {
4012 		/* Safe Mode */
4013 		clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4014 		return;
4015 	}
4016 
4017 	/* Successful download package is the precondition for advanced
4018 	 * features, hence setting the ICE_FLAG_ADV_FEATURES flag
4019 	 */
4020 	set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
4021 }
4022 
4023 /**
4024  * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
4025  * @pf: pointer to the PF structure
4026  *
4027  * There is no error returned here because the driver should be able to handle
4028  * 128 Byte cache lines, so we only print a warning in case issues are seen,
4029  * specifically with Tx.
4030  */
4031 static void ice_verify_cacheline_size(struct ice_pf *pf)
4032 {
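	/* a set cacheline-size bit in GLPCI_CNF2 means the 64 Byte assumption
	 * does not hold (the device is using 128 Byte cache lines)
	 */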
4033 	if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
4034 		dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
4035 			 ICE_CACHE_LINE_BYTES);
4036 }
4037 
4038 /**
4039  * ice_send_version - update firmware with driver version
4040  * @pf: PF struct
4041  *
4042  * Returns ICE_SUCCESS on success, else error code
4043  */
4044 static enum ice_status ice_send_version(struct ice_pf *pf)
4045 {
4046 	struct ice_driver_ver dv;
4047 
4048 	dv.major_ver = 0xff;
4049 	dv.minor_ver = 0xff;
4050 	dv.build_ver = 0xff;
4051 	dv.subbuild_ver = 0;
4052 	strscpy((char *)dv.driver_string, UTS_RELEASE,
4053 		sizeof(dv.driver_string));
4054 	return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
4055 }
4056 
4057 /**
4058  * ice_init_fdir - Initialize flow director VSI and configuration
4059  * @pf: pointer to the PF instance
4060  *
4061  * returns 0 on success, negative on error
4062  */
4063 static int ice_init_fdir(struct ice_pf *pf)
4064 {
4065 	struct device *dev = ice_pf_to_dev(pf);
4066 	struct ice_vsi *ctrl_vsi;
4067 	int err;
4068 
4069 	/* Side Band Flow Director needs to have a control VSI.
4070 	 * Allocate it and store it in the PF.
4071 	 */
4072 	ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info);
4073 	if (!ctrl_vsi) {
4074 		dev_dbg(dev, "could not create control VSI\n");
4075 		return -ENOMEM;
4076 	}
4077 
4078 	err = ice_vsi_open_ctrl(ctrl_vsi);
4079 	if (err) {
4080 		dev_dbg(dev, "could not open control VSI\n");
4081 		goto err_vsi_open;
4082 	}
4083 
4084 	mutex_init(&pf->hw.fdir_fltr_lock);
4085 
4086 	err = ice_fdir_create_dflt_rules(pf);
4087 	if (err)
4088 		goto err_fdir_rule;
4089 
4090 	return 0;
4091 
4092 err_fdir_rule:
4093 	ice_fdir_release_flows(&pf->hw);
4094 	ice_vsi_close(ctrl_vsi);
4095 err_vsi_open:
4096 	ice_vsi_release(ctrl_vsi);
4097 	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
4098 		pf->vsi[pf->ctrl_vsi_idx] = NULL;
4099 		pf->ctrl_vsi_idx = ICE_NO_VSI;
4100 	}
4101 	return err;
4102 }
4103 
4104 /**
4105  * ice_get_opt_fw_name - return optional firmware file name or NULL
4106  * @pf: pointer to the PF instance
4107  */
4108 static char *ice_get_opt_fw_name(struct ice_pf *pf)
4109 {
4110 	/* Optional firmware name is the same as the default, plus a dash
4111 	 * followed by an EUI-64 identifier (PCIe Device Serial Number)
4112 	 */
4113 	struct pci_dev *pdev = pf->pdev;
4114 	char *opt_fw_filename;
4115 	u64 dsn;
4116 
4117 	/* Determine the name of the optional file using the DSN (two
4118 	 * dwords following the start of the DSN Capability).
4119 	 */
4120 	dsn = pci_get_dsn(pdev);
4121 	if (!dsn)
4122 		return NULL;
4123 
4124 	opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
4125 	if (!opt_fw_filename)
4126 		return NULL;
4127 
4128 	snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg",
4129 		 ICE_DDP_PKG_PATH, dsn);
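	/* yields e.g. "intel/ice/ddp/ice-0123456789abcdef.pkg"; the DSN value
	 * shown here is illustrative only
	 */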
4130 
4131 	return opt_fw_filename;
4132 }
4133 
4134 /**
4135  * ice_request_fw - request the DDP package file and load it on the device
4136  * @pf: pointer to the PF instance
4137  */
4138 static void ice_request_fw(struct ice_pf *pf)
4139 {
4140 	char *opt_fw_filename = ice_get_opt_fw_name(pf);
4141 	const struct firmware *firmware = NULL;
4142 	struct device *dev = ice_pf_to_dev(pf);
4143 	int err = 0;
4144 
4145 	/* optional device-specific DDP (if present) overrides the default DDP
4146 	 * package file. The kernel logs a debug message if the file doesn't exist,
4147 	 * and warning messages for other errors.
4148 	 */
4149 	if (opt_fw_filename) {
4150 		err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
4151 		if (err) {
4152 			kfree(opt_fw_filename);
4153 			goto dflt_pkg_load;
4154 		}
4155 
4156 		/* request for firmware was successful. Download to device */
4157 		ice_load_pkg(firmware, pf);
4158 		kfree(opt_fw_filename);
4159 		release_firmware(firmware);
4160 		return;
4161 	}
4162 
4163 dflt_pkg_load:
4164 	err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
4165 	if (err) {
4166 		dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
4167 		return;
4168 	}
4169 
4170 	/* request for firmware was successful. Download to device */
4171 	ice_load_pkg(firmware, pf);
4172 	release_firmware(firmware);
4173 }
4174 
4175 /**
4176  * ice_print_wake_reason - show the wake up cause in the log
4177  * @pf: pointer to the PF struct
4178  */
4179 static void ice_print_wake_reason(struct ice_pf *pf)
4180 {
4181 	u32 wus = pf->wakeup_reason;
4182 	const char *wake_str;
4183 
4184 	/* if no wake event, nothing to print */
4185 	if (!wus)
4186 		return;
4187 
4188 	if (wus & PFPM_WUS_LNKC_M)
4189 		wake_str = "Link\n";
4190 	else if (wus & PFPM_WUS_MAG_M)
4191 		wake_str = "Magic Packet\n";
4192 	else if (wus & PFPM_WUS_MNG_M)
4193 		wake_str = "Management\n";
4194 	else if (wus & PFPM_WUS_FW_RST_WK_M)
4195 		wake_str = "Firmware Reset\n";
4196 	else
4197 		wake_str = "Unknown\n";
4198 
4199 	dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
4200 }
4201 
4202 /**
4203  * ice_register_netdev - register netdev and devlink port
4204  * @pf: pointer to the PF struct
4205  */
4206 static int ice_register_netdev(struct ice_pf *pf)
4207 {
4208 	struct ice_vsi *vsi;
4209 	int err = 0;
4210 
4211 	vsi = ice_get_main_vsi(pf);
4212 	if (!vsi || !vsi->netdev)
4213 		return -EIO;
4214 
4215 	err = register_netdev(vsi->netdev);
4216 	if (err)
4217 		goto err_register_netdev;
4218 
4219 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4220 	netif_carrier_off(vsi->netdev);
4221 	netif_tx_stop_all_queues(vsi->netdev);
4222 	err = ice_devlink_create_pf_port(pf);
4223 	if (err)
4224 		goto err_devlink_create;
4225 
4226 	devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
4227 
4228 	return 0;
4229 err_devlink_create:
4230 	unregister_netdev(vsi->netdev);
4231 	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
4232 err_register_netdev:
4233 	free_netdev(vsi->netdev);
4234 	vsi->netdev = NULL;
4235 	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
4236 	return err;
4237 }
4238 
4239 /**
4240  * ice_probe - Device initialization routine
4241  * @pdev: PCI device information struct
4242  * @ent: entry in ice_pci_tbl
4243  *
4244  * Returns 0 on success, negative on failure
4245  */
4246 static int
4247 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
4248 {
4249 	struct device *dev = &pdev->dev;
4250 	struct ice_pf *pf;
4251 	struct ice_hw *hw;
4252 	int i, err;
4253 
4254 	if (pdev->is_virtfn) {
4255 		dev_err(dev, "can't probe a virtual function\n");
4256 		return -EINVAL;
4257 	}
4258 
4259 	/* when under a kdump kernel initiate a reset before enabling the
4260 	 * device in order to clear out any pending DMA transactions. These
4261 	 * transactions can cause some systems to machine check when doing
4262 	 * the pcim_enable_device() below.
4263 	 */
4264 	if (is_kdump_kernel()) {
4265 		pci_save_state(pdev);
4266 		pci_clear_master(pdev);
4267 		err = pcie_flr(pdev);
4268 		if (err)
4269 			return err;
4270 		pci_restore_state(pdev);
4271 	}
4272 
4273 	/* this driver uses devres, see
4274 	 * Documentation/driver-api/driver-model/devres.rst
4275 	 */
4276 	err = pcim_enable_device(pdev);
4277 	if (err)
4278 		return err;
4279 
4280 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
4281 	if (err) {
4282 		dev_err(dev, "BAR0 I/O map error %d\n", err);
4283 		return err;
4284 	}
4285 
4286 	pf = ice_allocate_pf(dev);
4287 	if (!pf)
4288 		return -ENOMEM;
4289 
4290 	/* initialize Auxiliary index to invalid value */
4291 	pf->aux_idx = -1;
4292 
4293 	/* set up for high or low DMA */
4294 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4295 	if (err)
4296 		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4297 	if (err) {
4298 		dev_err(dev, "DMA configuration failed: 0x%x\n", err);
4299 		return err;
4300 	}
4301 
4302 	pci_enable_pcie_error_reporting(pdev);
4303 	pci_set_master(pdev);
4304 
4305 	pf->pdev = pdev;
4306 	pci_set_drvdata(pdev, pf);
4307 	set_bit(ICE_DOWN, pf->state);
4308 	/* Disable service task until DOWN bit is cleared */
4309 	set_bit(ICE_SERVICE_DIS, pf->state);
4310 
4311 	hw = &pf->hw;
4312 	hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
4313 	pci_save_state(pdev);
4314 
4315 	hw->back = pf;
4316 	hw->vendor_id = pdev->vendor;
4317 	hw->device_id = pdev->device;
4318 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4319 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4320 	hw->subsystem_device_id = pdev->subsystem_device;
4321 	hw->bus.device = PCI_SLOT(pdev->devfn);
4322 	hw->bus.func = PCI_FUNC(pdev->devfn);
4323 	ice_set_ctrlq_len(hw);
4324 
4325 	pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
4326 
4327 	err = ice_devlink_register(pf);
4328 	if (err) {
4329 		dev_err(dev, "ice_devlink_register failed: %d\n", err);
4330 		goto err_exit_unroll;
4331 	}
4332 
4333 #ifndef CONFIG_DYNAMIC_DEBUG
4334 	if (debug < -1)
4335 		hw->debug_mask = debug;
4336 #endif
4337 
4338 	err = ice_init_hw(hw);
4339 	if (err) {
4340 		dev_err(dev, "ice_init_hw failed: %d\n", err);
4341 		err = -EIO;
4342 		goto err_exit_unroll;
4343 	}
4344 
4345 	ice_request_fw(pf);
4346 
4347 	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
4348 	 * set in pf->flags, which will cause ice_is_safe_mode to return
4349 	 * true
4350 	 */
4351 	if (ice_is_safe_mode(pf)) {
4352 		dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
4353 		/* we already got function/device capabilities but these don't
4354 		 * reflect what the driver needs to do in safe mode. Instead of
4355 		 * adding conditional logic everywhere to ignore these
4356 		 * device/function capabilities, override them.
4357 		 */
4358 		ice_set_safe_mode_caps(hw);
4359 	}
4360 
4361 	err = ice_init_pf(pf);
4362 	if (err) {
4363 		dev_err(dev, "ice_init_pf failed: %d\n", err);
4364 		goto err_init_pf_unroll;
4365 	}
4366 
4367 	ice_devlink_init_regions(pf);
4368 
4369 	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
4370 	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
4371 	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
4372 	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
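	/* populate consecutive table slots only for tunnel types with a
	 * non-zero entry count; i tracks the next free slot
	 */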
4373 	i = 0;
4374 	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
4375 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4376 			pf->hw.tnl.valid_count[TNL_VXLAN];
4377 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4378 			UDP_TUNNEL_TYPE_VXLAN;
4379 		i++;
4380 	}
4381 	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
4382 		pf->hw.udp_tunnel_nic.tables[i].n_entries =
4383 			pf->hw.tnl.valid_count[TNL_GENEVE];
4384 		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
4385 			UDP_TUNNEL_TYPE_GENEVE;
4386 		i++;
4387 	}
4388 
4389 	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
4390 	if (!pf->num_alloc_vsi) {
4391 		err = -EIO;
4392 		goto err_init_pf_unroll;
4393 	}
4394 	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
4395 		dev_warn(&pf->pdev->dev,
4396 			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
4397 			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
4398 		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
4399 	}
4400 
4401 	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
4402 			       GFP_KERNEL);
4403 	if (!pf->vsi) {
4404 		err = -ENOMEM;
4405 		goto err_init_pf_unroll;
4406 	}
4407 
4408 	err = ice_init_interrupt_scheme(pf);
4409 	if (err) {
4410 		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
4411 		err = -EIO;
4412 		goto err_init_vsi_unroll;
4413 	}
4414 
4415 	/* In case of MSI-X we are going to set up the misc vector right here
4416 	 * to handle admin queue events etc. In case of legacy and MSI
4417 	 * the misc functionality and queue processing are combined in
4418 	 * the same vector, and that gets set up at open.
4419 	 */
4420 	err = ice_req_irq_msix_misc(pf);
4421 	if (err) {
4422 		dev_err(dev, "setup of misc vector failed: %d\n", err);
4423 		goto err_init_interrupt_unroll;
4424 	}
4425 
4426 	/* create switch struct for the switch element created by FW on boot */
4427 	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
4428 	if (!pf->first_sw) {
4429 		err = -ENOMEM;
4430 		goto err_msix_misc_unroll;
4431 	}
4432 
4433 	if (hw->evb_veb)
4434 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
4435 	else
4436 		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
4437 
4438 	pf->first_sw->pf = pf;
4439 
4440 	/* record the sw_id available for later use */
4441 	pf->first_sw->sw_id = hw->port_info->sw_id;
4442 
4443 	err = ice_setup_pf_sw(pf);
4444 	if (err) {
4445 		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
4446 		goto err_alloc_sw_unroll;
4447 	}
4448 
4449 	clear_bit(ICE_SERVICE_DIS, pf->state);
4450 
4451 	/* tell the firmware we are up */
4452 	err = ice_send_version(pf);
4453 	if (err) {
4454 		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
4455 			UTS_RELEASE, err);
4456 		goto err_send_version_unroll;
4457 	}
4458 
4459 	/* since everything is good, start the service timer */
4460 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4461 
4462 	err = ice_init_link_events(pf->hw.port_info);
4463 	if (err) {
4464 		dev_err(dev, "ice_init_link_events failed: %d\n", err);
4465 		goto err_send_version_unroll;
4466 	}
4467 
4468 	/* not a fatal error if this fails */
4469 	err = ice_init_nvm_phy_type(pf->hw.port_info);
4470 	if (err)
4471 		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
4472 
4473 	/* not a fatal error if this fails */
4474 	err = ice_update_link_info(pf->hw.port_info);
4475 	if (err)
4476 		dev_err(dev, "ice_update_link_info failed: %d\n", err);
4477 
4478 	ice_init_link_dflt_override(pf->hw.port_info);
4479 
4480 	ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
4481 
4482 	/* if media available, initialize PHY settings */
4483 	if (pf->hw.port_info->phy.link_info.link_info &
4484 	    ICE_AQ_MEDIA_AVAILABLE) {
4485 		/* not a fatal error if this fails */
4486 		err = ice_init_phy_user_cfg(pf->hw.port_info);
4487 		if (err)
4488 			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
4489 
4490 		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
4491 			struct ice_vsi *vsi = ice_get_main_vsi(pf);
4492 
4493 			if (vsi)
4494 				ice_configure_phy(vsi);
4495 		}
4496 	} else {
4497 		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
4498 	}
4499 
4500 	ice_verify_cacheline_size(pf);
4501 
4502 	/* Save wakeup reason register for later use */
4503 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4504 
4505 	/* check for a power management event */
4506 	ice_print_wake_reason(pf);
4507 
4508 	/* clear wake status, all bits */
4509 	wr32(hw, PFPM_WUS, U32_MAX);
4510 
4511 	/* Disable WoL at init, wait for user to enable */
4512 	device_set_wakeup_enable(dev, false);
4513 
4514 	if (ice_is_safe_mode(pf)) {
4515 		ice_set_safe_mode_vlan_cfg(pf);
4516 		goto probe_done;
4517 	}
4518 
4519 	/* initialize DDP driven features */
4520 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4521 		ice_ptp_init(pf);
4522 
4523 	/* Note: Flow director init failure is non-fatal to load */
4524 	if (ice_init_fdir(pf))
4525 		dev_err(dev, "could not initialize flow director\n");
4526 
4527 	/* Note: DCB init failure is non-fatal to load */
4528 	if (ice_init_pf_dcb(pf, false)) {
4529 		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
4530 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
4531 	} else {
4532 		ice_cfg_lldp_mib_change(&pf->hw, true);
4533 	}
4534 
4535 	if (ice_init_lag(pf))
4536 		dev_warn(dev, "Failed to init link aggregation support\n");
4537 
4538 	/* print PCI link speed and width */
4539 	pcie_print_link_status(pf->pdev);
4540 
4541 probe_done:
4542 	err = ice_register_netdev(pf);
4543 	if (err)
4544 		goto err_netdev_reg;
4545 
4546 	/* ready to go, so clear down state bit */
4547 	clear_bit(ICE_DOWN, pf->state);
4548 	if (ice_is_aux_ena(pf)) {
4549 		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
4550 		if (pf->aux_idx < 0) {
4551 			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
4552 			err = -ENOMEM;
4553 			goto err_netdev_reg;
4554 		}
4555 
4556 		err = ice_init_rdma(pf);
4557 		if (err) {
4558 			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
4559 			err = -EIO;
4560 			goto err_init_aux_unroll;
4561 		}
4562 	} else {
4563 		dev_warn(dev, "RDMA is not supported on this device\n");
4564 	}
4565 
4566 	return 0;
4567 
4568 err_init_aux_unroll:
4569 	pf->adev = NULL;
4570 	ida_free(&ice_aux_ida, pf->aux_idx);
4571 err_netdev_reg:
4572 err_send_version_unroll:
4573 	ice_vsi_release_all(pf);
4574 err_alloc_sw_unroll:
4575 	set_bit(ICE_SERVICE_DIS, pf->state);
4576 	set_bit(ICE_DOWN, pf->state);
4577 	devm_kfree(dev, pf->first_sw);
4578 err_msix_misc_unroll:
4579 	ice_free_irq_msix_misc(pf);
4580 err_init_interrupt_unroll:
4581 	ice_clear_interrupt_scheme(pf);
4582 err_init_vsi_unroll:
4583 	devm_kfree(dev, pf->vsi);
4584 err_init_pf_unroll:
4585 	ice_deinit_pf(pf);
4586 	ice_devlink_destroy_regions(pf);
4587 	ice_deinit_hw(hw);
4588 err_exit_unroll:
4589 	ice_devlink_unregister(pf);
4590 	pci_disable_pcie_error_reporting(pdev);
4591 	pci_disable_device(pdev);
4592 	return err;
4593 }
4594 
4595 /**
4596  * ice_set_wake - enable or disable Wake on LAN
4597  * @pf: pointer to the PF struct
4598  *
4599  * Simple helper for WoL control
4600  */
4601 static void ice_set_wake(struct ice_pf *pf)
4602 {
4603 	struct ice_hw *hw = &pf->hw;
4604 	bool wol = pf->wol_ena;
4605 
4606 	/* clear wake state, otherwise new wake events won't fire */
4607 	wr32(hw, PFPM_WUS, U32_MAX);
4608 
4609 	/* enable / disable APM wake up, no RMW needed */
4610 	wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
4611 
4612 	/* set magic packet filter enabled */
4613 	wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
4614 }
4615 
4616 /**
4617  * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
4618  * @pf: pointer to the PF struct
4619  *
4620  * Issue firmware command to enable multicast magic wake, making
4621  * sure that any locally administered address (LAA) is used for
4622  * wake, and that PF reset doesn't undo the LAA.
4623  */
4624 static void ice_setup_mc_magic_wake(struct ice_pf *pf)
4625 {
4626 	struct device *dev = ice_pf_to_dev(pf);
4627 	struct ice_hw *hw = &pf->hw;
4628 	enum ice_status status;
4629 	u8 mac_addr[ETH_ALEN];
4630 	struct ice_vsi *vsi;
4631 	u8 flags;
4632 
4633 	if (!pf->wol_ena)
4634 		return;
4635 
4636 	vsi = ice_get_main_vsi(pf);
4637 	if (!vsi)
4638 		return;
4639 
4640 	/* Get current MAC address in case it's an LAA */
4641 	if (vsi->netdev)
4642 		ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
4643 	else
4644 		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
4645 
4646 	flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
4647 		ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
4648 		ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
4649 
4650 	status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
4651 	if (status)
4652 		dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
4653 			ice_stat_str(status),
4654 			ice_aq_str(hw->adminq.sq_last_status));
4655 }
4656 
4657 /**
4658  * ice_remove - Device removal routine
4659  * @pdev: PCI device information struct
4660  */
4661 static void ice_remove(struct pci_dev *pdev)
4662 {
4663 	struct ice_pf *pf = pci_get_drvdata(pdev);
4664 	int i;
4665 
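	/* give any in-flight reset up to ICE_MAX_RESET_WAIT iterations of
	 * 100 ms each to finish before tearing the device down
	 */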
4666 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
4667 		if (!ice_is_reset_in_progress(pf->state))
4668 			break;
4669 		msleep(100);
4670 	}
4671 
4672 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
4673 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
4674 		ice_free_vfs(pf);
4675 	}
4676 
4677 	ice_service_task_stop(pf);
4678 
4679 	ice_aq_cancel_waiting_tasks(pf);
4680 	ice_unplug_aux_dev(pf);
4681 	if (pf->aux_idx >= 0)
4682 		ida_free(&ice_aux_ida, pf->aux_idx);
4683 	set_bit(ICE_DOWN, pf->state);
4684 
4685 	mutex_destroy(&pf->hw.fdir_fltr_lock);
4686 	ice_deinit_lag(pf);
4687 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
4688 		ice_ptp_release(pf);
4689 	if (!ice_is_safe_mode(pf))
4690 		ice_remove_arfs(pf);
4691 	ice_setup_mc_magic_wake(pf);
4692 	ice_vsi_release_all(pf);
4693 	ice_set_wake(pf);
4694 	ice_free_irq_msix_misc(pf);
4695 	ice_for_each_vsi(pf, i) {
4696 		if (!pf->vsi[i])
4697 			continue;
4698 		ice_vsi_free_q_vectors(pf->vsi[i]);
4699 	}
4700 	ice_deinit_pf(pf);
4701 	ice_devlink_destroy_regions(pf);
4702 	ice_deinit_hw(&pf->hw);
4703 	ice_devlink_unregister(pf);
4704 
4705 	/* Issue a PFR as part of the prescribed driver unload flow.  Do not
4706 	 * do it via ice_schedule_reset() since there is no need to rebuild
4707 	 * and the service task is already stopped.
4708 	 */
4709 	ice_reset(&pf->hw, ICE_RESET_PFR);
4710 	pci_wait_for_pending_transaction(pdev);
4711 	ice_clear_interrupt_scheme(pf);
4712 	pci_disable_pcie_error_reporting(pdev);
4713 	pci_disable_device(pdev);
4714 }
4715 
4716 /**
4717  * ice_shutdown - PCI callback for shutting down device
4718  * @pdev: PCI device information struct
4719  */
4720 static void ice_shutdown(struct pci_dev *pdev)
4721 {
4722 	struct ice_pf *pf = pci_get_drvdata(pdev);
4723 
4724 	ice_remove(pdev);
4725 
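	/* on power off, arm wake-from-D3 according to the user's WoL setting
	 * before platform power is cut
	 */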
4726 	if (system_state == SYSTEM_POWER_OFF) {
4727 		pci_wake_from_d3(pdev, pf->wol_ena);
4728 		pci_set_power_state(pdev, PCI_D3hot);
4729 	}
4730 }
4731 
4732 #ifdef CONFIG_PM
4733 /**
4734  * ice_prepare_for_shutdown - prep for PCI shutdown
4735  * @pf: board private structure
4736  *
4737  * Inform or close all dependent features in prep for PCI device shutdown
4738  */
4739 static void ice_prepare_for_shutdown(struct ice_pf *pf)
4740 {
4741 	struct ice_hw *hw = &pf->hw;
4742 	u32 v;
4743 
4744 	/* Notify VFs of impending reset */
4745 	if (ice_check_sq_alive(hw, &hw->mailboxq))
4746 		ice_vc_notify_reset(pf);
4747 
4748 	dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
4749 
4750 	/* disable the VSIs and their queues that are not already DOWN */
4751 	ice_pf_dis_all_vsi(pf, false);
4752 
4753 	ice_for_each_vsi(pf, v)
4754 		if (pf->vsi[v])
4755 			pf->vsi[v]->vsi_num = 0;
4756 
4757 	ice_shutdown_all_ctrlq(hw);
4758 }
4759 
4760 /**
4761  * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
4762  * @pf: board private structure to reinitialize
4763  *
4764  * This routine reinitializes the interrupt scheme that was cleared during
4765  * the power management suspend callback.
4766  *
4767  * This should be called during resume routine to re-allocate the q_vectors
4768  * and reacquire interrupts.
4769  */
4770 static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
4771 {
4772 	struct device *dev = ice_pf_to_dev(pf);
4773 	int ret, v;
4774 
4775 	/* Since we cleared the interrupt scheme during suspend, we need to
4776 	 * set it back up during resume...
4777 	 */
4778 
4779 	ret = ice_init_interrupt_scheme(pf);
4780 	if (ret) {
4781 		dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
4782 		return ret;
4783 	}
4784 
4785 	/* Remap vectors and rings, after successful re-init interrupts */
4786 	ice_for_each_vsi(pf, v) {
4787 		if (!pf->vsi[v])
4788 			continue;
4789 
4790 		ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
4791 		if (ret)
4792 			goto err_reinit;
4793 		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
4794 	}
4795 
4796 	ret = ice_req_irq_msix_misc(pf);
4797 	if (ret) {
4798 		dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
4799 			ret);
4800 		goto err_reinit;
4801 	}
4802 
4803 	return 0;
4804 
4805 err_reinit:
4806 	while (v--)
4807 		if (pf->vsi[v])
4808 			ice_vsi_free_q_vectors(pf->vsi[v]);
4809 
4810 	return ret;
4811 }
4812 
4813 /**
4814  * ice_suspend
4815  * @dev: generic device information structure
4816  *
4817  * Power Management callback to quiesce the device and prepare
4818  * for D3 transition.
4819  */
4820 static int __maybe_unused ice_suspend(struct device *dev)
4821 {
4822 	struct pci_dev *pdev = to_pci_dev(dev);
4823 	struct ice_pf *pf;
4824 	int disabled, v;
4825 
4826 	pf = pci_get_drvdata(pdev);
4827 
4828 	if (!ice_pf_state_is_nominal(pf)) {
4829 		dev_err(dev, "Device is not ready, no need to suspend it\n");
4830 		return -EBUSY;
4831 	}
4832 
4833 	/* Stop watchdog tasks until resume completion.
4834 	 * Even though it is most likely that the service task is
4835 	 * disabled if the device is suspended or down, the service task's
4836 	 * state is controlled by a different state bit, and we should
4837 	 * store and honor whatever state that bit is in at this point.
4838 	 */
4839 	disabled = ice_service_task_stop(pf);
4840 
4841 	ice_unplug_aux_dev(pf);
4842 
4843 	/* Already suspended? Then there is nothing to do */
4844 	if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
4845 		if (!disabled)
4846 			ice_service_task_restart(pf);
4847 		return 0;
4848 	}
4849 
4850 	if (test_bit(ICE_DOWN, pf->state) ||
4851 	    ice_is_reset_in_progress(pf->state)) {
4852 		dev_err(dev, "can't suspend device in reset or already down\n");
4853 		if (!disabled)
4854 			ice_service_task_restart(pf);
4855 		return 0;
4856 	}
4857 
4858 	ice_setup_mc_magic_wake(pf);
4859 
4860 	ice_prepare_for_shutdown(pf);
4861 
4862 	ice_set_wake(pf);
4863 
4864 	/* Free vectors, clear the interrupt scheme and release IRQs
4865 	 * for proper hibernation, especially with large number of CPUs.
4866 	 * Otherwise hibernation might fail when mapping all the vectors back
4867 	 * to CPU0.
4868 	 */
4869 	ice_free_irq_msix_misc(pf);
4870 	ice_for_each_vsi(pf, v) {
4871 		if (!pf->vsi[v])
4872 			continue;
4873 		ice_vsi_free_q_vectors(pf->vsi[v]);
4874 	}
4875 	ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
4876 	ice_clear_interrupt_scheme(pf);
4877 
4878 	pci_save_state(pdev);
4879 	pci_wake_from_d3(pdev, pf->wol_ena);
4880 	pci_set_power_state(pdev, PCI_D3hot);
4881 	return 0;
4882 }
4883 
4884 /**
4885  * ice_resume - PM callback for waking up from D3
4886  * @dev: generic device information structure
4887  */
4888 static int __maybe_unused ice_resume(struct device *dev)
4889 {
4890 	struct pci_dev *pdev = to_pci_dev(dev);
4891 	enum ice_reset_req reset_type;
4892 	struct ice_pf *pf;
4893 	struct ice_hw *hw;
4894 	int ret;
4895 
4896 	pci_set_power_state(pdev, PCI_D0);
4897 	pci_restore_state(pdev);
4898 	pci_save_state(pdev);
4899 
4900 	if (!pci_device_is_present(pdev))
4901 		return -ENODEV;
4902 
4903 	ret = pci_enable_device_mem(pdev);
4904 	if (ret) {
4905 		dev_err(dev, "Cannot enable device after suspend\n");
4906 		return ret;
4907 	}
4908 
4909 	pf = pci_get_drvdata(pdev);
4910 	hw = &pf->hw;
4911 
4912 	pf->wakeup_reason = rd32(hw, PFPM_WUS);
4913 	ice_print_wake_reason(pf);
4914 
4915 	/* We cleared the interrupt scheme when we suspended, so we need to
4916 	 * restore it now to resume device functionality.
4917 	 */
4918 	ret = ice_reinit_interrupt_scheme(pf);
4919 	if (ret)
4920 		dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
4921 
4922 	clear_bit(ICE_DOWN, pf->state);
4923 	/* Now perform PF reset and rebuild */
4924 	reset_type = ICE_RESET_PFR;
4925 	/* re-enable service task for reset, but allow reset to schedule it */
4926 	clear_bit(ICE_SERVICE_DIS, pf->state);
4927 
4928 	if (ice_schedule_reset(pf, reset_type))
4929 		dev_err(dev, "Reset during resume failed.\n");
4930 
4931 	clear_bit(ICE_SUSPENDED, pf->state);
4932 	ice_service_task_restart(pf);
4933 
4934 	/* Restart the service task */
4935 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
4936 
4937 	return 0;
4938 }
4939 #endif /* CONFIG_PM */
4940 
4941 /**
4942  * ice_pci_err_detected - warning that PCI error has been detected
4943  * @pdev: PCI device information struct
4944  * @err: the type of PCI error
4945  *
4946  * Called to warn that something happened on the PCI bus and the error handling
4947  * is in progress.  Allows the driver to gracefully prepare/handle PCI errors.
4948  */
4949 static pci_ers_result_t
4950 ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
4951 {
4952 	struct ice_pf *pf = pci_get_drvdata(pdev);
4953 
4954 	if (!pf) {
4955 		dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
4956 			__func__, err);
4957 		return PCI_ERS_RESULT_DISCONNECT;
4958 	}
4959 
4960 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
4961 		ice_service_task_stop(pf);
4962 
4963 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
4964 			set_bit(ICE_PFR_REQ, pf->state);
4965 			ice_prepare_for_reset(pf);
4966 		}
4967 	}
4968 
4969 	return PCI_ERS_RESULT_NEED_RESET;
4970 }
4971 
4972 /**
4973  * ice_pci_err_slot_reset - a PCI slot reset has just happened
4974  * @pdev: PCI device information struct
4975  *
4976  * Called to determine if the driver can recover from the PCI slot reset by
4977  * using a register read to determine if the device is recoverable.
4978  */
4979 static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
4980 {
4981 	struct ice_pf *pf = pci_get_drvdata(pdev);
4982 	pci_ers_result_t result;
4983 	int err;
4984 	u32 reg;
4985 
4986 	err = pci_enable_device_mem(pdev);
4987 	if (err) {
4988 		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
4989 			err);
4990 		result = PCI_ERS_RESULT_DISCONNECT;
4991 	} else {
4992 		pci_set_master(pdev);
4993 		pci_restore_state(pdev);
4994 		pci_save_state(pdev);
4995 		pci_wake_from_d3(pdev, false);
4996 
4997 		/* Check for life */
4998 		reg = rd32(&pf->hw, GLGEN_RTRIG);
4999 		if (!reg)
5000 			result = PCI_ERS_RESULT_RECOVERED;
5001 		else
5002 			result = PCI_ERS_RESULT_DISCONNECT;
5003 	}
5004 
5005 	err = pci_aer_clear_nonfatal_status(pdev);
5006 	if (err)
5007 		dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
5008 			err);
5009 	/* non-fatal, continue */
5010 
5011 	return result;
5012 }
5013 
5014 /**
5015  * ice_pci_err_resume - restart operations after PCI error recovery
5016  * @pdev: PCI device information struct
5017  *
5018  * Called to allow the driver to bring things back up after PCI error and/or
5019  * reset recovery have finished
5020  */
5021 static void ice_pci_err_resume(struct pci_dev *pdev)
5022 {
5023 	struct ice_pf *pf = pci_get_drvdata(pdev);
5024 
5025 	if (!pf) {
5026 		dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
5027 			__func__);
5028 		return;
5029 	}
5030 
5031 	if (test_bit(ICE_SUSPENDED, pf->state)) {
5032 		dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
5033 			__func__);
5034 		return;
5035 	}
5036 
5037 	ice_restore_all_vfs_msi_state(pdev);
5038 
5039 	ice_do_reset(pf, ICE_RESET_PFR);
5040 	ice_service_task_restart(pf);
5041 	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
5042 }
5043 
5044 /**
5045  * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5046  * @pdev: PCI device information struct
5047  */
5048 static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
5049 {
5050 	struct ice_pf *pf = pci_get_drvdata(pdev);
5051 
5052 	if (!test_bit(ICE_SUSPENDED, pf->state)) {
5053 		ice_service_task_stop(pf);
5054 
5055 		if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
5056 			set_bit(ICE_PFR_REQ, pf->state);
5057 			ice_prepare_for_reset(pf);
5058 		}
5059 	}
5060 }
5061 
5062 /**
5063  * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5064  * @pdev: PCI device information struct
5065  */
5066 static void ice_pci_err_reset_done(struct pci_dev *pdev)
5067 {
5068 	ice_pci_err_resume(pdev);
5069 }
5070 
5071 /* ice_pci_tbl - PCI Device ID Table
5072  *
5073  * Wildcard entries (PCI_ANY_ID) should come last
5074  * Last entry must be all 0s
5075  *
5076  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
5077  *   Class, Class Mask, private data (not used) }
5078  */
5079 static const struct pci_device_id ice_pci_tbl[] = {
5080 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
5081 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
5082 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
5083 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
5084 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
5085 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
5086 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
5087 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
5088 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
5089 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
5090 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
5091 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
5092 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
5093 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
5094 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
5095 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
5096 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
5097 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
5098 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
5099 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
5100 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
5101 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
5102 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
5103 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
5104 	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
5105 	/* required last entry */
5106 	{ 0, }
5107 };
5108 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
5109 
5110 static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
5111 
5112 static const struct pci_error_handlers ice_pci_err_handler = {
5113 	.error_detected = ice_pci_err_detected,
5114 	.slot_reset = ice_pci_err_slot_reset,
5115 	.reset_prepare = ice_pci_err_reset_prepare,
5116 	.reset_done = ice_pci_err_reset_done,
5117 	.resume = ice_pci_err_resume
5118 };
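
/* For reference, a sketch of how the PCI core drives these hooks (the exact
 * sequence depends on the platform's AER/DPC support). On a reported error:
 * error_detected -> (optional) slot_reset -> resume. reset_prepare and
 * reset_done instead bracket a function-level reset requested from user
 * space, e.g.:
 *
 *   echo 1 > /sys/bus/pci/devices/<domain:bus:dev.fn>/reset
 */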
5119 
5120 static struct pci_driver ice_driver = {
5121 	.name = KBUILD_MODNAME,
5122 	.id_table = ice_pci_tbl,
5123 	.probe = ice_probe,
5124 	.remove = ice_remove,
5125 #ifdef CONFIG_PM
5126 	.driver.pm = &ice_pm_ops,
5127 #endif /* CONFIG_PM */
5128 	.shutdown = ice_shutdown,
5129 	.sriov_configure = ice_sriov_configure,
5130 	.err_handler = &ice_pci_err_handler
5131 };
5132 
5133 /**
5134  * ice_module_init - Driver registration routine
5135  *
5136  * ice_module_init is the first routine called when the driver is
5137  * loaded. It allocates the driver workqueue and registers with the PCI subsystem.
5138  */
5139 static int __init ice_module_init(void)
5140 {
5141 	int status;
5142 
5143 	pr_info("%s\n", ice_driver_string);
5144 	pr_info("%s\n", ice_copyright);
5145 
5146 	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
5147 	if (!ice_wq) {
5148 		pr_err("Failed to create workqueue\n");
5149 		return -ENOMEM;
5150 	}
5151 
5152 	status = pci_register_driver(&ice_driver);
5153 	if (status) {
5154 		pr_err("failed to register PCI driver, err %d\n", status);
5155 		destroy_workqueue(ice_wq);
5156 	}
5157 
5158 	return status;
5159 }
5160 module_init(ice_module_init);
5161 
5162 /**
5163  * ice_module_exit - Driver exit cleanup routine
5164  *
5165  * ice_module_exit is called just before the driver is removed
5166  * from memory.
5167  */
5168 static void __exit ice_module_exit(void)
5169 {
5170 	pci_unregister_driver(&ice_driver);
5171 	destroy_workqueue(ice_wq);
5172 	pr_info("module unloaded\n");
5173 }
5174 module_exit(ice_module_exit);
5175 
5176 /**
5177  * ice_set_mac_address - NDO callback to set MAC address
5178  * @netdev: network interface device structure
5179  * @pi: pointer to an address structure
5180  *
5181  * Returns 0 on success, negative on failure
5182  */
5183 static int ice_set_mac_address(struct net_device *netdev, void *pi)
5184 {
5185 	struct ice_netdev_priv *np = netdev_priv(netdev);
5186 	struct ice_vsi *vsi = np->vsi;
5187 	struct ice_pf *pf = vsi->back;
5188 	struct ice_hw *hw = &pf->hw;
5189 	struct sockaddr *addr = pi;
5190 	enum ice_status status;
5191 	u8 old_mac[ETH_ALEN];
5192 	u8 flags = 0;
5193 	int err = 0;
5194 	u8 *mac;
5195 
5196 	mac = (u8 *)addr->sa_data;
5197 
5198 	if (!is_valid_ether_addr(mac))
5199 		return -EADDRNOTAVAIL;
5200 
5201 	if (ether_addr_equal(netdev->dev_addr, mac)) {
5202 		netdev_dbg(netdev, "already using mac %pM\n", mac);
5203 		return 0;
5204 	}
5205 
5206 	if (test_bit(ICE_DOWN, pf->state) ||
5207 	    ice_is_reset_in_progress(pf->state)) {
5208 		netdev_err(netdev, "can't set mac %pM. device not ready\n",
5209 			   mac);
5210 		return -EBUSY;
5211 	}
5212 
5213 	netif_addr_lock_bh(netdev);
5214 	ether_addr_copy(old_mac, netdev->dev_addr);
5215 	/* change the netdev's MAC address */
5216 	eth_hw_addr_set(netdev, mac);
5217 	netif_addr_unlock_bh(netdev);
5218 
5219 	/* Clean up old MAC filter. Not an error if old filter doesn't exist */
5220 	status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5221 	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5222 		err = -EADDRNOTAVAIL;
5223 		goto err_update_filters;
5224 	}
5225 
5226 	/* Add filter for new MAC. If filter exists, return success */
5227 	status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
5228 	if (status == ICE_ERR_ALREADY_EXISTS)
5229 		/* Although this MAC filter is already present in hardware it's
5230 		 * possible in some cases (e.g. bonding) that dev_addr was
5231 		 * modified outside of the driver and needs to be restored back
5232 		 * to this value.
5233 		 */
5234 		netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
5235 	else if (status)
5236 		/* error if the new filter addition failed */
5237 		err = -EADDRNOTAVAIL;
5238 
5239 err_update_filters:
5240 	if (err) {
5241 		netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5242 			   mac);
5243 		netif_addr_lock_bh(netdev);
5244 		eth_hw_addr_set(netdev, old_mac);
5245 		netif_addr_unlock_bh(netdev);
5246 		return err;
5247 	}
5248 
5249 	netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5250 		   netdev->dev_addr);
5251 
5252 	/* write new MAC address to the firmware */
5253 	flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
5254 	status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
5255 	if (status) {
5256 		netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
5257 			   mac, ice_stat_str(status));
5258 	}
5259 	return 0;
5260 }
5261 
5262 /**
5263  * ice_set_rx_mode - NDO callback to set the netdev filters
5264  * @netdev: network interface device structure
5265  */
5266 static void ice_set_rx_mode(struct net_device *netdev)
5267 {
5268 	struct ice_netdev_priv *np = netdev_priv(netdev);
5269 	struct ice_vsi *vsi = np->vsi;
5270 
5271 	if (!vsi)
5272 		return;
5273 
5274 	/* Set the flags to synchronize filters.
5275 	 * ndo_set_rx_mode may be triggered even without a change in netdev
5276 	 * flags.
5277 	 */
5278 	set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
5279 	set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
5280 	set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags);
5281 
5282 	/* schedule our worker thread which will take care of
5283 	 * applying the new filter changes
5284 	 */
5285 	ice_service_task_schedule(vsi->back);
5286 }
5287 
5288 /**
5289  * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
5290  * @netdev: network interface device structure
5291  * @queue_index: Queue ID
5292  * @maxrate: maximum bandwidth in Mbps
5293  */
5294 static int
5295 ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
5296 {
5297 	struct ice_netdev_priv *np = netdev_priv(netdev);
5298 	struct ice_vsi *vsi = np->vsi;
5299 	enum ice_status status;
5300 	u16 q_handle;
5301 	u8 tc;
5302 
5303 	/* Validate maxrate requested is within permitted range */
5304 	if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
5305 		netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
5306 			   maxrate, queue_index);
5307 		return -EINVAL;
5308 	}
5309 
5310 	q_handle = vsi->tx_rings[queue_index]->q_handle;
5311 	tc = ice_dcb_get_tc(vsi, queue_index);
5312 
5313 	/* Set BW back to default when the user sets maxrate to 0 */
5314 	if (!maxrate)
5315 		status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
5316 					       q_handle, ICE_MAX_BW);
5317 	else
5318 		status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
5319 					  q_handle, ICE_MAX_BW, maxrate * 1000);
5320 	if (status) {
5321 		netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
5322 			   ice_stat_str(status));
5323 		return -EIO;
5324 	}
5325 
5326 	return 0;
5327 }
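
/* Usage note (illustrative): this NDO backs the per-queue tx_maxrate sysfs
 * attribute, so the command below would land here with maxrate = 500 (Mbps):
 *
 *   echo 500 > /sys/class/net/<ifname>/queues/tx-0/tx_maxrate
 *
 * The scheduler works in Kbps, which is why the range check above divides
 * ICE_SCHED_MAX_BW by 1000 and the configured rate is maxrate * 1000.
 */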
5328 
5329 /**
5330  * ice_fdb_add - add an entry to the hardware database
5331  * @ndm: the input from the stack
5332  * @tb: pointer to array of nladdr (unused)
5333  * @dev: the net device pointer
5334  * @addr: the MAC address entry being added
5335  * @vid: VLAN ID
5336  * @flags: instructions from stack about fdb operation
5337  * @extack: netlink extended ack
5338  */
5339 static int
5340 ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
5341 	    struct net_device *dev, const unsigned char *addr, u16 vid,
5342 	    u16 flags, struct netlink_ext_ack __always_unused *extack)
5343 {
5344 	int err;
5345 
5346 	if (vid) {
5347 		netdev_err(dev, "VLANs aren't supported yet for dev_uc|mc_add()\n");
5348 		return -EINVAL;
5349 	}
5350 	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
5351 		netdev_err(dev, "FDB only supports static addresses\n");
5352 		return -EINVAL;
5353 	}
5354 
5355 	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
5356 		err = dev_uc_add_excl(dev, addr);
5357 	else if (is_multicast_ether_addr(addr))
5358 		err = dev_mc_add_excl(dev, addr);
5359 	else
5360 		err = -EINVAL;
5361 
5362 	/* Only return duplicate errors if NLM_F_EXCL is set */
5363 	if (err == -EEXIST && !(flags & NLM_F_EXCL))
5364 		err = 0;
5365 
5366 	return err;
5367 }
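
/* Illustrative userspace trigger for the FDB callbacks (iproute2):
 *
 *   bridge fdb add 01:00:5e:00:00:42 dev <ifname> self permanent
 *
 * Only static (NUD_PERMANENT) entries are accepted by ice_fdb_add(), per the
 * ndm_state check above.
 */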
5368 
5369 /**
5370  * ice_fdb_del - delete an entry from the hardware database
5371  * @ndm: the input from the stack
5372  * @tb: pointer to array of nladdr (unused)
5373  * @dev: the net device pointer
5374  * @addr: the MAC address entry being removed
5375  * @vid: VLAN ID
5376  */
5377 static int
5378 ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
5379 	    struct net_device *dev, const unsigned char *addr,
5380 	    __always_unused u16 vid)
5381 {
5382 	int err;
5383 
5384 	if (ndm->ndm_state & NUD_PERMANENT) {
5385 		netdev_err(dev, "FDB only supports static addresses\n");
5386 		return -EINVAL;
5387 	}
5388 
5389 	if (is_unicast_ether_addr(addr))
5390 		err = dev_uc_del(dev, addr);
5391 	else if (is_multicast_ether_addr(addr))
5392 		err = dev_mc_del(dev, addr);
5393 	else
5394 		err = -EINVAL;
5395 
5396 	return err;
5397 }
5398 
5399 /**
5400  * ice_set_features - set the netdev feature flags
5401  * @netdev: ptr to the netdev being adjusted
5402  * @features: the feature set that the stack is suggesting
5403  */
5404 static int
5405 ice_set_features(struct net_device *netdev, netdev_features_t features)
5406 {
5407 	struct ice_netdev_priv *np = netdev_priv(netdev);
5408 	struct ice_vsi *vsi = np->vsi;
5409 	struct ice_pf *pf = vsi->back;
5410 	int ret = 0;
5411 
5412 	/* Don't set any netdev advanced features with device in Safe Mode */
5413 	if (ice_is_safe_mode(vsi->back)) {
5414 		dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
5415 		return ret;
5416 	}
5417 
5418 	/* Do not change setting during reset */
5419 	if (ice_is_reset_in_progress(pf->state)) {
5420 		dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features is temporarily unavailable.\n");
5421 		return -EBUSY;
5422 	}
5423 
5424 	/* Multiple features can be changed in one call so keep features in
5425 	 * separate if/else statements to guarantee each feature is checked
5426 	 */
5427 	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
5428 		ice_vsi_manage_rss_lut(vsi, true);
5429 	else if (!(features & NETIF_F_RXHASH) &&
5430 		 netdev->features & NETIF_F_RXHASH)
5431 		ice_vsi_manage_rss_lut(vsi, false);
5432 
5433 	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
5434 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5435 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5436 	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
5437 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
5438 		ret = ice_vsi_manage_vlan_stripping(vsi, false);
5439 
5440 	if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
5441 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5442 		ret = ice_vsi_manage_vlan_insertion(vsi);
5443 	else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
5444 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
5445 		ret = ice_vsi_manage_vlan_insertion(vsi);
5446 
5447 	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5448 	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5449 		ret = ice_cfg_vlan_pruning(vsi, true, false);
5450 	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
5451 		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
5452 		ret = ice_cfg_vlan_pruning(vsi, false, false);
5453 
5454 	if ((features & NETIF_F_NTUPLE) &&
5455 	    !(netdev->features & NETIF_F_NTUPLE)) {
5456 		ice_vsi_manage_fdir(vsi, true);
5457 		ice_init_arfs(vsi);
5458 	} else if (!(features & NETIF_F_NTUPLE) &&
5459 		 (netdev->features & NETIF_F_NTUPLE)) {
5460 		ice_vsi_manage_fdir(vsi, false);
5461 		ice_clear_arfs(vsi);
5462 	}
5463 
5464 	return ret;
5465 }
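
/* Usage note (illustrative): these feature bits are typically toggled via
 * ethtool, e.g.:
 *
 *   ethtool -K <ifname> rxhash on rxvlan off txvlan off ntuple on
 *
 * The core passes the full requested feature set, which is why each branch
 * above compares it against the current netdev->features to find the delta.
 */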
5466 
5467 /**
5468  * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
5469  * @vsi: VSI to setup VLAN properties for
5470  */
5471 static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
5472 {
5473 	int ret = 0;
5474 
5475 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
5476 		ret = ice_vsi_manage_vlan_stripping(vsi, true);
5477 	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
5478 		ret = ice_vsi_manage_vlan_insertion(vsi);
5479 
5480 	return ret;
5481 }
5482 
5483 /**
5484  * ice_vsi_cfg - Setup the VSI
5485  * @vsi: the VSI being configured
5486  *
5487  * Return 0 on success and negative value on error
5488  */
5489 int ice_vsi_cfg(struct ice_vsi *vsi)
5490 {
5491 	int err;
5492 
5493 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
5494 		ice_set_rx_mode(vsi->netdev);
5495 
5496 		err = ice_vsi_vlan_setup(vsi);
5497 		if (err)
5498 			return err;
5499 	}
5500 	ice_vsi_cfg_dcb_rings(vsi);
5501 
5502 	err = ice_vsi_cfg_lan_txqs(vsi);
5503 	if (!err && ice_is_xdp_ena_vsi(vsi))
5504 		err = ice_vsi_cfg_xdp_txqs(vsi);
5505 	if (!err)
5506 		err = ice_vsi_cfg_rxqs(vsi);
5507 
5508 	return err;
5509 }
5510 
5511 /* THEORY OF MODERATION:
5512  * The below code creates custom DIM profiles for use by this driver, because
5513  * the ice driver hardware works differently than the hardware that DIMLIB was
5514  * originally made for. ice hardware doesn't have packet count limits that
5515  * can trigger an interrupt, but it *does* have interrupt rate limit support,
5516  * and this code adds that capability to be used by the driver when it's using
5517  * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
5518  * for how to "respond" to traffic and interrupts, so this driver uses a
5519  * slightly different set of moderation parameters to get best performance.
5520  */
5521 struct ice_dim {
5522 	/* the throttle rate for interrupts: the worst-case delay before an
5523 	 * initial interrupt fires. The value is stored in microseconds.
5524 	 */
5525 	u16 itr;
5526 	/* the rate limit for interrupts, which caps the interrupt rate that a
5527 	 * small ITR would otherwise allow. E.g. a 2us ITR could yield as much
5528 	 * as 500,000 interrupts per second, but a 10us rate limit caps that at
5529 	 * 100,000 interrupts per second. The value is stored in
5530 	 * microseconds.
5531 	 */
5532 	u16 intrl;
5533 };
5534 
5535 /* Make a different profile for Rx that doesn't allow quite such aggressive
5536  * moderation at the high end (it maxes out at 128us, or about 8k interrupts
5537  * a second). The INTRL/rate parameters here are only useful to cap small
5538  * ITR values, which is why for larger ITRs - like 128, which can only
5539  * generate 8k interrupts per second anyway - there is no point in rate
5540  * limiting, so the values are set to zero. The rate limit values do affect
5541  * latency, and so must be kept reasonably small so as not to impact latency-sensitive tests.
5542  */
5543 static const struct ice_dim rx_profile[] = {
5544 	{2, 10},
5545 	{8, 16},
5546 	{32, 0},
5547 	{96, 0},
5548 	{128, 0}
5549 };
5550 
5551 /* The transmit profile, which has the same sorts of values
5552  * as the Rx profile above.
5553  */
5554 static const struct ice_dim tx_profile[] = {
5555 	{2, 10},
5556 	{8, 16},
5557 	{64, 0},
5558 	{128, 0},
5559 	{256, 0}
5560 };
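
/* A minimal sketch (not part of the driver) of how a profile entry bounds
 * the interrupt rate: whichever of itr/intrl is larger sets the worst-case
 * minimum interval between interrupts, so the ceiling is roughly
 * USEC_PER_SEC / max(itr, intrl). The helper name below is hypothetical.
 */
static inline u32 ice_dim_profile_max_ints(const struct ice_dim *profile)
{
	u16 delay_us = max(profile->itr, profile->intrl);

	/* a zero delay means the profile imposes no meaningful ceiling */
	return delay_us ? USEC_PER_SEC / delay_us : U32_MAX;
}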
5561 
5562 static void ice_tx_dim_work(struct work_struct *work)
5563 {
5564 	struct ice_ring_container *rc;
5565 	struct ice_q_vector *q_vector;
5566 	struct dim *dim;
5567 	u16 itr, intrl;
5568 
5569 	dim = container_of(work, struct dim, work);
5570 	rc = container_of(dim, struct ice_ring_container, dim);
5571 	q_vector = container_of(rc, struct ice_q_vector, tx);
5572 
5573 	if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
5574 		dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
5575 
5576 	/* look up the values in our local table */
5577 	itr = tx_profile[dim->profile_ix].itr;
5578 	intrl = tx_profile[dim->profile_ix].intrl;
5579 
5580 	ice_trace(tx_dim_work, q_vector, dim);
5581 	ice_write_itr(rc, itr);
5582 	ice_write_intrl(q_vector, intrl);
5583 
5584 	dim->state = DIM_START_MEASURE;
5585 }
5586 
5587 static void ice_rx_dim_work(struct work_struct *work)
5588 {
5589 	struct ice_ring_container *rc;
5590 	struct ice_q_vector *q_vector;
5591 	struct dim *dim;
5592 	u16 itr, intrl;
5593 
5594 	dim = container_of(work, struct dim, work);
5595 	rc = container_of(dim, struct ice_ring_container, dim);
5596 	q_vector = container_of(rc, struct ice_q_vector, rx);
5597 
5598 	if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
5599 		dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
5600 
5601 	/* look up the values in our local table */
5602 	itr = rx_profile[dim->profile_ix].itr;
5603 	intrl = rx_profile[dim->profile_ix].intrl;
5604 
5605 	ice_trace(rx_dim_work, q_vector, dim);
5606 	ice_write_itr(rc, itr);
5607 	ice_write_intrl(q_vector, intrl);
5608 
5609 	dim->state = DIM_START_MEASURE;
5610 }
5611 
5612 /**
5613  * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5614  * @vsi: the VSI being configured
5615  */
5616 static void ice_napi_enable_all(struct ice_vsi *vsi)
5617 {
5618 	int q_idx;
5619 
5620 	if (!vsi->netdev)
5621 		return;
5622 
5623 	ice_for_each_q_vector(vsi, q_idx) {
5624 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
5625 
5626 		INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
5627 		q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5628 
5629 		INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
5630 		q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
5631 
5632 		if (q_vector->rx.ring || q_vector->tx.ring)
5633 			napi_enable(&q_vector->napi);
5634 	}
5635 }
5636 
5637 /**
5638  * ice_up_complete - Finish the last steps of bringing up a connection
5639  * @vsi: The VSI being configured
5640  *
5641  * Return 0 on success and negative value on error
5642  */
5643 static int ice_up_complete(struct ice_vsi *vsi)
5644 {
5645 	struct ice_pf *pf = vsi->back;
5646 	int err;
5647 
5648 	ice_vsi_cfg_msix(vsi);
5649 
5650 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
5651 	 * Tx queue group list was configured and the context bits were
5652 	 * programmed using ice_vsi_cfg_txqs
5653 	 */
5654 	err = ice_vsi_start_all_rx_rings(vsi);
5655 	if (err)
5656 		return err;
5657 
5658 	clear_bit(ICE_VSI_DOWN, vsi->state);
5659 	ice_napi_enable_all(vsi);
5660 	ice_vsi_ena_irq(vsi);
5661 
5662 	if (vsi->port_info &&
5663 	    (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
5664 	    vsi->netdev && vsi->type == ICE_VSI_PF) {
5665 		ice_print_link_msg(vsi, true);
5666 		netif_tx_start_all_queues(vsi->netdev);
5667 		netif_carrier_on(vsi->netdev);
5668 	}
5669 
5670 	/* Perform an initial read of the statistics registers now to
5671 	 * set the baseline so counters are ready when interface is up
5672 	 */
5673 	ice_update_eth_stats(vsi);
5674 
5675 	if (vsi->type == ICE_VSI_PF)
5676 		ice_service_task_schedule(pf);
5677 
5678 	return 0;
5679 }
5680 
5681 /**
5682  * ice_up - Bring the connection back up after being down
5683  * @vsi: VSI being configured
5684  */
5685 int ice_up(struct ice_vsi *vsi)
5686 {
5687 	int err;
5688 
5689 	err = ice_vsi_cfg(vsi);
5690 	if (!err)
5691 		err = ice_up_complete(vsi);
5692 
5693 	return err;
5694 }
5695 
5696 /**
5697  * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
5698  * @ring: Tx or Rx ring to read stats from
5699  * @pkts: packets stats counter
5700  * @bytes: bytes stats counter
5701  *
5702  * This function fetches stats from the ring using the atomic operations
5703  * that need to be performed to read u64 values on 32-bit machines.
5704  */
5705 static void
5706 ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
5707 {
5708 	unsigned int start;
5709 	*pkts = 0;
5710 	*bytes = 0;
5711 
5712 	if (!ring)
5713 		return;
5714 	do {
5715 		start = u64_stats_fetch_begin_irq(&ring->syncp);
5716 		*pkts = ring->stats.pkts;
5717 		*bytes = ring->stats.bytes;
5718 	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
5719 }
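
/* For context, a sketch of the writer side of the same u64_stats seqcount
 * protocol (hypothetical helper; the real updates happen in the ring hot
 * path). On 64-bit kernels these primitives largely compile away; on 32-bit
 * they make the two halves of each u64 appear atomic to the reader above.
 */
static inline void ice_ring_stats_add(struct ice_ring *ring,
				      u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&ring->syncp);
	ring->stats.pkts += pkts;
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);
}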
5720 
5721 /**
5722  * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
5723  * @vsi: the VSI to be updated
5724  * @rings: rings to work on
5725  * @count: number of rings
5726  */
5727 static void
5728 ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
5729 			     u16 count)
5730 {
5731 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5732 	u16 i;
5733 
5734 	for (i = 0; i < count; i++) {
5735 		struct ice_ring *ring;
5736 		u64 pkts, bytes;
5737 
5738 		ring = READ_ONCE(rings[i]);
5739 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5740 		vsi_stats->tx_packets += pkts;
5741 		vsi_stats->tx_bytes += bytes;
5742 		vsi->tx_restart += ring->tx_stats.restart_q;
5743 		vsi->tx_busy += ring->tx_stats.tx_busy;
5744 		vsi->tx_linearize += ring->tx_stats.tx_linearize;
5745 	}
5746 }
5747 
5748 /**
5749  * ice_update_vsi_ring_stats - Update VSI stats counters
5750  * @vsi: the VSI to be updated
5751  */
5752 static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
5753 {
5754 	struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
5755 	u64 pkts, bytes;
5756 	int i;
5757 
5758 	/* reset netdev stats */
5759 	vsi_stats->tx_packets = 0;
5760 	vsi_stats->tx_bytes = 0;
5761 	vsi_stats->rx_packets = 0;
5762 	vsi_stats->rx_bytes = 0;
5763 
5764 	/* reset non-netdev (extended) stats */
5765 	vsi->tx_restart = 0;
5766 	vsi->tx_busy = 0;
5767 	vsi->tx_linearize = 0;
5768 	vsi->rx_buf_failed = 0;
5769 	vsi->rx_page_failed = 0;
5770 
5771 	rcu_read_lock();
5772 
5773 	/* update Tx rings counters */
5774 	ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
5775 
5776 	/* update Rx rings counters */
5777 	ice_for_each_rxq(vsi, i) {
5778 		struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
5779 
5780 		ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
5781 		vsi_stats->rx_packets += pkts;
5782 		vsi_stats->rx_bytes += bytes;
5783 		vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
5784 		vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
5785 	}
5786 
5787 	/* update XDP Tx rings counters */
5788 	if (ice_is_xdp_ena_vsi(vsi))
5789 		ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
5790 					     vsi->num_xdp_txq);
5791 
5792 	rcu_read_unlock();
5793 }
5794 
5795 /**
5796  * ice_update_vsi_stats - Update VSI stats counters
5797  * @vsi: the VSI to be updated
5798  */
5799 void ice_update_vsi_stats(struct ice_vsi *vsi)
5800 {
5801 	struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
5802 	struct ice_eth_stats *cur_es = &vsi->eth_stats;
5803 	struct ice_pf *pf = vsi->back;
5804 
5805 	if (test_bit(ICE_VSI_DOWN, vsi->state) ||
5806 	    test_bit(ICE_CFG_BUSY, pf->state))
5807 		return;
5808 
5809 	/* get stats as recorded by Tx/Rx rings */
5810 	ice_update_vsi_ring_stats(vsi);
5811 
5812 	/* get VSI stats as recorded by the hardware */
5813 	ice_update_eth_stats(vsi);
5814 
5815 	cur_ns->tx_errors = cur_es->tx_errors;
5816 	cur_ns->rx_dropped = cur_es->rx_discards;
5817 	cur_ns->tx_dropped = cur_es->tx_discards;
5818 	cur_ns->multicast = cur_es->rx_multicast;
5819 
5820 	/* update some more netdev stats if this is main VSI */
5821 	if (vsi->type == ICE_VSI_PF) {
5822 		cur_ns->rx_crc_errors = pf->stats.crc_errors;
5823 		cur_ns->rx_errors = pf->stats.crc_errors +
5824 				    pf->stats.illegal_bytes +
5825 				    pf->stats.rx_len_errors +
5826 				    pf->stats.rx_undersize +
5827 				    pf->hw_csum_rx_error +
5828 				    pf->stats.rx_jabber +
5829 				    pf->stats.rx_fragments +
5830 				    pf->stats.rx_oversize;
5831 		cur_ns->rx_length_errors = pf->stats.rx_len_errors;
5832 		/* record drops from the port level */
5833 		cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
5834 	}
5835 }
5836 
5837 /**
5838  * ice_update_pf_stats - Update PF port stats counters
5839  * @pf: PF whose stats need to be updated
5840  */
5841 void ice_update_pf_stats(struct ice_pf *pf)
5842 {
5843 	struct ice_hw_port_stats *prev_ps, *cur_ps;
5844 	struct ice_hw *hw = &pf->hw;
5845 	u16 fd_ctr_base;
5846 	u8 port;
5847 
5848 	port = hw->port_info->lport;
5849 	prev_ps = &pf->stats_prev;
5850 	cur_ps = &pf->stats;
5851 
5852 	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
5853 			  &prev_ps->eth.rx_bytes,
5854 			  &cur_ps->eth.rx_bytes);
5855 
5856 	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
5857 			  &prev_ps->eth.rx_unicast,
5858 			  &cur_ps->eth.rx_unicast);
5859 
5860 	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
5861 			  &prev_ps->eth.rx_multicast,
5862 			  &cur_ps->eth.rx_multicast);
5863 
5864 	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
5865 			  &prev_ps->eth.rx_broadcast,
5866 			  &cur_ps->eth.rx_broadcast);
5867 
5868 	ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
5869 			  &prev_ps->eth.rx_discards,
5870 			  &cur_ps->eth.rx_discards);
5871 
5872 	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
5873 			  &prev_ps->eth.tx_bytes,
5874 			  &cur_ps->eth.tx_bytes);
5875 
5876 	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
5877 			  &prev_ps->eth.tx_unicast,
5878 			  &cur_ps->eth.tx_unicast);
5879 
5880 	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
5881 			  &prev_ps->eth.tx_multicast,
5882 			  &cur_ps->eth.tx_multicast);
5883 
5884 	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
5885 			  &prev_ps->eth.tx_broadcast,
5886 			  &cur_ps->eth.tx_broadcast);
5887 
5888 	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
5889 			  &prev_ps->tx_dropped_link_down,
5890 			  &cur_ps->tx_dropped_link_down);
5891 
5892 	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
5893 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
5894 
5895 	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
5896 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
5897 
5898 	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
5899 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
5900 
5901 	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
5902 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
5903 
5904 	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
5905 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
5906 
5907 	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
5908 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
5909 
5910 	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
5911 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
5912 
5913 	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
5914 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
5915 
5916 	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
5917 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
5918 
5919 	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
5920 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
5921 
5922 	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
5923 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
5924 
5925 	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
5926 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
5927 
5928 	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
5929 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
5930 
5931 	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
5932 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
5933 
5934 	fd_ctr_base = hw->fd_ctr_base;
5935 
5936 	ice_stat_update40(hw,
5937 			  GLSTAT_FD_CNT0L(ICE_FD_SB_STAT_IDX(fd_ctr_base)),
5938 			  pf->stat_prev_loaded, &prev_ps->fd_sb_match,
5939 			  &cur_ps->fd_sb_match);
5940 	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
5941 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
5942 
5943 	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
5944 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
5945 
5946 	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
5947 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
5948 
5949 	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
5950 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
5951 
5952 	ice_update_dcb_stats(pf);
5953 
5954 	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
5955 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
5956 
5957 	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
5958 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
5959 
5960 	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
5961 			  &prev_ps->mac_local_faults,
5962 			  &cur_ps->mac_local_faults);
5963 
5964 	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
5965 			  &prev_ps->mac_remote_faults,
5966 			  &cur_ps->mac_remote_faults);
5967 
5968 	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
5969 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
5970 
5971 	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
5972 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
5973 
5974 	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
5975 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
5976 
5977 	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
5978 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
5979 
5980 	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
5981 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
5982 
5983 	cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0;
5984 
5985 	pf->stat_prev_loaded = true;
5986 }
5987 
5988 /**
5989  * ice_get_stats64 - get statistics for network device structure
5990  * @netdev: network interface device structure
5991  * @stats: main device statistics structure
5992  */
5993 static
5994 void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
5995 {
5996 	struct ice_netdev_priv *np = netdev_priv(netdev);
5997 	struct rtnl_link_stats64 *vsi_stats;
5998 	struct ice_vsi *vsi = np->vsi;
5999 
6000 	vsi_stats = &vsi->net_stats;
6001 
6002 	if (!vsi->num_txq || !vsi->num_rxq)
6003 		return;
6004 
6005 	/* netdev packet/byte stats come from the ring counters and are
6006 	 * obtained by summing them up (done by ice_update_vsi_ring_stats).
6007 	 * But only call the update routine and read the registers if the VSI
6008 	 * is not down.
6009 	 */
6010 	if (!test_bit(ICE_VSI_DOWN, vsi->state))
6011 		ice_update_vsi_ring_stats(vsi);
6012 	stats->tx_packets = vsi_stats->tx_packets;
6013 	stats->tx_bytes = vsi_stats->tx_bytes;
6014 	stats->rx_packets = vsi_stats->rx_packets;
6015 	stats->rx_bytes = vsi_stats->rx_bytes;
6016 
6017 	/* The rest of the stats can be read from the hardware but instead we
6018 	 * just return values that the watchdog task has already obtained from
6019 	 * the hardware.
6020 	 */
6021 	stats->multicast = vsi_stats->multicast;
6022 	stats->tx_errors = vsi_stats->tx_errors;
6023 	stats->tx_dropped = vsi_stats->tx_dropped;
6024 	stats->rx_errors = vsi_stats->rx_errors;
6025 	stats->rx_dropped = vsi_stats->rx_dropped;
6026 	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
6027 	stats->rx_length_errors = vsi_stats->rx_length_errors;
6028 }
6029 
6030 /**
6031  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
6032  * @vsi: VSI having NAPI disabled
6033  */
6034 static void ice_napi_disable_all(struct ice_vsi *vsi)
6035 {
6036 	int q_idx;
6037 
6038 	if (!vsi->netdev)
6039 		return;
6040 
6041 	ice_for_each_q_vector(vsi, q_idx) {
6042 		struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
6043 
6044 		if (q_vector->rx.ring || q_vector->tx.ring)
6045 			napi_disable(&q_vector->napi);
6046 
6047 		cancel_work_sync(&q_vector->tx.dim.work);
6048 		cancel_work_sync(&q_vector->rx.dim.work);
6049 	}
6050 }
6051 
6052 /**
6053  * ice_down - Shutdown the connection
6054  * @vsi: The VSI being stopped
6055  */
6056 int ice_down(struct ice_vsi *vsi)
6057 {
6058 	int i, tx_err, rx_err, link_err = 0;
6059 
6060 	/* Caller of this function is expected to set the
6061 	 * vsi->state ICE_DOWN bit
6062 	 */
6063 	if (vsi->netdev) {
6064 		netif_carrier_off(vsi->netdev);
6065 		netif_tx_disable(vsi->netdev);
6066 	}
6067 
6068 	ice_vsi_dis_irq(vsi);
6069 
6070 	tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
6071 	if (tx_err)
6072 		netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
6073 			   vsi->vsi_num, tx_err);
6074 	if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
6075 		tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
6076 		if (tx_err)
6077 			netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
6078 				   vsi->vsi_num, tx_err);
6079 	}
6080 
6081 	rx_err = ice_vsi_stop_all_rx_rings(vsi);
6082 	if (rx_err)
6083 		netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
6084 			   vsi->vsi_num, rx_err);
6085 
6086 	ice_napi_disable_all(vsi);
6087 
6088 	if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
6089 		link_err = ice_force_phys_link_state(vsi, false);
6090 		if (link_err)
6091 			netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
6092 				   vsi->vsi_num, link_err);
6093 	}
6094 
6095 	ice_for_each_txq(vsi, i)
6096 		ice_clean_tx_ring(vsi->tx_rings[i]);
6097 
6098 	ice_for_each_rxq(vsi, i)
6099 		ice_clean_rx_ring(vsi->rx_rings[i]);
6100 
6101 	if (tx_err || rx_err || link_err) {
6102 		netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
6103 			   vsi->vsi_num, vsi->vsw->sw_id);
6104 		return -EIO;
6105 	}
6106 
6107 	return 0;
6108 }
6109 
6110 /**
6111  * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
6112  * @vsi: VSI having resources allocated
6113  *
6114  * Return 0 on success, negative on failure
6115  */
6116 int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
6117 {
6118 	int i, err = 0;
6119 
6120 	if (!vsi->num_txq) {
6121 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
6122 			vsi->vsi_num);
6123 		return -EINVAL;
6124 	}
6125 
6126 	ice_for_each_txq(vsi, i) {
6127 		struct ice_ring *ring = vsi->tx_rings[i];
6128 
6129 		if (!ring)
6130 			return -EINVAL;
6131 
6132 		ring->netdev = vsi->netdev;
6133 		err = ice_setup_tx_ring(ring);
6134 		if (err)
6135 			break;
6136 	}
6137 
6138 	return err;
6139 }
6140 
6141 /**
6142  * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
6143  * @vsi: VSI having resources allocated
6144  *
6145  * Return 0 on success, negative on failure
6146  */
6147 int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
6148 {
6149 	int i, err = 0;
6150 
6151 	if (!vsi->num_rxq) {
6152 		dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
6153 			vsi->vsi_num);
6154 		return -EINVAL;
6155 	}
6156 
6157 	ice_for_each_rxq(vsi, i) {
6158 		struct ice_ring *ring = vsi->rx_rings[i];
6159 
6160 		if (!ring)
6161 			return -EINVAL;
6162 
6163 		ring->netdev = vsi->netdev;
6164 		err = ice_setup_rx_ring(ring);
6165 		if (err)
6166 			break;
6167 	}
6168 
6169 	return err;
6170 }
6171 
6172 /**
6173  * ice_vsi_open_ctrl - open control VSI for use
6174  * @vsi: the VSI to open
6175  *
6176  * Initialization of the Control VSI
6177  *
6178  * Returns 0 on success, negative value on error
6179  */
6180 int ice_vsi_open_ctrl(struct ice_vsi *vsi)
6181 {
6182 	char int_name[ICE_INT_NAME_STR_LEN];
6183 	struct ice_pf *pf = vsi->back;
6184 	struct device *dev;
6185 	int err;
6186 
6187 	dev = ice_pf_to_dev(pf);
6188 	/* allocate descriptors */
6189 	err = ice_vsi_setup_tx_rings(vsi);
6190 	if (err)
6191 		goto err_setup_tx;
6192 
6193 	err = ice_vsi_setup_rx_rings(vsi);
6194 	if (err)
6195 		goto err_setup_rx;
6196 
6197 	err = ice_vsi_cfg(vsi);
6198 	if (err)
6199 		goto err_setup_rx;
6200 
6201 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl",
6202 		 dev_driver_string(dev), dev_name(dev));
6203 	err = ice_vsi_req_irq_msix(vsi, int_name);
6204 	if (err)
6205 		goto err_setup_rx;
6206 
6207 	ice_vsi_cfg_msix(vsi);
6208 
6209 	err = ice_vsi_start_all_rx_rings(vsi);
6210 	if (err)
6211 		goto err_up_complete;
6212 
6213 	clear_bit(ICE_VSI_DOWN, vsi->state);
6214 	ice_vsi_ena_irq(vsi);
6215 
6216 	return 0;
6217 
6218 err_up_complete:
6219 	ice_down(vsi);
6220 err_setup_rx:
6221 	ice_vsi_free_rx_rings(vsi);
6222 err_setup_tx:
6223 	ice_vsi_free_tx_rings(vsi);
6224 
6225 	return err;
6226 }
6227 
6228 /**
6229  * ice_vsi_open - Called when a network interface is made active
6230  * @vsi: the VSI to open
6231  *
6232  * Initialization of the VSI
6233  *
6234  * Returns 0 on success, negative value on error
6235  */
6236 static int ice_vsi_open(struct ice_vsi *vsi)
6237 {
6238 	char int_name[ICE_INT_NAME_STR_LEN];
6239 	struct ice_pf *pf = vsi->back;
6240 	int err;
6241 
6242 	/* allocate descriptors */
6243 	err = ice_vsi_setup_tx_rings(vsi);
6244 	if (err)
6245 		goto err_setup_tx;
6246 
6247 	err = ice_vsi_setup_rx_rings(vsi);
6248 	if (err)
6249 		goto err_setup_rx;
6250 
6251 	err = ice_vsi_cfg(vsi);
6252 	if (err)
6253 		goto err_setup_rx;
6254 
6255 	snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
6256 		 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name);
6257 	err = ice_vsi_req_irq_msix(vsi, int_name);
6258 	if (err)
6259 		goto err_setup_rx;
6260 
6261 	/* Notify the stack of the actual queue counts. */
6262 	err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
6263 	if (err)
6264 		goto err_set_qs;
6265 
6266 	err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
6267 	if (err)
6268 		goto err_set_qs;
6269 
6270 	err = ice_up_complete(vsi);
6271 	if (err)
6272 		goto err_up_complete;
6273 
6274 	return 0;
6275 
6276 err_up_complete:
6277 	ice_down(vsi);
6278 err_set_qs:
6279 	ice_vsi_free_irq(vsi);
6280 err_setup_rx:
6281 	ice_vsi_free_rx_rings(vsi);
6282 err_setup_tx:
6283 	ice_vsi_free_tx_rings(vsi);
6284 
6285 	return err;
6286 }
6287 
6288 /**
6289  * ice_vsi_release_all - Delete all VSIs
6290  * @pf: PF from which all VSIs are being removed
6291  */
6292 static void ice_vsi_release_all(struct ice_pf *pf)
6293 {
6294 	int err, i;
6295 
6296 	if (!pf->vsi)
6297 		return;
6298 
6299 	ice_for_each_vsi(pf, i) {
6300 		if (!pf->vsi[i])
6301 			continue;
6302 
6303 		err = ice_vsi_release(pf->vsi[i]);
6304 		if (err)
6305 			dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
6306 				i, err, pf->vsi[i]->vsi_num);
6307 	}
6308 }
6309 
6310 /**
6311  * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
6312  * @pf: pointer to the PF instance
6313  * @type: VSI type to rebuild
6314  *
6315  * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
6316  */
6317 static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
6318 {
6319 	struct device *dev = ice_pf_to_dev(pf);
6320 	enum ice_status status;
6321 	int i, err;
6322 
6323 	ice_for_each_vsi(pf, i) {
6324 		struct ice_vsi *vsi = pf->vsi[i];
6325 
6326 		if (!vsi || vsi->type != type)
6327 			continue;
6328 
6329 		/* rebuild the VSI */
6330 		err = ice_vsi_rebuild(vsi, true);
6331 		if (err) {
6332 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
6333 				err, vsi->idx, ice_vsi_type_str(type));
6334 			return err;
6335 		}
6336 
6337 		/* replay filters for the VSI */
6338 		status = ice_replay_vsi(&pf->hw, vsi->idx);
6339 		if (status) {
6340 			dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
6341 				ice_stat_str(status), vsi->idx,
6342 				ice_vsi_type_str(type));
6343 			return -EIO;
6344 		}
6345 
6346 		/* Re-map HW VSI number, using VSI handle that has been
6347 		 * previously validated in ice_replay_vsi() call above
6348 		 */
6349 		vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
6350 
6351 		/* enable the VSI */
6352 		err = ice_ena_vsi(vsi, false);
6353 		if (err) {
6354 			dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
6355 				err, vsi->idx, ice_vsi_type_str(type));
6356 			return err;
6357 		}
6358 
6359 		dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx,
6360 			 ice_vsi_type_str(type));
6361 	}
6362 
6363 	return 0;
6364 }
6365 
6366 /**
6367  * ice_update_pf_netdev_link - Update PF netdev link status
6368  * @pf: pointer to the PF instance
6369  */
6370 static void ice_update_pf_netdev_link(struct ice_pf *pf)
6371 {
6372 	bool link_up;
6373 	int i;
6374 
6375 	ice_for_each_vsi(pf, i) {
6376 		struct ice_vsi *vsi = pf->vsi[i];
6377 
6378 		if (!vsi || vsi->type != ICE_VSI_PF)
6379 			return;
6380 
6381 		ice_get_link_status(pf->vsi[i]->port_info, &link_up);
6382 		if (link_up) {
6383 			netif_carrier_on(pf->vsi[i]->netdev);
6384 			netif_tx_wake_all_queues(pf->vsi[i]->netdev);
6385 		} else {
6386 			netif_carrier_off(pf->vsi[i]->netdev);
6387 			netif_tx_stop_all_queues(pf->vsi[i]->netdev);
6388 		}
6389 	}
6390 }
6391 
6392 /**
6393  * ice_rebuild - rebuild after reset
6394  * @pf: PF to rebuild
6395  * @reset_type: type of reset
6396  *
6397  * Do not rebuild VF VSIs in this flow because that is already handled via
6398  * ice_reset_all_vfs(). This is because the requirements for resetting a VF
6399  * after a PFR/CORER/GLOBR/etc. are different from the normal flow. Also, we
6400  * don't want to reset/rebuild the VF VSIs twice.
6401  */
6402 static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
6403 {
6404 	struct device *dev = ice_pf_to_dev(pf);
6405 	struct ice_hw *hw = &pf->hw;
6406 	enum ice_status ret;
6407 	int err;
6408 
6409 	if (test_bit(ICE_DOWN, pf->state))
6410 		goto clear_recovery;
6411 
6412 	dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
6413 
6414 	ret = ice_init_all_ctrlq(hw);
6415 	if (ret) {
6416 		dev_err(dev, "control queues init failed %s\n",
6417 			ice_stat_str(ret));
6418 		goto err_init_ctrlq;
6419 	}
6420 
6421 	/* if DDP was previously loaded successfully */
6422 	if (!ice_is_safe_mode(pf)) {
6423 		/* reload the SW DB of filter tables */
6424 		if (reset_type == ICE_RESET_PFR)
6425 			ice_fill_blk_tbls(hw);
6426 		else
6427 			/* Reload DDP Package after CORER/GLOBR reset */
6428 			ice_load_pkg(NULL, pf);
6429 	}
6430 
6431 	ret = ice_clear_pf_cfg(hw);
6432 	if (ret) {
6433 		dev_err(dev, "clear PF configuration failed %s\n",
6434 			ice_stat_str(ret));
6435 		goto err_init_ctrlq;
6436 	}
6437 
6438 	if (pf->first_sw->dflt_vsi_ena)
6439 		dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
6440 	/* clear the default VSI configuration if it exists */
6441 	pf->first_sw->dflt_vsi = NULL;
6442 	pf->first_sw->dflt_vsi_ena = false;
6443 
6444 	ice_clear_pxe_mode(hw);
6445 
6446 	ret = ice_init_nvm(hw);
6447 	if (ret) {
6448 		dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
6449 		goto err_init_ctrlq;
6450 	}
6451 
6452 	ret = ice_get_caps(hw);
6453 	if (ret) {
6454 		dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
6455 		goto err_init_ctrlq;
6456 	}
6457 
6458 	ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
6459 	if (ret) {
6460 		dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
6461 		goto err_init_ctrlq;
6462 	}
6463 
6464 	err = ice_sched_init_port(hw->port_info);
6465 	if (err)
6466 		goto err_sched_init_port;
6467 
6468 	/* start misc vector */
6469 	err = ice_req_irq_msix_misc(pf);
6470 	if (err) {
6471 		dev_err(dev, "misc vector setup failed: %d\n", err);
6472 		goto err_sched_init_port;
6473 	}
6474 
6475 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6476 		wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
6477 		if (!rd32(hw, PFQF_FD_SIZE)) {
6478 			u16 unused, guar, b_effort;
6479 
6480 			guar = hw->func_caps.fd_fltr_guar;
6481 			b_effort = hw->func_caps.fd_fltr_best_effort;
6482 
6483 			/* force guaranteed filter pool for PF */
6484 			ice_alloc_fd_guar_item(hw, &unused, guar);
6485 			/* force shared filter pool for PF */
6486 			ice_alloc_fd_shrd_item(hw, &unused, b_effort);
6487 		}
6488 	}
6489 
6490 	if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
6491 		ice_dcb_rebuild(pf);
6492 
6493 	/* If the PF previously had PTP enabled, PTP init needs to happen before
6494 	 * the VSI rebuild; otherwise the PTP link status events will
6495 	 * fail.
6496 	 */
6497 	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
6498 		ice_ptp_init(pf);
6499 
6500 	/* rebuild PF VSI */
6501 	err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
6502 	if (err) {
6503 		dev_err(dev, "PF VSI rebuild failed: %d\n", err);
6504 		goto err_vsi_rebuild;
6505 	}
6506 
6507 	/* If Flow Director is active */
6508 	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
6509 		err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
6510 		if (err) {
6511 			dev_err(dev, "control VSI rebuild failed: %d\n", err);
6512 			goto err_vsi_rebuild;
6513 		}
6514 
6515 		/* replay HW Flow Director recipes */
6516 		if (hw->fdir_prof)
6517 			ice_fdir_replay_flows(hw);
6518 
6519 		/* replay Flow Director filters */
6520 		ice_fdir_replay_fltrs(pf);
6521 
6522 		ice_rebuild_arfs(pf);
6523 	}
6524 
6525 	ice_update_pf_netdev_link(pf);
6526 
6527 	/* tell the firmware we are up */
6528 	ret = ice_send_version(pf);
6529 	if (ret) {
6530 		dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
6531 			ice_stat_str(ret));
6532 		goto err_vsi_rebuild;
6533 	}
6534 
6535 	ice_replay_post(hw);
6536 
6537 	/* if we get here, reset flow is successful */
6538 	clear_bit(ICE_RESET_FAILED, pf->state);
6539 
6540 	ice_plug_aux_dev(pf);
6541 	return;
6542 
6543 err_vsi_rebuild:
6544 err_sched_init_port:
6545 	ice_sched_cleanup_all(hw);
6546 err_init_ctrlq:
6547 	ice_shutdown_all_ctrlq(hw);
6548 	set_bit(ICE_RESET_FAILED, pf->state);
6549 clear_recovery:
6550 	/* set this bit in PF state to control service task scheduling */
6551 	set_bit(ICE_NEEDS_RESTART, pf->state);
6552 	dev_err(dev, "Rebuild failed, unload and reload driver\n");
6553 }
6554 
6555 /**
6556  * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
6557  * @vsi: Pointer to VSI structure
6558  */
6559 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
6560 {
6561 	if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
6562 		return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
6563 	else
6564 		return ICE_RXBUF_3072;
6565 }
6566 
6567 /**
6568  * ice_change_mtu - NDO callback to change the MTU
6569  * @netdev: network interface device structure
6570  * @new_mtu: new value for maximum frame size
6571  *
6572  * Returns 0 on success, negative on failure
6573  */
6574 static int ice_change_mtu(struct net_device *netdev, int new_mtu)
6575 {
6576 	struct ice_netdev_priv *np = netdev_priv(netdev);
6577 	struct ice_vsi *vsi = np->vsi;
6578 	struct ice_pf *pf = vsi->back;
6579 	u8 count = 0;
6580 	int err = 0;
6581 
6582 	if (new_mtu == (int)netdev->mtu) {
6583 		netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
6584 		return 0;
6585 	}
6586 
6587 	if (ice_is_xdp_ena_vsi(vsi)) {
6588 		int frame_size = ice_max_xdp_frame_size(vsi);
6589 
6590 		if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
6591 			netdev_err(netdev, "max MTU for XDP usage is %d\n",
6592 				   frame_size - ICE_ETH_PKT_HDR_PAD);
6593 			return -EINVAL;
6594 		}
6595 	}
6596 
6597 	/* if a reset is in progress, wait for some time for it to complete */
6598 	do {
6599 		if (ice_is_reset_in_progress(pf->state)) {
6600 			count++;
6601 			usleep_range(1000, 2000);
6602 		} else {
6603 			break;
6604 		}
6605 
6606 	} while (count < 100);
6607 
6608 	if (count == 100) {
6609 		netdev_err(netdev, "can't change MTU. Device is busy\n");
6610 		return -EBUSY;
6611 	}
6612 
6613 	netdev->mtu = (unsigned int)new_mtu;
6614 
6615 	/* if VSI is up, bring it down and then back up */
6616 	if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
6617 		err = ice_down(vsi);
6618 		if (err) {
6619 			netdev_err(netdev, "change MTU if_down err %d\n", err);
6620 			return err;
6621 		}
6622 
6623 		err = ice_up(vsi);
6624 		if (err) {
6625 			netdev_err(netdev, "change MTU if_up err %d\n", err);
6626 			return err;
6627 		}
6628 	}
6629 
6630 	netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
6631 	set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
6632 
6633 	return err;
6634 }
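
/* Usage note (illustrative): this NDO is reached via the usual MTU paths,
 * e.g.:
 *
 *   ip link set dev <ifname> mtu 9000
 *
 * Note the VSI is only cycled down/up when it was running; otherwise the
 * new MTU simply takes effect on the next ice_up().
 */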
6635 
6636 /**
6637  * ice_eth_ioctl - Access the hwtstamp interface
6638  * @netdev: network interface device structure
6639  * @ifr: interface request data
6640  * @cmd: ioctl command
6641  */
6642 static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6643 {
6644 	struct ice_netdev_priv *np = netdev_priv(netdev);
6645 	struct ice_pf *pf = np->vsi->back;
6646 
6647 	switch (cmd) {
6648 	case SIOCGHWTSTAMP:
6649 		return ice_ptp_get_ts_config(pf, ifr);
6650 	case SIOCSHWTSTAMP:
6651 		return ice_ptp_set_ts_config(pf, ifr);
6652 	default:
6653 		return -EOPNOTSUPP;
6654 	}
6655 }
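
/* Usage note (illustrative): SIOCGHWTSTAMP/SIOCSHWTSTAMP are the ioctls
 * issued by timestamping tools such as hwstamp_ctl from linuxptp, e.g.:
 *
 *   hwstamp_ctl -i <ifname> -t 1 -r 1
 *
 * which requests hardware Tx timestamping and all-packet Rx timestamping.
 */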
6656 
6657 /**
6658  * ice_aq_str - convert AQ err code to a string
6659  * @aq_err: the AQ error code to convert
6660  */
6661 const char *ice_aq_str(enum ice_aq_err aq_err)
6662 {
6663 	switch (aq_err) {
6664 	case ICE_AQ_RC_OK:
6665 		return "OK";
6666 	case ICE_AQ_RC_EPERM:
6667 		return "ICE_AQ_RC_EPERM";
6668 	case ICE_AQ_RC_ENOENT:
6669 		return "ICE_AQ_RC_ENOENT";
6670 	case ICE_AQ_RC_ENOMEM:
6671 		return "ICE_AQ_RC_ENOMEM";
6672 	case ICE_AQ_RC_EBUSY:
6673 		return "ICE_AQ_RC_EBUSY";
6674 	case ICE_AQ_RC_EEXIST:
6675 		return "ICE_AQ_RC_EEXIST";
6676 	case ICE_AQ_RC_EINVAL:
6677 		return "ICE_AQ_RC_EINVAL";
6678 	case ICE_AQ_RC_ENOSPC:
6679 		return "ICE_AQ_RC_ENOSPC";
6680 	case ICE_AQ_RC_ENOSYS:
6681 		return "ICE_AQ_RC_ENOSYS";
6682 	case ICE_AQ_RC_EMODE:
6683 		return "ICE_AQ_RC_EMODE";
6684 	case ICE_AQ_RC_ENOSEC:
6685 		return "ICE_AQ_RC_ENOSEC";
6686 	case ICE_AQ_RC_EBADSIG:
6687 		return "ICE_AQ_RC_EBADSIG";
6688 	case ICE_AQ_RC_ESVN:
6689 		return "ICE_AQ_RC_ESVN";
6690 	case ICE_AQ_RC_EBADMAN:
6691 		return "ICE_AQ_RC_EBADMAN";
6692 	case ICE_AQ_RC_EBADBUF:
6693 		return "ICE_AQ_RC_EBADBUF";
6694 	}
6695 
6696 	return "ICE_AQ_RC_UNKNOWN";
6697 }
6698 
6699 /**
6700  * ice_stat_str - convert status err code to a string
6701  * @stat_err: the status error code to convert
6702  */
6703 const char *ice_stat_str(enum ice_status stat_err)
6704 {
6705 	switch (stat_err) {
6706 	case ICE_SUCCESS:
6707 		return "OK";
6708 	case ICE_ERR_PARAM:
6709 		return "ICE_ERR_PARAM";
6710 	case ICE_ERR_NOT_IMPL:
6711 		return "ICE_ERR_NOT_IMPL";
6712 	case ICE_ERR_NOT_READY:
6713 		return "ICE_ERR_NOT_READY";
6714 	case ICE_ERR_NOT_SUPPORTED:
6715 		return "ICE_ERR_NOT_SUPPORTED";
6716 	case ICE_ERR_BAD_PTR:
6717 		return "ICE_ERR_BAD_PTR";
6718 	case ICE_ERR_INVAL_SIZE:
6719 		return "ICE_ERR_INVAL_SIZE";
6720 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
6721 		return "ICE_ERR_DEVICE_NOT_SUPPORTED";
6722 	case ICE_ERR_RESET_FAILED:
6723 		return "ICE_ERR_RESET_FAILED";
6724 	case ICE_ERR_FW_API_VER:
6725 		return "ICE_ERR_FW_API_VER";
6726 	case ICE_ERR_NO_MEMORY:
6727 		return "ICE_ERR_NO_MEMORY";
6728 	case ICE_ERR_CFG:
6729 		return "ICE_ERR_CFG";
6730 	case ICE_ERR_OUT_OF_RANGE:
6731 		return "ICE_ERR_OUT_OF_RANGE";
6732 	case ICE_ERR_ALREADY_EXISTS:
6733 		return "ICE_ERR_ALREADY_EXISTS";
6734 	case ICE_ERR_NVM:
6735 		return "ICE_ERR_NVM";
6736 	case ICE_ERR_NVM_CHECKSUM:
6737 		return "ICE_ERR_NVM_CHECKSUM";
6738 	case ICE_ERR_BUF_TOO_SHORT:
6739 		return "ICE_ERR_BUF_TOO_SHORT";
6740 	case ICE_ERR_NVM_BLANK_MODE:
6741 		return "ICE_ERR_NVM_BLANK_MODE";
6742 	case ICE_ERR_IN_USE:
6743 		return "ICE_ERR_IN_USE";
6744 	case ICE_ERR_MAX_LIMIT:
6745 		return "ICE_ERR_MAX_LIMIT";
6746 	case ICE_ERR_RESET_ONGOING:
6747 		return "ICE_ERR_RESET_ONGOING";
6748 	case ICE_ERR_HW_TABLE:
6749 		return "ICE_ERR_HW_TABLE";
6750 	case ICE_ERR_DOES_NOT_EXIST:
6751 		return "ICE_ERR_DOES_NOT_EXIST";
6752 	case ICE_ERR_FW_DDP_MISMATCH:
6753 		return "ICE_ERR_FW_DDP_MISMATCH";
6754 	case ICE_ERR_AQ_ERROR:
6755 		return "ICE_ERR_AQ_ERROR";
6756 	case ICE_ERR_AQ_TIMEOUT:
6757 		return "ICE_ERR_AQ_TIMEOUT";
6758 	case ICE_ERR_AQ_FULL:
6759 		return "ICE_ERR_AQ_FULL";
6760 	case ICE_ERR_AQ_NO_WORK:
6761 		return "ICE_ERR_AQ_NO_WORK";
6762 	case ICE_ERR_AQ_EMPTY:
6763 		return "ICE_ERR_AQ_EMPTY";
6764 	case ICE_ERR_AQ_FW_CRITICAL:
6765 		return "ICE_ERR_AQ_FW_CRITICAL";
6766 	}
6767 
6768 	return "ICE_ERR_UNKNOWN";
6769 }
6770 
6771 /**
6772  * ice_set_rss_lut - Set RSS LUT
6773  * @vsi: Pointer to VSI structure
6774  * @lut: Lookup table
6775  * @lut_size: Lookup table size
6776  *
6777  * Returns 0 on success, negative on failure
6778  */
6779 int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
6780 {
6781 	struct ice_aq_get_set_rss_lut_params params = {};
6782 	struct ice_hw *hw = &vsi->back->hw;
6783 	enum ice_status status;
6784 
6785 	if (!lut)
6786 		return -EINVAL;
6787 
6788 	params.vsi_handle = vsi->idx;
6789 	params.lut_size = lut_size;
6790 	params.lut_type = vsi->rss_lut_type;
6791 	params.lut = lut;
6792 
6793 	status = ice_aq_set_rss_lut(hw, &params);
6794 	if (status) {
6795 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
6796 			ice_stat_str(status),
6797 			ice_aq_str(hw->adminq.sq_last_status));
6798 		return -EIO;
6799 	}
6800 
6801 	return 0;
6802 }
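
/* A minimal caller sketch (hypothetical helper, similar in spirit to the
 * driver's own LUT setup): spread the VSI's Rx queues evenly across the
 * table before programming it with ice_set_rss_lut().
 */
static int ice_example_spread_rss_lut(struct ice_vsi *vsi, u8 *lut,
				      u16 lut_size)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % vsi->rss_size;

	return ice_set_rss_lut(vsi, lut, lut_size);
}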
6803 
6804 /**
6805  * ice_set_rss_key - Set RSS key
6806  * @vsi: Pointer to the VSI structure
6807  * @seed: RSS hash seed
6808  *
6809  * Returns 0 on success, negative on failure
6810  */
6811 int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
6812 {
6813 	struct ice_hw *hw = &vsi->back->hw;
6814 	enum ice_status status;
6815 
6816 	if (!seed)
6817 		return -EINVAL;
6818 
6819 	status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
6820 	if (status) {
6821 		dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
6822 			ice_stat_str(status),
6823 			ice_aq_str(hw->adminq.sq_last_status));
6824 		return -EIO;
6825 	}
6826 
6827 	return 0;
6828 }
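
/* Illustrative sketch only: seeding the RSS hash key from the kernel's
 * global RSS key via netdev_rss_key_fill(), roughly as an ethtool-style
 * caller might. The seed buffer handed to ice_set_rss_key() must be at
 * least sizeof(struct ice_aqc_get_set_rss_keys); the field name assumed
 * here follows ice_adminq_cmd.h, and the helper name is hypothetical.
 */
static int __maybe_unused ice_example_seed_rss_key(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys *key;
	int err;

	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	netdev_rss_key_fill(key->standard_rss_key,
			    sizeof(key->standard_rss_key));
	err = ice_set_rss_key(vsi, (u8 *)key);
	kfree(key);
	return err;
}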

/**
 * ice_get_rss_lut - Get RSS LUT
 * @vsi: Pointer to VSI structure
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
	struct ice_aq_get_set_rss_lut_params params = {};
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status;

	if (!lut)
		return -EINVAL;

	params.vsi_handle = vsi->idx;
	params.lut_size = lut_size;
	params.lut_type = vsi->rss_lut_type;
	params.lut = lut;

	status = ice_aq_get_rss_lut(hw, &params);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
			ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));
		return -EIO;
	}

	return 0;
}

/**
 * ice_get_rss_key - Get RSS key
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the key in
 *
 * Returns 0 on success, negative on failure
 */
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
{
	struct ice_hw *hw = &vsi->back->hw;
	enum ice_status status;

	if (!seed)
		return -EINVAL;

	status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
			ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));
		return -EIO;
	}

	return 0;
}

/**
 * ice_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq
 * @dev: the netdev being configured
 * @filter_mask: filter mask passed in
 * @nlflags: netlink flags passed in
 *
 * Return the bridge mode (VEB/VEPA)
 */
static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
		   struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u16 bmode;

	bmode = pf->first_sw->bridge_mode;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags,
				       filter_mask, NULL);
}

/**
 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
 * @vsi: Pointer to VSI structure
 * @bmode: Hardware bridge mode (VEB/VEPA)
 *
 * Returns 0 on success, negative on failure
 */
static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
{
	struct ice_aqc_vsi_props *vsi_props;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	vsi_props = &vsi->info;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (bmode == BRIDGE_MODE_VEB)
		/* change from VEPA to VEB mode */
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	else
		/* change from VEB to VEPA mode */
		ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
			bmode, ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}
	/* Update sw flags for bookkeeping */
	vsi_props->sw_flags = ctxt->info.sw_flags;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge setlink flags
 * @extack: netlink extended ack
 *
 * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
 * hooked up. Iterates through the PF VSI list and sets the loopback mode (if
 * not already set) for all VSIs connected to this switch, and also updates the
 * unicast switch filter rules for the corresponding switch of the netdev.
 */
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
		   u16 __always_unused flags,
		   struct netlink_ext_ack __always_unused *extack)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	struct ice_pf *pf = np->vsi->back;
	struct nlattr *attr, *br_spec;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct ice_sw *pf_sw;
	int rem, v, err = 0;

	pf_sw = pf->first_sw;
	/* find the attribute in the netlink message */
	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;
		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;
		/* Continue if bridge mode is not being flipped */
		if (mode == pf_sw->bridge_mode)
			continue;
		/* Iterate through the PF VSI list and update the loopback
		 * mode of each VSI
		 */
		ice_for_each_vsi(pf, v) {
			if (!pf->vsi[v])
				continue;
			err = ice_vsi_update_bridge_mode(pf->vsi[v], mode);
			if (err)
				return err;
		}

		hw->evb_veb = (mode == BRIDGE_MODE_VEB);
		/* Update the unicast switch filter rules for the corresponding
		 * switch of the netdev
		 */
		status = ice_update_sw_rule_bridge_mode(hw);
		if (status) {
			netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
				   mode, ice_stat_str(status),
				   ice_aq_str(hw->adminq.sq_last_status));
			/* revert hw->evb_veb */
			hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
			return -EIO;
		}

		pf_sw->bridge_mode = mode;
	}

	return 0;
}
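
/* For reference: the VEB/VEPA mode handled above is normally toggled from
 * userspace with iproute2, e.g.
 *
 *	bridge link set dev <ifname> hwmode {veb|vepa}
 *	bridge link show dev <ifname>
 *
 * which reach the driver through ndo_bridge_setlink/ndo_bridge_getlink.
 */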

/**
 * ice_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: Tx queue
 */
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_ring *tx_ring = NULL;
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	u32 i;

	pf->tx_timeout_count++;

	/* Check if PFC is enabled for the TC to which the queue belongs.
	 * If yes, then the Tx timeout is not caused by a hung queue and
	 * there is no need to reset and rebuild
	 */
	if (ice_is_pfc_causing_hung_q(pf, txqueue)) {
		dev_info(ice_pf_to_dev(pf), "Fake Tx hang detected on queue %u, timeout caused by PFC storm\n",
			 txqueue);
		return;
	}

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_txq; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			if (txqueue == vsi->tx_rings[i]->q_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}

	/* Reset recovery level if enough time has elapsed after last timeout.
	 * Also ensure no new reset action happens before next timeout period.
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20)))
		pf->tx_timeout_recovery_level = 1;
	else if (time_before(jiffies, (pf->tx_timeout_last_recovery +
				       netdev->watchdog_timeo)))
		return;

	if (tx_ring) {
		struct ice_hw *hw = &pf->hw;
		u32 head, val = 0;

		head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])) &
			QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
		/* Read interrupt register */
		val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));

		netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
			    vsi->vsi_num, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use, val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %u\n",
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(ICE_PFR_REQ, pf->state);
		break;
	case 2:
		set_bit(ICE_CORER_REQ, pf->state);
		break;
	case 3:
		set_bit(ICE_GLOBR_REQ, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n");
		set_bit(ICE_DOWN, pf->state);
		set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
		set_bit(ICE_SERVICE_DIS, pf->state);
		break;
	}

	ice_service_task_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
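
/* For reference: successive Tx timeouts escalate the recovery level, so
 * with a typical 5 second netdev watchdog a persistent hang walks through
 * PFR (level 1), then CORER, then GLOBR; if the hang survives a global
 * reset the device is declared unrecoverable. A quiet period of more than
 * 20 seconds (the HZ * 20 check above) resets the ladder back to level 1.
 */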

/**
 * ice_open - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog is enabled,
 * and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't open net device while reset is in progress\n");
		return -EBUSY;
	}

	return ice_open_internal(netdev);
}
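
/* For reference: ice_open() is invoked through ndo_open by the core
 * networking stack when the interface is brought administratively up,
 * e.g. via
 *
 *	ip link set dev <ifname> up
 */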

/**
 * ice_open_internal - Called when a network interface becomes active
 * @netdev: network interface device structure
 *
 * Internal ice_open implementation. Should not be used directly, except by
 * ice_open and the reset handling routine.
 *
 * Returns 0 on success, negative value on failure
 */
int ice_open_internal(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_port_info *pi;
	enum ice_status status;
	int err;

	if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
		netdev_err(netdev, "driver needs to be unloaded and reloaded\n");
		return -EIO;
	}

	netif_carrier_off(netdev);

	pi = vsi->port_info;
	status = ice_update_link_info(pi);
	if (status) {
		netdev_err(netdev, "Failed to get link info, error %s\n",
			   ice_stat_str(status));
		return -EIO;
	}

	ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);

	/* Set PHY if there is media, otherwise, turn off PHY */
	if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
		clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) {
			err = ice_init_phy_user_cfg(pi);
			if (err) {
				netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
					   err);
				return err;
			}
		}

		err = ice_configure_phy(vsi);
		if (err) {
			netdev_err(netdev, "Failed to set physical link up, error %d\n",
				   err);
			return err;
		}
	} else {
		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
		ice_set_link(vsi, false);
	}

	err = ice_vsi_open(vsi);
	if (err)
		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
			   vsi->vsi_num, vsi->vsw->sw_id);

	/* Update existing tunnels information */
	udp_tunnel_get_rx_info(netdev);

	return err;
}

/**
 * ice_stop - Disables a network interface
 * @netdev: network interface device structure
 *
 * The stop entry point is called when an interface is de-activated by the OS,
 * and the netdevice enters the DOWN state. The hardware is still under the
 * driver's control, but the netdev interface is disabled.
 *
 * Returns success only - not allowed to fail
 */
int ice_stop(struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;

	if (ice_is_reset_in_progress(pf->state)) {
		netdev_err(netdev, "can't stop net device while reset is in progress\n");
		return -EBUSY;
	}

	ice_vsi_close(vsi);

	return 0;
}

/**
 * ice_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buffer
 * @netdev: This port's netdev
 * @features: Offload features that the stack believes apply
 */
static netdev_features_t
ice_features_check(struct sk_buff *skb,
		   struct net_device __always_unused *netdev,
		   netdev_features_t features)
{
	bool gso = skb_is_gso(skb);
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO is
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is, then we need to drop support for GSO.
	 */
	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
		features &= ~NETIF_F_GSO_MASK;

	len = skb_network_offset(skb);
	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
		goto out_rm_features;

	len = skb_network_header_len(skb);
	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
		goto out_rm_features;

	if (skb->encapsulation) {
		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
		 * the case of IPIP frames, the transport header pointer is
		 * after the inner header! So check to make sure that this
		 * is a GRE or UDP_TUNNEL frame before doing that math.
		 */
		if (gso && (skb_shinfo(skb)->gso_type &
			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
			len = skb_inner_network_header(skb) -
			      skb_transport_header(skb);
			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
				goto out_rm_features;
		}

		len = skb_inner_network_header_len(skb);
		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
			goto out_rm_features;
	}

	return features;
out_rm_features:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static const struct net_device_ops ice_netdev_safe_mode_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp_safe_mode,
};

static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
	.ndo_features_check = ice_features_check,
	.ndo_set_rx_mode = ice_set_rx_mode,
	.ndo_set_mac_address = ice_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ice_change_mtu,
	.ndo_get_stats64 = ice_get_stats64,
	.ndo_set_tx_maxrate = ice_set_tx_maxrate,
	.ndo_eth_ioctl = ice_eth_ioctl,
	.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
	.ndo_set_vf_mac = ice_set_vf_mac,
	.ndo_get_vf_config = ice_get_vf_cfg,
	.ndo_set_vf_trust = ice_set_vf_trust,
	.ndo_set_vf_vlan = ice_set_vf_port_vlan,
	.ndo_set_vf_link_state = ice_set_vf_link_state,
	.ndo_get_vf_stats = ice_get_vf_stats,
	.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
	.ndo_set_features = ice_set_features,
	.ndo_bridge_getlink = ice_bridge_getlink,
	.ndo_bridge_setlink = ice_bridge_setlink,
	.ndo_fdb_add = ice_fdb_add,
	.ndo_fdb_del = ice_fdb_del,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = ice_rx_flow_steer,
#endif
	.ndo_tx_timeout = ice_tx_timeout,
	.ndo_bpf = ice_xdp,
	.ndo_xdp_xmit = ice_xdp_xmit,
	.ndo_xsk_wakeup = ice_xsk_wakeup,
};
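
/* For reference: these ops tables are attached to the net_device during
 * netdev setup elsewhere in the driver, along the lines of the sketch
 * below. Safe mode (DDP package failed to load) gets the reduced table:
 *
 *	if (ice_is_safe_mode(pf))
 *		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
 *	else
 *		netdev->netdev_ops = &ice_netdev_ops;
 */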