1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 
9 /**
10  * ice_validate_vf_id - helper to check if VF ID is valid
11  * @pf: pointer to the PF structure
12  * @vf_id: the ID of the VF to check
13  */
14 static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
15 {
16 	/* vf_id range is only valid for 0-255, and should always be unsigned */
17 	if (vf_id >= pf->num_alloc_vfs) {
18 		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
19 		return -EINVAL;
20 	}
21 	return 0;
22 }
23 
24 /**
25  * ice_check_vf_init - helper to check if VF init complete
26  * @pf: pointer to the PF structure
27  * @vf: the pointer to the VF to check
28  */
29 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
30 {
31 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
32 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
33 			vf->vf_id);
34 		return -EBUSY;
35 	}
36 	return 0;
37 }
38 
39 /**
40  * ice_err_to_virt_err - translate errors for VF return code
41  * @ice_err: error return code
42  */
43 static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
44 {
45 	switch (ice_err) {
46 	case ICE_SUCCESS:
47 		return VIRTCHNL_STATUS_SUCCESS;
48 	case ICE_ERR_BAD_PTR:
49 	case ICE_ERR_INVAL_SIZE:
50 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
51 	case ICE_ERR_PARAM:
52 	case ICE_ERR_CFG:
53 		return VIRTCHNL_STATUS_ERR_PARAM;
54 	case ICE_ERR_NO_MEMORY:
55 		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
56 	case ICE_ERR_NOT_READY:
57 	case ICE_ERR_RESET_FAILED:
58 	case ICE_ERR_FW_API_VER:
59 	case ICE_ERR_AQ_ERROR:
60 	case ICE_ERR_AQ_TIMEOUT:
61 	case ICE_ERR_AQ_FULL:
62 	case ICE_ERR_AQ_NO_WORK:
63 	case ICE_ERR_AQ_EMPTY:
64 		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
65 	default:
66 		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
67 	}
68 }
69 
70 /**
71  * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
72  * @pf: pointer to the PF structure
73  * @v_opcode: operation code
74  * @v_retval: return value
75  * @msg: pointer to the msg buffer
76  * @msglen: msg length
77  */
78 static void
79 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
80 		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
81 {
82 	struct ice_hw *hw = &pf->hw;
83 	unsigned int i;
84 
85 	ice_for_each_vf(pf, i) {
86 		struct ice_vf *vf = &pf->vf[i];
87 
88 		/* Not all VFs are enabled so skip the ones that are not */
89 		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
90 		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
91 			continue;
92 
93 		/* Ignore return value on purpose - a given VF may fail, but
94 		 * we need to keep going and send to all of them
95 		 */
96 		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
97 				      msglen, NULL);
98 	}
99 }
100 
101 /**
102  * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
103  * @vf: pointer to the VF structure
104  * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
105  * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
106  * @link_up: whether or not to set the link up/down
107  */
108 static void
109 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
110 		 int ice_link_speed, bool link_up)
111 {
112 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
113 		pfe->event_data.link_event_adv.link_status = link_up;
114 		/* Speed in Mbps */
115 		pfe->event_data.link_event_adv.link_speed =
116 			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
117 	} else {
118 		pfe->event_data.link_event.link_status = link_up;
119 		/* Legacy method for virtchnl link speeds */
120 		pfe->event_data.link_event.link_speed =
121 			(enum virtchnl_link_speed)
122 			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
123 	}
124 }
125 
126 /**
127  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
128  * @vf: the VF to check
129  *
130  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
131  * otherwise
132  */
133 static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
134 {
135 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
136 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
137 }
138 
139 /**
140  * ice_is_vf_link_up - check if the VF's link is up
141  * @vf: VF to check if link is up
142  */
143 static bool ice_is_vf_link_up(struct ice_vf *vf)
144 {
145 	struct ice_pf *pf = vf->pf;
146 
147 	if (ice_check_vf_init(pf, vf))
148 		return false;
149 
150 	if (ice_vf_has_no_qs_ena(vf))
151 		return false;
152 	else if (vf->link_forced)
153 		return vf->link_up;
154 	else
155 		return pf->hw.port_info->phy.link_info.link_info &
156 			ICE_AQ_LINK_UP;
157 }
158 
159 /**
160  * ice_vc_notify_vf_link_state - Inform a VF of link status
161  * @vf: pointer to the VF structure
162  *
163  * send a link status message to a single VF
164  */
165 static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
166 {
167 	struct virtchnl_pf_event pfe = { 0 };
168 	struct ice_hw *hw = &vf->pf->hw;
169 
170 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
171 	pfe.severity = PF_EVENT_SEVERITY_INFO;
172 
173 	if (ice_is_vf_link_up(vf))
174 		ice_set_pfe_link(vf, &pfe,
175 				 hw->port_info->phy.link_info.link_speed, true);
176 	else
177 		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
178 
179 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
180 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
181 			      sizeof(pfe), NULL);
182 }
183 
184 /**
185  * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
186  * @vf: VF to remove access to VSI for
187  */
188 static void ice_vf_invalidate_vsi(struct ice_vf *vf)
189 {
190 	vf->lan_vsi_idx = ICE_NO_VSI;
191 	vf->lan_vsi_num = ICE_NO_VSI;
192 }
193 
194 /**
195  * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
196  * @vf: invalidate this VF's VSI after freeing it
197  */
198 static void ice_vf_vsi_release(struct ice_vf *vf)
199 {
200 	ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
201 	ice_vf_invalidate_vsi(vf);
202 }
203 
204 /**
205  * ice_free_vf_res - Free a VF's resources
206  * @vf: pointer to the VF info
207  */
208 static void ice_free_vf_res(struct ice_vf *vf)
209 {
210 	struct ice_pf *pf = vf->pf;
211 	int i, last_vector_idx;
212 
213 	/* First, disable VF's configuration API to prevent OS from
214 	 * accessing the VF's VSI after it's freed or invalidated.
215 	 */
216 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
217 
218 	/* free VSI and disconnect it from the parent uplink */
219 	if (vf->lan_vsi_idx != ICE_NO_VSI) {
220 		ice_vf_vsi_release(vf);
221 		vf->num_mac = 0;
222 	}
223 
224 	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
225 
226 	/* clear VF MDD event information */
227 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
228 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
229 
230 	/* Disable interrupts so that VF starts in a known state */
231 	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
232 		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
233 		ice_flush(&pf->hw);
234 	}
235 	/* reset some of the state variables keeping track of the resources */
236 	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
237 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
238 }
239 
240 /**
241  * ice_dis_vf_mappings
242  * @vf: pointer to the VF structure
243  */
244 static void ice_dis_vf_mappings(struct ice_vf *vf)
245 {
246 	struct ice_pf *pf = vf->pf;
247 	struct ice_vsi *vsi;
248 	struct device *dev;
249 	int first, last, v;
250 	struct ice_hw *hw;
251 
252 	hw = &pf->hw;
253 	vsi = pf->vsi[vf->lan_vsi_idx];
254 
255 	dev = ice_pf_to_dev(pf);
256 	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
257 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
258 
259 	first = vf->first_vector_idx;
260 	last = first + pf->num_msix_per_vf - 1;
261 	for (v = first; v <= last; v++) {
262 		u32 reg;
263 
264 		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
265 			GLINT_VECT2FUNC_IS_PF_M) |
266 		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
267 			GLINT_VECT2FUNC_PF_NUM_M));
268 		wr32(hw, GLINT_VECT2FUNC(v), reg);
269 	}
270 
271 	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
272 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
273 	else
274 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
275 
276 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
277 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
278 	else
279 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
280 }
281 
282 /**
283  * ice_sriov_free_msix_res - Reset/free any used MSIX resources
284  * @pf: pointer to the PF structure
285  *
286  * Since no MSIX entries are taken from the pf->irq_tracker, just clear
287  * the pf->sriov_base_vector.
288  *
289  * Returns 0 on success, and -EINVAL on error.
290  */
291 static int ice_sriov_free_msix_res(struct ice_pf *pf)
292 {
293 	struct ice_res_tracker *res;
294 
295 	if (!pf)
296 		return -EINVAL;
297 
298 	res = pf->irq_tracker;
299 	if (!res)
300 		return -EINVAL;
301 
302 	/* give back irq_tracker resources used */
303 	WARN_ON(pf->sriov_base_vector < res->num_entries);
304 
305 	pf->sriov_base_vector = 0;
306 
307 	return 0;
308 }
309 
310 /**
311  * ice_set_vf_state_qs_dis - Set VF queues state to disabled
312  * @vf: pointer to the VF structure
313  */
314 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
315 {
316 	/* Clear Rx/Tx enabled queues flag */
317 	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
318 	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
319 	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
320 }
321 
322 /**
323  * ice_dis_vf_qs - Disable the VF queues
324  * @vf: pointer to the VF structure
325  */
326 static void ice_dis_vf_qs(struct ice_vf *vf)
327 {
328 	struct ice_pf *pf = vf->pf;
329 	struct ice_vsi *vsi;
330 
331 	vsi = pf->vsi[vf->lan_vsi_idx];
332 
333 	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
334 	ice_vsi_stop_all_rx_rings(vsi);
335 	ice_set_vf_state_qs_dis(vf);
336 }
337 
338 /**
339  * ice_free_vfs - Free all VFs
340  * @pf: pointer to the PF structure
341  */
342 void ice_free_vfs(struct ice_pf *pf)
343 {
344 	struct device *dev = ice_pf_to_dev(pf);
345 	struct ice_hw *hw = &pf->hw;
346 	unsigned int tmp, i;
347 
348 	if (!pf->vf)
349 		return;
350 
351 	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
352 		usleep_range(1000, 2000);
353 
354 	/* Disable IOV before freeing resources. This lets any VF drivers
355 	 * running in the host get themselves cleaned up before we yank
356 	 * the carpet out from underneath their feet.
357 	 */
358 	if (!pci_vfs_assigned(pf->pdev))
359 		pci_disable_sriov(pf->pdev);
360 	else
361 		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
362 
363 	tmp = pf->num_alloc_vfs;
364 	pf->num_qps_per_vf = 0;
365 	pf->num_alloc_vfs = 0;
366 	for (i = 0; i < tmp; i++) {
367 		struct ice_vf *vf = &pf->vf[i];
368 
369 		mutex_lock(&vf->cfg_lock);
370 
371 		ice_dis_vf_qs(vf);
372 
373 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
374 			/* disable VF qp mappings and set VF disable state */
375 			ice_dis_vf_mappings(vf);
376 			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
377 			ice_free_vf_res(vf);
378 		}
379 
380 		mutex_unlock(&vf->cfg_lock);
381 
382 		mutex_destroy(&vf->cfg_lock);
383 	}
384 
385 	if (ice_sriov_free_msix_res(pf))
386 		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
387 
388 	devm_kfree(dev, pf->vf);
389 	pf->vf = NULL;
390 
391 	/* This check is for when the driver is unloaded while VFs are
392 	 * assigned. Setting the number of VFs to 0 through sysfs is caught
393 	 * before this function ever gets called.
394 	 */
395 	if (!pci_vfs_assigned(pf->pdev)) {
396 		unsigned int vf_id;
397 
398 		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
399 		 * work correctly when SR-IOV gets re-enabled.
400 		 */
401 		for (vf_id = 0; vf_id < tmp; vf_id++) {
402 			u32 reg_idx, bit_idx;
403 
404 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
405 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
406 			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
407 		}
408 	}
409 	clear_bit(__ICE_VF_DIS, pf->state);
410 	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
411 }
412 
413 /**
414  * ice_trigger_vf_reset - Reset a VF on HW
415  * @vf: pointer to the VF structure
416  * @is_vflr: true if VFLR was issued, false if not
417  * @is_pfr: true if the reset was triggered due to a previous PFR
418  *
419  * Trigger hardware to start a reset for a particular VF. Expects the caller
420  * to wait the proper amount of time to allow hardware to reset the VF before
421  * it cleans up and restores VF functionality.
422  */
423 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
424 {
425 	struct ice_pf *pf = vf->pf;
426 	u32 reg, reg_idx, bit_idx;
427 	unsigned int vf_abs_id, i;
428 	struct device *dev;
429 	struct ice_hw *hw;
430 
431 	dev = ice_pf_to_dev(pf);
432 	hw = &pf->hw;
433 	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
434 
435 	/* Inform VF that it is no longer active, as a warning */
436 	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
437 
438 	/* Disable VF's configuration API during reset. The flag is re-enabled
439 	 * when it's safe again to access VF's VSI.
440 	 */
441 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
442 
443 	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
444 	 * needs to clear them in the case of VFR/VFLR. If this is done for
445 	 * PFR, it can mess up VF resets because the VF driver may already
446 	 * have started cleanup by the time we get here.
447 	 */
448 	if (!is_pfr) {
449 		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
450 		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
451 	}
452 
453 	/* In the case of a VFLR, the HW has already reset the VF and we
454 	 * just need to clean up, so don't hit the VFRTRIG register.
455 	 */
456 	if (!is_vflr) {
457 		/* reset VF using VPGEN_VFRTRIG reg */
458 		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
459 		reg |= VPGEN_VFRTRIG_VFSWR_M;
460 		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
461 	}
462 	/* clear the VFLR bit in GLGEN_VFLRSTAT */
463 	reg_idx = (vf_abs_id) / 32;
464 	bit_idx = (vf_abs_id) % 32;
465 	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
466 	ice_flush(hw);
467 
468 	wr32(hw, PF_PCI_CIAA,
469 	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
470 	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
471 		reg = rd32(hw, PF_PCI_CIAD);
472 		/* no transactions pending so stop polling */
473 		if ((reg & VF_TRANS_PENDING_M) == 0)
474 			break;
475 
476 		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
477 		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
478 	}
479 }
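
/* Worked example for the GLGEN_VFLRSTAT indexing above (hypothetical
 * values, for illustration only): with hw->func_caps.vf_base_id = 64 and
 * vf->vf_id = 5, the absolute VF ID is 69, so the VFLR acknowledge bit
 * lives in GLGEN_VFLRSTAT(2) (69 / 32) at bit position 5 (69 % 32).
 */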
480 
481 /**
482  * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
483  * @vsi: the VSI to update
484  * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
485  * @enable: true to enable the PVID, false to disable it
486  */
487 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
488 {
489 	struct ice_hw *hw = &vsi->back->hw;
490 	struct ice_aqc_vsi_props *info;
491 	struct ice_vsi_ctx *ctxt;
492 	enum ice_status status;
493 	int ret = 0;
494 
495 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
496 	if (!ctxt)
497 		return -ENOMEM;
498 
499 	ctxt->info = vsi->info;
500 	info = &ctxt->info;
501 	if (enable) {
502 		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
503 			ICE_AQ_VSI_PVLAN_INSERT_PVID |
504 			ICE_AQ_VSI_VLAN_EMOD_STR;
505 		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
506 	} else {
507 		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
508 			ICE_AQ_VSI_VLAN_MODE_ALL;
509 		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
510 	}
511 
512 	info->pvid = cpu_to_le16(pvid_info);
513 	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
514 					   ICE_AQ_VSI_PROP_SW_VALID);
515 
516 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
517 	if (status) {
518 		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
519 			 ice_stat_str(status),
520 			 ice_aq_str(hw->adminq.sq_last_status));
521 		ret = -EIO;
522 		goto out;
523 	}
524 
525 	vsi->info.vlan_flags = info->vlan_flags;
526 	vsi->info.sw_flags2 = info->sw_flags2;
527 	vsi->info.pvid = info->pvid;
528 out:
529 	kfree(ctxt);
530 	return ret;
531 }
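
/* Note on pvid_info (illustrative, based on the standard 802.1Q TCI
 * layout rather than anything stated in this file): callers typically
 * pack the VLAN ID into bits 0-11 and the QoS/priority into bits 13-15,
 * so a port VLAN of 100 at priority 3 would arrive here as
 * 100 | (3 << 13) = 0x6064.
 */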
532 
533 /**
534  * ice_vf_get_port_info - Get the VF's port info structure
535  * @vf: VF used to get the port info structure for
536  */
537 static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
538 {
539 	return vf->pf->hw.port_info;
540 }
541 
542 /**
543  * ice_vf_vsi_setup - Set up a VF VSI
544  * @vf: VF to setup VSI for
545  *
546  * Returns pointer to the successfully allocated VSI struct on success,
547  * otherwise returns NULL on failure.
548  */
549 static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
550 {
551 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
552 	struct ice_pf *pf = vf->pf;
553 	struct ice_vsi *vsi;
554 
555 	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
556 
557 	if (!vsi) {
558 		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
559 		ice_vf_invalidate_vsi(vf);
560 		return NULL;
561 	}
562 
563 	vf->lan_vsi_idx = vsi->idx;
564 	vf->lan_vsi_num = vsi->vsi_num;
565 
566 	return vsi;
567 }
568 
569 /**
570  * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
571  * @pf: pointer to PF structure
572  * @vf: pointer to VF that the first MSIX vector index is being calculated for
573  *
574  * This returns the first MSIX vector index in PF space that is used by this VF.
575  * This index is used when accessing PF relative registers such as
576  * GLINT_VECT2FUNC and GLINT_DYN_CTL.
577  * This will always be the OICR index in the AVF driver so any functionality
578  * using vf->first_vector_idx for queue configuration will have to increment by
579  * 1 to avoid meddling with the OICR index.
580  */
581 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
582 {
583 	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
584 }
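
/* Worked example (hypothetical values): with pf->sriov_base_vector = 960
 * and pf->num_msix_per_vf = 17, VF 0's first PF-space vector is 960,
 * VF 1's is 977 and VF 2's is 994.
 */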
585 
586 /**
587  * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
588  * @vf: VF to add VLAN filters for
589  *
590  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
591  * always re-adds either a VLAN 0 or port VLAN based filter after reset.
592  */
593 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
594 {
595 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
596 	struct device *dev = ice_pf_to_dev(vf->pf);
597 	u16 vlan_id = 0;
598 	int err;
599 
600 	if (vf->port_vlan_info) {
601 		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
602 		if (err) {
603 			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
604 				vf->vf_id, err);
605 			return err;
606 		}
607 
608 		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
609 	}
610 
611 	/* vlan_id will either be 0 or the port VLAN number */
612 	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
613 	if (err) {
614 		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
615 			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
616 			err);
617 		return err;
618 	}
619 
620 	return 0;
621 }
622 
623 /**
624  * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
625  * @vf: VF to add MAC filters for
626  *
627  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
628  * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
629  */
630 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
631 {
632 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
633 	struct device *dev = ice_pf_to_dev(vf->pf);
634 	enum ice_status status;
635 	u8 broadcast[ETH_ALEN];
636 
637 	eth_broadcast_addr(broadcast);
638 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
639 	if (status) {
640 		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
641 			vf->vf_id, ice_stat_str(status));
642 		return ice_status_to_errno(status);
643 	}
644 
645 	vf->num_mac++;
646 
647 	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
648 		status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
649 					  ICE_FWD_TO_VSI);
650 		if (status) {
651 			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
652 				&vf->dflt_lan_addr.addr[0], vf->vf_id,
653 				ice_stat_str(status));
654 			return ice_status_to_errno(status);
655 		}
656 		vf->num_mac++;
657 	}
658 
659 	return 0;
660 }
661 
662 /**
663  * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
664  * @vf: VF to configure trust setting for
665  */
666 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
667 {
668 	if (vf->trusted)
669 		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
670 	else
671 		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
672 }
673 
674 /**
675  * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
676  * @vf: VF to enable MSIX mappings for
677  *
678  * Some of the registers need to be indexed/configured using hardware global
679  * device values and other registers need 0-based values, which represent PF
680  * based values.
681  */
682 static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
683 {
684 	int device_based_first_msix, device_based_last_msix;
685 	int pf_based_first_msix, pf_based_last_msix, v;
686 	struct ice_pf *pf = vf->pf;
687 	int device_based_vf_id;
688 	struct ice_hw *hw;
689 	u32 reg;
690 
691 	hw = &pf->hw;
692 	pf_based_first_msix = vf->first_vector_idx;
693 	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
694 
695 	device_based_first_msix = pf_based_first_msix +
696 		pf->hw.func_caps.common_cap.msix_vector_first_id;
697 	device_based_last_msix =
698 		(device_based_first_msix + pf->num_msix_per_vf) - 1;
699 	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
700 
701 	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
702 		VPINT_ALLOC_FIRST_M) |
703 	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
704 		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
705 	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
706 
707 	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
708 		 & VPINT_ALLOC_PCI_FIRST_M) |
709 	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
710 		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
711 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
712 
713 	/* map the interrupts to its functions */
714 	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
715 		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
716 			GLINT_VECT2FUNC_VF_NUM_M) |
717 		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
718 			GLINT_VECT2FUNC_PF_NUM_M));
719 		wr32(hw, GLINT_VECT2FUNC(v), reg);
720 	}
721 
722 	/* Map mailbox interrupt to VF MSI-X vector 0 */
723 	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
724 }
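
/* Worked example (hypothetical values): with vf->first_vector_idx = 960,
 * pf->num_msix_per_vf = 17 and msix_vector_first_id = 1, the PF-based
 * range written to GLINT_VECT2FUNC is 960..976, while the device-based
 * range programmed into VPINT_ALLOC/VPINT_ALLOC_PCI is 961..977.
 */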
725 
726 /**
727  * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
728  * @vf: VF to enable the mappings for
729  * @max_txq: max Tx queues allowed on the VF's VSI
730  * @max_rxq: max Rx queues allowed on the VF's VSI
731  */
732 static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
733 {
734 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
735 	struct device *dev = ice_pf_to_dev(vf->pf);
736 	struct ice_hw *hw = &vf->pf->hw;
737 	u32 reg;
738 
739 	/* set regardless of mapping mode */
740 	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
741 
742 	/* VF Tx queues allocation */
743 	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
744 		/* set the VF PF Tx queue range
745 		 * VFNUMQ value should be set to (number of queues - 1). A value
746 		 * of 0 means 1 queue and a value of 255 means 256 queues
747 		 */
748 		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
749 			VPLAN_TX_QBASE_VFFIRSTQ_M) |
750 		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
751 			VPLAN_TX_QBASE_VFNUMQ_M));
752 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
753 	} else {
754 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
755 	}
756 
757 	/* set regardless of mapping mode */
758 	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
759 
760 	/* VF Rx queues allocation */
761 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
762 		/* set the VF PF Rx queue range
763 		 * VFNUMQ value should be set to (number of queues - 1). A value
764 		 * of 0 means 1 queue and a value of 255 means 256 queues
765 		 */
766 		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
767 			VPLAN_RX_QBASE_VFFIRSTQ_M) |
768 		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
769 			VPLAN_RX_QBASE_VFNUMQ_M));
770 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
771 	} else {
772 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
773 	}
774 }
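
/* Worked example (hypothetical values): if vsi->txq_map[0] = 128 and
 * max_txq = 4, VPLAN_TX_QBASE is programmed with VFFIRSTQ = 128 and
 * VFNUMQ = 3 (queue count minus one), i.e. PF-space Tx queues 128..131
 * belong to this VF. The Rx side is encoded the same way.
 */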
775 
776 /**
777  * ice_ena_vf_mappings - enable VF MSIX and queue mapping
778  * @vf: pointer to the VF structure
779  */
780 static void ice_ena_vf_mappings(struct ice_vf *vf)
781 {
782 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
783 
784 	ice_ena_vf_msix_mappings(vf);
785 	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
786 }
787 
788 /**
789  * ice_determine_res
790  * @pf: pointer to the PF structure
791  * @avail_res: available resources in the PF structure
792  * @max_res: maximum resources that can be given per VF
793  * @min_res: minimum resources that can be given per VF
794  *
795  * Returns non-zero value if resources (queues/vectors) are available or
796  * returns zero if the PF cannot accommodate all num_alloc_vfs.
797  */
798 static int
799 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
800 {
801 	bool checked_min_res = false;
802 	int res;
803 
804 	/* start by checking if PF can assign max number of resources for
805 	 * all num_alloc_vfs.
806 	 * if yes, return number per VF
807 	 * If no, divide by 2 and roundup, check again
808 	 * repeat the loop till we reach a point where even minimum resources
809 	 * are not available, in that case return 0
810 	 */
811 	res = max_res;
812 	while ((res >= min_res) && !checked_min_res) {
813 		int num_all_res;
814 
815 		num_all_res = pf->num_alloc_vfs * res;
816 		if (num_all_res <= avail_res)
817 			return res;
818 
819 		if (res == min_res)
820 			checked_min_res = true;
821 
822 		res = DIV_ROUND_UP(res, 2);
823 	}
824 	return 0;
825 }
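
/* Worked example (hypothetical values): with num_alloc_vfs = 8,
 * avail_res = 40, max_res = 16 and min_res = 1, the loop tries
 * 16 (8 * 16 = 128 > 40), then 8 (64 > 40), then 4 (32 <= 40) and
 * returns 4 resources per VF.
 */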
826 
827 /**
828  * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
829  * @vf: VF to calculate the register index for
830  * @q_vector: a q_vector associated to the VF
831  */
832 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
833 {
834 	struct ice_pf *pf;
835 
836 	if (!vf || !q_vector)
837 		return -EINVAL;
838 
839 	pf = vf->pf;
840 
841 	/* always add one to account for the OICR being the first MSIX */
842 	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
843 		q_vector->v_idx + 1;
844 }
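
/* Worked example (hypothetical values): with pf->sriov_base_vector = 960
 * and pf->num_msix_per_vf = 17, the q_vector with v_idx = 0 on VF 1 maps
 * to register index 960 + 17 * 1 + 0 + 1 = 978; vector 977 is that VF's
 * OICR/mailbox vector.
 */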
845 
846 /**
847  * ice_get_max_valid_res_idx - Get the max valid resource index
848  * @res: pointer to the resource to find the max valid index for
849  *
850  * Start from the end of the ice_res_tracker and return right when we find the
851  * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
852  * valid for SR-IOV because it is the only consumer that manipulates the
853  * res->end and this is always called when res->end is set to res->num_entries.
854  */
855 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
856 {
857 	int i;
858 
859 	if (!res)
860 		return -EINVAL;
861 
862 	for (i = res->num_entries - 1; i >= 0; i--)
863 		if (res->list[i] & ICE_RES_VALID_BIT)
864 			return i;
865 
866 	return 0;
867 }
868 
869 /**
870  * ice_sriov_set_msix_res - Set any used MSIX resources
871  * @pf: pointer to PF structure
872  * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
873  *
874  * This function allows SR-IOV resources to be taken from the end of the PF's
875  * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
876  * just set the pf->sriov_base_vector and return success.
877  *
878  * If there are not enough resources available, return an error. This should
879  * always be caught by ice_set_per_vf_res().
880  *
881  * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
882  * in the PF's space available for SR-IOV.
883  */
884 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
885 {
886 	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
887 	int vectors_used = pf->irq_tracker->num_entries;
888 	int sriov_base_vector;
889 
890 	sriov_base_vector = total_vectors - num_msix_needed;
891 
892 	/* make sure we only grab irq_tracker entries from the list end and
893 	 * that we have enough available MSIX vectors
894 	 */
895 	if (sriov_base_vector < vectors_used)
896 		return -EINVAL;
897 
898 	pf->sriov_base_vector = sriov_base_vector;
899 
900 	return 0;
901 }
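
/* Worked example (hypothetical values): with 1024 total MSI-X vectors,
 * 96 entries already tracked in pf->irq_tracker and num_msix_needed =
 * 17 * 8 = 136, sriov_base_vector becomes 1024 - 136 = 888, which is
 * >= 96, so the assignment succeeds.
 */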
902 
903 /**
904  * ice_set_per_vf_res - check if vectors and queues are available
905  * @pf: pointer to the PF structure
906  *
907  * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
908  * get more vectors and can enable more queues per VF. Note that this does not
909  * grab any vectors from the SW pool already allocated. Also note, that all
910  * vector counts include one for each VF's miscellaneous interrupt vector
911  * (i.e. OICR).
912  *
913  * Minimum VFs - 2 vectors, 1 queue pair
914  * Small VFs - 5 vectors, 4 queue pairs
915  * Medium VFs - 17 vectors, 16 queue pairs
916  *
917  * Second, determine number of queue pairs per VF by starting with a pre-defined
918  * maximum each VF supports. If this is not possible, then we adjust based on
919  * queue pairs available on the device.
920  *
921  * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
922  * by each VF during VF initialization and reset.
923  */
924 static int ice_set_per_vf_res(struct ice_pf *pf)
925 {
926 	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
927 	int msix_avail_per_vf, msix_avail_for_sriov;
928 	struct device *dev = ice_pf_to_dev(pf);
929 	u16 num_msix_per_vf, num_txq, num_rxq;
930 
931 	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
932 		return -EINVAL;
933 
934 	/* determine MSI-X resources per VF */
935 	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
936 		pf->irq_tracker->num_entries;
937 	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
938 	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
939 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
940 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
941 		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
942 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
943 		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
944 	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
945 		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
946 	} else {
947 		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
948 			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
949 			pf->num_alloc_vfs);
950 		return -EIO;
951 	}
952 
953 	/* determine queue resources per VF */
954 	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
955 				    min_t(u16,
956 					  num_msix_per_vf - ICE_NONQ_VECS_VF,
957 					  ICE_MAX_RSS_QS_PER_VF),
958 				    ICE_MIN_QS_PER_VF);
959 
960 	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
961 				    min_t(u16,
962 					  num_msix_per_vf - ICE_NONQ_VECS_VF,
963 					  ICE_MAX_RSS_QS_PER_VF),
964 				    ICE_MIN_QS_PER_VF);
965 
966 	if (!num_txq || !num_rxq) {
967 		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
968 			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
969 		return -EIO;
970 	}
971 
972 	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
973 		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
974 			pf->num_alloc_vfs);
975 		return -EINVAL;
976 	}
977 
978 	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
979 	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
980 	pf->num_msix_per_vf = num_msix_per_vf;
981 	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
982 		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
983 
984 	return 0;
985 }
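
/* Worked example (hypothetical values): with 8 VFs, 200 MSI-X vectors
 * left over for SR-IOV and ample queues, msix_avail_per_vf = 25, so each
 * VF gets ICE_NUM_VF_MSIX_MED (17) vectors and 16 queue pairs; one of
 * the 17 vectors is the VF's miscellaneous/OICR vector.
 */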
986 
987 /**
988  * ice_clear_vf_reset_trigger - enable VF to access hardware
989  * @vf: VF to enable hardware access for
990  */
991 static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
992 {
993 	struct ice_hw *hw = &vf->pf->hw;
994 	u32 reg;
995 
996 	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
997 	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
998 	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
999 	ice_flush(hw);
1000 }
1001 
1002 /**
1003  * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
1004  * @vf: pointer to the VF info
1005  * @vsi: the VSI being configured
1006  * @promisc_m: mask of promiscuous config bits
1007  * @rm_promisc: promisc flag request from the VF to remove or add filter
1008  *
1009  * This function configures VF VSI promiscuous mode, based on the VF requests,
1010  * for Unicast, Multicast and VLAN
1011  */
1012 static enum ice_status
1013 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1014 		       bool rm_promisc)
1015 {
1016 	struct ice_pf *pf = vf->pf;
1017 	enum ice_status status = 0;
1018 	struct ice_hw *hw;
1019 
1020 	hw = &pf->hw;
1021 	if (vsi->num_vlan) {
1022 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1023 						  rm_promisc);
1024 	} else if (vf->port_vlan_info) {
1025 		if (rm_promisc)
1026 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1027 						       vf->port_vlan_info);
1028 		else
1029 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1030 						     vf->port_vlan_info);
1031 	} else {
1032 		if (rm_promisc)
1033 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1034 						       0);
1035 		else
1036 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1037 						     0);
1038 	}
1039 
1040 	return status;
1041 }
1042 
1043 static void ice_vf_clear_counters(struct ice_vf *vf)
1044 {
1045 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
1046 
1047 	vf->num_mac = 0;
1048 	vsi->num_vlan = 0;
1049 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1050 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1051 }
1052 
1053 /**
1054  * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
1055  * @vf: VF to perform pre VSI rebuild tasks
1056  *
1057  * These tasks are items that don't need to be amortized since they are most
1058  * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
1059  */
1060 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
1061 {
1062 	ice_vf_clear_counters(vf);
1063 	ice_clear_vf_reset_trigger(vf);
1064 }
1065 
1066 /**
1067  * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
1068  * @vf: VF to rebuild host configuration on
1069  */
1070 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1071 {
1072 	struct device *dev = ice_pf_to_dev(vf->pf);
1073 
1074 	ice_vf_set_host_trust_cfg(vf);
1075 
1076 	if (ice_vf_rebuild_host_mac_cfg(vf))
1077 		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1078 			vf->vf_id);
1079 
1080 	if (ice_vf_rebuild_host_vlan_cfg(vf))
1081 		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1082 			vf->vf_id);
1083 }
1084 
1085 /**
1086  * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
1087  * @vf: VF to release and setup the VSI for
1088  *
1089  * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
1090  * configuration change, etc.).
1091  */
1092 static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1093 {
1094 	ice_vf_vsi_release(vf);
1095 	if (!ice_vf_vsi_setup(vf))
1096 		return -ENOMEM;
1097 
1098 	return 0;
1099 }
1100 
1101 /**
1102  * ice_vf_rebuild_vsi - rebuild the VF's VSI
1103  * @vf: VF to rebuild the VSI for
1104  *
1105  * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
1106  * host, PFR, CORER, etc.).
1107  */
1108 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1109 {
1110 	struct ice_pf *pf = vf->pf;
1111 	struct ice_vsi *vsi;
1112 
1113 	vsi = pf->vsi[vf->lan_vsi_idx];
1114 
1115 	if (ice_vsi_rebuild(vsi, true)) {
1116 		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1117 			vf->vf_id);
1118 		return -EIO;
1119 	}
1120 	/* vsi->idx will remain the same in this case so don't update
1121 	 * vf->lan_vsi_idx
1122 	 */
1123 	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1124 	vf->lan_vsi_num = vsi->vsi_num;
1125 
1126 	return 0;
1127 }
1128 
1129 /**
1130  * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1131  * @vf: VF to set in initialized state
1132  *
1133  * After this function the VF will be ready to receive/handle the
1134  * VIRTCHNL_OP_GET_VF_RESOURCES message
1135  */
1136 static void ice_vf_set_initialized(struct ice_vf *vf)
1137 {
1138 	ice_set_vf_state_qs_dis(vf);
1139 	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1140 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1141 	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1142 	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1143 }
1144 
1145 /**
1146  * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
1147  * @vf: VF to perform tasks on
1148  */
1149 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1150 {
1151 	struct ice_pf *pf = vf->pf;
1152 	struct ice_hw *hw;
1153 
1154 	hw = &pf->hw;
1155 
1156 	ice_vf_rebuild_host_cfg(vf);
1157 
1158 	ice_vf_set_initialized(vf);
1159 	ice_ena_vf_mappings(vf);
1160 	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1161 }
1162 
1163 /**
1164  * ice_reset_all_vfs - reset all allocated VFs in one go
1165  * @pf: pointer to the PF structure
1166  * @is_vflr: true if VFLR was issued, false if not
1167  *
1168  * First, tell the hardware to reset each VF, then do all the waiting in one
1169  * chunk, and finally finish restoring each VF after the wait. This is useful
1170  * during PF routines which need to reset all VFs, as otherwise it must perform
1171  * these resets in a serialized fashion.
1172  *
1173  * Returns true if any VFs were reset, and false otherwise.
1174  */
1175 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1176 {
1177 	struct device *dev = ice_pf_to_dev(pf);
1178 	struct ice_hw *hw = &pf->hw;
1179 	struct ice_vf *vf;
1180 	int v, i;
1181 
1182 	/* If we don't have any VFs, then there is nothing to reset */
1183 	if (!pf->num_alloc_vfs)
1184 		return false;
1185 
1186 	/* If VFs have been disabled, there is no need to reset */
1187 	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1188 		return false;
1189 
1190 	/* Begin reset on all VFs at once */
1191 	ice_for_each_vf(pf, v)
1192 		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1193 
1194 	/* HW requires some time to make sure it can flush the FIFO for a VF
1195 	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1196 	 * sequence to make sure that it has completed. We'll keep track of
1197 	 * the VFs using a simple iterator that increments once that VF has
1198 	 * finished resetting.
1199 	 */
1200 	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1201 		/* Check each VF in sequence */
1202 		while (v < pf->num_alloc_vfs) {
1203 			u32 reg;
1204 
1205 			vf = &pf->vf[v];
1206 			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1207 			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1208 				/* only delay if the check failed */
1209 				usleep_range(10, 20);
1210 				break;
1211 			}
1212 
1213 			/* If the current VF has finished resetting, move on
1214 			 * to the next VF in sequence.
1215 			 */
1216 			v++;
1217 		}
1218 	}
1219 
1220 	/* Display a warning if at least one VF didn't manage to reset in
1221 	 * time, but continue on with the operation.
1222 	 */
1223 	if (v < pf->num_alloc_vfs)
1224 		dev_warn(dev, "VF reset check timeout\n");
1225 
1226 	/* free VF resources to begin resetting the VSI state */
1227 	ice_for_each_vf(pf, v) {
1228 		vf = &pf->vf[v];
1229 
1230 		mutex_lock(&vf->cfg_lock);
1231 
1232 		ice_vf_pre_vsi_rebuild(vf);
1233 		ice_vf_rebuild_vsi(vf);
1234 		ice_vf_post_vsi_rebuild(vf);
1235 
1236 		mutex_unlock(&vf->cfg_lock);
1237 	}
1238 
1239 	ice_flush(hw);
1240 	clear_bit(__ICE_VF_DIS, pf->state);
1241 
1242 	return true;
1243 }
1244 
1245 /**
1246  * ice_is_vf_disabled
1247  * @vf: pointer to the VF info
1248  *
1249  * Returns true if the PF or VF is disabled, false otherwise.
1250  */
1251 static bool ice_is_vf_disabled(struct ice_vf *vf)
1252 {
1253 	struct ice_pf *pf = vf->pf;
1254 
1255 	/* If the PF has been disabled, there is no need to reset the VF until
1256 	 * PF is active again. Similarly, if the VF has been disabled, this
1257 	 * means something else is resetting the VF, so we shouldn't continue.
1258 	 * Otherwise, set disable VF state bit for actual reset, and continue.
1259 	 */
1260 	return (test_bit(__ICE_VF_DIS, pf->state) ||
1261 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1262 }
1263 
1264 /**
1265  * ice_reset_vf - Reset a particular VF
1266  * @vf: pointer to the VF structure
1267  * @is_vflr: true if VFLR was issued, false if not
1268  *
1269  * Returns true if the VF is currently in reset, resets successfully, or resets
1270  * are disabled and false otherwise.
1271  */
1272 bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1273 {
1274 	struct ice_pf *pf = vf->pf;
1275 	struct ice_vsi *vsi;
1276 	struct device *dev;
1277 	struct ice_hw *hw;
1278 	bool rsd = false;
1279 	u8 promisc_m;
1280 	u32 reg;
1281 	int i;
1282 
1283 	lockdep_assert_held(&vf->cfg_lock);
1284 
1285 	dev = ice_pf_to_dev(pf);
1286 
1287 	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
1288 		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1289 			vf->vf_id);
1290 		return true;
1291 	}
1292 
1293 	if (ice_is_vf_disabled(vf)) {
1294 		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
1295 			vf->vf_id);
1296 		return true;
1297 	}
1298 
1299 	/* Set VF disable bit state here, before triggering reset */
1300 	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1301 	ice_trigger_vf_reset(vf, is_vflr, false);
1302 
1303 	vsi = pf->vsi[vf->lan_vsi_idx];
1304 
1305 	ice_dis_vf_qs(vf);
1306 
1307 	/* Call Disable LAN Tx queue AQ whether or not queues are
1308 	 * enabled. This is needed for successful completion of VFR.
1309 	 */
1310 	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1311 			NULL, ICE_VF_RESET, vf->vf_id, NULL);
1312 
1313 	hw = &pf->hw;
1314 	/* poll VPGEN_VFRSTAT reg to make sure
1315 	 * that reset is complete
1316 	 */
1317 	for (i = 0; i < 10; i++) {
1318 		/* VF reset requires driver to first reset the VF and then
1319 		 * poll the status register to make sure that the reset
1320 		 * completed successfully.
1321 		 */
1322 		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1323 		if (reg & VPGEN_VFRSTAT_VFRD_M) {
1324 			rsd = true;
1325 			break;
1326 		}
1327 
1328 		/* only sleep if the reset is not done */
1329 		usleep_range(10, 20);
1330 	}
1331 
1332 	/* Display a warning if VF didn't manage to reset in time, but need to
1333 	 * continue on with the operation.
1334 	 */
1335 	if (!rsd)
1336 		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1337 
1338 	/* disable promiscuous modes in case they were enabled
1339 	 * ignore any error if disabling process failed
1340 	 */
1341 	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1342 	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1343 		if (vf->port_vlan_info || vsi->num_vlan)
1344 			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1345 		else
1346 			promisc_m = ICE_UCAST_PROMISC_BITS;
1347 
1348 		vsi = pf->vsi[vf->lan_vsi_idx];
1349 		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1350 			dev_err(dev, "disabling promiscuous mode failed\n");
1351 	}
1352 
1353 	ice_vf_pre_vsi_rebuild(vf);
1354 
1355 	if (ice_vf_rebuild_vsi_with_release(vf)) {
1356 		dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
1357 		return false;
1358 	}
1359 
1360 	ice_vf_post_vsi_rebuild(vf);
1361 
1362 	return true;
1363 }
1364 
1365 /**
1366  * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1367  * @pf: pointer to the PF structure
1368  */
1369 void ice_vc_notify_link_state(struct ice_pf *pf)
1370 {
1371 	int i;
1372 
1373 	ice_for_each_vf(pf, i)
1374 		ice_vc_notify_vf_link_state(&pf->vf[i]);
1375 }
1376 
1377 /**
1378  * ice_vc_notify_reset - Send pending reset message to all VFs
1379  * @pf: pointer to the PF structure
1380  *
1381  * indicate a pending reset to all VFs on a given PF
1382  */
1383 void ice_vc_notify_reset(struct ice_pf *pf)
1384 {
1385 	struct virtchnl_pf_event pfe;
1386 
1387 	if (!pf->num_alloc_vfs)
1388 		return;
1389 
1390 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1391 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1392 	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1393 			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1394 }
1395 
1396 /**
1397  * ice_vc_notify_vf_reset - Notify VF of a reset event
1398  * @vf: pointer to the VF structure
1399  */
1400 static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1401 {
1402 	struct virtchnl_pf_event pfe;
1403 	struct ice_pf *pf;
1404 
1405 	if (!vf)
1406 		return;
1407 
1408 	pf = vf->pf;
1409 	if (ice_validate_vf_id(pf, vf->vf_id))
1410 		return;
1411 
1412 	/* Bail out if VF is in disabled state, neither initialized, nor active
1413 	 * state - otherwise proceed with notifications
1414 	 */
1415 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1416 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1417 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1418 		return;
1419 
1420 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1421 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1422 	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1423 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1424 			      NULL);
1425 }
1426 
1427 /**
1428  * ice_init_vf_vsi_res - initialize/setup VF VSI resources
1429  * @vf: VF to initialize/setup the VSI for
1430  *
1431  * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
1432  * VF VSI's broadcast filter. It is only used during initial VF creation.
1433  */
1434 static int ice_init_vf_vsi_res(struct ice_vf *vf)
1435 {
1436 	struct ice_pf *pf = vf->pf;
1437 	u8 broadcast[ETH_ALEN];
1438 	enum ice_status status;
1439 	struct ice_vsi *vsi;
1440 	struct device *dev;
1441 	int err;
1442 
1443 	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1444 
1445 	dev = ice_pf_to_dev(pf);
1446 	vsi = ice_vf_vsi_setup(vf);
1447 	if (!vsi)
1448 		return -ENOMEM;
1449 
1450 	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1451 	if (err) {
1452 		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1453 			 vf->vf_id);
1454 		goto release_vsi;
1455 	}
1456 
1457 	eth_broadcast_addr(broadcast);
1458 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1459 	if (status) {
1460 		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1461 			vf->vf_id, ice_stat_str(status));
1462 		err = ice_status_to_errno(status);
1463 		goto release_vsi;
1464 	}
1465 
1466 	vf->num_mac = 1;
1467 
1468 	return 0;
1469 
1470 release_vsi:
1471 	ice_vf_vsi_release(vf);
1472 	return err;
1473 }
1474 
1475 /**
1476  * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
1477  * @pf: PF the VFs are associated with
1478  */
1479 static int ice_start_vfs(struct ice_pf *pf)
1480 {
1481 	struct ice_hw *hw = &pf->hw;
1482 	int retval, i;
1483 
1484 	ice_for_each_vf(pf, i) {
1485 		struct ice_vf *vf = &pf->vf[i];
1486 
1487 		ice_clear_vf_reset_trigger(vf);
1488 
1489 		retval = ice_init_vf_vsi_res(vf);
1490 		if (retval) {
1491 			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1492 				vf->vf_id, retval);
1493 			goto teardown;
1494 		}
1495 
1496 		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1497 		ice_ena_vf_mappings(vf);
1498 		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1499 	}
1500 
1501 	ice_flush(hw);
1502 	return 0;
1503 
1504 teardown:
1505 	for (i = i - 1; i >= 0; i--) {
1506 		struct ice_vf *vf = &pf->vf[i];
1507 
1508 		ice_dis_vf_mappings(vf);
1509 		ice_vf_vsi_release(vf);
1510 	}
1511 
1512 	return retval;
1513 }
1514 
1515 /**
1516  * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1517  * @pf: PF holding reference to all VFs for default configuration
1518  */
1519 static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1520 {
1521 	int i;
1522 
1523 	ice_for_each_vf(pf, i) {
1524 		struct ice_vf *vf = &pf->vf[i];
1525 
1526 		vf->pf = pf;
1527 		vf->vf_id = i;
1528 		vf->vf_sw_id = pf->first_sw;
1529 		/* assign default capabilities */
1530 		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1531 		vf->spoofchk = true;
1532 		vf->num_vf_qs = pf->num_qps_per_vf;
1533 
1534 		mutex_init(&vf->cfg_lock);
1535 	}
1536 }
1537 
1538 /**
1539  * ice_alloc_vfs - allocate num_vfs in the PF structure
1540  * @pf: PF to store the allocated VFs in
1541  * @num_vfs: number of VFs to allocate
1542  */
1543 static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1544 {
1545 	struct ice_vf *vfs;
1546 
1547 	vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1548 			   GFP_KERNEL);
1549 	if (!vfs)
1550 		return -ENOMEM;
1551 
1552 	pf->vf = vfs;
1553 	pf->num_alloc_vfs = num_vfs;
1554 
1555 	return 0;
1556 }
1557 
1558 /**
1559  * ice_ena_vfs - enable VFs so they are ready to be used
1560  * @pf: pointer to the PF structure
1561  * @num_vfs: number of VFs to enable
1562  */
1563 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1564 {
1565 	struct device *dev = ice_pf_to_dev(pf);
1566 	struct ice_hw *hw = &pf->hw;
1567 	int ret;
1568 
1569 	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
1570 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1571 	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1572 	set_bit(__ICE_OICR_INTR_DIS, pf->state);
1573 	ice_flush(hw);
1574 
1575 	ret = pci_enable_sriov(pf->pdev, num_vfs);
1576 	if (ret) {
1577 		pf->num_alloc_vfs = 0;
1578 		goto err_unroll_intr;
1579 	}
1580 
1581 	ret = ice_alloc_vfs(pf, num_vfs);
1582 	if (ret)
1583 		goto err_pci_disable_sriov;
1584 
1585 	if (ice_set_per_vf_res(pf)) {
1586 		dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
1587 			num_vfs);
1588 		ret = -ENOSPC;
1589 		goto err_unroll_sriov;
1590 	}
1591 
1592 	ice_set_dflt_settings_vfs(pf);
1593 
1594 	if (ice_start_vfs(pf)) {
1595 		dev_err(dev, "Failed to start VF(s)\n");
1596 		ret = -EAGAIN;
1597 		goto err_unroll_sriov;
1598 	}
1599 
1600 	clear_bit(__ICE_VF_DIS, pf->state);
1601 	return 0;
1602 
1603 err_unroll_sriov:
1604 	devm_kfree(dev, pf->vf);
1605 	pf->vf = NULL;
1606 	pf->num_alloc_vfs = 0;
1607 err_pci_disable_sriov:
1608 	pci_disable_sriov(pf->pdev);
1609 err_unroll_intr:
1610 	/* rearm interrupts here */
1611 	ice_irq_dynamic_ena(hw, NULL, NULL);
1612 	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1613 	return ret;
1614 }
1615 
1616 /**
1617  * ice_pci_sriov_ena - Enable or change number of VFs
1618  * @pf: pointer to the PF structure
1619  * @num_vfs: number of VFs to allocate
1620  *
1621  * Returns 0 on success and negative on failure
1622  */
1623 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1624 {
1625 	int pre_existing_vfs = pci_num_vf(pf->pdev);
1626 	struct device *dev = ice_pf_to_dev(pf);
1627 	int err;
1628 
1629 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1630 		ice_free_vfs(pf);
1631 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1632 		return 0;
1633 
1634 	if (num_vfs > pf->num_vfs_supported) {
1635 		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1636 			num_vfs, pf->num_vfs_supported);
1637 		return -EOPNOTSUPP;
1638 	}
1639 
1640 	dev_info(dev, "Enabling %d VFs\n", num_vfs);
1641 	err = ice_ena_vfs(pf, num_vfs);
1642 	if (err) {
1643 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1644 		return err;
1645 	}
1646 
1647 	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1648 	return 0;
1649 }
1650 
1651 /**
1652  * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
1653  * @pf: PF to enable SR-IOV on
1654  */
1655 static int ice_check_sriov_allowed(struct ice_pf *pf)
1656 {
1657 	struct device *dev = ice_pf_to_dev(pf);
1658 
1659 	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1660 		dev_err(dev, "This device is not capable of SR-IOV\n");
1661 		return -EOPNOTSUPP;
1662 	}
1663 
1664 	if (ice_is_safe_mode(pf)) {
1665 		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1666 		return -EOPNOTSUPP;
1667 	}
1668 
1669 	if (!ice_pf_state_is_nominal(pf)) {
1670 		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1671 		return -EBUSY;
1672 	}
1673 
1674 	return 0;
1675 }
1676 
1677 /**
1678  * ice_sriov_configure - Enable or change number of VFs via sysfs
1679  * @pdev: pointer to a pci_dev structure
1680  * @num_vfs: number of VFs to allocate or 0 to free VFs
1681  *
1682  * This function is called when the user updates the number of VFs in sysfs. On
1683  * success return whatever num_vfs was set to by the caller. Return negative on
1684  * failure.
1685  */
1686 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1687 {
1688 	struct ice_pf *pf = pci_get_drvdata(pdev);
1689 	struct device *dev = ice_pf_to_dev(pf);
1690 	int err;
1691 
1692 	err = ice_check_sriov_allowed(pf);
1693 	if (err)
1694 		return err;
1695 
1696 	if (!num_vfs) {
1697 		if (!pci_vfs_assigned(pdev)) {
1698 			ice_free_vfs(pf);
1699 			return 0;
1700 		}
1701 
1702 		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1703 		return -EBUSY;
1704 	}
1705 
1706 	err = ice_pci_sriov_ena(pf, num_vfs);
1707 	if (err)
1708 		return err;
1709 
1710 	return num_vfs;
1711 }
1712 
1713 /**
1714  * ice_process_vflr_event - Free VF resources via IRQ calls
1715  * @pf: pointer to the PF structure
1716  *
1717  * called from the VFLR IRQ handler to
1718  * free up VF resources and state variables
1719  */
1720 void ice_process_vflr_event(struct ice_pf *pf)
1721 {
1722 	struct ice_hw *hw = &pf->hw;
1723 	unsigned int vf_id;
1724 	u32 reg;
1725 
1726 	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1727 	    !pf->num_alloc_vfs)
1728 		return;
1729 
1730 	ice_for_each_vf(pf, vf_id) {
1731 		struct ice_vf *vf = &pf->vf[vf_id];
1732 		u32 reg_idx, bit_idx;
1733 
1734 		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1735 		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
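		/* e.g. with a hypothetical vf_base_id of 64 and vf_id 5, the
		 * absolute VF index 69 maps to reg_idx 2, bit_idx 5
		 */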
1736 		/* read GLGEN_VFLRSTAT register to find out which VFs triggered VFLR */
1737 		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1738 		if (reg & BIT(bit_idx)) {
1739 			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1740 			mutex_lock(&vf->cfg_lock);
1741 			ice_reset_vf(vf, true);
1742 			mutex_unlock(&vf->cfg_lock);
1743 		}
1744 	}
1745 }
1746 
1747 /**
1748  * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
1749  * @vf: pointer to the VF info
1750  */
1751 static void ice_vc_reset_vf(struct ice_vf *vf)
1752 {
1753 	ice_vc_notify_vf_reset(vf);
1754 	ice_reset_vf(vf, false);
1755 }
1756 
1757 /**
1758  * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1759  * @pf: PF used to index all VFs
1760  * @pfq: queue index relative to the PF's function space
1761  *
1762  * If no VF is found who owns the pfq then return NULL, otherwise return a
1763  * pointer to the VF who owns the pfq
1764  */
1765 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1766 {
1767 	unsigned int vf_id;
1768 
1769 	ice_for_each_vf(pf, vf_id) {
1770 		struct ice_vf *vf = &pf->vf[vf_id];
1771 		struct ice_vsi *vsi;
1772 		u16 rxq_idx;
1773 
1774 		vsi = pf->vsi[vf->lan_vsi_idx];
1775 
1776 		ice_for_each_rxq(vsi, rxq_idx)
1777 			if (vsi->rxq_map[rxq_idx] == pfq)
1778 				return vf;
1779 	}
1780 
1781 	return NULL;
1782 }
1783 
1784 /**
1785  * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1786  * @pf: PF used for conversion
1787  * @globalq: global queue index used to convert to PF space queue index
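 *
 * For example, if rxq_first_id were 128 (an illustrative value), global Rx
 * queue 133 would map to PF-space queue 5.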
1788  */
1789 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1790 {
1791 	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1792 }
1793 
1794 /**
1795  * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1796  * @pf: PF that the LAN overflow event happened on
1797  * @event: structure holding the event information for the LAN overflow event
1798  *
1799  * Determine if the LAN overflow event was caused by a VF queue. If it was not
1800  * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1801  * reset on the offending VF.
1802  */
1803 void
1804 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1805 {
1806 	u32 gldcb_rtctq, queue;
1807 	struct ice_vf *vf;
1808 
1809 	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1810 	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1811 
1812 	/* event returns device global Rx queue number */
1813 	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1814 		GLDCB_RTCTQ_RXQNUM_S;
1815 
1816 	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1817 	if (!vf)
1818 		return;
1819 
1820 	mutex_lock(&vf->cfg_lock);
1821 	ice_vc_reset_vf(vf);
1822 	mutex_unlock(&vf->cfg_lock);
1823 }
1824 
1825 /**
1826  * ice_vc_send_msg_to_vf - Send message to VF
1827  * @vf: pointer to the VF info
1828  * @v_opcode: virtual channel opcode
1829  * @v_retval: virtual channel return value
1830  * @msg: pointer to the msg buffer
1831  * @msglen: msg length
1832  *
1833  * send msg to VF
1834  */
1835 static int
1836 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1837 		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1838 {
1839 	enum ice_status aq_ret;
1840 	struct device *dev;
1841 	struct ice_pf *pf;
1842 
1843 	if (!vf)
1844 		return -EINVAL;
1845 
1846 	pf = vf->pf;
1847 	if (ice_validate_vf_id(pf, vf->vf_id))
1848 		return -EINVAL;
1849 
1850 	dev = ice_pf_to_dev(pf);
1851 
1852 	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1853 				       msg, msglen, NULL);
1854 	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1855 		dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1856 			 vf->vf_id, ice_stat_str(aq_ret),
1857 			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1858 		return -EIO;
1859 	}
1860 
1861 	return 0;
1862 }
1863 
1864 /**
1865  * ice_vc_get_ver_msg
1866  * @vf: pointer to the VF info
1867  * @msg: pointer to the msg buffer
1868  *
1869  * called from the VF to request the API version used by the PF
1870  */
1871 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1872 {
1873 	struct virtchnl_version_info info = {
1874 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1875 	};
1876 
1877 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
1878 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1879 	if (VF_IS_V10(&vf->vf_ver))
1880 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1881 
1882 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1883 				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1884 				     sizeof(struct virtchnl_version_info));
1885 }
1886 
1887 /**
1888  * ice_vc_get_max_frame_size - get max frame size allowed for VF
1889  * @vf: VF used to determine max frame size
1890  *
1891  * Max frame size is determined based on the current port's max frame size and
1892  * whether a port VLAN is configured on this VF. The VF is not aware whether
1893  * it's in a port VLAN so the PF needs to account for this in max frame size
1894  * checks and sending the max frame size to the VF.
1895  */
1896 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
1897 {
1898 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
1899 	struct ice_port_info *pi = vsi->port_info;
1900 	u16 max_frame_size;
1901 
1902 	max_frame_size = pi->phy.link_info.max_frame_size;
1903 
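	/* e.g. a 1522 byte port max frame leaves 1518 bytes once the 4 byte
	 * VLAN tag (VLAN_HLEN) is reserved for the port VLAN
	 */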
1904 	if (vf->port_vlan_info)
1905 		max_frame_size -= VLAN_HLEN;
1906 
1907 	return max_frame_size;
1908 }
1909 
1910 /**
1911  * ice_vc_get_vf_res_msg
1912  * @vf: pointer to the VF info
1913  * @msg: pointer to the msg buffer
1914  *
1915  * called from the VF to request its resources
1916  */
1917 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1918 {
1919 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1920 	struct virtchnl_vf_resource *vfres = NULL;
1921 	struct ice_pf *pf = vf->pf;
1922 	struct ice_vsi *vsi;
1923 	int len = 0;
1924 	int ret;
1925 
1926 	if (ice_check_vf_init(pf, vf)) {
1927 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1928 		goto err;
1929 	}
1930 
1931 	len = sizeof(struct virtchnl_vf_resource);
1932 
1933 	vfres = kzalloc(len, GFP_KERNEL);
1934 	if (!vfres) {
1935 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1936 		len = 0;
1937 		goto err;
1938 	}
1939 	if (VF_IS_V11(&vf->vf_ver))
1940 		vf->driver_caps = *(u32 *)msg;
1941 	else
1942 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1943 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1944 				  VIRTCHNL_VF_OFFLOAD_VLAN;
1945 
1946 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1947 	vsi = pf->vsi[vf->lan_vsi_idx];
1948 	if (!vsi) {
1949 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1950 		goto err;
1951 	}
1952 
1953 	if (!vsi->info.pvid)
1954 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1955 
1956 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1957 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1958 	} else {
1959 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1960 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1961 		else
1962 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1963 	}
1964 
1965 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1966 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1967 
1968 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1969 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1970 
1971 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1972 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1973 
1974 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1975 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1976 
1977 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1978 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1979 
1980 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1981 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1982 
1983 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1984 		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1985 
1986 	vfres->num_vsis = 1;
1987 	/* Tx and Rx queue are equal for VF */
1988 	vfres->num_queue_pairs = vsi->num_txq;
1989 	vfres->max_vectors = pf->num_msix_per_vf;
1990 	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1991 	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1992 	vfres->max_mtu = ice_vc_get_max_frame_size(vf);
1993 
1994 	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1995 	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1996 	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1997 	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1998 			vf->dflt_lan_addr.addr);
1999 
2000 	/* match guest capabilities */
2001 	vf->driver_caps = vfres->vf_cap_flags;
2002 
2003 	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
2004 
2005 err:
2006 	/* send the response back to the VF */
2007 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
2008 				    (u8 *)vfres, len);
2009 
2010 	kfree(vfres);
2011 	return ret;
2012 }
2013 
2014 /**
2015  * ice_vc_reset_vf_msg
2016  * @vf: pointer to the VF info
2017  *
2018  * called from the VF to reset itself; unlike other
2019  * virtchnl messages, the PF driver doesn't send a
2020  * response back to the VF
2021  */
2022 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2023 {
2024 	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2025 		ice_reset_vf(vf, false);
2026 }
2027 
2028 /**
2029  * ice_find_vsi_from_id
2030  * @pf: the PF structure to search for the VSI
2031  * @id: ID of the VSI it is searching for
2032  *
2033  * searches for the VSI with the given ID
2034  */
2035 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2036 {
2037 	int i;
2038 
2039 	ice_for_each_vsi(pf, i)
2040 		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2041 			return pf->vsi[i];
2042 
2043 	return NULL;
2044 }
2045 
2046 /**
2047  * ice_vc_isvalid_vsi_id
2048  * @vf: pointer to the VF info
2049  * @vsi_id: VF relative VSI ID
2050  *
2051  * check for the valid VSI ID
2052  */
2053 static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2054 {
2055 	struct ice_pf *pf = vf->pf;
2056 	struct ice_vsi *vsi;
2057 
2058 	vsi = ice_find_vsi_from_id(pf, vsi_id);
2059 
2060 	return (vsi && (vsi->vf_id == vf->vf_id));
2061 }
2062 
2063 /**
2064  * ice_vc_isvalid_q_id
2065  * @vf: pointer to the VF info
2066  * @vsi_id: VSI ID
2067  * @qid: VSI relative queue ID
2068  *
2069  * check for the valid queue ID
2070  */
2071 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2072 {
2073 	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2074 	/* allocated Tx and Rx queues should be always equal for VF VSI */
2075 	return (vsi && (qid < vsi->alloc_txq));
2076 }
2077 
2078 /**
2079  * ice_vc_isvalid_ring_len
2080  * @ring_len: length of ring
2081  *
2082  * check for a valid ring count; it should be zero or a multiple of
2083  * ICE_REQ_DESC_MULTIPLE within the supported descriptor range
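 *
 * For example, assuming the current driver values of 32 for the multiple and
 * 64/8160 for the min/max descriptor counts (cited for illustration only), a
 * request of 512 is accepted while 500 is rejected.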
2084  */
2085 static bool ice_vc_isvalid_ring_len(u16 ring_len)
2086 {
2087 	return ring_len == 0 ||
2088 	       (ring_len >= ICE_MIN_NUM_DESC &&
2089 		ring_len <= ICE_MAX_NUM_DESC &&
2090 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
2091 }
2092 
2093 /**
2094  * ice_vc_config_rss_key
2095  * @vf: pointer to the VF info
2096  * @msg: pointer to the msg buffer
2097  *
2098  * Configure the VF's RSS key
2099  */
2100 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2101 {
2102 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2103 	struct virtchnl_rss_key *vrk =
2104 		(struct virtchnl_rss_key *)msg;
2105 	struct ice_pf *pf = vf->pf;
2106 	struct ice_vsi *vsi;
2107 
2108 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2109 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2110 		goto error_param;
2111 	}
2112 
2113 	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2114 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2115 		goto error_param;
2116 	}
2117 
2118 	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2119 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2120 		goto error_param;
2121 	}
2122 
2123 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2124 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2125 		goto error_param;
2126 	}
2127 
2128 	vsi = pf->vsi[vf->lan_vsi_idx];
2129 	if (!vsi) {
2130 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2131 		goto error_param;
2132 	}
2133 
2134 	if (ice_set_rss(vsi, vrk->key, NULL, 0))
2135 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2136 error_param:
2137 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2138 				     NULL, 0);
2139 }
2140 
2141 /**
2142  * ice_vc_config_rss_lut
2143  * @vf: pointer to the VF info
2144  * @msg: pointer to the msg buffer
2145  *
2146  * Configure the VF's RSS LUT
2147  */
2148 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2149 {
2150 	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2151 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2152 	struct ice_pf *pf = vf->pf;
2153 	struct ice_vsi *vsi;
2154 
2155 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2156 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2157 		goto error_param;
2158 	}
2159 
2160 	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2161 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2162 		goto error_param;
2163 	}
2164 
2165 	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2166 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2167 		goto error_param;
2168 	}
2169 
2170 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2171 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2172 		goto error_param;
2173 	}
2174 
2175 	vsi = pf->vsi[vf->lan_vsi_idx];
2176 	if (!vsi) {
2177 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2178 		goto error_param;
2179 	}
2180 
2181 	if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2182 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2183 error_param:
2184 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2185 				     NULL, 0);
2186 }
2187 
2188 /**
2189  * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2190  * @vf: the VF being reset
2191  *
2192  * The max poll time is ~800ms, which is about the maximum time it takes
2193  * for a VF to be reset and/or a VF driver to be removed.
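 * (ICE_MAX_VF_RESET_TRIES iterations of ICE_MAX_VF_RESET_SLEEP_MS sleeps,
 * roughly 40 * 20 ms with the current definitions, cited for illustration).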
2194  */
2195 static void ice_wait_on_vf_reset(struct ice_vf *vf)
2196 {
2197 	int i;
2198 
2199 	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2200 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2201 			break;
2202 		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2203 	}
2204 }
2205 
2206 /**
2207  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2208  * @vf: VF to check if it's ready to be configured/queried
2209  *
2210  * The purpose of this function is to make sure the VF is not in reset, not
2211  * disabled, and initialized so it can be configured and/or queried by a host
2212  * administrator.
2213  */
2214 static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2215 {
2216 	struct ice_pf *pf;
2217 
2218 	ice_wait_on_vf_reset(vf);
2219 
2220 	if (ice_is_vf_disabled(vf))
2221 		return -EINVAL;
2222 
2223 	pf = vf->pf;
2224 	if (ice_check_vf_init(pf, vf))
2225 		return -EBUSY;
2226 
2227 	return 0;
2228 }
2229 
2230 /**
2231  * ice_set_vf_spoofchk
2232  * @netdev: network interface device structure
2233  * @vf_id: VF identifier
2234  * @ena: flag to enable or disable feature
2235  *
2236  * Enable or disable VF spoof checking
2237  */
2238 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2239 {
2240 	struct ice_netdev_priv *np = netdev_priv(netdev);
2241 	struct ice_pf *pf = np->vsi->back;
2242 	struct ice_vsi_ctx *ctx;
2243 	struct ice_vsi *vf_vsi;
2244 	enum ice_status status;
2245 	struct device *dev;
2246 	struct ice_vf *vf;
2247 	int ret;
2248 
2249 	dev = ice_pf_to_dev(pf);
2250 	if (ice_validate_vf_id(pf, vf_id))
2251 		return -EINVAL;
2252 
2253 	vf = &pf->vf[vf_id];
2254 	ret = ice_check_vf_ready_for_cfg(vf);
2255 	if (ret)
2256 		return ret;
2257 
2258 	vf_vsi = pf->vsi[vf->lan_vsi_idx];
2259 	if (!vf_vsi) {
2260 		netdev_err(netdev, "VSI %d for VF %d is null\n",
2261 			   vf->lan_vsi_idx, vf->vf_id);
2262 		return -EINVAL;
2263 	}
2264 
2265 	if (vf_vsi->type != ICE_VSI_VF) {
2266 		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
2267 			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2268 		return -ENODEV;
2269 	}
2270 
2271 	if (ena == vf->spoofchk) {
2272 		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2273 		return 0;
2274 	}
2275 
2276 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2277 	if (!ctx)
2278 		return -ENOMEM;
2279 
2280 	ctx->info.sec_flags = vf_vsi->info.sec_flags;
2281 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2282 	if (ena) {
2283 		ctx->info.sec_flags |=
2284 			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2285 			(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2286 			 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2287 	} else {
2288 		ctx->info.sec_flags &=
2289 			~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2290 			  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2291 			   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2292 	}
2293 
2294 	status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2295 	if (status) {
2296 		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2297 			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2298 			ice_stat_str(status));
2299 		ret = -EIO;
2300 		goto out;
2301 	}
2302 
2303 	/* only update spoofchk state and VSI context on success */
2304 	vf_vsi->info.sec_flags = ctx->info.sec_flags;
2305 	vf->spoofchk = ena;
2306 
2307 out:
2308 	kfree(ctx);
2309 	return ret;
2310 }
2311 
2312 /**
2313  * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2314  * @pf: PF structure for accessing VF(s)
2315  *
2316  * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2317  * else return true
2318  */
2319 bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2320 {
2321 	int vf_idx;
2322 
2323 	ice_for_each_vf(pf, vf_idx) {
2324 		struct ice_vf *vf = &pf->vf[vf_idx];
2325 
2326 		/* found a VF that has promiscuous mode configured */
2327 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2328 		    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2329 			return true;
2330 	}
2331 
2332 	return false;
2333 }
2334 
2335 /**
2336  * ice_vc_cfg_promiscuous_mode_msg
2337  * @vf: pointer to the VF info
2338  * @msg: pointer to the msg buffer
2339  *
2340  * called from the VF to configure VF VSIs promiscuous mode
2341  */
2342 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2343 {
2344 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2345 	struct virtchnl_promisc_info *info =
2346 	    (struct virtchnl_promisc_info *)msg;
2347 	struct ice_pf *pf = vf->pf;
2348 	struct ice_vsi *vsi;
2349 	struct device *dev;
2350 	bool rm_promisc;
2351 	int ret = 0;
2352 
2353 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2354 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2355 		goto error_param;
2356 	}
2357 
2358 	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2359 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2360 		goto error_param;
2361 	}
2362 
2363 	vsi = pf->vsi[vf->lan_vsi_idx];
2364 	if (!vsi) {
2365 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2366 		goto error_param;
2367 	}
2368 
2369 	dev = ice_pf_to_dev(pf);
2370 	if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2371 		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2372 			vf->vf_id);
2373 		/* Leave v_ret alone, lie to the VF on purpose. */
2374 		goto error_param;
2375 	}
2376 
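	/* promiscuous mode is removed only when the VF requested neither
	 * unicast nor multicast promiscuous mode
	 */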
2377 	rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2378 		!(info->flags & FLAG_VF_MULTICAST_PROMISC);
2379 
2380 	if (vsi->num_vlan || vf->port_vlan_info) {
2381 		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2382 		struct net_device *pf_netdev;
2383 
2384 		if (!pf_vsi) {
2385 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2386 			goto error_param;
2387 		}
2388 
2389 		pf_netdev = pf_vsi->netdev;
2390 
2391 		ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2392 		if (ret) {
2393 			dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2394 				rm_promisc ? "ON" : "OFF", vf->vf_id,
2395 				vsi->vsi_num);
2396 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2397 		}
2398 
2399 		ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2400 		if (ret) {
2401 			dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2402 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2403 			goto error_param;
2404 		}
2405 	}
2406 
2407 	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2408 		bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2409 
2410 		if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2411 			/* only attempt to set the default forwarding VSI if
2412 			 * it's not currently set
2413 			 */
2414 			ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2415 		else if (!set_dflt_vsi &&
2416 			 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2417 			/* only attempt to free the default forwarding VSI if we
2418 			 * are the owner
2419 			 */
2420 			ret = ice_clear_dflt_vsi(pf->first_sw);
2421 
2422 		if (ret) {
2423 			dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2424 				set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2425 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2426 			goto error_param;
2427 		}
2428 	} else {
2429 		enum ice_status status;
2430 		u8 promisc_m;
2431 
2432 		if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2433 			if (vf->port_vlan_info || vsi->num_vlan)
2434 				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2435 			else
2436 				promisc_m = ICE_UCAST_PROMISC_BITS;
2437 		} else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2438 			if (vf->port_vlan_info || vsi->num_vlan)
2439 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2440 			else
2441 				promisc_m = ICE_MCAST_PROMISC_BITS;
2442 		} else {
2443 			if (vf->port_vlan_info || vsi->num_vlan)
2444 				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2445 			else
2446 				promisc_m = ICE_UCAST_PROMISC_BITS;
2447 		}
2448 
2449 		/* Configure multicast/unicast with or without VLAN promiscuous
2450 		 * mode
2451 		 */
2452 		status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2453 		if (status) {
2454 			dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2455 				rm_promisc ? "dis" : "en", vf->vf_id,
2456 				ice_stat_str(status));
2457 			v_ret = ice_err_to_virt_err(status);
2458 			goto error_param;
2459 		} else {
2460 			dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2461 				rm_promisc ? "dis" : "en", vf->vf_id);
2462 		}
2463 	}
2464 
2465 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2466 		set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2467 	else
2468 		clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2469 
2470 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2471 		set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2472 	else
2473 		clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2474 
2475 error_param:
2476 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2477 				     v_ret, NULL, 0);
2478 }
2479 
2480 /**
2481  * ice_vc_get_stats_msg
2482  * @vf: pointer to the VF info
2483  * @msg: pointer to the msg buffer
2484  *
2485  * called from the VF to get VSI stats
2486  */
2487 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2488 {
2489 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2490 	struct virtchnl_queue_select *vqs =
2491 		(struct virtchnl_queue_select *)msg;
2492 	struct ice_eth_stats stats = { 0 };
2493 	struct ice_pf *pf = vf->pf;
2494 	struct ice_vsi *vsi;
2495 
2496 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2497 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2498 		goto error_param;
2499 	}
2500 
2501 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2502 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2503 		goto error_param;
2504 	}
2505 
2506 	vsi = pf->vsi[vf->lan_vsi_idx];
2507 	if (!vsi) {
2508 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2509 		goto error_param;
2510 	}
2511 
2512 	ice_update_eth_stats(vsi);
2513 
2514 	stats = vsi->eth_stats;
2515 
2516 error_param:
2517 	/* send the response to the VF */
2518 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
2519 				     (u8 *)&stats, sizeof(stats));
2520 }
2521 
2522 /**
2523  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2524  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2525  *
2526  * Return true on successful validation, else false
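 *
 * A request must name at least one queue and may only set bits below
 * ICE_MAX_RSS_QS_PER_VF, e.g. bits 0-15 if the limit is 16 (a value cited
 * for illustration).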
2527  */
2528 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2529 {
2530 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2531 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2532 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
2533 		return false;
2534 
2535 	return true;
2536 }
2537 
2538 /**
2539  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2540  * @vsi: VSI of the VF to configure
2541  * @q_idx: VF queue index used to determine the queue in the PF's space
2542  */
2543 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2544 {
2545 	struct ice_hw *hw = &vsi->back->hw;
2546 	u32 pfq = vsi->txq_map[q_idx];
2547 	u32 reg;
2548 
2549 	reg = rd32(hw, QINT_TQCTL(pfq));
2550 
2551 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
2552 	 * this is most likely a poll mode VF driver, so don't enable an
2553 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2554 	 */
2555 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2556 		return;
2557 
2558 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2559 }
2560 
2561 /**
2562  * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2563  * @vsi: VSI of the VF to configure
2564  * @q_idx: VF queue index used to determine the queue in the PF's space
2565  */
2566 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2567 {
2568 	struct ice_hw *hw = &vsi->back->hw;
2569 	u32 pfq = vsi->rxq_map[q_idx];
2570 	u32 reg;
2571 
2572 	reg = rd32(hw, QINT_RQCTL(pfq));
2573 
2574 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
2575 	 * this is most likely a poll mode VF driver, so don't enable an
2576 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2577 	 */
2578 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2579 		return;
2580 
2581 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2582 }
2583 
2584 /**
2585  * ice_vc_ena_qs_msg
2586  * @vf: pointer to the VF info
2587  * @msg: pointer to the msg buffer
2588  *
2589  * called from the VF to enable all or specific queue(s)
2590  */
2591 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2592 {
2593 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2594 	struct virtchnl_queue_select *vqs =
2595 	    (struct virtchnl_queue_select *)msg;
2596 	struct ice_pf *pf = vf->pf;
2597 	struct ice_vsi *vsi;
2598 	unsigned long q_map;
2599 	u16 vf_q_id;
2600 
2601 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2602 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2603 		goto error_param;
2604 	}
2605 
2606 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2607 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2608 		goto error_param;
2609 	}
2610 
2611 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2612 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2613 		goto error_param;
2614 	}
2615 
2616 	vsi = pf->vsi[vf->lan_vsi_idx];
2617 	if (!vsi) {
2618 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2619 		goto error_param;
2620 	}
2621 
2622 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
2623 	 * Tx queue group list was configured and the context bits were
2624 	 * programmed using ice_vsi_cfg_txqs
2625 	 */
2626 	q_map = vqs->rx_queues;
2627 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2628 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2629 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2630 			goto error_param;
2631 		}
2632 
2633 		/* Skip queue if enabled */
2634 		if (test_bit(vf_q_id, vf->rxq_ena))
2635 			continue;
2636 
2637 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
2638 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2639 				vf_q_id, vsi->vsi_num);
2640 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2641 			goto error_param;
2642 		}
2643 
2644 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
2645 		set_bit(vf_q_id, vf->rxq_ena);
2646 	}
2647 
2648 	vsi = pf->vsi[vf->lan_vsi_idx];
2649 	q_map = vqs->tx_queues;
2650 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2651 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2652 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2653 			goto error_param;
2654 		}
2655 
2656 		/* Skip queue if enabled */
2657 		if (test_bit(vf_q_id, vf->txq_ena))
2658 			continue;
2659 
2660 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
2661 		set_bit(vf_q_id, vf->txq_ena);
2662 	}
2663 
2664 	/* Set flag to indicate that queues are enabled */
2665 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
2666 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2667 
2668 error_param:
2669 	/* send the response to the VF */
2670 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2671 				     NULL, 0);
2672 }
2673 
2674 /**
2675  * ice_vc_dis_qs_msg
2676  * @vf: pointer to the VF info
2677  * @msg: pointer to the msg buffer
2678  *
2679  * called from the VF to disable all or specific
2680  * queue(s)
2681  */
2682 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2683 {
2684 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2685 	struct virtchnl_queue_select *vqs =
2686 	    (struct virtchnl_queue_select *)msg;
2687 	struct ice_pf *pf = vf->pf;
2688 	struct ice_vsi *vsi;
2689 	unsigned long q_map;
2690 	u16 vf_q_id;
2691 
2692 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2693 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2694 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2695 		goto error_param;
2696 	}
2697 
2698 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2699 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2700 		goto error_param;
2701 	}
2702 
2703 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2704 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2705 		goto error_param;
2706 	}
2707 
2708 	vsi = pf->vsi[vf->lan_vsi_idx];
2709 	if (!vsi) {
2710 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2711 		goto error_param;
2712 	}
2713 
2714 	if (vqs->tx_queues) {
2715 		q_map = vqs->tx_queues;
2716 
2717 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2718 			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2719 			struct ice_txq_meta txq_meta = { 0 };
2720 
2721 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2722 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2723 				goto error_param;
2724 			}
2725 
2726 			if (!test_bit(vf_q_id, vf->txq_ena))
2727 				dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
2728 					vf_q_id, vsi->vsi_num);
2729 
2730 			ice_fill_txq_meta(vsi, ring, &txq_meta);
2731 
2732 			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2733 						 ring, &txq_meta)) {
2734 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2735 					vf_q_id, vsi->vsi_num);
2736 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2737 				goto error_param;
2738 			}
2739 
2740 			/* Clear enabled queues flag */
2741 			clear_bit(vf_q_id, vf->txq_ena);
2742 		}
2743 	}
2744 
2745 	q_map = vqs->rx_queues;
2746 	/* speed up Rx queue disable by batching them if possible */
2747 	if (q_map &&
2748 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
2749 		if (ice_vsi_stop_all_rx_rings(vsi)) {
2750 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2751 				vsi->vsi_num);
2752 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2753 			goto error_param;
2754 		}
2755 
2756 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
2757 	} else if (q_map) {
2758 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2759 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2760 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2761 				goto error_param;
2762 			}
2763 
2764 			/* Skip queue if not enabled */
2765 			if (!test_bit(vf_q_id, vf->rxq_ena))
2766 				continue;
2767 
2768 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2769 						     true)) {
2770 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2771 					vf_q_id, vsi->vsi_num);
2772 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2773 				goto error_param;
2774 			}
2775 
2776 			/* Clear enabled queues flag */
2777 			clear_bit(vf_q_id, vf->rxq_ena);
2778 		}
2779 	}
2780 
2781 	/* Clear enabled queues flag */
2782 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
2783 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2784 
2785 error_param:
2786 	/* send the response to the VF */
2787 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2788 				     NULL, 0);
2789 }
2790 
2791 /**
2792  * ice_cfg_interrupt
2793  * @vf: pointer to the VF info
2794  * @vsi: the VSI being configured
2795  * @vector_id: vector ID
2796  * @map: vector map for mapping vectors to queues
2797  * @q_vector: structure for interrupt vector
 *
2798  * configure the IRQ to queue map
2799  */
2800 static int
2801 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2802 		  struct virtchnl_vector_map *map,
2803 		  struct ice_q_vector *q_vector)
2804 {
2805 	u16 vsi_q_id, vsi_q_id_idx;
2806 	unsigned long qmap;
2807 
2808 	q_vector->num_ring_rx = 0;
2809 	q_vector->num_ring_tx = 0;
2810 
2811 	qmap = map->rxq_map;
2812 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2813 		vsi_q_id = vsi_q_id_idx;
2814 
2815 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2816 			return VIRTCHNL_STATUS_ERR_PARAM;
2817 
2818 		q_vector->num_ring_rx++;
2819 		q_vector->rx.itr_idx = map->rxitr_idx;
2820 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2821 		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2822 				      q_vector->rx.itr_idx);
2823 	}
2824 
2825 	qmap = map->txq_map;
2826 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2827 		vsi_q_id = vsi_q_id_idx;
2828 
2829 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2830 			return VIRTCHNL_STATUS_ERR_PARAM;
2831 
2832 		q_vector->num_ring_tx++;
2833 		q_vector->tx.itr_idx = map->txitr_idx;
2834 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2835 		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2836 				      q_vector->tx.itr_idx);
2837 	}
2838 
2839 	return VIRTCHNL_STATUS_SUCCESS;
2840 }
2841 
2842 /**
2843  * ice_vc_cfg_irq_map_msg
2844  * @vf: pointer to the VF info
2845  * @msg: pointer to the msg buffer
2846  *
2847  * called from the VF to configure the IRQ to queue map
2848  */
2849 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2850 {
2851 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2852 	u16 num_q_vectors_mapped, vsi_id, vector_id;
2853 	struct virtchnl_irq_map_info *irqmap_info;
2854 	struct virtchnl_vector_map *map;
2855 	struct ice_pf *pf = vf->pf;
2856 	struct ice_vsi *vsi;
2857 	int i;
2858 
2859 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
2860 	num_q_vectors_mapped = irqmap_info->num_vectors;
2861 
2862 	/* Check to make sure number of VF vectors mapped is not greater than
2863 	 * number of VF vectors originally allocated, and check that
2864 	 * there is actually at least a single VF queue vector mapped
2865 	 */
2866 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2867 	    pf->num_msix_per_vf < num_q_vectors_mapped ||
2868 	    !num_q_vectors_mapped) {
2869 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2870 		goto error_param;
2871 	}
2872 
2873 	vsi = pf->vsi[vf->lan_vsi_idx];
2874 	if (!vsi) {
2875 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2876 		goto error_param;
2877 	}
2878 
2879 	for (i = 0; i < num_q_vectors_mapped; i++) {
2880 		struct ice_q_vector *q_vector;
2881 
2882 		map = &irqmap_info->vecmap[i];
2883 
2884 		vector_id = map->vector_id;
2885 		vsi_id = map->vsi_id;
2886 		/* vector_id is always 0-based for each VF, and can never be
2887 		 * larger than or equal to the max allowed interrupts per VF
2888 		 */
2889 		if (!(vector_id < pf->num_msix_per_vf) ||
2890 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2891 		    (!vector_id && (map->rxq_map || map->txq_map))) {
2892 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2893 			goto error_param;
2894 		}
2895 
2896 		/* No need to map VF miscellaneous or rogue vector */
2897 		if (!vector_id)
2898 			continue;
2899 
2900 		/* Subtract the non-queue vector count from the vector_id
2901 		 * passed by the VF to get the VSI queue vector array index
2902 		 */
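		/* e.g. assuming ICE_NONQ_VECS_VF is 1 (the OICR vector),
		 * VF vector_id 3 selects VSI q_vector index 2
		 */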
2903 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2904 		if (!q_vector) {
2905 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2906 			goto error_param;
2907 		}
2908 
2909 		/* look out for an invalid queue index */
2910 		v_ret = (enum virtchnl_status_code)
2911 			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2912 		if (v_ret)
2913 			goto error_param;
2914 	}
2915 
2916 error_param:
2917 	/* send the response to the VF */
2918 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2919 				     NULL, 0);
2920 }
2921 
2922 /**
2923  * ice_vc_cfg_qs_msg
2924  * @vf: pointer to the VF info
2925  * @msg: pointer to the msg buffer
2926  *
2927  * called from the VF to configure the Rx/Tx queues
2928  */
2929 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2930 {
2931 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2932 	struct virtchnl_vsi_queue_config_info *qci =
2933 	    (struct virtchnl_vsi_queue_config_info *)msg;
2934 	struct virtchnl_queue_pair_info *qpi;
2935 	u16 num_rxq = 0, num_txq = 0;
2936 	struct ice_pf *pf = vf->pf;
2937 	struct ice_vsi *vsi;
2938 	int i;
2939 
2940 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2941 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2942 		goto error_param;
2943 	}
2944 
2945 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2946 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2947 		goto error_param;
2948 	}
2949 
2950 	vsi = pf->vsi[vf->lan_vsi_idx];
2951 	if (!vsi) {
2952 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2953 		goto error_param;
2954 	}
2955 
2956 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
2957 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2958 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
2959 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2960 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2961 		goto error_param;
2962 	}
2963 
2964 	for (i = 0; i < qci->num_queue_pairs; i++) {
2965 		qpi = &qci->qpair[i];
2966 		if (qpi->txq.vsi_id != qci->vsi_id ||
2967 		    qpi->rxq.vsi_id != qci->vsi_id ||
2968 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
2969 		    qpi->txq.headwb_enabled ||
2970 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2971 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2972 		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2973 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2974 			goto error_param;
2975 		}
2976 		/* copy Tx queue info from VF into VSI */
2977 		if (qpi->txq.ring_len > 0) {
2978 			num_txq++;
2979 			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2980 			vsi->tx_rings[i]->count = qpi->txq.ring_len;
2981 		}
2982 
2983 		/* copy Rx queue info from VF into VSI */
2984 		if (qpi->rxq.ring_len > 0) {
2985 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
2986 
2987 			num_rxq++;
2988 			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2989 			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2990 
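			/* a non-zero Rx data buffer size must be between
			 * 1024 bytes and 16K - 128 (16256) bytes
			 */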
2991 			if (qpi->rxq.databuffer_size != 0 &&
2992 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2993 			     qpi->rxq.databuffer_size < 1024)) {
2994 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2995 				goto error_param;
2996 			}
2997 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
2998 			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2999 			if (qpi->rxq.max_pkt_size > max_frame_size ||
3000 			    qpi->rxq.max_pkt_size < 64) {
3001 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3002 				goto error_param;
3003 			}
3004 		}
3005 
3006 		vsi->max_frame = qpi->rxq.max_pkt_size;
3007 		/* add space for the port VLAN since the VF driver is not
3008 		 * expected to account for it in the MTU calculation
3009 		 */
3010 		if (vf->port_vlan_info)
3011 			vsi->max_frame += VLAN_HLEN;
3012 	}
3013 
3014 	/* The VF can request fewer queues than were allocated by default,
3015 	 * so update the VSI with the new number
3016 	 */
3017 	vsi->num_txq = num_txq;
3018 	vsi->num_rxq = num_rxq;
3019 	/* All queues of VF VSI are in TC 0 */
3020 	vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
3021 	vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
3022 
3023 	if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
3024 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3025 
3026 error_param:
3027 	/* send the response to the VF */
3028 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3029 				     NULL, 0);
3030 }
3031 
3032 /**
3033  * ice_is_vf_trusted
3034  * @vf: pointer to the VF info
3035  */
3036 static bool ice_is_vf_trusted(struct ice_vf *vf)
3037 {
3038 	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3039 }
3040 
3041 /**
3042  * ice_can_vf_change_mac
3043  * @vf: pointer to the VF info
3044  *
3045  * Return true if the VF is allowed to change its MAC filters, false otherwise
3046  */
3047 static bool ice_can_vf_change_mac(struct ice_vf *vf)
3048 {
3049 	/* If the VF MAC address has been set administratively (via the
3050 	 * ndo_set_vf_mac command), then deny permission to the VF to
3051 	 * add/delete unicast MAC addresses, unless the VF is trusted
3052 	 */
3053 	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3054 		return false;
3055 
3056 	return true;
3057 }
3058 
3059 /**
3060  * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3061  * @vf: pointer to the VF info
3062  * @vsi: pointer to the VF's VSI
3063  * @mac_addr: MAC address to add
3064  */
3065 static int
3066 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3067 {
3068 	struct device *dev = ice_pf_to_dev(vf->pf);
3069 	enum ice_status status;
3070 	int ret = 0;
3071 
3072 	/* default unicast MAC already added */
3073 	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3074 		return 0;
3075 
3076 	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3077 		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3078 		return -EPERM;
3079 	}
3080 
3081 	status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3082 	if (status == ICE_ERR_ALREADY_EXISTS) {
3083 		dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3084 			vf->vf_id);
3085 		/* don't return since we might need to update
3086 		 * the primary MAC in ice_vfhw_mac_add() below
3087 		 */
3088 		ret = -EEXIST;
3089 	} else if (status) {
3090 		dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3091 			mac_addr, vf->vf_id, ice_stat_str(status));
3092 		return -EIO;
3093 	} else {
3094 		vf->num_mac++;
3095 	}
3096 
3097 	/* Set the default LAN address to the latest unicast MAC address added
3098 	 * by the VF. The default LAN address is reported by the PF via
3099 	 * ndo_get_vf_config.
3100 	 */
3101 	if (is_unicast_ether_addr(mac_addr))
3102 		ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3103 
3104 	return ret;
3105 }
3106 
3107 /**
3108  * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3109  * @vf: pointer to the VF info
3110  * @vsi: pointer to the VF's VSI
3111  * @mac_addr: MAC address to delete
3112  */
3113 static int
3114 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3115 {
3116 	struct device *dev = ice_pf_to_dev(vf->pf);
3117 	enum ice_status status;
3118 
3119 	if (!ice_can_vf_change_mac(vf) &&
3120 	    ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3121 		return 0;
3122 
3123 	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3124 	if (status == ICE_ERR_DOES_NOT_EXIST) {
3125 		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3126 			vf->vf_id);
3127 		return -ENOENT;
3128 	} else if (status) {
3129 		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3130 			mac_addr, vf->vf_id, ice_stat_str(status));
3131 		return -EIO;
3132 	}
3133 
3134 	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3135 		eth_zero_addr(vf->dflt_lan_addr.addr);
3136 
3137 	vf->num_mac--;
3138 
3139 	return 0;
3140 }
3141 
3142 /**
3143  * ice_vc_handle_mac_addr_msg
3144  * @vf: pointer to the VF info
3145  * @msg: pointer to the msg buffer
3146  * @set: true if MAC filters are being set, false otherwise
3147  *
3148  * add or remove guest MAC address filters
3149  */
3150 static int
3151 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3152 {
3153 	int (*ice_vc_cfg_mac)
3154 		(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3155 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3156 	struct virtchnl_ether_addr_list *al =
3157 	    (struct virtchnl_ether_addr_list *)msg;
3158 	struct ice_pf *pf = vf->pf;
3159 	enum virtchnl_ops vc_op;
3160 	struct ice_vsi *vsi;
3161 	int i;
3162 
3163 	if (set) {
3164 		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3165 		ice_vc_cfg_mac = ice_vc_add_mac_addr;
3166 	} else {
3167 		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3168 		ice_vc_cfg_mac = ice_vc_del_mac_addr;
3169 	}
3170 
3171 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3172 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3173 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3174 		goto handle_mac_exit;
3175 	}
3176 
3177 	/* If this VF is not privileged, then we can't add more than a
3178 	 * limited number of addresses. Check to make sure that the
3179 	 * additions do not push us over the limit.
3180 	 */
3181 	if (set && !ice_is_vf_trusted(vf) &&
3182 	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3183 		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
3184 			vf->vf_id);
3185 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3186 		goto handle_mac_exit;
3187 	}
3188 
3189 	vsi = pf->vsi[vf->lan_vsi_idx];
3190 	if (!vsi) {
3191 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3192 		goto handle_mac_exit;
3193 	}
3194 
3195 	for (i = 0; i < al->num_elements; i++) {
3196 		u8 *mac_addr = al->list[i].addr;
3197 		int result;
3198 
3199 		if (is_broadcast_ether_addr(mac_addr) ||
3200 		    is_zero_ether_addr(mac_addr))
3201 			continue;
3202 
3203 		result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3204 		if (result == -EEXIST || result == -ENOENT) {
3205 			continue;
3206 		} else if (result) {
3207 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3208 			goto handle_mac_exit;
3209 		}
3210 	}
3211 
3212 handle_mac_exit:
3213 	/* send the response to the VF */
3214 	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3215 }
3216 
3217 /**
3218  * ice_vc_add_mac_addr_msg
3219  * @vf: pointer to the VF info
3220  * @msg: pointer to the msg buffer
3221  *
3222  * add guest MAC address filter
3223  */
3224 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3225 {
3226 	return ice_vc_handle_mac_addr_msg(vf, msg, true);
3227 }
3228 
3229 /**
3230  * ice_vc_del_mac_addr_msg
3231  * @vf: pointer to the VF info
3232  * @msg: pointer to the msg buffer
3233  *
3234  * remove guest MAC address filter
3235  */
3236 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3237 {
3238 	return ice_vc_handle_mac_addr_msg(vf, msg, false);
3239 }
3240 
3241 /**
3242  * ice_vc_request_qs_msg
3243  * @vf: pointer to the VF info
3244  * @msg: pointer to the msg buffer
3245  *
3246  * VFs get a default number of queues but can use this message to request a
3247  * different number. If the request is successful, PF will reset the VF and
3248  * return 0. If unsuccessful, PF will send message informing VF of number of
3249  * available queue pairs via virtchnl message response to VF.
3250  */
3251 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3252 {
3253 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3254 	struct virtchnl_vf_res_request *vfres =
3255 		(struct virtchnl_vf_res_request *)msg;
3256 	u16 req_queues = vfres->num_queue_pairs;
3257 	struct ice_pf *pf = vf->pf;
3258 	u16 max_allowed_vf_queues;
3259 	u16 tx_rx_queue_left;
3260 	struct device *dev;
3261 	u16 cur_queues;
3262 
3263 	dev = ice_pf_to_dev(pf);
3264 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3265 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3266 		goto error_param;
3267 	}
3268 
3269 	cur_queues = vf->num_vf_qs;
3270 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3271 				 ice_get_avail_rxq_count(pf));
3272 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
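	/* e.g. a VF currently using 4 queue pairs with 12 PF queue pairs
	 * still free could be granted at most 16 (illustrative numbers)
	 */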
3273 	if (!req_queues) {
3274 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3275 			vf->vf_id);
3276 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3277 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
3278 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3279 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3280 	} else if (req_queues > cur_queues &&
3281 		   req_queues - cur_queues > tx_rx_queue_left) {
3282 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3283 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3284 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3285 					       ICE_MAX_RSS_QS_PER_VF);
3286 	} else {
3287 		/* request is successful, then reset VF */
3288 		vf->num_req_qs = req_queues;
3289 		ice_vc_reset_vf(vf);
3290 		dev_info(dev, "VF %d granted request of %u queues.\n",
3291 			 vf->vf_id, req_queues);
3292 		return 0;
3293 	}
3294 
3295 error_param:
3296 	/* send the response to the VF */
3297 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3298 				     v_ret, (u8 *)vfres, sizeof(*vfres));
3299 }
3300 
3301 /**
3302  * ice_set_vf_port_vlan
3303  * @netdev: network interface device structure
3304  * @vf_id: VF identifier
3305  * @vlan_id: VLAN ID being set
3306  * @qos: priority setting
3307  * @vlan_proto: VLAN protocol
3308  *
3309  * program VF Port VLAN ID and/or QoS
3310  */
3311 int
3312 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3313 		     __be16 vlan_proto)
3314 {
3315 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3316 	struct device *dev;
3317 	struct ice_vf *vf;
3318 	u16 vlanprio;
3319 	int ret;
3320 
3321 	dev = ice_pf_to_dev(pf);
3322 	if (ice_validate_vf_id(pf, vf_id))
3323 		return -EINVAL;
3324 
3325 	if (vlan_id >= VLAN_N_VID || qos > 7) {
3326 		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3327 			vf_id, vlan_id, qos);
3328 		return -EINVAL;
3329 	}
3330 
3331 	if (vlan_proto != htons(ETH_P_8021Q)) {
3332 		dev_err(dev, "VF VLAN protocol is not supported\n");
3333 		return -EPROTONOSUPPORT;
3334 	}
3335 
3336 	vf = &pf->vf[vf_id];
3337 	ret = ice_check_vf_ready_for_cfg(vf);
3338 	if (ret)
3339 		return ret;
3340 
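	/* e.g. VLAN ID 100 with QoS 3 encodes as 0x6064, assuming the
	 * standard VLAN_PRIO_SHIFT of 13 (3 << 13 | 100)
	 */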
3341 	vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3342 
3343 	if (vf->port_vlan_info == vlanprio) {
3344 		/* duplicate request, so just return success */
3345 		dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3346 		return 0;
3347 	}
3348 
3349 	mutex_lock(&vf->cfg_lock);
3350 
3351 	vf->port_vlan_info = vlanprio;
3352 
3353 	if (vf->port_vlan_info)
3354 		dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3355 			 vlan_id, qos, vf_id);
3356 	else
3357 		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
3358 
3359 	ice_vc_reset_vf(vf);
3360 	mutex_unlock(&vf->cfg_lock);
3361 
3362 	return 0;
3363 }
3364 
3365 /**
3366  * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3367  * @caps: VF driver negotiated capabilities
3368  *
3369  * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3370  */
3371 static bool ice_vf_vlan_offload_ena(u32 caps)
3372 {
3373 	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3374 }
3375 
3376 /**
3377  * ice_vc_process_vlan_msg
3378  * @vf: pointer to the VF info
3379  * @msg: pointer to the msg buffer
3380  * @add_v: Add VLAN if true, otherwise delete VLAN
3381  *
3382  * Process virtchnl op to add or remove programmed guest VLAN ID
3383  */
3384 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3385 {
3386 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3387 	struct virtchnl_vlan_filter_list *vfl =
3388 	    (struct virtchnl_vlan_filter_list *)msg;
3389 	struct ice_pf *pf = vf->pf;
3390 	bool vlan_promisc = false;
3391 	struct ice_vsi *vsi;
3392 	struct device *dev;
3393 	struct ice_hw *hw;
3394 	int status = 0;
3395 	u8 promisc_m;
3396 	int i;
3397 
3398 	dev = ice_pf_to_dev(pf);
3399 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3400 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3401 		goto error_param;
3402 	}
3403 
3404 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3405 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3406 		goto error_param;
3407 	}
3408 
3409 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3410 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3411 		goto error_param;
3412 	}
3413 
3414 	for (i = 0; i < vfl->num_elements; i++) {
3415 		if (vfl->vlan_id[i] >= VLAN_N_VID) {
3416 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3417 			dev_err(dev, "invalid VF VLAN id %d\n",
3418 				vfl->vlan_id[i]);
3419 			goto error_param;
3420 		}
3421 	}
3422 
3423 	hw = &pf->hw;
3424 	vsi = pf->vsi[vf->lan_vsi_idx];
3425 	if (!vsi) {
3426 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3427 		goto error_param;
3428 	}
3429 
3430 	if (add_v && !ice_is_vf_trusted(vf) &&
3431 	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3432 		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
3433 			 vf->vf_id);
3434 		/* There is no need to let the VF know about not being
3435 		 * trusted, so we can just return a success message here
3436 		 */
3437 		goto error_param;
3438 	}
3439 
3440 	if (vsi->info.pvid) {
3441 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3442 		goto error_param;
3443 	}
3444 
3445 	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3446 	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3447 	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
3448 		vlan_promisc = true;
3449 
3450 	if (add_v) {
3451 		for (i = 0; i < vfl->num_elements; i++) {
3452 			u16 vid = vfl->vlan_id[i];
3453 
3454 			if (!ice_is_vf_trusted(vf) &&
3455 			    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3456 				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode in order to add more VLANs\n",
3457 					 vf->vf_id);
3458 				/* There is no need to let the VF know about
3459 				 * not being trusted, so we can just return a
3460 				 * success message here as well.
3461 				 */
3462 				goto error_param;
3463 			}
3464 
3465 			/* We add VLAN 0 by default for each VF to enable Tx
3466 			 * VLAN anti-spoof without triggering MDD events, so
3467 			 * there is no need to add it again here.
3468 			 */
3469 			if (!vid)
3470 				continue;
3471 
3472 			status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3473 			if (status) {
3474 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3475 				goto error_param;
3476 			}
3477 
3478 			/* Enable VLAN pruning when non-zero VLAN is added */
3479 			if (!vlan_promisc && vid &&
3480 			    !ice_vsi_is_vlan_pruning_ena(vsi)) {
3481 				status = ice_cfg_vlan_pruning(vsi, true, false);
3482 				if (status) {
3483 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3484 					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
3485 						vid, status);
3486 					goto error_param;
3487 				}
3488 			} else if (vlan_promisc) {
3489 				/* Enable Ucast/Mcast VLAN promiscuous mode */
3490 				promisc_m = ICE_PROMISC_VLAN_TX |
3491 					    ICE_PROMISC_VLAN_RX;
3492 
3493 				status = ice_set_vsi_promisc(hw, vsi->idx,
3494 							     promisc_m, vid);
3495 				if (status) {
3496 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3497 					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
3498 						vid, status);
3499 				}
3500 			}
3501 		}
3502 	} else {
3503 		/* In case of a non-trusted VF, the number of VLAN elements
3504 		 * passed to the PF for removal might be greater than the
3505 		 * number of VLAN filters programmed for that VF, so use the
3506 		 * actual number of VLANs added earlier with the add VLAN
3507 		 * opcode. This avoids removing a VLAN that doesn't exist,
3508 		 * which would result in an erroneous failure message to the VF.
3509 		 */
3510 		int num_vf_vlan;
3511 
3512 		num_vf_vlan = vsi->num_vlan;
3513 		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
3514 			u16 vid = vfl->vlan_id[i];
3515 
3516 			/* We add VLAN 0 by default for each VF to enable Tx
3517 			 * VLAN anti-spoof without triggering MDD events, so we
3518 			 * don't want a VIRTCHNL request to remove it.
3519 			 */
3520 			if (!vid)
3521 				continue;
3522 
3523 			/* Make sure ice_vsi_kill_vlan is successful before
3524 			 * updating VLAN information
3525 			 */
3526 			status = ice_vsi_kill_vlan(vsi, vid);
3527 			if (status) {
3528 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3529 				goto error_param;
3530 			}
3531 
3532 			/* Disable VLAN pruning when only VLAN 0 is left */
3533 			if (vsi->num_vlan == 1 &&
3534 			    ice_vsi_is_vlan_pruning_ena(vsi))
3535 				ice_cfg_vlan_pruning(vsi, false, false);
3536 
3537 			/* Disable Unicast/Multicast VLAN promiscuous mode */
3538 			if (vlan_promisc) {
3539 				promisc_m = ICE_PROMISC_VLAN_TX |
3540 					    ICE_PROMISC_VLAN_RX;
3541 
3542 				ice_clear_vsi_promisc(hw, vsi->idx,
3543 						      promisc_m, vid);
3544 			}
3545 		}
3546 	}
3547 
3548 error_param:
3549 	/* send the response to the VF */
3550 	if (add_v)
3551 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
3552 					     NULL, 0);
3553 	else
3554 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
3555 					     NULL, 0);
3556 }
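/* Editorial sketch (assumption, not driver code): the msg buffer handled by
 * ice_vc_process_vlan_msg() is laid out as a struct virtchnl_vlan_filter_list
 * from virtchnl.h, i.e. a VSI ID, an element count, then an array of VLAN
 * IDs. A VF asking for VLANs 100 and 200 would build roughly the following;
 * the helper name is hypothetical and buf must be sized for the trailing
 * VLAN array.
 */
static void example_build_add_vlan_msg(u8 *buf, u16 vsi_id)
{
	struct virtchnl_vlan_filter_list *vfl =
		(struct virtchnl_vlan_filter_list *)buf;

	vfl->vsi_id = vsi_id;
	vfl->num_elements = 2;
	vfl->vlan_id[0] = 100;
	vfl->vlan_id[1] = 200;
	/* the VF sends this with opcode VIRTCHNL_OP_ADD_VLAN over the mailbox */
}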
3557 
3558 /**
3559  * ice_vc_add_vlan_msg
3560  * @vf: pointer to the VF info
3561  * @msg: pointer to the msg buffer
3562  *
3563  * Add and program guest VLAN ID
3564  */
3565 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3566 {
3567 	return ice_vc_process_vlan_msg(vf, msg, true);
3568 }
3569 
3570 /**
3571  * ice_vc_remove_vlan_msg
3572  * @vf: pointer to the VF info
3573  * @msg: pointer to the msg buffer
3574  *
3575  * remove programmed guest VLAN ID
3576  */
3577 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3578 {
3579 	return ice_vc_process_vlan_msg(vf, msg, false);
3580 }
3581 
3582 /**
3583  * ice_vc_ena_vlan_stripping
3584  * @vf: pointer to the VF info
3585  *
3586  * Enable VLAN header stripping for a given VF
3587  */
3588 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3589 {
3590 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3591 	struct ice_pf *pf = vf->pf;
3592 	struct ice_vsi *vsi;
3593 
3594 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3595 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3596 		goto error_param;
3597 	}
3598 
3599 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3600 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3601 		goto error_param;
3602 	}
3603 
3604 	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

3605 	if (ice_vsi_manage_vlan_stripping(vsi, true))
3606 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3607 
3608 error_param:
3609 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3610 				     v_ret, NULL, 0);
3611 }
3612 
3613 /**
3614  * ice_vc_dis_vlan_stripping
3615  * @vf: pointer to the VF info
3616  *
3617  * Disable VLAN header stripping for a given VF
3618  */
3619 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3620 {
3621 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3622 	struct ice_pf *pf = vf->pf;
3623 	struct ice_vsi *vsi;
3624 
3625 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3626 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3627 		goto error_param;
3628 	}
3629 
3630 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3631 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3632 		goto error_param;
3633 	}
3634 
3635 	vsi = pf->vsi[vf->lan_vsi_idx];
3636 	if (!vsi) {
3637 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3638 		goto error_param;
3639 	}
3640 
3641 	if (ice_vsi_manage_vlan_stripping(vsi, false))
3642 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3643 
3644 error_param:
3645 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3646 				     v_ret, NULL, 0);
3647 }
3648 
3649 /**
3650  * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3651  * @vf: VF to enable/disable VLAN stripping for on initialization
3652  *
3653  * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
3654  * the flag is cleared then we want to disable stripping. For example, the flag
3655  * will be cleared when port VLANs are configured by the administrator before
3656  * passing the VF to the guest or if the AVF driver doesn't support VLAN
3657  * offloads.
3658  */
3659 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3660 {
3661 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3662 
3663 	if (!vsi)
3664 		return -EINVAL;
3665 
3666 	/* don't modify stripping if port VLAN is configured */
3667 	if (vsi->info.pvid)
3668 		return 0;
3669 
3670 	if (ice_vf_vlan_offload_ena(vf->driver_caps))
3671 		return ice_vsi_manage_vlan_stripping(vsi, true);
3672 	else
3673 		return ice_vsi_manage_vlan_stripping(vsi, false);
3674 }
3675 
3676 /**
3677  * ice_vc_process_vf_msg - Process request from VF
3678  * @pf: pointer to the PF structure
3679  * @event: pointer to the AQ event
3680  *
3681  * Called from the common asq/arq handler to
3682  * process a request from a VF
3683  */
3684 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3685 {
3686 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3687 	s16 vf_id = le16_to_cpu(event->desc.retval);
3688 	u16 msglen = event->msg_len;
3689 	u8 *msg = event->msg_buf;
3690 	struct ice_vf *vf = NULL;
3691 	struct device *dev;
3692 	int err = 0;
3693 
3694 	dev = ice_pf_to_dev(pf);
3695 	if (ice_validate_vf_id(pf, vf_id)) {
3696 		err = -EINVAL;
3697 		goto error_handler;
3698 	}
3699 
3700 	vf = &pf->vf[vf_id];
3701 
3702 	/* Check if VF is disabled. */
3703 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3704 		err = -EPERM;
3705 		goto error_handler;
3706 	}
3707 
3708 	/* Perform basic checks on the msg */
3709 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3710 	if (err) {
3711 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
3712 			err = -EPERM;
3713 		else
3714 			err = -EINVAL;
3715 	}
3716 
3717 error_handler:
3718 	if (err) {
3719 		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3720 				      NULL, 0);
3721 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3722 			vf_id, v_opcode, msglen, err);
3723 		return;
3724 	}
3725 
3726 	/* VF is being configured in another context that triggers a VFR, so no
3727 	 * need to process this message
3728 	 */
3729 	if (!mutex_trylock(&vf->cfg_lock)) {
3730 		dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
3731 			 vf->vf_id);
3732 		return;
3733 	}
3734 
3735 	switch (v_opcode) {
3736 	case VIRTCHNL_OP_VERSION:
3737 		err = ice_vc_get_ver_msg(vf, msg);
3738 		break;
3739 	case VIRTCHNL_OP_GET_VF_RESOURCES:
3740 		err = ice_vc_get_vf_res_msg(vf, msg);
3741 		if (ice_vf_init_vlan_stripping(vf))
3742 			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
3743 				vf->vf_id);
3744 		ice_vc_notify_vf_link_state(vf);
3745 		break;
3746 	case VIRTCHNL_OP_RESET_VF:
3747 		ice_vc_reset_vf_msg(vf);
3748 		break;
3749 	case VIRTCHNL_OP_ADD_ETH_ADDR:
3750 		err = ice_vc_add_mac_addr_msg(vf, msg);
3751 		break;
3752 	case VIRTCHNL_OP_DEL_ETH_ADDR:
3753 		err = ice_vc_del_mac_addr_msg(vf, msg);
3754 		break;
3755 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3756 		err = ice_vc_cfg_qs_msg(vf, msg);
3757 		break;
3758 	case VIRTCHNL_OP_ENABLE_QUEUES:
3759 		err = ice_vc_ena_qs_msg(vf, msg);
3760 		ice_vc_notify_vf_link_state(vf);
3761 		break;
3762 	case VIRTCHNL_OP_DISABLE_QUEUES:
3763 		err = ice_vc_dis_qs_msg(vf, msg);
3764 		break;
3765 	case VIRTCHNL_OP_REQUEST_QUEUES:
3766 		err = ice_vc_request_qs_msg(vf, msg);
3767 		break;
3768 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3769 		err = ice_vc_cfg_irq_map_msg(vf, msg);
3770 		break;
3771 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3772 		err = ice_vc_config_rss_key(vf, msg);
3773 		break;
3774 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3775 		err = ice_vc_config_rss_lut(vf, msg);
3776 		break;
3777 	case VIRTCHNL_OP_GET_STATS:
3778 		err = ice_vc_get_stats_msg(vf, msg);
3779 		break;
3780 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3781 		err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3782 		break;
3783 	case VIRTCHNL_OP_ADD_VLAN:
3784 		err = ice_vc_add_vlan_msg(vf, msg);
3785 		break;
3786 	case VIRTCHNL_OP_DEL_VLAN:
3787 		err = ice_vc_remove_vlan_msg(vf, msg);
3788 		break;
3789 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3790 		err = ice_vc_ena_vlan_stripping(vf);
3791 		break;
3792 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3793 		err = ice_vc_dis_vlan_stripping(vf);
3794 		break;
3795 	case VIRTCHNL_OP_UNKNOWN:
3796 	default:
3797 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3798 			vf_id);
3799 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
3800 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3801 					    NULL, 0);
3802 		break;
3803 	}
3804 	if (err) {
3805 		/* Nothing more is done with the error value here; just log
3806 		 * that the VF request could not be honored.
3807 		 */
3808 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3809 			 vf_id, v_opcode, err);
3810 	}
3811 
3812 	mutex_unlock(&vf->cfg_lock);
3813 }
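/* Editorial sketch (hypothetical, for exposition only): on the PF-VF mailbox
 * the descriptor's cookie_high carries the virtchnl opcode and retval carries
 * the sending VF's ID, which is why ice_vc_process_vf_msg() decodes them that
 * way. A VIRTCHNL_OP_VERSION request from VF 0 would therefore arrive looking
 * roughly like this; the function name below is made up for illustration.
 */
static void example_fake_version_event(struct ice_pf *pf)
{
	struct virtchnl_version_info ver = {
		.major = VIRTCHNL_VERSION_MAJOR,
		.minor = VIRTCHNL_VERSION_MINOR,
	};
	struct ice_rq_event_info event = {};

	event.desc.cookie_high = cpu_to_le32(VIRTCHNL_OP_VERSION);
	event.desc.retval = cpu_to_le16(0);	/* VF ID 0 */
	event.msg_buf = (u8 *)&ver;
	event.msg_len = sizeof(ver);

	ice_vc_process_vf_msg(pf, &event);
}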
3814 
3815 /**
3816  * ice_get_vf_cfg
3817  * @netdev: network interface device structure
3818  * @vf_id: VF identifier
3819  * @ivi: VF configuration structure
3820  *
3821  * return VF configuration
3822  */
3823 int
3824 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3825 {
3826 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3827 	struct ice_vf *vf;
3828 
3829 	if (ice_validate_vf_id(pf, vf_id))
3830 		return -EINVAL;
3831 
3832 	vf = &pf->vf[vf_id];
3833 
3834 	if (ice_check_vf_init(pf, vf))
3835 		return -EBUSY;
3836 
3837 	ivi->vf = vf_id;
3838 	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3839 
3840 	/* VF configuration for VLAN and applicable QoS */
3841 	ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3842 	ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3843 
3844 	ivi->trusted = vf->trusted;
3845 	ivi->spoofchk = vf->spoofchk;
3846 	if (!vf->link_forced)
3847 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3848 	else if (vf->link_up)
3849 		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3850 	else
3851 		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3852 	ivi->max_tx_rate = vf->tx_rate;
3853 	ivi->min_tx_rate = 0;
3854 	return 0;
3855 }
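/* Usage note (illustrative): this handler backs the .ndo_get_vf_config
 * callback, so the fields filled in above (MAC, VLAN, QoS, spoofchk,
 * link-state, trust, Tx rate) are what "ip link show dev <pf-netdev>"
 * reports for each VF. The interface name is a placeholder.
 */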
3856 
3857 /**
3858  * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
3859  * @pf: PF used to reference the switch's rules
3860  * @umac: unicast MAC to compare against existing switch rules
3861  *
3862  * Return true on the first/any match, else return false
3863  */
3864 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3865 {
3866 	struct ice_sw_recipe *mac_recipe_list =
3867 		&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3868 	struct ice_fltr_mgmt_list_entry *list_itr;
3869 	struct list_head *rule_head;
3870 	struct mutex *rule_lock; /* protect MAC filter list access */
3871 
3872 	rule_head = &mac_recipe_list->filt_rules;
3873 	rule_lock = &mac_recipe_list->filt_rule_lock;
3874 
3875 	mutex_lock(rule_lock);
3876 	list_for_each_entry(list_itr, rule_head, list_entry) {
3877 		u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3878 
3879 		if (ether_addr_equal(existing_mac, umac)) {
3880 			mutex_unlock(rule_lock);
3881 			return true;
3882 		}
3883 	}
3884 
3885 	mutex_unlock(rule_lock);
3886 
3887 	return false;
3888 }
3889 
3890 /**
3891  * ice_set_vf_mac
3892  * @netdev: network interface device structure
3893  * @vf_id: VF identifier
3894  * @mac: MAC address
3895  *
3896  * program VF MAC address
3897  */
3898 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3899 {
3900 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3901 	struct ice_vf *vf;
3902 	int ret;
3903 
3904 	if (ice_validate_vf_id(pf, vf_id))
3905 		return -EINVAL;
3906 
3907 	if (is_multicast_ether_addr(mac)) {
3908 		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3909 		return -EINVAL;
3910 	}
3911 
3912 	vf = &pf->vf[vf_id];
3913 	/* nothing left to do, unicast MAC already set */
3914 	if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3915 		return 0;
3916 
3917 	ret = ice_check_vf_ready_for_cfg(vf);
3918 	if (ret)
3919 		return ret;
3920 
3921 	if (ice_unicast_mac_exists(pf, mac)) {
3922 		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
3923 			   mac, vf_id, mac);
3924 		return -EINVAL;
3925 	}
3926 
3927 	mutex_lock(&vf->cfg_lock);
3928 
3929 	/* VF is notified of its new MAC via the PF's response to the
3930 	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
3931 	 */
3932 	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3933 	if (is_zero_ether_addr(mac)) {
3934 		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
3935 		vf->pf_set_mac = false;
3936 		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
3937 			    vf->vf_id);
3938 	} else {
3939 		/* PF will add MAC rule for the VF */
3940 		vf->pf_set_mac = true;
3941 		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
3942 			    mac, vf_id);
3943 	}
3944 
3945 	ice_vc_reset_vf(vf);
3946 	mutex_unlock(&vf->cfg_lock);
3947 	return 0;
3948 }
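/* Usage note (illustrative): this backs .ndo_set_vf_mac and is typically
 * reached via iproute2, e.g.
 *   ip link set dev <pf-netdev> vf 0 mac 00:11:22:33:44:55
 * An all-zero MAC clears the PF-administered address so the VF may again
 * choose its own, as handled above. The name and MAC are placeholders.
 */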
3949 
3950 /**
3951  * ice_set_vf_trust
3952  * @netdev: network interface device structure
3953  * @vf_id: VF identifier
3954  * @trusted: Boolean value to enable/disable trusted VF
3955  *
3956  * Enable or disable a given VF as trusted
3957  */
3958 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3959 {
3960 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3961 	struct ice_vf *vf;
3962 	int ret;
3963 
3964 	if (ice_validate_vf_id(pf, vf_id))
3965 		return -EINVAL;
3966 
3967 	vf = &pf->vf[vf_id];
3968 	ret = ice_check_vf_ready_for_cfg(vf);
3969 	if (ret)
3970 		return ret;
3971 
3972 	/* Check if already trusted */
3973 	if (trusted == vf->trusted)
3974 		return 0;
3975 
3976 	mutex_lock(&vf->cfg_lock);
3977 
3978 	vf->trusted = trusted;
3979 	ice_vc_reset_vf(vf);
3980 	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
3981 		 vf_id, trusted ? "" : "un");
3982 
3983 	mutex_unlock(&vf->cfg_lock);
3984 
3985 	return 0;
3986 }
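/* Usage note (illustrative): this backs .ndo_set_vf_trust, e.g.
 *   ip link set dev <pf-netdev> vf 0 trust on
 * Trusted mode lifts limits such as the ICE_MAX_VLAN_PER_VF cap enforced in
 * ice_vc_process_vlan_msg() above. The interface name is a placeholder.
 */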
3987 
3988 /**
3989  * ice_set_vf_link_state
3990  * @netdev: network interface device structure
3991  * @vf_id: VF identifier
3992  * @link_state: required link state
3993  *
3994  * Set VF's link state, irrespective of physical link state status
3995  */
3996 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3997 {
3998 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3999 	struct ice_vf *vf;
4000 	int ret;
4001 
4002 	if (ice_validate_vf_id(pf, vf_id))
4003 		return -EINVAL;
4004 
4005 	vf = &pf->vf[vf_id];
4006 	ret = ice_check_vf_ready_for_cfg(vf);
4007 	if (ret)
4008 		return ret;
4009 
4010 	switch (link_state) {
4011 	case IFLA_VF_LINK_STATE_AUTO:
4012 		vf->link_forced = false;
4013 		break;
4014 	case IFLA_VF_LINK_STATE_ENABLE:
4015 		vf->link_forced = true;
4016 		vf->link_up = true;
4017 		break;
4018 	case IFLA_VF_LINK_STATE_DISABLE:
4019 		vf->link_forced = true;
4020 		vf->link_up = false;
4021 		break;
4022 	default:
4023 		return -EINVAL;
4024 	}
4025 
4026 	ice_vc_notify_vf_link_state(vf);
4027 
4028 	return 0;
4029 }
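/* Usage note (illustrative): this backs .ndo_set_vf_link_state, e.g.
 *   ip link set dev <pf-netdev> vf 0 state auto
 * where auto/enable/disable map onto the IFLA_VF_LINK_STATE_* cases handled
 * above. The interface name is a placeholder.
 */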
4030 
4031 /**
4032  * ice_get_vf_stats - populate some stats for the VF
4033  * @netdev: the netdev of the PF
4034  * @vf_id: the host OS identifier (0-255)
4035  * @vf_stats: pointer to the OS memory to be initialized
4036  */
4037 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
4038 		     struct ifla_vf_stats *vf_stats)
4039 {
4040 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
4041 	struct ice_eth_stats *stats;
4042 	struct ice_vsi *vsi;
4043 	struct ice_vf *vf;
4044 	int ret;
4045 
4046 	if (ice_validate_vf_id(pf, vf_id))
4047 		return -EINVAL;
4048 
4049 	vf = &pf->vf[vf_id];
4050 	ret = ice_check_vf_ready_for_cfg(vf);
4051 	if (ret)
4052 		return ret;
4053 
4054 	vsi = pf->vsi[vf->lan_vsi_idx];
4055 	if (!vsi)
4056 		return -EINVAL;
4057 
4058 	ice_update_eth_stats(vsi);
4059 	stats = &vsi->eth_stats;
4060 
4061 	memset(vf_stats, 0, sizeof(*vf_stats));
4062 
4063 	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4064 		stats->rx_multicast;
4065 	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4066 		stats->tx_multicast;
4067 	vf_stats->rx_bytes   = stats->rx_bytes;
4068 	vf_stats->tx_bytes   = stats->tx_bytes;
4069 	vf_stats->broadcast  = stats->rx_broadcast;
4070 	vf_stats->multicast  = stats->rx_multicast;
4071 	vf_stats->rx_dropped = stats->rx_discards;
4072 	vf_stats->tx_dropped = stats->tx_discards;
4073 
4074 	return 0;
4075 }
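/* Editorial sketch (assumption about the PF-side wiring, not part of this
 * file): the exported handlers above are standard SR-IOV ndo callbacks and
 * would typically be hooked into the PF netdev's net_device_ops roughly as
 * below; the table name is hypothetical and the declarations come from the
 * driver headers already included.
 */
static const struct net_device_ops example_sriov_netdev_ops = {
	.ndo_set_vf_vlan	= ice_set_vf_port_vlan,
	.ndo_set_vf_mac		= ice_set_vf_mac,
	.ndo_get_vf_config	= ice_get_vf_cfg,
	.ndo_set_vf_trust	= ice_set_vf_trust,
	.ndo_set_vf_link_state	= ice_set_vf_link_state,
	.ndo_get_vf_stats	= ice_get_vf_stats,
};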
4076 
4077 /**
4078  * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4079  * @vf: pointer to the VF structure
4080  */
4081 void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4082 {
4083 	struct ice_pf *pf = vf->pf;
4084 	struct device *dev;
4085 
4086 	dev = ice_pf_to_dev(pf);
4087 
4088 	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4089 		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4090 		 vf->dflt_lan_addr.addr,
4091 		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4092 			  ? "on" : "off");
4093 }
4094 
4095 /**
4096  * ice_print_vfs_mdd_events - print VFs malicious driver detect events
4097  * @pf: pointer to the PF structure
4098  *
4099  * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
4100  */
4101 void ice_print_vfs_mdd_events(struct ice_pf *pf)
4102 {
4103 	struct device *dev = ice_pf_to_dev(pf);
4104 	struct ice_hw *hw = &pf->hw;
4105 	int i;
4106 
4107 	/* check that there are pending MDD events to print */
4108 	if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
4109 		return;
4110 
4111 	/* VF MDD event logs are rate limited to one second intervals */
4112 	if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4113 		return;
4114 
4115 	pf->last_printed_mdd_jiffies = jiffies;
4116 
4117 	ice_for_each_vf(pf, i) {
4118 		struct ice_vf *vf = &pf->vf[i];
4119 
4120 		/* only print Rx MDD event message if there are new events */
4121 		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4122 			vf->mdd_rx_events.last_printed =
4123 							vf->mdd_rx_events.count;
4124 			ice_print_vf_rx_mdd_event(vf);
4125 		}
4126 
4127 		/* only print Tx MDD event message if there are new events */
4128 		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4129 			vf->mdd_tx_events.last_printed =
4130 							vf->mdd_tx_events.count;
4131 
4132 			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4133 				 vf->mdd_tx_events.count, hw->pf_id, i,
4134 				 vf->dflt_lan_addr.addr);
4135 		}
4136 	}
4137 }
4138 
4139 /**
4140  * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
4141  * @pdev: pointer to a pci_dev structure
4142  *
4143  * Called when recovering from a PF FLR to restore interrupt capability to
4144  * the VFs.
4145  */
4146 void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
4147 {
4148 	struct pci_dev *vfdev;
4149 	u16 vf_id;
4150 	int pos;
4151 
4152 	if (!pci_num_vf(pdev))
4153 		return;
4154 
4155 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4156 	if (pos) {
4157 		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
4158 				     &vf_id);
4159 		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
4160 		while (vfdev) {
4161 			if (vfdev->is_virtfn && vfdev->physfn == pdev)
4162 				pci_restore_msi_state(vfdev);
4163 			vfdev = pci_get_device(pdev->vendor, vf_id,
4164 					       vfdev);
4165 		}
4166 	}
4167 }
4168