1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_lib.h"
7 #include "ice_fltr.h"
8 
9 /**
10  * ice_validate_vf_id - helper to check if VF ID is valid
11  * @pf: pointer to the PF structure
12  * @vf_id: the ID of the VF to check
13  */
14 static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
15 {
16 	/* vf_id is only valid in the range 0-255 and should always be unsigned */
17 	if (vf_id >= pf->num_alloc_vfs) {
18 		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
19 		return -EINVAL;
20 	}
21 	return 0;
22 }
23 
24 /**
25  * ice_check_vf_init - helper to check if VF init complete
26  * @pf: pointer to the PF structure
27  * @vf: the pointer to the VF to check
28  */
29 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
30 {
31 	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
32 		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
33 			vf->vf_id);
34 		return -EBUSY;
35 	}
36 	return 0;
37 }
38 
39 /**
40  * ice_err_to_virt_err - translate errors for VF return code
41  * @ice_err: error return code
42  */
43 static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
44 {
45 	switch (ice_err) {
46 	case ICE_SUCCESS:
47 		return VIRTCHNL_STATUS_SUCCESS;
48 	case ICE_ERR_BAD_PTR:
49 	case ICE_ERR_INVAL_SIZE:
50 	case ICE_ERR_DEVICE_NOT_SUPPORTED:
51 	case ICE_ERR_PARAM:
52 	case ICE_ERR_CFG:
53 		return VIRTCHNL_STATUS_ERR_PARAM;
54 	case ICE_ERR_NO_MEMORY:
55 		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
56 	case ICE_ERR_NOT_READY:
57 	case ICE_ERR_RESET_FAILED:
58 	case ICE_ERR_FW_API_VER:
59 	case ICE_ERR_AQ_ERROR:
60 	case ICE_ERR_AQ_TIMEOUT:
61 	case ICE_ERR_AQ_FULL:
62 	case ICE_ERR_AQ_NO_WORK:
63 	case ICE_ERR_AQ_EMPTY:
64 		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
65 	default:
66 		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
67 	}
68 }
69 
70 /**
71  * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
72  * @pf: pointer to the PF structure
73  * @v_opcode: operation code
74  * @v_retval: return value
75  * @msg: pointer to the msg buffer
76  * @msglen: msg length
77  */
78 static void
79 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
80 		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
81 {
82 	struct ice_hw *hw = &pf->hw;
83 	unsigned int i;
84 
85 	ice_for_each_vf(pf, i) {
86 		struct ice_vf *vf = &pf->vf[i];
87 
88 		/* Not all VFs are enabled, so skip the ones that are not */
89 		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
90 		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
91 			continue;
92 
93 		/* Ignore return value on purpose - a given VF may fail, but
94 		 * we need to keep going and send to all of them
95 		 */
96 		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
97 				      msglen, NULL);
98 	}
99 }
100 
101 /**
102  * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
103  * @vf: pointer to the VF structure
104  * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
105  * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
106  * @link_up: whether or not to set the link up/down
107  */
108 static void
109 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
110 		 int ice_link_speed, bool link_up)
111 {
112 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
113 		pfe->event_data.link_event_adv.link_status = link_up;
114 		/* Speed in Mbps */
115 		pfe->event_data.link_event_adv.link_speed =
116 			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
117 	} else {
118 		pfe->event_data.link_event.link_status = link_up;
119 		/* Legacy method for virtchnl link speeds */
120 		pfe->event_data.link_event.link_speed =
121 			(enum virtchnl_link_speed)
122 			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
123 	}
124 }
125 
126 /**
127  * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
128  * @vf: the VF to check
129  *
130  * Returns true if the VF has no Rx and no Tx queues enabled and returns false
131  * otherwise
132  */
133 static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
134 {
135 	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
136 		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
137 }
138 
139 /**
140  * ice_is_vf_link_up - check if the VF's link is up
141  * @vf: VF to check if link is up
142  */
143 static bool ice_is_vf_link_up(struct ice_vf *vf)
144 {
145 	struct ice_pf *pf = vf->pf;
146 
147 	if (ice_check_vf_init(pf, vf))
148 		return false;
149 
150 	if (ice_vf_has_no_qs_ena(vf))
151 		return false;
152 	else if (vf->link_forced)
153 		return vf->link_up;
154 	else
155 		return pf->hw.port_info->phy.link_info.link_info &
156 			ICE_AQ_LINK_UP;
157 }
158 
159 /**
160  * ice_vc_notify_vf_link_state - Inform a VF of link status
161  * @vf: pointer to the VF structure
162  *
163  * send a link status message to a single VF
164  */
165 static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
166 {
167 	struct virtchnl_pf_event pfe = { 0 };
168 	struct ice_hw *hw = &vf->pf->hw;
169 
170 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
171 	pfe.severity = PF_EVENT_SEVERITY_INFO;
172 
173 	if (ice_is_vf_link_up(vf))
174 		ice_set_pfe_link(vf, &pfe,
175 				 hw->port_info->phy.link_info.link_speed, true);
176 	else
177 		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
178 
179 	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
180 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
181 			      sizeof(pfe), NULL);
182 }
183 
184 /**
185  * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
186  * @vf: VF to remove access to VSI for
187  */
188 static void ice_vf_invalidate_vsi(struct ice_vf *vf)
189 {
190 	vf->lan_vsi_idx = ICE_NO_VSI;
191 	vf->lan_vsi_num = ICE_NO_VSI;
192 }
193 
194 /**
195  * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
196  * @vf: invalidate this VF's VSI after freeing it
197  */
198 static void ice_vf_vsi_release(struct ice_vf *vf)
199 {
200 	ice_vsi_release(vf->pf->vsi[vf->lan_vsi_idx]);
201 	ice_vf_invalidate_vsi(vf);
202 }
203 
204 /**
205  * ice_free_vf_res - Free a VF's resources
206  * @vf: pointer to the VF info
207  */
208 static void ice_free_vf_res(struct ice_vf *vf)
209 {
210 	struct ice_pf *pf = vf->pf;
211 	int i, last_vector_idx;
212 
213 	/* First, disable VF's configuration API to prevent OS from
214 	 * accessing the VF's VSI after it's freed or invalidated.
215 	 */
216 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
217 
218 	/* free VSI and disconnect it from the parent uplink */
219 	if (vf->lan_vsi_idx != ICE_NO_VSI) {
220 		ice_vf_vsi_release(vf);
221 		vf->num_mac = 0;
222 	}
223 
224 	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
225 
226 	/* clear VF MDD event information */
227 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
228 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
229 
230 	/* Disable interrupts so that VF starts in a known state */
231 	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
232 		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
233 		ice_flush(&pf->hw);
234 	}
235 	/* reset some of the state variables keeping track of the resources */
236 	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
237 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
238 }
239 
240 /**
241  * ice_dis_vf_mappings - disable the VF's MSIX and queue mappings
242  * @vf: pointer to the VF structure
243  */
244 static void ice_dis_vf_mappings(struct ice_vf *vf)
245 {
246 	struct ice_pf *pf = vf->pf;
247 	struct ice_vsi *vsi;
248 	struct device *dev;
249 	int first, last, v;
250 	struct ice_hw *hw;
251 
252 	hw = &pf->hw;
253 	vsi = pf->vsi[vf->lan_vsi_idx];
254 
255 	dev = ice_pf_to_dev(pf);
256 	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
257 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
258 
259 	first = vf->first_vector_idx;
260 	last = first + pf->num_msix_per_vf - 1;
261 	for (v = first; v <= last; v++) {
262 		u32 reg;
263 
264 		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
265 			GLINT_VECT2FUNC_IS_PF_M) |
266 		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
267 			GLINT_VECT2FUNC_PF_NUM_M));
268 		wr32(hw, GLINT_VECT2FUNC(v), reg);
269 	}
270 
271 	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
272 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
273 	else
274 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
275 
276 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
277 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
278 	else
279 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
280 }
281 
282 /**
283  * ice_sriov_free_msix_res - Reset/free any used MSIX resources
284  * @pf: pointer to the PF structure
285  *
286  * Since no MSIX entries are taken from the pf->irq_tracker, just clear
287  * the pf->sriov_base_vector.
288  *
289  * Returns 0 on success, and -EINVAL on error.
290  */
291 static int ice_sriov_free_msix_res(struct ice_pf *pf)
292 {
293 	struct ice_res_tracker *res;
294 
295 	if (!pf)
296 		return -EINVAL;
297 
298 	res = pf->irq_tracker;
299 	if (!res)
300 		return -EINVAL;
301 
302 	/* give back irq_tracker resources used */
303 	WARN_ON(pf->sriov_base_vector < res->num_entries);
304 
305 	pf->sriov_base_vector = 0;
306 
307 	return 0;
308 }
309 
310 /**
311  * ice_set_vf_state_qs_dis - Set VF queues state to disabled
312  * @vf: pointer to the VF structure
313  */
314 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
315 {
316 	/* Clear Rx/Tx enabled queues flag */
317 	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
318 	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
319 	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
320 }
321 
322 /**
323  * ice_dis_vf_qs - Disable the VF queues
324  * @vf: pointer to the VF structure
325  */
326 static void ice_dis_vf_qs(struct ice_vf *vf)
327 {
328 	struct ice_pf *pf = vf->pf;
329 	struct ice_vsi *vsi;
330 
331 	vsi = pf->vsi[vf->lan_vsi_idx];
332 
333 	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
334 	ice_vsi_stop_all_rx_rings(vsi);
335 	ice_set_vf_state_qs_dis(vf);
336 }
337 
338 /**
339  * ice_free_vfs - Free all VFs
340  * @pf: pointer to the PF structure
341  */
342 void ice_free_vfs(struct ice_pf *pf)
343 {
344 	struct device *dev = ice_pf_to_dev(pf);
345 	struct ice_hw *hw = &pf->hw;
346 	unsigned int tmp, i;
347 
348 	if (!pf->vf)
349 		return;
350 
351 	while (test_and_set_bit(__ICE_VF_DIS, pf->state))
352 		usleep_range(1000, 2000);
353 
354 	/* Disable IOV before freeing resources. This lets any VF drivers
355 	 * running in the host get themselves cleaned up before we yank
356 	 * the carpet out from underneath their feet.
357 	 */
358 	if (!pci_vfs_assigned(pf->pdev))
359 		pci_disable_sriov(pf->pdev);
360 	else
361 		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
362 
363 	/* Avoid wait time by stopping all VFs at the same time */
364 	ice_for_each_vf(pf, i)
365 		ice_dis_vf_qs(&pf->vf[i]);
366 
367 	tmp = pf->num_alloc_vfs;
368 	pf->num_qps_per_vf = 0;
369 	pf->num_alloc_vfs = 0;
370 	for (i = 0; i < tmp; i++) {
371 		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
372 			/* disable VF qp mappings and set VF disable state */
373 			ice_dis_vf_mappings(&pf->vf[i]);
374 			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
375 			ice_free_vf_res(&pf->vf[i]);
376 		}
377 	}
378 
379 	if (ice_sriov_free_msix_res(pf))
380 		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
381 
382 	devm_kfree(dev, pf->vf);
383 	pf->vf = NULL;
384 
385 	/* This check is for when the driver is unloaded while VFs are
386 	 * assigned. Setting the number of VFs to 0 through sysfs is caught
387 	 * before this function ever gets called.
388 	 */
389 	if (!pci_vfs_assigned(pf->pdev)) {
390 		unsigned int vf_id;
391 
392 		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
393 		 * work correctly when SR-IOV gets re-enabled.
394 		 */
395 		for (vf_id = 0; vf_id < tmp; vf_id++) {
396 			u32 reg_idx, bit_idx;
397 
398 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
399 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
400 			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
401 		}
402 	}
403 	clear_bit(__ICE_VF_DIS, pf->state);
404 	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
405 }
406 
407 /**
408  * ice_trigger_vf_reset - Reset a VF on HW
409  * @vf: pointer to the VF structure
410  * @is_vflr: true if VFLR was issued, false if not
411  * @is_pfr: true if the reset was triggered due to a previous PFR
412  *
413  * Trigger hardware to start a reset for a particular VF. Expects the caller
414  * to wait the proper amount of time to allow hardware to reset the VF before
415  * it cleans up and restores VF functionality.
416  */
417 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
418 {
419 	struct ice_pf *pf = vf->pf;
420 	u32 reg, reg_idx, bit_idx;
421 	unsigned int vf_abs_id, i;
422 	struct device *dev;
423 	struct ice_hw *hw;
424 
425 	dev = ice_pf_to_dev(pf);
426 	hw = &pf->hw;
427 	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
428 
429 	/* Inform VF that it is no longer active, as a warning */
430 	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
431 
432 	/* Disable VF's configuration API during reset. The flag is re-enabled
433 	 * when it's safe again to access VF's VSI.
434 	 */
435 	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
436 
437 	/* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
438 	 * needs to clear them in the case of VFR/VFLR. If this is done for
439 	 * PFR, it can mess up VF resets because the VF driver may already
440 	 * have started cleanup by the time we get here.
441 	 */
442 	if (!is_pfr) {
443 		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
444 		wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
445 	}
446 
447 	/* In the case of a VFLR, the HW has already reset the VF and we
448 	 * just need to clean up, so don't hit the VFRTRIG register.
449 	 */
450 	if (!is_vflr) {
451 		/* reset VF using VPGEN_VFRTRIG reg */
452 		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
453 		reg |= VPGEN_VFRTRIG_VFSWR_M;
454 		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
455 	}
456 	/* clear the VFLR bit in GLGEN_VFLRSTAT */
457 	reg_idx = (vf_abs_id) / 32;
458 	bit_idx = (vf_abs_id) % 32;
459 	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
460 	ice_flush(hw);
461 
462 	wr32(hw, PF_PCI_CIAA,
463 	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
464 	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
465 		reg = rd32(hw, PF_PCI_CIAD);
466 		/* no transactions pending so stop polling */
467 		if ((reg & VF_TRANS_PENDING_M) == 0)
468 			break;
469 
470 		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
471 		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
472 	}
473 }
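/* Illustration of the VFLR bit math above (hypothetical values): with
 * hw->func_caps.vf_base_id = 64 and vf->vf_id = 5, vf_abs_id = 69, so
 * reg_idx = 69 / 32 = 2 and bit_idx = 69 % 32 = 5; the write above is then
 * wr32(hw, GLGEN_VFLRSTAT(2), BIT(5)).
 */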
474 
475 /**
476  * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
477  * @vsi: the VSI to update
478  * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
479  * @enable: true to enable the PVID, false to disable it
480  */
481 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
482 {
483 	struct ice_hw *hw = &vsi->back->hw;
484 	struct ice_aqc_vsi_props *info;
485 	struct ice_vsi_ctx *ctxt;
486 	enum ice_status status;
487 	int ret = 0;
488 
489 	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
490 	if (!ctxt)
491 		return -ENOMEM;
492 
493 	ctxt->info = vsi->info;
494 	info = &ctxt->info;
495 	if (enable) {
496 		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
497 			ICE_AQ_VSI_PVLAN_INSERT_PVID |
498 			ICE_AQ_VSI_VLAN_EMOD_STR;
499 		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
500 	} else {
501 		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
502 			ICE_AQ_VSI_VLAN_MODE_ALL;
503 		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
504 	}
505 
506 	info->pvid = cpu_to_le16(pvid_info);
507 	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
508 					   ICE_AQ_VSI_PROP_SW_VALID);
509 
510 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
511 	if (status) {
512 		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
513 			 ice_stat_str(status),
514 			 ice_aq_str(hw->adminq.sq_last_status));
515 		ret = -EIO;
516 		goto out;
517 	}
518 
519 	vsi->info.vlan_flags = info->vlan_flags;
520 	vsi->info.sw_flags2 = info->sw_flags2;
521 	vsi->info.pvid = info->pvid;
522 out:
523 	kfree(ctxt);
524 	return ret;
525 }
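/* Sketch of how a pvid_info value for the call above might be built. The VLAN
 * ID occupies the low 12 bits (see the VLAN_VID_MASK use in
 * ice_vf_rebuild_host_vlan_cfg below); placing the 802.1p priority in bits
 * 15:13 is an assumption based on the standard VLAN TCI layout, e.g.:
 *
 *	u16 pvid_info = 100 | (3 << 13);	hypothetical: VID 100, priority 3
 *	ice_vsi_manage_pvid(vsi, pvid_info, true);
 */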
526 
527 /**
528  * ice_vf_get_port_info - Get the VF's port info structure
529  * @vf: VF used to get the port info structure for
530  */
531 static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
532 {
533 	return vf->pf->hw.port_info;
534 }
535 
536 /**
537  * ice_vf_vsi_setup - Set up a VF VSI
538  * @vf: VF to setup VSI for
539  *
540  * Returns pointer to the successfully allocated VSI struct on success,
541  * otherwise returns NULL on failure.
542  */
543 static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
544 {
545 	struct ice_port_info *pi = ice_vf_get_port_info(vf);
546 	struct ice_pf *pf = vf->pf;
547 	struct ice_vsi *vsi;
548 
549 	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
550 
551 	if (!vsi) {
552 		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
553 		ice_vf_invalidate_vsi(vf);
554 		return NULL;
555 	}
556 
557 	vf->lan_vsi_idx = vsi->idx;
558 	vf->lan_vsi_num = vsi->vsi_num;
559 
560 	return vsi;
561 }
562 
563 /**
564  * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
565  * @pf: pointer to PF structure
566  * @vf: pointer to VF that the first MSIX vector index is being calculated for
567  *
568  * This returns the first MSIX vector index in PF space that is used by this VF.
569  * This index is used when accessing PF relative registers such as
570  * GLINT_VECT2FUNC and GLINT_DYN_CTL.
571  * This will always be the OICR index in the AVF driver so any functionality
572  * using vf->first_vector_idx for queue configuration will have to increment by
573  * 1 to avoid meddling with the OICR index.
574  */
575 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
576 {
577 	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
578 }
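/* Worked example (hypothetical values): with pf->sriov_base_vector = 96 and
 * pf->num_msix_per_vf = 17, VF 0 starts at PF vector 96 and VF 2 starts at
 * 96 + 2 * 17 = 130. Vector 130 is VF 2's OICR, so its first queue vector is
 * 131, which is why queue configuration adds 1 as noted above.
 */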
579 
580 /**
581  * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
582  * @vf: VF to add VLAN filters for
583  *
584  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
585  * always re-adds either a VLAN 0 or port VLAN based filter after reset.
586  */
587 static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
588 {
589 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
590 	struct device *dev = ice_pf_to_dev(vf->pf);
591 	u16 vlan_id = 0;
592 	int err;
593 
594 	if (vf->port_vlan_info) {
595 		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
596 		if (err) {
597 			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
598 				vf->vf_id, err);
599 			return err;
600 		}
601 
602 		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
603 	}
604 
605 	/* vlan_id will either be 0 or the port VLAN number */
606 	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
607 	if (err) {
608 		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
609 			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
610 			err);
611 		return err;
612 	}
613 
614 	return 0;
615 }
616 
617 /**
618  * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
619  * @vf: VF to add MAC filters for
620  *
621  * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
622  * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
623  */
624 static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
625 {
626 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
627 	struct device *dev = ice_pf_to_dev(vf->pf);
628 	enum ice_status status;
629 	u8 broadcast[ETH_ALEN];
630 
631 	eth_broadcast_addr(broadcast);
632 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
633 	if (status) {
634 		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
635 			vf->vf_id, ice_stat_str(status));
636 		return ice_status_to_errno(status);
637 	}
638 
639 	vf->num_mac++;
640 
641 	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
642 		status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
643 					  ICE_FWD_TO_VSI);
644 		if (status) {
645 			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
646 				&vf->dflt_lan_addr.addr[0], vf->vf_id,
647 				ice_stat_str(status));
648 			return ice_status_to_errno(status);
649 		}
650 		vf->num_mac++;
651 	}
652 
653 	return 0;
654 }
655 
656 /**
657  * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
658  * @vf: VF to configure trust setting for
659  */
660 static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
661 {
662 	if (vf->trusted)
663 		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
664 	else
665 		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
666 }
667 
668 /**
669  * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
670  * @vf: VF to enable MSIX mappings for
671  *
672  * Some of the registers need to be indexed/configured using hardware global
673  * device values, while other registers need 0-based values relative to the
674  * PF.
675  */
676 static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
677 {
678 	int device_based_first_msix, device_based_last_msix;
679 	int pf_based_first_msix, pf_based_last_msix, v;
680 	struct ice_pf *pf = vf->pf;
681 	int device_based_vf_id;
682 	struct ice_hw *hw;
683 	u32 reg;
684 
685 	hw = &pf->hw;
686 	pf_based_first_msix = vf->first_vector_idx;
687 	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
688 
689 	device_based_first_msix = pf_based_first_msix +
690 		pf->hw.func_caps.common_cap.msix_vector_first_id;
691 	device_based_last_msix =
692 		(device_based_first_msix + pf->num_msix_per_vf) - 1;
693 	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
694 
695 	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
696 		VPINT_ALLOC_FIRST_M) |
697 	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
698 		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
699 	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
700 
701 	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
702 		 & VPINT_ALLOC_PCI_FIRST_M) |
703 	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
704 		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
705 	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
706 
707 	/* map the interrupts to its functions */
708 	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
709 		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
710 			GLINT_VECT2FUNC_VF_NUM_M) |
711 		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
712 			GLINT_VECT2FUNC_PF_NUM_M));
713 		wr32(hw, GLINT_VECT2FUNC(v), reg);
714 	}
715 
716 	/* Map mailbox interrupt to VF MSI-X vector 0 */
717 	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
718 }
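/* Worked example (hypothetical values): for VF 2 with vf->first_vector_idx =
 * 130, pf->num_msix_per_vf = 17, msix_vector_first_id = 0 and vf_base_id = 64,
 * the PF-based vector range is 130..146, the device-based range written to
 * VPINT_ALLOC/VPINT_ALLOC_PCI is also 130..146, and device_based_vf_id = 66 is
 * used for GLINT_VECT2FUNC and VPINT_MBX_CTL.
 */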
719 
720 /**
721  * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
722  * @vf: VF to enable the mappings for
723  * @max_txq: max Tx queues allowed on the VF's VSI
724  * @max_rxq: max Rx queues allowed on the VF's VSI
725  */
726 static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
727 {
728 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
729 	struct device *dev = ice_pf_to_dev(vf->pf);
730 	struct ice_hw *hw = &vf->pf->hw;
731 	u32 reg;
732 
733 	/* set regardless of mapping mode */
734 	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
735 
736 	/* VF Tx queues allocation */
737 	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
738 		/* set the VF PF Tx queue range
739 		 * VFNUMQ value should be set to (number of queues - 1). A value
740 		 * of 0 means 1 queue and a value of 255 means 256 queues
741 		 */
742 		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
743 			VPLAN_TX_QBASE_VFFIRSTQ_M) |
744 		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
745 			VPLAN_TX_QBASE_VFNUMQ_M));
746 		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
747 	} else {
748 		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
749 	}
750 
751 	/* set regardless of mapping mode */
752 	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
753 
754 	/* VF Rx queues allocation */
755 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
756 		/* set the VF PF Rx queue range
757 		 * VFNUMQ value should be set to (number of queues - 1). A value
758 		 * of 0 means 1 queue and a value of 255 means 256 queues
759 		 */
760 		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
761 			VPLAN_RX_QBASE_VFFIRSTQ_M) |
762 		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
763 			VPLAN_RX_QBASE_VFNUMQ_M));
764 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
765 	} else {
766 		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
767 	}
768 }
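/* Worked example (hypothetical values): for a VF whose VSI was given 4 Tx and
 * 4 Rx queues starting at PF queue 16 (vsi->txq_map[0] == 16), the contiguous
 * mapping above programs VFFIRSTQ = 16 and VFNUMQ = 3, since VFNUMQ holds
 * (number of queues - 1).
 */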
769 
770 /**
771  * ice_ena_vf_mappings - enable VF MSIX and queue mapping
772  * @vf: pointer to the VF structure
773  */
774 static void ice_ena_vf_mappings(struct ice_vf *vf)
775 {
776 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
777 
778 	ice_ena_vf_msix_mappings(vf);
779 	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
780 }
781 
782 /**
783  * ice_determine_res - determine resources (queues/vectors) each VF can get
784  * @pf: pointer to the PF structure
785  * @avail_res: available resources in the PF structure
786  * @max_res: maximum resources that can be given per VF
787  * @min_res: minimum resources that can be given per VF
788  *
789  * Returns non-zero value if resources (queues/vectors) are available or
790  * returns zero if the PF cannot accommodate all num_alloc_vfs.
791  */
792 static int
793 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
794 {
795 	bool checked_min_res = false;
796 	int res;
797 
798 	/* Start by checking if the PF can assign the maximum number of
799 	 * resources for all num_alloc_vfs.
800 	 * If yes, return that number per VF.
801 	 * If no, divide by 2, round up, and check again.
802 	 * Repeat until even the minimum resources are not available; in that
803 	 * case return 0.
804 	 */
805 	res = max_res;
806 	while ((res >= min_res) && !checked_min_res) {
807 		int num_all_res;
808 
809 		num_all_res = pf->num_alloc_vfs * res;
810 		if (num_all_res <= avail_res)
811 			return res;
812 
813 		if (res == min_res)
814 			checked_min_res = true;
815 
816 		res = DIV_ROUND_UP(res, 2);
817 	}
818 	return 0;
819 }
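/* Worked example (hypothetical values): with max_res = 16, min_res = 1,
 * pf->num_alloc_vfs = 8 and avail_res = 100, the first pass needs 8 * 16 = 128
 * which exceeds 100, so res is halved to 8; 8 * 8 = 64 fits, so 8 is returned.
 * If even 8 * min_res had not fit, the function would return 0.
 */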
820 
821 /**
822  * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
823  * @vf: VF to calculate the register index for
824  * @q_vector: a q_vector associated to the VF
825  */
826 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
827 {
828 	struct ice_pf *pf;
829 
830 	if (!vf || !q_vector)
831 		return -EINVAL;
832 
833 	pf = vf->pf;
834 
835 	/* always add one to account for the OICR being the first MSIX */
836 	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
837 		q_vector->v_idx + 1;
838 }
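/* Worked example (hypothetical values): with pf->sriov_base_vector = 96,
 * pf->num_msix_per_vf = 17, vf->vf_id = 1 and q_vector->v_idx = 0, the result
 * is 96 + 17 + 0 + 1 = 114; vector 113 is VF 1's OICR, so its first queue
 * vector lands at 114.
 */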
839 
840 /**
841  * ice_get_max_valid_res_idx - Get the max valid resource index
842  * @res: pointer to the resource to find the max valid index for
843  *
844  * Start from the end of the ice_res_tracker and return right when we find the
845  * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
846  * valid for SR-IOV because it is the only consumer that manipulates the
847  * res->end and this is always called when res->end is set to res->num_entries.
848  */
849 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
850 {
851 	int i;
852 
853 	if (!res)
854 		return -EINVAL;
855 
856 	for (i = res->num_entries - 1; i >= 0; i--)
857 		if (res->list[i] & ICE_RES_VALID_BIT)
858 			return i;
859 
860 	return 0;
861 }
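/* Worked example: for a tracker with num_entries = 8 whose list has
 * ICE_RES_VALID_BIT set only on entries 0-5, the scan from the end returns 5;
 * if no entry has the bit set, 0 is returned rather than an error.
 */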
862 
863 /**
864  * ice_sriov_set_msix_res - Set any used MSIX resources
865  * @pf: pointer to PF structure
866  * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
867  *
868  * This function allows SR-IOV resources to be taken from the end of the PF's
869  * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
870  * just set the pf->sriov_base_vector and return success.
871  *
872  * If there are not enough resources available, return an error. This should
873  * always be caught by ice_set_per_vf_res().
874  *
875  * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
876  * in the PF's space available for SR-IOV.
877  */
878 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
879 {
880 	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
881 	int vectors_used = pf->irq_tracker->num_entries;
882 	int sriov_base_vector;
883 
884 	sriov_base_vector = total_vectors - num_msix_needed;
885 
886 	/* make sure we only grab irq_tracker entries from the list end and
887 	 * that we have enough available MSIX vectors
888 	 */
889 	if (sriov_base_vector < vectors_used)
890 		return -EINVAL;
891 
892 	pf->sriov_base_vector = sriov_base_vector;
893 
894 	return 0;
895 }
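/* Worked example (hypothetical values): with 256 function MSI-X vectors,
 * pf->irq_tracker->num_entries = 64 and num_msix_needed = 8 * 17 = 136, the
 * base becomes 256 - 136 = 120, which is >= 64, so pf->sriov_base_vector is
 * set to 120. Had num_msix_needed exceeded 192, -EINVAL would be returned.
 */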
896 
897 /**
898  * ice_set_per_vf_res - check if vectors and queues are available
899  * @pf: pointer to the PF structure
900  *
901  * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
902  * get more vectors and can enable more queues per VF. Note that this does not
903  * grab any vectors from the SW pool already allocated. Also note, that all
904  * vector counts include one for each VF's miscellaneous interrupt vector
905  * (i.e. OICR).
906  *
907  * Minimum VFs - 2 vectors, 1 queue pair
908  * Small VFs - 5 vectors, 4 queue pairs
909  * Medium VFs - 17 vectors, 16 queue pairs
910  *
911  * Second, determine number of queue pairs per VF by starting with a pre-defined
912  * maximum each VF supports. If this is not possible, then we adjust based on
913  * queue pairs available on the device.
914  *
915  * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
916  * by each VF during VF initialization and reset.
917  */
918 static int ice_set_per_vf_res(struct ice_pf *pf)
919 {
920 	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
921 	int msix_avail_per_vf, msix_avail_for_sriov;
922 	struct device *dev = ice_pf_to_dev(pf);
923 	u16 num_msix_per_vf, num_txq, num_rxq;
924 
925 	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
926 		return -EINVAL;
927 
928 	/* determine MSI-X resources per VF */
929 	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
930 		pf->irq_tracker->num_entries;
931 	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
932 	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
933 		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
934 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
935 		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
936 	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
937 		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
938 	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
939 		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
940 	} else {
941 		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
942 			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
943 			pf->num_alloc_vfs);
944 		return -EIO;
945 	}
946 
947 	/* determine queue resources per VF */
948 	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
949 				    min_t(u16,
950 					  num_msix_per_vf - ICE_NONQ_VECS_VF,
951 					  ICE_MAX_RSS_QS_PER_VF),
952 				    ICE_MIN_QS_PER_VF);
953 
954 	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
955 				    min_t(u16,
956 					  num_msix_per_vf - ICE_NONQ_VECS_VF,
957 					  ICE_MAX_RSS_QS_PER_VF),
958 				    ICE_MIN_QS_PER_VF);
959 
960 	if (!num_txq || !num_rxq) {
961 		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
962 			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
963 		return -EIO;
964 	}
965 
966 	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
967 		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
968 			pf->num_alloc_vfs);
969 		return -EINVAL;
970 	}
971 
972 	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
973 	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
974 	pf->num_msix_per_vf = num_msix_per_vf;
975 	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
976 		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
977 
978 	return 0;
979 }
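/* Worked example (hypothetical values): with 160 MSI-X vectors left for SR-IOV
 * and 16 VFs requested, each VF could get 10 vectors, which clears the small
 * tier (5) but not the medium tier (17), so num_msix_per_vf = 5. With one of
 * those reserved for the OICR, up to 4 vectors remain for data queues, so
 * pf->num_qps_per_vf ends up at most 4 (subject to available Tx/Rx queues).
 */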
980 
981 /**
982  * ice_clear_vf_reset_trigger - enable VF to access hardware
983  * @vf: VF to enable hardware access for
984  */
985 static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
986 {
987 	struct ice_hw *hw = &vf->pf->hw;
988 	u32 reg;
989 
990 	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
991 	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
992 	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
993 	ice_flush(hw);
994 }
995 
996 /**
997  * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
998  * @vf: pointer to the VF info
999  * @vsi: the VSI being configured
1000  * @promisc_m: mask of promiscuous config bits
1001  * @rm_promisc: promisc flag request from the VF to remove or add filter
1002  *
1003  * This function configures VF VSI promiscuous mode, based on the VF requests,
1004  * for Unicast, Multicast and VLAN
1005  */
1006 static enum ice_status
1007 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1008 		       bool rm_promisc)
1009 {
1010 	struct ice_pf *pf = vf->pf;
1011 	enum ice_status status = 0;
1012 	struct ice_hw *hw;
1013 
1014 	hw = &pf->hw;
1015 	if (vsi->num_vlan) {
1016 		status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1017 						  rm_promisc);
1018 	} else if (vf->port_vlan_info) {
1019 		if (rm_promisc)
1020 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1021 						       vf->port_vlan_info);
1022 		else
1023 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1024 						     vf->port_vlan_info);
1025 	} else {
1026 		if (rm_promisc)
1027 			status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1028 						       0);
1029 		else
1030 			status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1031 						     0);
1032 	}
1033 
1034 	return status;
1035 }
1036 
1037 static void ice_vf_clear_counters(struct ice_vf *vf)
1038 {
1039 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
1040 
1041 	vf->num_mac = 0;
1042 	vsi->num_vlan = 0;
1043 	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1044 	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1045 }
1046 
1047 /**
1048  * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
1049  * @vf: VF to perform pre VSI rebuild tasks
1050  *
1051  * These tasks are items that don't need to be amortized since they are most
1052  * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
1053  */
1054 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
1055 {
1056 	ice_vf_clear_counters(vf);
1057 	ice_clear_vf_reset_trigger(vf);
1058 }
1059 
1060 /**
1061  * ice_vf_rebuild_host_cfg - rebuild host configuration that persists across reset
1062  * @vf: VF to rebuild host configuration on
1063  */
1064 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1065 {
1066 	struct device *dev = ice_pf_to_dev(vf->pf);
1067 
1068 	ice_vf_set_host_trust_cfg(vf);
1069 
1070 	if (ice_vf_rebuild_host_mac_cfg(vf))
1071 		dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1072 			vf->vf_id);
1073 
1074 	if (ice_vf_rebuild_host_vlan_cfg(vf))
1075 		dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1076 			vf->vf_id);
1077 }
1078 
1079 /**
1080  * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
1081  * @vf: VF to release and setup the VSI for
1082  *
1083  * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
1084  * configuration change, etc.).
1085  */
1086 static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1087 {
1088 	ice_vf_vsi_release(vf);
1089 	if (!ice_vf_vsi_setup(vf))
1090 		return -ENOMEM;
1091 
1092 	return 0;
1093 }
1094 
1095 /**
1096  * ice_vf_rebuild_vsi - rebuild the VF's VSI
1097  * @vf: VF to rebuild the VSI for
1098  *
1099  * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
1100  * host, PFR, CORER, etc.).
1101  */
1102 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1103 {
1104 	struct ice_pf *pf = vf->pf;
1105 	struct ice_vsi *vsi;
1106 
1107 	vsi = pf->vsi[vf->lan_vsi_idx];
1108 
1109 	if (ice_vsi_rebuild(vsi, true)) {
1110 		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1111 			vf->vf_id);
1112 		return -EIO;
1113 	}
1114 	/* vsi->idx will remain the same in this case so don't update
1115 	 * vf->lan_vsi_idx
1116 	 */
1117 	vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1118 	vf->lan_vsi_num = vsi->vsi_num;
1119 
1120 	return 0;
1121 }
1122 
1123 /**
1124  * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1125  * @vf: VF to set in initialized state
1126  *
1127  * After this function the VF will be ready to receive/handle the
1128  * VIRTCHNL_OP_GET_VF_RESOURCES message
1129  */
1130 static void ice_vf_set_initialized(struct ice_vf *vf)
1131 {
1132 	ice_set_vf_state_qs_dis(vf);
1133 	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1134 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1135 	clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1136 	set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1137 }
1138 
1139 /**
1140  * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
1141  * @vf: VF to perform tasks on
1142  */
1143 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1144 {
1145 	struct ice_pf *pf = vf->pf;
1146 	struct ice_hw *hw;
1147 
1148 	hw = &pf->hw;
1149 
1150 	ice_vf_rebuild_host_cfg(vf);
1151 
1152 	ice_vf_set_initialized(vf);
1153 	ice_ena_vf_mappings(vf);
1154 	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1155 }
1156 
1157 /**
1158  * ice_reset_all_vfs - reset all allocated VFs in one go
1159  * @pf: pointer to the PF structure
1160  * @is_vflr: true if VFLR was issued, false if not
1161  *
1162  * First, tell the hardware to reset each VF, then do all the waiting in one
1163  * chunk, and finally finish restoring each VF after the wait. This is useful
1164  * during PF routines which need to reset all VFs, as otherwise it must perform
1165  * these resets in a serialized fashion.
1166  *
1167  * Returns true if any VFs were reset, and false otherwise.
1168  */
1169 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1170 {
1171 	struct device *dev = ice_pf_to_dev(pf);
1172 	struct ice_hw *hw = &pf->hw;
1173 	struct ice_vf *vf;
1174 	int v, i;
1175 
1176 	/* If we don't have any VFs, then there is nothing to reset */
1177 	if (!pf->num_alloc_vfs)
1178 		return false;
1179 
1180 	/* If VFs have been disabled, there is no need to reset */
1181 	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1182 		return false;
1183 
1184 	/* Begin reset on all VFs at once */
1185 	ice_for_each_vf(pf, v)
1186 		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1187 
1188 	/* HW requires some time to make sure it can flush the FIFO for a VF
1189 	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1190 	 * sequence to make sure that it has completed. We'll keep track of
1191 	 * the VFs using a simple iterator that increments once that VF has
1192 	 * finished resetting.
1193 	 */
1194 	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1195 		/* Check each VF in sequence */
1196 		while (v < pf->num_alloc_vfs) {
1197 			u32 reg;
1198 
1199 			vf = &pf->vf[v];
1200 			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1201 			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1202 				/* only delay if the check failed */
1203 				usleep_range(10, 20);
1204 				break;
1205 			}
1206 
1207 			/* If the current VF has finished resetting, move on
1208 			 * to the next VF in sequence.
1209 			 */
1210 			v++;
1211 		}
1212 	}
1213 
1214 	/* Display a warning if at least one VF didn't manage to reset in
1215 	 * time, but continue on with the operation.
1216 	 */
1217 	if (v < pf->num_alloc_vfs)
1218 		dev_warn(dev, "VF reset check timeout\n");
1219 
1220 	/* free VF resources to begin resetting the VSI state */
1221 	ice_for_each_vf(pf, v) {
1222 		vf = &pf->vf[v];
1223 
1224 		ice_vf_pre_vsi_rebuild(vf);
1225 		ice_vf_rebuild_vsi(vf);
1226 		ice_vf_post_vsi_rebuild(vf);
1227 	}
1228 
1229 	ice_flush(hw);
1230 	clear_bit(__ICE_VF_DIS, pf->state);
1231 
1232 	return true;
1233 }
1234 
1235 /**
1236  * ice_is_vf_disabled - check if the PF or the VF is disabled
1237  * @vf: pointer to the VF info
1238  *
1239  * Returns true if the PF or VF is disabled, false otherwise.
1240  */
1241 static bool ice_is_vf_disabled(struct ice_vf *vf)
1242 {
1243 	struct ice_pf *pf = vf->pf;
1244 
1245 	/* If the PF has been disabled, there is no need to reset the VF until
1246 	 * PF is active again. Similarly, if the VF has been disabled, this
1247 	 * means something else is resetting the VF, so we shouldn't continue.
1248 	 * Otherwise, set disable VF state bit for actual reset, and continue.
1249 	 */
1250 	return (test_bit(__ICE_VF_DIS, pf->state) ||
1251 		test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1252 }
1253 
1254 /**
1255  * ice_reset_vf - Reset a particular VF
1256  * @vf: pointer to the VF structure
1257  * @is_vflr: true if VFLR was issued, false if not
1258  *
1259  * Returns true if the VF is currently in reset, resets successfully, or resets
1260  * are disabled and false otherwise.
1261  */
1262 bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1263 {
1264 	struct ice_pf *pf = vf->pf;
1265 	struct ice_vsi *vsi;
1266 	struct device *dev;
1267 	struct ice_hw *hw;
1268 	bool rsd = false;
1269 	u8 promisc_m;
1270 	u32 reg;
1271 	int i;
1272 
1273 	dev = ice_pf_to_dev(pf);
1274 
1275 	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
1276 		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1277 			vf->vf_id);
1278 		return true;
1279 	}
1280 
1281 	if (ice_is_vf_disabled(vf)) {
1282 		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
1283 			vf->vf_id);
1284 		return true;
1285 	}
1286 
1287 	/* Set VF disable bit state here, before triggering reset */
1288 	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1289 	ice_trigger_vf_reset(vf, is_vflr, false);
1290 
1291 	vsi = pf->vsi[vf->lan_vsi_idx];
1292 
1293 	ice_dis_vf_qs(vf);
1294 
1295 	/* Call Disable LAN Tx queue AQ whether or not queues are
1296 	 * enabled. This is needed for successful completion of VFR.
1297 	 */
1298 	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1299 			NULL, ICE_VF_RESET, vf->vf_id, NULL);
1300 
1301 	hw = &pf->hw;
1302 	/* poll VPGEN_VFRSTAT reg to make sure
1303 	 * that reset is complete
1304 	 */
1305 	for (i = 0; i < 10; i++) {
1306 		/* VF reset requires driver to first reset the VF and then
1307 		 * poll the status register to make sure that the reset
1308 		 * completed successfully.
1309 		 */
1310 		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1311 		if (reg & VPGEN_VFRSTAT_VFRD_M) {
1312 			rsd = true;
1313 			break;
1314 		}
1315 
1316 		/* only sleep if the reset is not done */
1317 		usleep_range(10, 20);
1318 	}
1319 
1320 	/* Display a warning if the VF didn't manage to reset in time, but
1321 	 * continue on with the operation anyway.
1322 	 */
1323 	if (!rsd)
1324 		dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1325 
1326 	/* disable promiscuous modes in case they were enabled
1327 	 * ignore any error if disabling process failed
1328 	 */
1329 	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1330 	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1331 		if (vf->port_vlan_info || vsi->num_vlan)
1332 			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1333 		else
1334 			promisc_m = ICE_UCAST_PROMISC_BITS;
1335 
1336 		vsi = pf->vsi[vf->lan_vsi_idx];
1337 		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1338 			dev_err(dev, "disabling promiscuous mode failed\n");
1339 	}
1340 
1341 	ice_vf_pre_vsi_rebuild(vf);
1342 
1343 	if (ice_vf_rebuild_vsi_with_release(vf)) {
1344 		dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
1345 		return false;
1346 	}
1347 
1348 	ice_vf_post_vsi_rebuild(vf);
1349 
1350 	return true;
1351 }
1352 
1353 /**
1354  * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1355  * @pf: pointer to the PF structure
1356  */
1357 void ice_vc_notify_link_state(struct ice_pf *pf)
1358 {
1359 	int i;
1360 
1361 	ice_for_each_vf(pf, i)
1362 		ice_vc_notify_vf_link_state(&pf->vf[i]);
1363 }
1364 
1365 /**
1366  * ice_vc_notify_reset - Send pending reset message to all VFs
1367  * @pf: pointer to the PF structure
1368  *
1369  * indicate a pending reset to all VFs on a given PF
1370  */
1371 void ice_vc_notify_reset(struct ice_pf *pf)
1372 {
1373 	struct virtchnl_pf_event pfe;
1374 
1375 	if (!pf->num_alloc_vfs)
1376 		return;
1377 
1378 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1379 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1380 	ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1381 			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1382 }
1383 
1384 /**
1385  * ice_vc_notify_vf_reset - Notify VF of a reset event
1386  * @vf: pointer to the VF structure
1387  */
1388 static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1389 {
1390 	struct virtchnl_pf_event pfe;
1391 	struct ice_pf *pf;
1392 
1393 	if (!vf)
1394 		return;
1395 
1396 	pf = vf->pf;
1397 	if (ice_validate_vf_id(pf, vf->vf_id))
1398 		return;
1399 
1400 	/* Bail out if the VF is disabled, or is neither initialized nor
1401 	 * active - otherwise proceed with the notification
1402 	 */
1403 	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1404 	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1405 	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1406 		return;
1407 
1408 	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1409 	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1410 	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1411 			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1412 			      NULL);
1413 }
1414 
1415 /**
1416  * ice_init_vf_vsi_res - initialize/setup VF VSI resources
1417  * @vf: VF to initialize/setup the VSI for
1418  *
1419  * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
1420  * VF VSI's broadcast filter. It is only used during initial VF creation.
1421  */
1422 static int ice_init_vf_vsi_res(struct ice_vf *vf)
1423 {
1424 	struct ice_pf *pf = vf->pf;
1425 	u8 broadcast[ETH_ALEN];
1426 	enum ice_status status;
1427 	struct ice_vsi *vsi;
1428 	struct device *dev;
1429 	int err;
1430 
1431 	vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1432 
1433 	dev = ice_pf_to_dev(pf);
1434 	vsi = ice_vf_vsi_setup(vf);
1435 	if (!vsi)
1436 		return -ENOMEM;
1437 
1438 	err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1439 	if (err) {
1440 		dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1441 			 vf->vf_id);
1442 		goto release_vsi;
1443 	}
1444 
1445 	eth_broadcast_addr(broadcast);
1446 	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1447 	if (status) {
1448 		dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1449 			vf->vf_id, ice_stat_str(status));
1450 		err = ice_status_to_errno(status);
1451 		goto release_vsi;
1452 	}
1453 
1454 	vf->num_mac = 1;
1455 
1456 	return 0;
1457 
1458 release_vsi:
1459 	ice_vf_vsi_release(vf);
1460 	return err;
1461 }
1462 
1463 /**
1464  * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
1465  * @pf: PF the VFs are associated with
1466  */
1467 static int ice_start_vfs(struct ice_pf *pf)
1468 {
1469 	struct ice_hw *hw = &pf->hw;
1470 	int retval, i;
1471 
1472 	ice_for_each_vf(pf, i) {
1473 		struct ice_vf *vf = &pf->vf[i];
1474 
1475 		ice_clear_vf_reset_trigger(vf);
1476 
1477 		retval = ice_init_vf_vsi_res(vf);
1478 		if (retval) {
1479 			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1480 				vf->vf_id, retval);
1481 			goto teardown;
1482 		}
1483 
1484 		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1485 		ice_ena_vf_mappings(vf);
1486 		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1487 	}
1488 
1489 	ice_flush(hw);
1490 	return 0;
1491 
1492 teardown:
1493 	for (i = i - 1; i >= 0; i--) {
1494 		struct ice_vf *vf = &pf->vf[i];
1495 
1496 		ice_dis_vf_mappings(vf);
1497 		ice_vf_vsi_release(vf);
1498 	}
1499 
1500 	return retval;
1501 }
1502 
1503 /**
1504  * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1505  * @pf: PF holding reference to all VFs for default configuration
1506  */
1507 static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1508 {
1509 	int i;
1510 
1511 	ice_for_each_vf(pf, i) {
1512 		struct ice_vf *vf = &pf->vf[i];
1513 
1514 		vf->pf = pf;
1515 		vf->vf_id = i;
1516 		vf->vf_sw_id = pf->first_sw;
1517 		/* assign default capabilities */
1518 		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1519 		vf->spoofchk = true;
1520 		vf->num_vf_qs = pf->num_qps_per_vf;
1521 	}
1522 }
1523 
1524 /**
1525  * ice_alloc_vfs - allocate num_vfs in the PF structure
1526  * @pf: PF to store the allocated VFs in
1527  * @num_vfs: number of VFs to allocate
1528  */
1529 static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1530 {
1531 	struct ice_vf *vfs;
1532 
1533 	vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1534 			   GFP_KERNEL);
1535 	if (!vfs)
1536 		return -ENOMEM;
1537 
1538 	pf->vf = vfs;
1539 	pf->num_alloc_vfs = num_vfs;
1540 
1541 	return 0;
1542 }
1543 
1544 /**
1545  * ice_ena_vfs - enable VFs so they are ready to be used
1546  * @pf: pointer to the PF structure
1547  * @num_vfs: number of VFs to enable
1548  */
1549 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1550 {
1551 	struct device *dev = ice_pf_to_dev(pf);
1552 	struct ice_hw *hw = &pf->hw;
1553 	int ret;
1554 
1555 	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
1556 	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1557 	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1558 	set_bit(__ICE_OICR_INTR_DIS, pf->state);
1559 	ice_flush(hw);
1560 
1561 	ret = pci_enable_sriov(pf->pdev, num_vfs);
1562 	if (ret) {
1563 		pf->num_alloc_vfs = 0;
1564 		goto err_unroll_intr;
1565 	}
1566 
1567 	ret = ice_alloc_vfs(pf, num_vfs);
1568 	if (ret)
1569 		goto err_pci_disable_sriov;
1570 
1571 	if (ice_set_per_vf_res(pf)) {
1572 		dev_err(dev, "Not enough resources for %d VFs, try with fewer VFs\n",
1573 			num_vfs);
1574 		ret = -ENOSPC;
1575 		goto err_unroll_sriov;
1576 	}
1577 
1578 	ice_set_dflt_settings_vfs(pf);
1579 
1580 	if (ice_start_vfs(pf)) {
1581 		dev_err(dev, "Failed to start VF(s)\n");
1582 		ret = -EAGAIN;
1583 		goto err_unroll_sriov;
1584 	}
1585 
1586 	clear_bit(__ICE_VF_DIS, pf->state);
1587 	return 0;
1588 
1589 err_unroll_sriov:
1590 	devm_kfree(dev, pf->vf);
1591 	pf->vf = NULL;
1592 	pf->num_alloc_vfs = 0;
1593 err_pci_disable_sriov:
1594 	pci_disable_sriov(pf->pdev);
1595 err_unroll_intr:
1596 	/* rearm interrupts here */
1597 	ice_irq_dynamic_ena(hw, NULL, NULL);
1598 	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1599 	return ret;
1600 }
1601 
1602 /**
1603  * ice_pci_sriov_ena - Enable or change number of VFs
1604  * @pf: pointer to the PF structure
1605  * @num_vfs: number of VFs to allocate
1606  *
1607  * Returns 0 on success and negative on failure
1608  */
1609 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1610 {
1611 	int pre_existing_vfs = pci_num_vf(pf->pdev);
1612 	struct device *dev = ice_pf_to_dev(pf);
1613 	int err;
1614 
1615 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1616 		ice_free_vfs(pf);
1617 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1618 		return 0;
1619 
1620 	if (num_vfs > pf->num_vfs_supported) {
1621 		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1622 			num_vfs, pf->num_vfs_supported);
1623 		return -EOPNOTSUPP;
1624 	}
1625 
1626 	dev_info(dev, "Enabling %d VFs\n", num_vfs);
1627 	err = ice_ena_vfs(pf, num_vfs);
1628 	if (err) {
1629 		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1630 		return err;
1631 	}
1632 
1633 	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1634 	return 0;
1635 }
1636 
1637 /**
1638  * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
1639  * @pf: PF to enable SR-IOV on
1640  */
1641 static int ice_check_sriov_allowed(struct ice_pf *pf)
1642 {
1643 	struct device *dev = ice_pf_to_dev(pf);
1644 
1645 	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1646 		dev_err(dev, "This device is not capable of SR-IOV\n");
1647 		return -EOPNOTSUPP;
1648 	}
1649 
1650 	if (ice_is_safe_mode(pf)) {
1651 		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1652 		return -EOPNOTSUPP;
1653 	}
1654 
1655 	if (!ice_pf_state_is_nominal(pf)) {
1656 		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1657 		return -EBUSY;
1658 	}
1659 
1660 	return 0;
1661 }
1662 
1663 /**
1664  * ice_sriov_configure - Enable or change number of VFs via sysfs
1665  * @pdev: pointer to a pci_dev structure
1666  * @num_vfs: number of VFs to allocate or 0 to free VFs
1667  *
1668  * This function is called when the user updates the number of VFs in sysfs. On
1669  * success return whatever num_vfs was set to by the caller. Return negative on
1670  * failure.
1671  */
1672 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1673 {
1674 	struct ice_pf *pf = pci_get_drvdata(pdev);
1675 	struct device *dev = ice_pf_to_dev(pf);
1676 	int err;
1677 
1678 	err = ice_check_sriov_allowed(pf);
1679 	if (err)
1680 		return err;
1681 
1682 	if (!num_vfs) {
1683 		if (!pci_vfs_assigned(pdev)) {
1684 			ice_free_vfs(pf);
1685 			return 0;
1686 		}
1687 
1688 		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1689 		return -EBUSY;
1690 	}
1691 
1692 	err = ice_pci_sriov_ena(pf, num_vfs);
1693 	if (err)
1694 		return err;
1695 
1696 	return num_vfs;
1697 }
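/* Usage sketch (hedged, assuming the standard PCI SR-IOV sysfs interface; the
 * PCI address below is only an example placeholder): the PCI core invokes
 * ice_sriov_configure() when sriov_numvfs is written, so enabling four VFs
 * from user space typically looks like
 *
 *   echo 4 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
 *
 * and writing 0 frees the VFs again, provided none are assigned to a VM.
 */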
1698 
1699 /**
1700  * ice_process_vflr_event - Free VF resources via IRQ calls
1701  * @pf: pointer to the PF structure
1702  *
1703  * called from the VFLR IRQ handler to
1704  * free up VF resources and state variables
1705  */
1706 void ice_process_vflr_event(struct ice_pf *pf)
1707 {
1708 	struct ice_hw *hw = &pf->hw;
1709 	unsigned int vf_id;
1710 	u32 reg;
1711 
1712 	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1713 	    !pf->num_alloc_vfs)
1714 		return;
1715 
1716 	ice_for_each_vf(pf, vf_id) {
1717 		struct ice_vf *vf = &pf->vf[vf_id];
1718 		u32 reg_idx, bit_idx;
1719 
1720 		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1721 		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
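		/* e.g. with a hypothetical vf_base_id of 64 and vf_id 5, the
		 * absolute VF index is 69, so reg_idx = 69 / 32 = 2 and
		 * bit_idx = 69 % 32 = 5, i.e. bit 5 of GLGEN_VFLRSTAT(2)
		 */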
1722 		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
1723 		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1724 		if (reg & BIT(bit_idx))
1725 			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1726 			ice_reset_vf(vf, true);
1727 	}
1728 }
1729 
1730 /**
1731  * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
1732  * @vf: pointer to the VF info
1733  */
1734 static void ice_vc_reset_vf(struct ice_vf *vf)
1735 {
1736 	ice_vc_notify_vf_reset(vf);
1737 	ice_reset_vf(vf, false);
1738 }
1739 
1740 /**
1741  * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1742  * @pf: PF used to index all VFs
1743  * @pfq: queue index relative to the PF's function space
1744  *
1745  * If no VF is found who owns the pfq then return NULL, otherwise return a
1746  * pointer to the VF who owns the pfq
1747  */
1748 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
1749 {
1750 	unsigned int vf_id;
1751 
1752 	ice_for_each_vf(pf, vf_id) {
1753 		struct ice_vf *vf = &pf->vf[vf_id];
1754 		struct ice_vsi *vsi;
1755 		u16 rxq_idx;
1756 
1757 		vsi = pf->vsi[vf->lan_vsi_idx];
1758 
1759 		ice_for_each_rxq(vsi, rxq_idx)
1760 			if (vsi->rxq_map[rxq_idx] == pfq)
1761 				return vf;
1762 	}
1763 
1764 	return NULL;
1765 }
1766 
1767 /**
1768  * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1769  * @pf: PF used for conversion
1770  * @globalq: global queue index used to convert to PF space queue index
1771  */
1772 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1773 {
1774 	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1775 }
1776 
1777 /**
1778  * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1779  * @pf: PF that the LAN overflow event happened on
1780  * @event: structure holding the event information for the LAN overflow event
1781  *
1782  * Determine if the LAN overflow event was caused by a VF queue. If it was not
1783  * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1784  * reset on the offending VF.
1785  */
1786 void
1787 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
1788 {
1789 	u32 gldcb_rtctq, queue;
1790 	struct ice_vf *vf;
1791 
1792 	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
1793 	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
1794 
1795 	/* event returns device global Rx queue number */
1796 	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
1797 		GLDCB_RTCTQ_RXQNUM_S;
1798 
1799 	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
1800 	if (!vf)
1801 		return;
1802 
1803 	ice_vc_reset_vf(vf);
1804 }
1805 
1806 /**
1807  * ice_vc_send_msg_to_vf - Send message to VF
1808  * @vf: pointer to the VF info
1809  * @v_opcode: virtual channel opcode
1810  * @v_retval: virtual channel return value
1811  * @msg: pointer to the msg buffer
1812  * @msglen: msg length
1813  *
1814  * send msg to VF
1815  */
1816 static int
1817 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1818 		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1819 {
1820 	enum ice_status aq_ret;
1821 	struct device *dev;
1822 	struct ice_pf *pf;
1823 
1824 	if (!vf)
1825 		return -EINVAL;
1826 
1827 	pf = vf->pf;
1828 	if (ice_validate_vf_id(pf, vf->vf_id))
1829 		return -EINVAL;
1830 
1831 	dev = ice_pf_to_dev(pf);
1832 
1833 	/* single place to detect unsuccessful return values */
1834 	if (v_retval) {
1835 		vf->num_inval_msgs++;
1836 		dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1837 			 v_opcode, v_retval);
1838 		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1839 			dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1840 				vf->vf_id);
1841 			dev_err(dev, "Use PF Control I/F to enable the VF\n");
1842 			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1843 			return -EIO;
1844 		}
1845 	} else {
1846 		vf->num_valid_msgs++;
1847 		/* reset the invalid counter, if a valid message is received. */
1848 		vf->num_inval_msgs = 0;
1849 	}
1850 
1851 	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1852 				       msg, msglen, NULL);
1853 	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1854 		dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
1855 			 vf->vf_id, ice_stat_str(aq_ret),
1856 			 ice_aq_str(pf->hw.mailboxq.sq_last_status));
1857 		return -EIO;
1858 	}
1859 
1860 	return 0;
1861 }
1862 
1863 /**
1864  * ice_vc_get_ver_msg
1865  * @vf: pointer to the VF info
1866  * @msg: pointer to the msg buffer
1867  *
1868  * called from the VF to request the API version used by the PF
1869  */
1870 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1871 {
1872 	struct virtchnl_version_info info = {
1873 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1874 	};
1875 
1876 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
1877 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1878 	if (VF_IS_V10(&vf->vf_ver))
1879 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1880 
1881 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1882 				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1883 				     sizeof(struct virtchnl_version_info));
1884 }
1885 
1886 /**
1887  * ice_vc_get_max_frame_size - get max frame size allowed for VF
1888  * @vf: VF used to determine max frame size
1889  *
1890  * Max frame size is determined based on the current port's max frame size and
1891  * whether a port VLAN is configured on this VF. The VF is not aware whether
1892  * it's in a port VLAN so the PF needs to account for this in max frame size
1893  * checks and sending the max frame size to the VF.
1894  */
1895 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
1896 {
1897 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
1898 	struct ice_port_info *pi = vsi->port_info;
1899 	u16 max_frame_size;
1900 
1901 	max_frame_size = pi->phy.link_info.max_frame_size;
1902 
1903 	if (vf->port_vlan_info)
1904 		max_frame_size -= VLAN_HLEN;
1905 
1906 	return max_frame_size;
1907 }
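/* For illustration: VLAN_HLEN is the 4-byte 802.1Q tag, so if the port reports
 * a max_frame_size of, say, 9728 bytes and a port VLAN is configured, the VF
 * is told 9724 so that the PF-inserted tag still fits (9728 is only an example
 * value, not a figure taken from this driver).
 */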
1908 
1909 /**
1910  * ice_vc_get_vf_res_msg
1911  * @vf: pointer to the VF info
1912  * @msg: pointer to the msg buffer
1913  *
1914  * called from the VF to request its resources
1915  */
1916 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
1917 {
1918 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1919 	struct virtchnl_vf_resource *vfres = NULL;
1920 	struct ice_pf *pf = vf->pf;
1921 	struct ice_vsi *vsi;
1922 	int len = 0;
1923 	int ret;
1924 
1925 	if (ice_check_vf_init(pf, vf)) {
1926 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1927 		goto err;
1928 	}
1929 
1930 	len = sizeof(struct virtchnl_vf_resource);
1931 
1932 	vfres = kzalloc(len, GFP_KERNEL);
1933 	if (!vfres) {
1934 		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1935 		len = 0;
1936 		goto err;
1937 	}
1938 	if (VF_IS_V11(&vf->vf_ver))
1939 		vf->driver_caps = *(u32 *)msg;
1940 	else
1941 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1942 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1943 				  VIRTCHNL_VF_OFFLOAD_VLAN;
1944 
1945 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1946 	vsi = pf->vsi[vf->lan_vsi_idx];
1947 	if (!vsi) {
1948 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1949 		goto err;
1950 	}
1951 
1952 	if (!vsi->info.pvid)
1953 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1954 
1955 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1956 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1957 	} else {
1958 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1959 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1960 		else
1961 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1962 	}
1963 
1964 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1965 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1966 
1967 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1968 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1969 
1970 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1971 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1972 
1973 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1974 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1975 
1976 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1977 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1978 
1979 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1980 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1981 
1982 	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1983 		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1984 
1985 	vfres->num_vsis = 1;
1986 	/* Tx and Rx queue are equal for VF */
1987 	vfres->num_queue_pairs = vsi->num_txq;
1988 	vfres->max_vectors = pf->num_msix_per_vf;
1989 	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1990 	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1991 	vfres->max_mtu = ice_vc_get_max_frame_size(vf);
1992 
1993 	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1994 	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1995 	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1996 	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1997 			vf->dflt_lan_addr.addr);
1998 
1999 	/* match guest capabilities */
2000 	vf->driver_caps = vfres->vf_cap_flags;
2001 
2002 	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
2003 
2004 err:
2005 	/* send the response back to the VF */
2006 	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
2007 				    (u8 *)vfres, len);
2008 
2009 	kfree(vfres);
2010 	return ret;
2011 }
2012 
2013 /**
2014  * ice_vc_reset_vf_msg
2015  * @vf: pointer to the VF info
2016  *
2017  * called from the VF to reset itself,
2018  * unlike other virtchnl messages, PF driver
2019  * doesn't send the response back to the VF
2020  */
2021 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2022 {
2023 	if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2024 		ice_reset_vf(vf, false);
2025 }
2026 
2027 /**
2028  * ice_find_vsi_from_id
2029  * @pf: the PF structure to search for the VSI
2030  * @id: ID of the VSI it is searching for
2031  *
2032  * searches for the VSI with the given ID
2033  */
2034 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2035 {
2036 	int i;
2037 
2038 	ice_for_each_vsi(pf, i)
2039 		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2040 			return pf->vsi[i];
2041 
2042 	return NULL;
2043 }
2044 
2045 /**
2046  * ice_vc_isvalid_vsi_id
2047  * @vf: pointer to the VF info
2048  * @vsi_id: VF relative VSI ID
2049  *
2050  * check for the valid VSI ID
2051  */
2052 static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2053 {
2054 	struct ice_pf *pf = vf->pf;
2055 	struct ice_vsi *vsi;
2056 
2057 	vsi = ice_find_vsi_from_id(pf, vsi_id);
2058 
2059 	return (vsi && (vsi->vf_id == vf->vf_id));
2060 }
2061 
2062 /**
2063  * ice_vc_isvalid_q_id
2064  * @vf: pointer to the VF info
2065  * @vsi_id: VSI ID
2066  * @qid: VSI relative queue ID
2067  *
2068  * check for the valid queue ID
2069  */
2070 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2071 {
2072 	struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2073 	/* allocated Tx and Rx queues should always be equal for VF VSI */
2074 	return (vsi && (qid < vsi->alloc_txq));
2075 }
2076 
2077 /**
2078  * ice_vc_isvalid_ring_len
2079  * @ring_len: length of ring
2080  *
2081  * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
2082  * or zero
2083  */
2084 static bool ice_vc_isvalid_ring_len(u16 ring_len)
2085 {
2086 	return ring_len == 0 ||
2087 	       (ring_len >= ICE_MIN_NUM_DESC &&
2088 		ring_len <= ICE_MAX_NUM_DESC &&
2089 		!(ring_len % ICE_REQ_DESC_MULTIPLE));
2090 }
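/* For example, assuming ICE_MIN_NUM_DESC/ICE_MAX_NUM_DESC of 64/8160 and an
 * ICE_REQ_DESC_MULTIPLE of 32 (values defined elsewhere in the driver), a
 * ring_len of 512 is accepted while 500 is rejected because it is not a
 * multiple of 32.
 */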
2091 
2092 /**
2093  * ice_vc_config_rss_key
2094  * @vf: pointer to the VF info
2095  * @msg: pointer to the msg buffer
2096  *
2097  * Configure the VF's RSS key
2098  */
2099 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2100 {
2101 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2102 	struct virtchnl_rss_key *vrk =
2103 		(struct virtchnl_rss_key *)msg;
2104 	struct ice_pf *pf = vf->pf;
2105 	struct ice_vsi *vsi;
2106 
2107 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2108 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2109 		goto error_param;
2110 	}
2111 
2112 	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2113 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2114 		goto error_param;
2115 	}
2116 
2117 	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2118 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2119 		goto error_param;
2120 	}
2121 
2122 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2123 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2124 		goto error_param;
2125 	}
2126 
2127 	vsi = pf->vsi[vf->lan_vsi_idx];
2128 	if (!vsi) {
2129 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2130 		goto error_param;
2131 	}
2132 
2133 	if (ice_set_rss(vsi, vrk->key, NULL, 0))
2134 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2135 error_param:
2136 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2137 				     NULL, 0);
2138 }
2139 
2140 /**
2141  * ice_vc_config_rss_lut
2142  * @vf: pointer to the VF info
2143  * @msg: pointer to the msg buffer
2144  *
2145  * Configure the VF's RSS LUT
2146  */
2147 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2148 {
2149 	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2150 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2151 	struct ice_pf *pf = vf->pf;
2152 	struct ice_vsi *vsi;
2153 
2154 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2155 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2156 		goto error_param;
2157 	}
2158 
2159 	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2160 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2161 		goto error_param;
2162 	}
2163 
2164 	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2165 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2166 		goto error_param;
2167 	}
2168 
2169 	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2170 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2171 		goto error_param;
2172 	}
2173 
2174 	vsi = pf->vsi[vf->lan_vsi_idx];
2175 	if (!vsi) {
2176 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2177 		goto error_param;
2178 	}
2179 
2180 	if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2181 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2182 error_param:
2183 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2184 				     NULL, 0);
2185 }
2186 
2187 /**
2188  * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2189  * @vf: the VF being reset
2190  *
2191  * The max poll time is ~800 ms, which is about the maximum time it takes
2192  * for a VF to be reset and/or a VF driver to be removed.
2193  */
2194 static void ice_wait_on_vf_reset(struct ice_vf *vf)
2195 {
2196 	int i;
2197 
2198 	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2199 		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2200 			break;
2201 		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2202 	}
2203 }
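/* The ~800 ms figure above follows from the loop bounds: assuming
 * ICE_MAX_VF_RESET_TRIES of 40 and ICE_MAX_VF_RESET_SLEEP_MS of 20 (both
 * defined elsewhere), the worst case is 40 * 20 ms = 800 ms of sleeping.
 */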
2204 
2205 /**
2206  * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2207  * @vf: VF to check if it's ready to be configured/queried
2208  *
2209  * The purpose of this function is to make sure the VF is not in reset, not
2210  * disabled, and initialized so it can be configured and/or queried by a host
2211  * administrator.
2212  */
2213 static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2214 {
2215 	struct ice_pf *pf;
2216 
2217 	ice_wait_on_vf_reset(vf);
2218 
2219 	if (ice_is_vf_disabled(vf))
2220 		return -EINVAL;
2221 
2222 	pf = vf->pf;
2223 	if (ice_check_vf_init(pf, vf))
2224 		return -EBUSY;
2225 
2226 	return 0;
2227 }
2228 
2229 /**
2230  * ice_set_vf_spoofchk
2231  * @netdev: network interface device structure
2232  * @vf_id: VF identifier
2233  * @ena: flag to enable or disable feature
2234  *
2235  * Enable or disable VF spoof checking
2236  */
2237 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2238 {
2239 	struct ice_netdev_priv *np = netdev_priv(netdev);
2240 	struct ice_pf *pf = np->vsi->back;
2241 	struct ice_vsi_ctx *ctx;
2242 	struct ice_vsi *vf_vsi;
2243 	enum ice_status status;
2244 	struct device *dev;
2245 	struct ice_vf *vf;
2246 	int ret;
2247 
2248 	dev = ice_pf_to_dev(pf);
2249 	if (ice_validate_vf_id(pf, vf_id))
2250 		return -EINVAL;
2251 
2252 	vf = &pf->vf[vf_id];
2253 	ret = ice_check_vf_ready_for_cfg(vf);
2254 	if (ret)
2255 		return ret;
2256 
2257 	vf_vsi = pf->vsi[vf->lan_vsi_idx];
2258 	if (!vf_vsi) {
2259 		netdev_err(netdev, "VSI %d for VF %d is null\n",
2260 			   vf->lan_vsi_idx, vf->vf_id);
2261 		return -EINVAL;
2262 	}
2263 
2264 	if (vf_vsi->type != ICE_VSI_VF) {
2265 		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
2266 			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2267 		return -ENODEV;
2268 	}
2269 
2270 	if (ena == vf->spoofchk) {
2271 		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2272 		return 0;
2273 	}
2274 
2275 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2276 	if (!ctx)
2277 		return -ENOMEM;
2278 
2279 	ctx->info.sec_flags = vf_vsi->info.sec_flags;
2280 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2281 	if (ena) {
2282 		ctx->info.sec_flags |=
2283 			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2284 			(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2285 			 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2286 	} else {
2287 		ctx->info.sec_flags &=
2288 			~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2289 			  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2290 			   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2291 	}
2292 
2293 	status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2294 	if (status) {
2295 		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d, error %s\n",
2296 			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2297 			ice_stat_str(status));
2298 		ret = -EIO;
2299 		goto out;
2300 	}
2301 
2302 	/* only update spoofchk state and VSI context on success */
2303 	vf_vsi->info.sec_flags = ctx->info.sec_flags;
2304 	vf->spoofchk = ena;
2305 
2306 out:
2307 	kfree(ctx);
2308 	return ret;
2309 }
2310 
2311 /**
2312  * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2313  * @pf: PF structure for accessing VF(s)
2314  *
2315  * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2316  * else return true
2317  */
2318 bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2319 {
2320 	int vf_idx;
2321 
2322 	ice_for_each_vf(pf, vf_idx) {
2323 		struct ice_vf *vf = &pf->vf[vf_idx];
2324 
2325 		/* found a VF that has promiscuous mode configured */
2326 		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2327 		    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2328 			return true;
2329 	}
2330 
2331 	return false;
2332 }
2333 
2334 /**
2335  * ice_vc_cfg_promiscuous_mode_msg
2336  * @vf: pointer to the VF info
2337  * @msg: pointer to the msg buffer
2338  *
2339  * called from the VF to configure VF VSIs promiscuous mode
2340  */
2341 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2342 {
2343 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2344 	struct virtchnl_promisc_info *info =
2345 	    (struct virtchnl_promisc_info *)msg;
2346 	struct ice_pf *pf = vf->pf;
2347 	struct ice_vsi *vsi;
2348 	struct device *dev;
2349 	bool rm_promisc;
2350 	int ret = 0;
2351 
2352 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2353 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2354 		goto error_param;
2355 	}
2356 
2357 	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2358 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2359 		goto error_param;
2360 	}
2361 
2362 	vsi = pf->vsi[vf->lan_vsi_idx];
2363 	if (!vsi) {
2364 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2365 		goto error_param;
2366 	}
2367 
2368 	dev = ice_pf_to_dev(pf);
2369 	if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2370 		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2371 			vf->vf_id);
2372 		/* Leave v_ret alone, lie to the VF on purpose. */
2373 		goto error_param;
2374 	}
2375 
2376 	rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
2377 		!(info->flags & FLAG_VF_MULTICAST_PROMISC);
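	/* rm_promisc is true only when the VF requested neither unicast nor
	 * multicast promiscuous mode, i.e. it is leaving promiscuous mode, so
	 * spoof checking and VLAN pruning can be tightened again below
	 */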
2378 
2379 	if (vsi->num_vlan || vf->port_vlan_info) {
2380 		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2381 		struct net_device *pf_netdev;
2382 
2383 		if (!pf_vsi) {
2384 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2385 			goto error_param;
2386 		}
2387 
2388 		pf_netdev = pf_vsi->netdev;
2389 
2390 		ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2391 		if (ret) {
2392 			dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
2393 				rm_promisc ? "ON" : "OFF", vf->vf_id,
2394 				vsi->vsi_num);
2395 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2396 		}
2397 
2398 		ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
2399 		if (ret) {
2400 			dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
2401 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2402 			goto error_param;
2403 		}
2404 	}
2405 
2406 	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
2407 		bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);
2408 
2409 		if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
2410 			/* only attempt to set the default forwarding VSI if
2411 			 * it's not currently set
2412 			 */
2413 			ret = ice_set_dflt_vsi(pf->first_sw, vsi);
2414 		else if (!set_dflt_vsi &&
2415 			 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
2416 			/* only attempt to free the default forwarding VSI if we
2417 			 * are the owner
2418 			 */
2419 			ret = ice_clear_dflt_vsi(pf->first_sw);
2420 
2421 		if (ret) {
2422 			dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
2423 				set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
2424 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2425 			goto error_param;
2426 		}
2427 	} else {
2428 		enum ice_status status;
2429 		u8 promisc_m;
2430 
2431 		if (info->flags & FLAG_VF_UNICAST_PROMISC) {
2432 			if (vf->port_vlan_info || vsi->num_vlan)
2433 				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2434 			else
2435 				promisc_m = ICE_UCAST_PROMISC_BITS;
2436 		} else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
2437 			if (vf->port_vlan_info || vsi->num_vlan)
2438 				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
2439 			else
2440 				promisc_m = ICE_MCAST_PROMISC_BITS;
2441 		} else {
2442 			if (vf->port_vlan_info || vsi->num_vlan)
2443 				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
2444 			else
2445 				promisc_m = ICE_UCAST_PROMISC_BITS;
2446 		}
2447 
2448 		/* Configure multicast/unicast with or without VLAN promiscuous
2449 		 * mode
2450 		 */
2451 		status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
2452 		if (status) {
2453 			dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
2454 				rm_promisc ? "dis" : "en", vf->vf_id,
2455 				ice_stat_str(status));
2456 			v_ret = ice_err_to_virt_err(status);
2457 			goto error_param;
2458 		} else {
2459 			dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
2460 				rm_promisc ? "dis" : "en", vf->vf_id);
2461 		}
2462 	}
2463 
2464 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2465 		set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2466 	else
2467 		clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
2468 
2469 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
2470 		set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2471 	else
2472 		clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
2473 
2474 error_param:
2475 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2476 				     v_ret, NULL, 0);
2477 }
2478 
2479 /**
2480  * ice_vc_get_stats_msg
2481  * @vf: pointer to the VF info
2482  * @msg: pointer to the msg buffer
2483  *
2484  * called from the VF to get VSI stats
2485  */
2486 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
2487 {
2488 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2489 	struct virtchnl_queue_select *vqs =
2490 		(struct virtchnl_queue_select *)msg;
2491 	struct ice_eth_stats stats = { 0 };
2492 	struct ice_pf *pf = vf->pf;
2493 	struct ice_vsi *vsi;
2494 
2495 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2496 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2497 		goto error_param;
2498 	}
2499 
2500 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2501 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2502 		goto error_param;
2503 	}
2504 
2505 	vsi = pf->vsi[vf->lan_vsi_idx];
2506 	if (!vsi) {
2507 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2508 		goto error_param;
2509 	}
2510 
2511 	ice_update_eth_stats(vsi);
2512 
2513 	stats = vsi->eth_stats;
2514 
2515 error_param:
2516 	/* send the response to the VF */
2517 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
2518 				     (u8 *)&stats, sizeof(stats));
2519 }
2520 
2521 /**
2522  * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2523  * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2524  *
2525  * Return true on successful validation, else false
2526  */
2527 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2528 {
2529 	if ((!vqs->rx_queues && !vqs->tx_queues) ||
2530 	    vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2531 	    vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
2532 		return false;
2533 
2534 	return true;
2535 }
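/* As an example, assuming ICE_MAX_RSS_QS_PER_VF is 16 (defined elsewhere), a
 * tx_queues bitmap of 0x0003 (queues 0 and 1) passes, while 0x10000 fails
 * because it selects a queue beyond the per-VF maximum.
 */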
2536 
2537 /**
2538  * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
2539  * @vsi: VSI of the VF to configure
2540  * @q_idx: VF queue index used to determine the queue in the PF's space
2541  */
2542 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2543 {
2544 	struct ice_hw *hw = &vsi->back->hw;
2545 	u32 pfq = vsi->txq_map[q_idx];
2546 	u32 reg;
2547 
2548 	reg = rd32(hw, QINT_TQCTL(pfq));
2549 
2550 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
2551 	 * this is most likely a poll mode VF driver, so don't enable an
2552 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2553 	 */
2554 	if (!(reg & QINT_TQCTL_MSIX_INDX_M))
2555 		return;
2556 
2557 	wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
2558 }
2559 
2560 /**
2561  * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
2562  * @vsi: VSI of the VF to configure
2563  * @q_idx: VF queue index used to determine the queue in the PF's space
2564  */
2565 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
2566 {
2567 	struct ice_hw *hw = &vsi->back->hw;
2568 	u32 pfq = vsi->rxq_map[q_idx];
2569 	u32 reg;
2570 
2571 	reg = rd32(hw, QINT_RQCTL(pfq));
2572 
2573 	/* MSI-X index 0 in the VF's space is always for the OICR, which means
2574 	 * this is most likely a poll mode VF driver, so don't enable an
2575 	 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
2576 	 */
2577 	if (!(reg & QINT_RQCTL_MSIX_INDX_M))
2578 		return;
2579 
2580 	wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
2581 }
2582 
2583 /**
2584  * ice_vc_ena_qs_msg
2585  * @vf: pointer to the VF info
2586  * @msg: pointer to the msg buffer
2587  *
2588  * called from the VF to enable all or specific queue(s)
2589  */
2590 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
2591 {
2592 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2593 	struct virtchnl_queue_select *vqs =
2594 	    (struct virtchnl_queue_select *)msg;
2595 	struct ice_pf *pf = vf->pf;
2596 	struct ice_vsi *vsi;
2597 	unsigned long q_map;
2598 	u16 vf_q_id;
2599 
2600 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2601 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2602 		goto error_param;
2603 	}
2604 
2605 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2606 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2607 		goto error_param;
2608 	}
2609 
2610 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2611 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2612 		goto error_param;
2613 	}
2614 
2615 	vsi = pf->vsi[vf->lan_vsi_idx];
2616 	if (!vsi) {
2617 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2618 		goto error_param;
2619 	}
2620 
2621 	/* Enable only Rx rings, Tx rings were enabled by the FW when the
2622 	 * Tx queue group list was configured and the context bits were
2623 	 * programmed using ice_vsi_cfg_txqs
2624 	 */
2625 	q_map = vqs->rx_queues;
2626 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2627 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2628 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2629 			goto error_param;
2630 		}
2631 
2632 		/* Skip queue if enabled */
2633 		if (test_bit(vf_q_id, vf->rxq_ena))
2634 			continue;
2635 
2636 		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
2637 			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2638 				vf_q_id, vsi->vsi_num);
2639 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2640 			goto error_param;
2641 		}
2642 
2643 		ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
2644 		set_bit(vf_q_id, vf->rxq_ena);
2645 	}
2646 
2647 	vsi = pf->vsi[vf->lan_vsi_idx];
2648 	q_map = vqs->tx_queues;
2649 	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2650 		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2651 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2652 			goto error_param;
2653 		}
2654 
2655 		/* Skip queue if enabled */
2656 		if (test_bit(vf_q_id, vf->txq_ena))
2657 			continue;
2658 
2659 		ice_vf_ena_txq_interrupt(vsi, vf_q_id);
2660 		set_bit(vf_q_id, vf->txq_ena);
2661 	}
2662 
2663 	/* Set flag to indicate that queues are enabled */
2664 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
2665 		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2666 
2667 error_param:
2668 	/* send the response to the VF */
2669 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2670 				     NULL, 0);
2671 }
2672 
2673 /**
2674  * ice_vc_dis_qs_msg
2675  * @vf: pointer to the VF info
2676  * @msg: pointer to the msg buffer
2677  *
2678  * called from the VF to disable all or specific
2679  * queue(s)
2680  */
2681 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2682 {
2683 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2684 	struct virtchnl_queue_select *vqs =
2685 	    (struct virtchnl_queue_select *)msg;
2686 	struct ice_pf *pf = vf->pf;
2687 	struct ice_vsi *vsi;
2688 	unsigned long q_map;
2689 	u16 vf_q_id;
2690 
2691 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2692 	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2693 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2694 		goto error_param;
2695 	}
2696 
2697 	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2698 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2699 		goto error_param;
2700 	}
2701 
2702 	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
2703 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2704 		goto error_param;
2705 	}
2706 
2707 	vsi = pf->vsi[vf->lan_vsi_idx];
2708 	if (!vsi) {
2709 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2710 		goto error_param;
2711 	}
2712 
2713 	if (vqs->tx_queues) {
2714 		q_map = vqs->tx_queues;
2715 
2716 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2717 			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2718 			struct ice_txq_meta txq_meta = { 0 };
2719 
2720 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2721 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2722 				goto error_param;
2723 			}
2724 
2725 			/* Skip queue if not enabled */
2726 			if (!test_bit(vf_q_id, vf->txq_ena))
2727 				continue;
2728 
2729 			ice_fill_txq_meta(vsi, ring, &txq_meta);
2730 
2731 			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2732 						 ring, &txq_meta)) {
2733 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2734 					vf_q_id, vsi->vsi_num);
2735 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2736 				goto error_param;
2737 			}
2738 
2739 			/* Clear enabled queues flag */
2740 			clear_bit(vf_q_id, vf->txq_ena);
2741 		}
2742 	}
2743 
2744 	q_map = vqs->rx_queues;
2745 	/* speed up Rx queue disable by batching them if possible */
2746 	if (q_map &&
2747 	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
2748 		if (ice_vsi_stop_all_rx_rings(vsi)) {
2749 			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
2750 				vsi->vsi_num);
2751 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2752 			goto error_param;
2753 		}
2754 
2755 		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
2756 	} else if (q_map) {
2757 		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
2758 			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2759 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2760 				goto error_param;
2761 			}
2762 
2763 			/* Skip queue if not enabled */
2764 			if (!test_bit(vf_q_id, vf->rxq_ena))
2765 				continue;
2766 
2767 			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
2768 						     true)) {
2769 				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2770 					vf_q_id, vsi->vsi_num);
2771 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2772 				goto error_param;
2773 			}
2774 
2775 			/* Clear enabled queues flag */
2776 			clear_bit(vf_q_id, vf->rxq_ena);
2777 		}
2778 	}
2779 
2780 	/* Clear enabled queues flag */
2781 	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
2782 		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2783 
2784 error_param:
2785 	/* send the response to the VF */
2786 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2787 				     NULL, 0);
2788 }
2789 
2790 /**
2791  * ice_cfg_interrupt
2792  * @vf: pointer to the VF info
2793  * @vsi: the VSI being configured
2794  * @vector_id: vector ID
2795  * @map: vector map for mapping vectors to queues
2796  * @q_vector: structure for interrupt vector
2797  * configure the IRQ to queue map
2798  */
2799 static int
2800 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2801 		  struct virtchnl_vector_map *map,
2802 		  struct ice_q_vector *q_vector)
2803 {
2804 	u16 vsi_q_id, vsi_q_id_idx;
2805 	unsigned long qmap;
2806 
2807 	q_vector->num_ring_rx = 0;
2808 	q_vector->num_ring_tx = 0;
2809 
2810 	qmap = map->rxq_map;
2811 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2812 		vsi_q_id = vsi_q_id_idx;
2813 
2814 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2815 			return VIRTCHNL_STATUS_ERR_PARAM;
2816 
2817 		q_vector->num_ring_rx++;
2818 		q_vector->rx.itr_idx = map->rxitr_idx;
2819 		vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2820 		ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2821 				      q_vector->rx.itr_idx);
2822 	}
2823 
2824 	qmap = map->txq_map;
2825 	for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2826 		vsi_q_id = vsi_q_id_idx;
2827 
2828 		if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2829 			return VIRTCHNL_STATUS_ERR_PARAM;
2830 
2831 		q_vector->num_ring_tx++;
2832 		q_vector->tx.itr_idx = map->txitr_idx;
2833 		vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2834 		ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2835 				      q_vector->tx.itr_idx);
2836 	}
2837 
2838 	return VIRTCHNL_STATUS_SUCCESS;
2839 }
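/* As a sketch of the mapping: a virtchnl_vector_map with rxq_map = 0x3 and
 * txq_map = 0x3 ties VSI queues 0 and 1 (both Rx and Tx) to this q_vector,
 * writing the requested ITR indexes into the per-queue interrupt cause
 * registers via ice_cfg_rxq_interrupt()/ice_cfg_txq_interrupt().
 */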
2840 
2841 /**
2842  * ice_vc_cfg_irq_map_msg
2843  * @vf: pointer to the VF info
2844  * @msg: pointer to the msg buffer
2845  *
2846  * called from the VF to configure the IRQ to queue map
2847  */
2848 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2849 {
2850 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2851 	u16 num_q_vectors_mapped, vsi_id, vector_id;
2852 	struct virtchnl_irq_map_info *irqmap_info;
2853 	struct virtchnl_vector_map *map;
2854 	struct ice_pf *pf = vf->pf;
2855 	struct ice_vsi *vsi;
2856 	int i;
2857 
2858 	irqmap_info = (struct virtchnl_irq_map_info *)msg;
2859 	num_q_vectors_mapped = irqmap_info->num_vectors;
2860 
2861 	/* Check to make sure number of VF vectors mapped is not greater than
2862 	 * number of VF vectors originally allocated, and check that
2863 	 * there is actually at least a single VF queue vector mapped
2864 	 */
2865 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2866 	    pf->num_msix_per_vf < num_q_vectors_mapped ||
2867 	    !num_q_vectors_mapped) {
2868 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2869 		goto error_param;
2870 	}
2871 
2872 	vsi = pf->vsi[vf->lan_vsi_idx];
2873 	if (!vsi) {
2874 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2875 		goto error_param;
2876 	}
2877 
2878 	for (i = 0; i < num_q_vectors_mapped; i++) {
2879 		struct ice_q_vector *q_vector;
2880 
2881 		map = &irqmap_info->vecmap[i];
2882 
2883 		vector_id = map->vector_id;
2884 		vsi_id = map->vsi_id;
2885 		/* vector_id is always 0-based for each VF, and can never be
2886 		 * larger than or equal to the max allowed interrupts per VF
2887 		 */
2888 		if (!(vector_id < pf->num_msix_per_vf) ||
2889 		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2890 		    (!vector_id && (map->rxq_map || map->txq_map))) {
2891 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2892 			goto error_param;
2893 		}
2894 
2895 		/* No need to map VF miscellaneous or rogue vector */
2896 		if (!vector_id)
2897 			continue;
2898 
2899 		/* Subtract the non-queue vector from the vector_id passed by
2900 		 * the VF to get the VSI queue vector array index
2901 		 */
2902 		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2903 		if (!q_vector) {
2904 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2905 			goto error_param;
2906 		}
2907 
2908 		/* look out for an invalid queue index */
2909 		v_ret = (enum virtchnl_status_code)
2910 			ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2911 		if (v_ret)
2912 			goto error_param;
2913 	}
2914 
2915 error_param:
2916 	/* send the response to the VF */
2917 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2918 				     NULL, 0);
2919 }
2920 
2921 /**
2922  * ice_vc_cfg_qs_msg
2923  * @vf: pointer to the VF info
2924  * @msg: pointer to the msg buffer
2925  *
2926  * called from the VF to configure the Rx/Tx queues
2927  */
2928 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2929 {
2930 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2931 	struct virtchnl_vsi_queue_config_info *qci =
2932 	    (struct virtchnl_vsi_queue_config_info *)msg;
2933 	struct virtchnl_queue_pair_info *qpi;
2934 	u16 num_rxq = 0, num_txq = 0;
2935 	struct ice_pf *pf = vf->pf;
2936 	struct ice_vsi *vsi;
2937 	int i;
2938 
2939 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2940 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2941 		goto error_param;
2942 	}
2943 
2944 	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2945 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2946 		goto error_param;
2947 	}
2948 
2949 	vsi = pf->vsi[vf->lan_vsi_idx];
2950 	if (!vsi) {
2951 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2952 		goto error_param;
2953 	}
2954 
2955 	if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
2956 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2957 		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
2958 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2959 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2960 		goto error_param;
2961 	}
2962 
2963 	for (i = 0; i < qci->num_queue_pairs; i++) {
2964 		qpi = &qci->qpair[i];
2965 		if (qpi->txq.vsi_id != qci->vsi_id ||
2966 		    qpi->rxq.vsi_id != qci->vsi_id ||
2967 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
2968 		    qpi->txq.headwb_enabled ||
2969 		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2970 		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2971 		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2972 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2973 			goto error_param;
2974 		}
2975 		/* copy Tx queue info from VF into VSI */
2976 		if (qpi->txq.ring_len > 0) {
2977 			num_txq++;
2978 			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2979 			vsi->tx_rings[i]->count = qpi->txq.ring_len;
2980 		}
2981 
2982 		/* copy Rx queue info from VF into VSI */
2983 		if (qpi->rxq.ring_len > 0) {
2984 			u16 max_frame_size = ice_vc_get_max_frame_size(vf);
2985 
2986 			num_rxq++;
2987 			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2988 			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
2989 
2990 			if (qpi->rxq.databuffer_size != 0 &&
2991 			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2992 			     qpi->rxq.databuffer_size < 1024)) {
2993 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2994 				goto error_param;
2995 			}
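			/* the check above rejects any non-zero
			 * databuffer_size outside the 1024 .. 16256 byte
			 * range ((16 * 1024) - 128 = 16256)
			 */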
2996 			vsi->rx_buf_len = qpi->rxq.databuffer_size;
2997 			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
2998 			if (qpi->rxq.max_pkt_size > max_frame_size ||
2999 			    qpi->rxq.max_pkt_size < 64) {
3000 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3001 				goto error_param;
3002 			}
3003 		}
3004 
3005 		vsi->max_frame = qpi->rxq.max_pkt_size;
3006 		/* add space for the port VLAN since the VF driver is not
3007 		 * expected to account for it in the MTU calculation
3008 		 */
3009 		if (vf->port_vlan_info)
3010 			vsi->max_frame += VLAN_HLEN;
3011 	}
3012 
3013 	/* The VF can request to configure fewer queues than were allocated or
3014 	 * the default allocation, so update the VSI with the new number
3015 	 */
3016 	vsi->num_txq = num_txq;
3017 	vsi->num_rxq = num_rxq;
3018 	/* All queues of VF VSI are in TC 0 */
3019 	vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
3020 	vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
3021 
3022 	if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
3023 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3024 
3025 error_param:
3026 	/* send the response to the VF */
3027 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3028 				     NULL, 0);
3029 }
3030 
3031 /**
3032  * ice_is_vf_trusted
3033  * @vf: pointer to the VF info
3034  */
3035 static bool ice_is_vf_trusted(struct ice_vf *vf)
3036 {
3037 	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3038 }
3039 
3040 /**
3041  * ice_can_vf_change_mac
3042  * @vf: pointer to the VF info
3043  *
3044  * Return true if the VF is allowed to change its MAC filters, false otherwise
3045  */
3046 static bool ice_can_vf_change_mac(struct ice_vf *vf)
3047 {
3048 	/* If the VF MAC address has been set administratively (via the
3049 	 * ndo_set_vf_mac command), then deny permission to the VF to
3050 	 * add/delete unicast MAC addresses, unless the VF is trusted
3051 	 */
3052 	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3053 		return false;
3054 
3055 	return true;
3056 }
3057 
3058 /**
3059  * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3060  * @vf: pointer to the VF info
3061  * @vsi: pointer to the VF's VSI
3062  * @mac_addr: MAC address to add
3063  */
3064 static int
3065 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3066 {
3067 	struct device *dev = ice_pf_to_dev(vf->pf);
3068 	enum ice_status status;
3069 	int ret = 0;
3070 
3071 	/* default unicast MAC already added */
3072 	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3073 		return 0;
3074 
3075 	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3076 		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3077 		return -EPERM;
3078 	}
3079 
3080 	status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3081 	if (status == ICE_ERR_ALREADY_EXISTS) {
3082 		dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3083 			vf->vf_id);
3084 		/* don't return since we might need to update
3085 		 * the primary MAC in ice_vfhw_mac_add() below
3086 		 */
3087 		ret = -EEXIST;
3088 	} else if (status) {
3089 		dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3090 			mac_addr, vf->vf_id, ice_stat_str(status));
3091 		return -EIO;
3092 	} else {
3093 		vf->num_mac++;
3094 	}
3095 
3096 	/* Set the default LAN address to the latest unicast MAC address added
3097 	 * by the VF. The default LAN address is reported by the PF via
3098 	 * ndo_get_vf_config.
3099 	 */
3100 	if (is_unicast_ether_addr(mac_addr))
3101 		ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3102 
3103 	return ret;
3104 }
3105 
3106 /**
3107  * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3108  * @vf: pointer to the VF info
3109  * @vsi: pointer to the VF's VSI
3110  * @mac_addr: MAC address to delete
3111  */
3112 static int
3113 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3114 {
3115 	struct device *dev = ice_pf_to_dev(vf->pf);
3116 	enum ice_status status;
3117 
3118 	if (!ice_can_vf_change_mac(vf) &&
3119 	    ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3120 		return 0;
3121 
3122 	status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3123 	if (status == ICE_ERR_DOES_NOT_EXIST) {
3124 		dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3125 			vf->vf_id);
3126 		return -ENOENT;
3127 	} else if (status) {
3128 		dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3129 			mac_addr, vf->vf_id, ice_stat_str(status));
3130 		return -EIO;
3131 	}
3132 
3133 	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3134 		eth_zero_addr(vf->dflt_lan_addr.addr);
3135 
3136 	vf->num_mac--;
3137 
3138 	return 0;
3139 }
3140 
3141 /**
3142  * ice_vc_handle_mac_addr_msg
3143  * @vf: pointer to the VF info
3144  * @msg: pointer to the msg buffer
3145  * @set: true if MAC filters are being set, false otherwise
3146  *
3147  * add guest MAC address filter
3148  */
3149 static int
3150 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3151 {
3152 	int (*ice_vc_cfg_mac)
3153 		(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3154 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3155 	struct virtchnl_ether_addr_list *al =
3156 	    (struct virtchnl_ether_addr_list *)msg;
3157 	struct ice_pf *pf = vf->pf;
3158 	enum virtchnl_ops vc_op;
3159 	struct ice_vsi *vsi;
3160 	int i;
3161 
3162 	if (set) {
3163 		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3164 		ice_vc_cfg_mac = ice_vc_add_mac_addr;
3165 	} else {
3166 		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3167 		ice_vc_cfg_mac = ice_vc_del_mac_addr;
3168 	}
3169 
3170 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3171 	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3172 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3173 		goto handle_mac_exit;
3174 	}
3175 
3176 	/* If this VF is not privileged, then we can't add more than a
3177 	 * limited number of addresses. Check to make sure that the
3178 	 * additions do not push us over the limit.
3179 	 */
3180 	if (set && !ice_is_vf_trusted(vf) &&
3181 	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3182 		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
3183 			vf->vf_id);
3184 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3185 		goto handle_mac_exit;
3186 	}
3187 
3188 	vsi = pf->vsi[vf->lan_vsi_idx];
3189 	if (!vsi) {
3190 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3191 		goto handle_mac_exit;
3192 	}
3193 
3194 	for (i = 0; i < al->num_elements; i++) {
3195 		u8 *mac_addr = al->list[i].addr;
3196 		int result;
3197 
3198 		if (is_broadcast_ether_addr(mac_addr) ||
3199 		    is_zero_ether_addr(mac_addr))
3200 			continue;
3201 
3202 		result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3203 		if (result == -EEXIST || result == -ENOENT) {
3204 			continue;
3205 		} else if (result) {
3206 			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3207 			goto handle_mac_exit;
3208 		}
3209 	}
3210 
3211 handle_mac_exit:
3212 	/* send the response to the VF */
3213 	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3214 }
3215 
3216 /**
3217  * ice_vc_add_mac_addr_msg
3218  * @vf: pointer to the VF info
3219  * @msg: pointer to the msg buffer
3220  *
3221  * add guest MAC address filter
3222  */
3223 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3224 {
3225 	return ice_vc_handle_mac_addr_msg(vf, msg, true);
3226 }
3227 
3228 /**
3229  * ice_vc_del_mac_addr_msg
3230  * @vf: pointer to the VF info
3231  * @msg: pointer to the msg buffer
3232  *
3233  * remove guest MAC address filter
3234  */
3235 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3236 {
3237 	return ice_vc_handle_mac_addr_msg(vf, msg, false);
3238 }
3239 
3240 /**
3241  * ice_vc_request_qs_msg
3242  * @vf: pointer to the VF info
3243  * @msg: pointer to the msg buffer
3244  *
3245  * VFs get a default number of queues but can use this message to request a
3246  * different number. If the request is successful, PF will reset the VF and
3247  * return 0. If unsuccessful, PF will send message informing VF of number of
3248  * available queue pairs via virtchnl message response to VF.
3249  */
3250 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3251 {
3252 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3253 	struct virtchnl_vf_res_request *vfres =
3254 		(struct virtchnl_vf_res_request *)msg;
3255 	u16 req_queues = vfres->num_queue_pairs;
3256 	struct ice_pf *pf = vf->pf;
3257 	u16 max_allowed_vf_queues;
3258 	u16 tx_rx_queue_left;
3259 	struct device *dev;
3260 	u16 cur_queues;
3261 
3262 	dev = ice_pf_to_dev(pf);
3263 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3264 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3265 		goto error_param;
3266 	}
3267 
3268 	cur_queues = vf->num_vf_qs;
3269 	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3270 				 ice_get_avail_rxq_count(pf));
3271 	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
3272 	if (!req_queues) {
3273 		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3274 			vf->vf_id);
3275 	} else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3276 		dev_err(dev, "VF %d tried to request more than %d queues.\n",
3277 			vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3278 		vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3279 	} else if (req_queues > cur_queues &&
3280 		   req_queues - cur_queues > tx_rx_queue_left) {
3281 		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3282 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3283 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3284 					       ICE_MAX_RSS_QS_PER_VF);
3285 	} else {
3286 		/* request is successful, so reset the VF */
3287 		vf->num_req_qs = req_queues;
3288 		ice_vc_reset_vf(vf);
3289 		dev_info(dev, "VF %d granted request of %u queues.\n",
3290 			 vf->vf_id, req_queues);
3291 		return 0;
3292 	}
3293 
3294 error_param:
3295 	/* send the response to the VF */
3296 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3297 				     v_ret, (u8 *)vfres, sizeof(*vfres));
3298 }
3299 
3300 /**
3301  * ice_set_vf_port_vlan
3302  * @netdev: network interface device structure
3303  * @vf_id: VF identifier
3304  * @vlan_id: VLAN ID being set
3305  * @qos: priority setting
3306  * @vlan_proto: VLAN protocol
3307  *
3308  * program VF Port VLAN ID and/or QoS
3309  */
3310 int
3311 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3312 		     __be16 vlan_proto)
3313 {
3314 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3315 	struct device *dev;
3316 	struct ice_vf *vf;
3317 	u16 vlanprio;
3318 	int ret;
3319 
3320 	dev = ice_pf_to_dev(pf);
3321 	if (ice_validate_vf_id(pf, vf_id))
3322 		return -EINVAL;
3323 
3324 	if (vlan_id >= VLAN_N_VID || qos > 7) {
3325 		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3326 			vf_id, vlan_id, qos);
3327 		return -EINVAL;
3328 	}
3329 
3330 	if (vlan_proto != htons(ETH_P_8021Q)) {
3331 		dev_err(dev, "VF VLAN protocol is not supported\n");
3332 		return -EPROTONOSUPPORT;
3333 	}
3334 
3335 	vf = &pf->vf[vf_id];
3336 	ret = ice_check_vf_ready_for_cfg(vf);
3337 	if (ret)
3338 		return ret;
3339 
3340 	vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
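	/* e.g. assuming the standard VLAN_PRIO_SHIFT of 13, vlan_id 100 with
	 * qos 5 encodes as 100 | (5 << 13) = 0xa064, matching the VLAN TCI
	 * layout with the PCP bits in the top three bits
	 */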
3341 
3342 	if (vf->port_vlan_info == vlanprio) {
3343 		/* duplicate request, so just return success */
3344 		dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3345 		return 0;
3346 	}
3347 
3348 	vf->port_vlan_info = vlanprio;
3349 
3350 	if (vf->port_vlan_info)
3351 		dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3352 			 vlan_id, qos, vf_id);
3353 	else
3354 		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
3355 
3356 	ice_vc_reset_vf(vf);
3357 
3358 	return 0;
3359 }
3360 
3361 /**
3362  * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3363  * @caps: VF driver negotiated capabilities
3364  *
3365  * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3366  */
3367 static bool ice_vf_vlan_offload_ena(u32 caps)
3368 {
3369 	return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3370 }
3371 
3372 /**
3373  * ice_vc_process_vlan_msg
3374  * @vf: pointer to the VF info
3375  * @msg: pointer to the msg buffer
3376  * @add_v: Add VLAN if true, otherwise delete VLAN
3377  *
3378  * Process virtchnl op to add or remove programmed guest VLAN ID
3379  */
3380 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3381 {
3382 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3383 	struct virtchnl_vlan_filter_list *vfl =
3384 	    (struct virtchnl_vlan_filter_list *)msg;
3385 	struct ice_pf *pf = vf->pf;
3386 	bool vlan_promisc = false;
3387 	struct ice_vsi *vsi;
3388 	struct device *dev;
3389 	struct ice_hw *hw;
3390 	int status = 0;
3391 	u8 promisc_m;
3392 	int i;
3393 
3394 	dev = ice_pf_to_dev(pf);
3395 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3396 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3397 		goto error_param;
3398 	}
3399 
3400 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3401 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3402 		goto error_param;
3403 	}
3404 
3405 	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3406 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3407 		goto error_param;
3408 	}
3409 
3410 	for (i = 0; i < vfl->num_elements; i++) {
3411 		if (vfl->vlan_id[i] >= VLAN_N_VID) {
3412 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3413 			dev_err(dev, "invalid VF VLAN id %d\n",
3414 				vfl->vlan_id[i]);
3415 			goto error_param;
3416 		}
3417 	}
3418 
3419 	hw = &pf->hw;
3420 	vsi = pf->vsi[vf->lan_vsi_idx];
3421 	if (!vsi) {
3422 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3423 		goto error_param;
3424 	}
3425 
3426 	if (add_v && !ice_is_vf_trusted(vf) &&
3427 	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3428 		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
3429 			 vf->vf_id);
3430 		/* There is no need to let the VF know that it is not trusted,
3431 		 * so just return a success message here
3432 		 */
3433 		goto error_param;
3434 	}
3435 
3436 	if (vsi->info.pvid) {
3437 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3438 		goto error_param;
3439 	}
3440 
3441 	if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3442 	     test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3443 	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
3444 		vlan_promisc = true;
3445 
3446 	if (add_v) {
3447 		for (i = 0; i < vfl->num_elements; i++) {
3448 			u16 vid = vfl->vlan_id[i];
3449 
3450 			if (!ice_is_vf_trusted(vf) &&
3451 			    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3452 				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
3453 					 vf->vf_id);
3454 			/* There is no need to let the VF know that it is
3455 			 * not trusted, so just return a success message
3456 			 * here as well.
3457 			 */
3458 				goto error_param;
3459 			}
3460 
3461 			/* VLAN 0 is added by default for each VF so that Tx VLAN
3462 			 * anti-spoof can be enabled without triggering MDD
3463 			 * events, so there is no need to add it again here
3464 			 */
3465 			if (!vid)
3466 				continue;
3467 
3468 			status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
3469 			if (status) {
3470 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3471 				goto error_param;
3472 			}
3473 
3474 			/* Enable VLAN pruning when non-zero VLAN is added */
3475 			if (!vlan_promisc && vid &&
3476 			    !ice_vsi_is_vlan_pruning_ena(vsi)) {
3477 				status = ice_cfg_vlan_pruning(vsi, true, false);
3478 				if (status) {
3479 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3480 					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
3481 						vid, status);
3482 					goto error_param;
3483 				}
3484 			} else if (vlan_promisc) {
3485 				/* Enable Ucast/Mcast VLAN promiscuous mode */
3486 				promisc_m = ICE_PROMISC_VLAN_TX |
3487 					    ICE_PROMISC_VLAN_RX;
3488 
3489 				status = ice_set_vsi_promisc(hw, vsi->idx,
3490 							     promisc_m, vid);
3491 				if (status) {
3492 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3493 					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
3494 						vid, status);
3495 				}
3496 			}
3497 		}
3498 	} else {
3499 		/* In the case of a non-trusted VF, the number of VLAN elements
3500 		 * passed to the PF for removal might be greater than the number
3501 		 * of VLAN filters programmed for that VF, so use the actual
3502 		 * number of VLANs added earlier with the add VLAN opcode. This
3503 		 * avoids removing a VLAN that doesn't exist, which would result
3504 		 * in sending an erroneous failure message back to the VF.
3505 		 */
3506 		int num_vf_vlan;
3507 
3508 		num_vf_vlan = vsi->num_vlan;
3509 		for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
3510 			u16 vid = vfl->vlan_id[i];
3511 
3512 			/* VLAN 0 is added by default for each VF so that Tx VLAN
3513 			 * anti-spoof can be enabled without triggering MDD
3514 			 * events, so a VIRTCHNL request must not remove it
3515 			 */
3516 			if (!vid)
3517 				continue;
3518 
3519 			/* Make sure ice_vsi_kill_vlan is successful before
3520 			 * updating VLAN information
3521 			 */
3522 			status = ice_vsi_kill_vlan(vsi, vid);
3523 			if (status) {
3524 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3525 				goto error_param;
3526 			}
3527 
3528 			/* Disable VLAN pruning when only VLAN 0 is left */
3529 			if (vsi->num_vlan == 1 &&
3530 			    ice_vsi_is_vlan_pruning_ena(vsi))
3531 				ice_cfg_vlan_pruning(vsi, false, false);
3532 
3533 			/* Disable Unicast/Multicast VLAN promiscuous mode */
3534 			if (vlan_promisc) {
3535 				promisc_m = ICE_PROMISC_VLAN_TX |
3536 					    ICE_PROMISC_VLAN_RX;
3537 
3538 				ice_clear_vsi_promisc(hw, vsi->idx,
3539 						      promisc_m, vid);
3540 			}
3541 		}
3542 	}
3543 
3544 error_param:
3545 	/* send the response to the VF */
3546 	if (add_v)
3547 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
3548 					     NULL, 0);
3549 	else
3550 		return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
3551 					     NULL, 0);
3552 }
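/* Illustrative, standalone sketch (not driver code) of the per-element bounds
 * check the handler above applies to an add/delete VLAN request, assuming the
 * virtchnl_vlan_filter_list layout of two u16 header fields followed by a u16
 * VLAN ID array. The ex_ names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_VLAN_N_VID	4096

struct ex_vlan_filter_list {
	uint16_t vsi_id;
	uint16_t num_elements;
	uint16_t vlan_id[];
};

static bool ex_vlan_list_is_valid(const struct ex_vlan_filter_list *vfl)
{
	uint16_t i;

	for (i = 0; i < vfl->num_elements; i++)
		if (vfl->vlan_id[i] >= EX_VLAN_N_VID)
			return false;	/* VLAN IDs must be 0..4095 */
	return true;
}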
3553 
3554 /**
3555  * ice_vc_add_vlan_msg
3556  * @vf: pointer to the VF info
3557  * @msg: pointer to the msg buffer
3558  *
3559  * Add and program guest VLAN ID
3560  */
3561 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3562 {
3563 	return ice_vc_process_vlan_msg(vf, msg, true);
3564 }
3565 
3566 /**
3567  * ice_vc_remove_vlan_msg
3568  * @vf: pointer to the VF info
3569  * @msg: pointer to the msg buffer
3570  *
3571  * remove programmed guest VLAN ID
3572  */
3573 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3574 {
3575 	return ice_vc_process_vlan_msg(vf, msg, false);
3576 }
3577 
3578 /**
3579  * ice_vc_ena_vlan_stripping
3580  * @vf: pointer to the VF info
3581  *
3582  * Enable VLAN header stripping for a given VF
3583  */
3584 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3585 {
3586 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3587 	struct ice_pf *pf = vf->pf;
3588 	struct ice_vsi *vsi;
3589 
3590 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3591 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3592 		goto error_param;
3593 	}
3594 
3595 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3596 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3597 		goto error_param;
3598 	}
3599 
3600 	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

3601 	if (ice_vsi_manage_vlan_stripping(vsi, true))
3602 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3603 
3604 error_param:
3605 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3606 				     v_ret, NULL, 0);
3607 }
3608 
3609 /**
3610  * ice_vc_dis_vlan_stripping
3611  * @vf: pointer to the VF info
3612  *
3613  * Disable VLAN header stripping for a given VF
3614  */
3615 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3616 {
3617 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3618 	struct ice_pf *pf = vf->pf;
3619 	struct ice_vsi *vsi;
3620 
3621 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3622 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3623 		goto error_param;
3624 	}
3625 
3626 	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3627 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3628 		goto error_param;
3629 	}
3630 
3631 	vsi = pf->vsi[vf->lan_vsi_idx];
3632 	if (!vsi) {
3633 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3634 		goto error_param;
3635 	}
3636 
3637 	if (ice_vsi_manage_vlan_stripping(vsi, false))
3638 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3639 
3640 error_param:
3641 	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3642 				     v_ret, NULL, 0);
3643 }
3644 
3645 /**
3646  * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3647  * @vf: VF to enable/disable VLAN stripping for on initialization
3648  *
3649  * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping;
3650  * otherwise disable it. For example, the flag will be cleared when port
3651  * VLANs are configured by the administrator before passing the VF to the
3652  * guest or if the AVF driver doesn't support VLAN
3653  * offloads.
3654  */
3655 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3656 {
3657 	struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3658 
3659 	if (!vsi)
3660 		return -EINVAL;
3661 
3662 	/* don't modify stripping if port VLAN is configured */
3663 	if (vsi->info.pvid)
3664 		return 0;
3665 
3666 	if (ice_vf_vlan_offload_ena(vf->driver_caps))
3667 		return ice_vsi_manage_vlan_stripping(vsi, true);
3668 	else
3669 		return ice_vsi_manage_vlan_stripping(vsi, false);
3670 }
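/* Illustrative, standalone sketch (not driver code) of the decision made in
 * ice_vf_init_vlan_stripping() above: leave stripping alone when a port VLAN
 * (pvid) is configured, otherwise enable it only if the VF negotiated the
 * VLAN offload capability. The EX_ and ex_ names are hypothetical stand-ins.
 */
#include <stdint.h>

#define EX_VF_OFFLOAD_VLAN	0x00010000	/* stand-in for VIRTCHNL_VF_OFFLOAD_VLAN */

enum ex_strip_action { EX_STRIP_LEAVE, EX_STRIP_ENABLE, EX_STRIP_DISABLE };

static enum ex_strip_action ex_init_strip(uint16_t pvid, uint32_t caps)
{
	if (pvid)				/* port VLAN owns VLAN handling */
		return EX_STRIP_LEAVE;
	if (caps & EX_VF_OFFLOAD_VLAN)		/* VF negotiated VLAN offloads */
		return EX_STRIP_ENABLE;
	return EX_STRIP_DISABLE;
}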
3671 
3672 /**
3673  * ice_vc_process_vf_msg - Process request from VF
3674  * @pf: pointer to the PF structure
3675  * @event: pointer to the AQ event
3676  *
3677  * called from the common asq/arq handler to
3678  * process a request from a VF
3679  */
3680 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3681 {
3682 	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3683 	s16 vf_id = le16_to_cpu(event->desc.retval);
3684 	u16 msglen = event->msg_len;
3685 	u8 *msg = event->msg_buf;
3686 	struct ice_vf *vf = NULL;
3687 	struct device *dev;
3688 	int err = 0;
3689 
3690 	dev = ice_pf_to_dev(pf);
3691 	if (ice_validate_vf_id(pf, vf_id)) {
3692 		err = -EINVAL;
3693 		goto error_handler;
3694 	}
3695 
3696 	vf = &pf->vf[vf_id];
3697 
3698 	/* Check if VF is disabled. */
3699 	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3700 		err = -EPERM;
3701 		goto error_handler;
3702 	}
3703 
3704 	/* Perform basic checks on the msg */
3705 	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3706 	if (err) {
3707 		if (err == VIRTCHNL_STATUS_ERR_PARAM)
3708 			err = -EPERM;
3709 		else
3710 			err = -EINVAL;
3711 	}
3712 
3713 error_handler:
3714 	if (err) {
3715 		/* vf is NULL here if the VF ID failed validation above */
		if (vf)
			ice_vc_send_msg_to_vf(vf, v_opcode,
					      VIRTCHNL_STATUS_ERR_PARAM,
					      NULL, 0);
3717 		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3718 			vf_id, v_opcode, msglen, err);
3719 		return;
3720 	}
3721 
3722 	switch (v_opcode) {
3723 	case VIRTCHNL_OP_VERSION:
3724 		err = ice_vc_get_ver_msg(vf, msg);
3725 		break;
3726 	case VIRTCHNL_OP_GET_VF_RESOURCES:
3727 		err = ice_vc_get_vf_res_msg(vf, msg);
3728 		if (ice_vf_init_vlan_stripping(vf))
3729 			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
3730 				vf->vf_id);
3731 		ice_vc_notify_vf_link_state(vf);
3732 		break;
3733 	case VIRTCHNL_OP_RESET_VF:
3734 		ice_vc_reset_vf_msg(vf);
3735 		break;
3736 	case VIRTCHNL_OP_ADD_ETH_ADDR:
3737 		err = ice_vc_add_mac_addr_msg(vf, msg);
3738 		break;
3739 	case VIRTCHNL_OP_DEL_ETH_ADDR:
3740 		err = ice_vc_del_mac_addr_msg(vf, msg);
3741 		break;
3742 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3743 		err = ice_vc_cfg_qs_msg(vf, msg);
3744 		break;
3745 	case VIRTCHNL_OP_ENABLE_QUEUES:
3746 		err = ice_vc_ena_qs_msg(vf, msg);
3747 		ice_vc_notify_vf_link_state(vf);
3748 		break;
3749 	case VIRTCHNL_OP_DISABLE_QUEUES:
3750 		err = ice_vc_dis_qs_msg(vf, msg);
3751 		break;
3752 	case VIRTCHNL_OP_REQUEST_QUEUES:
3753 		err = ice_vc_request_qs_msg(vf, msg);
3754 		break;
3755 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3756 		err = ice_vc_cfg_irq_map_msg(vf, msg);
3757 		break;
3758 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
3759 		err = ice_vc_config_rss_key(vf, msg);
3760 		break;
3761 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
3762 		err = ice_vc_config_rss_lut(vf, msg);
3763 		break;
3764 	case VIRTCHNL_OP_GET_STATS:
3765 		err = ice_vc_get_stats_msg(vf, msg);
3766 		break;
3767 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3768 		err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3769 		break;
3770 	case VIRTCHNL_OP_ADD_VLAN:
3771 		err = ice_vc_add_vlan_msg(vf, msg);
3772 		break;
3773 	case VIRTCHNL_OP_DEL_VLAN:
3774 		err = ice_vc_remove_vlan_msg(vf, msg);
3775 		break;
3776 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3777 		err = ice_vc_ena_vlan_stripping(vf);
3778 		break;
3779 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3780 		err = ice_vc_dis_vlan_stripping(vf);
3781 		break;
3782 	case VIRTCHNL_OP_UNKNOWN:
3783 	default:
3784 		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3785 			vf_id);
3786 		err = ice_vc_send_msg_to_vf(vf, v_opcode,
3787 					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3788 					    NULL, 0);
3789 		break;
3790 	}
3791 	if (err) {
3792 		/* Nothing more can be done here if a handler fails; log the
3793 		 * failure and continue processing pending mailbox work.
3794 		 */
3795 		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3796 			 vf_id, v_opcode, err);
3797 	}
3798 }
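/* Illustrative, standalone sketch (not driver code): an alternative,
 * table-driven way to express the opcode dispatch performed by the switch
 * statement above. The opcode values, handler type, and names here are
 * hypothetical; the driver itself uses the explicit switch.
 */
#include <stddef.h>
#include <stdint.h>

typedef int (*ex_vc_handler)(void *vf, uint8_t *msg);

struct ex_vc_dispatch {
	uint32_t opcode;
	ex_vc_handler handler;
};

static int ex_vc_dispatch_msg(const struct ex_vc_dispatch *table, size_t n,
			      uint32_t opcode, void *vf, uint8_t *msg)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i].opcode == opcode)
			return table[i].handler(vf, msg);
	return -1;	/* unsupported opcode: caller replies NOT_SUPPORTED */
}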
3799 
3800 /**
3801  * ice_get_vf_cfg
3802  * @netdev: network interface device structure
3803  * @vf_id: VF identifier
3804  * @ivi: VF configuration structure
3805  *
3806  * return VF configuration
3807  */
3808 int
3809 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3810 {
3811 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3812 	struct ice_vf *vf;
3813 
3814 	if (ice_validate_vf_id(pf, vf_id))
3815 		return -EINVAL;
3816 
3817 	vf = &pf->vf[vf_id];
3818 
3819 	if (ice_check_vf_init(pf, vf))
3820 		return -EBUSY;
3821 
3822 	ivi->vf = vf_id;
3823 	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3824 
3825 	/* VF configuration for VLAN and applicable QoS */
3826 	ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3827 	ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3828 
3829 	ivi->trusted = vf->trusted;
3830 	ivi->spoofchk = vf->spoofchk;
3831 	if (!vf->link_forced)
3832 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3833 	else if (vf->link_up)
3834 		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3835 	else
3836 		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3837 	ivi->max_tx_rate = vf->tx_rate;
3838 	ivi->min_tx_rate = 0;
3839 	return 0;
3840 }
3841 
3842 /**
3843  * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
3844  * @pf: PF used to reference the switch's rules
3845  * @umac: unicast MAC to compare against existing switch rules
3846  *
3847  * Return true on the first/any match, else return false
3848  */
3849 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
3850 {
3851 	struct ice_sw_recipe *mac_recipe_list =
3852 		&pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
3853 	struct ice_fltr_mgmt_list_entry *list_itr;
3854 	struct list_head *rule_head;
3855 	struct mutex *rule_lock; /* protect MAC filter list access */
3856 
3857 	rule_head = &mac_recipe_list->filt_rules;
3858 	rule_lock = &mac_recipe_list->filt_rule_lock;
3859 
3860 	mutex_lock(rule_lock);
3861 	list_for_each_entry(list_itr, rule_head, list_entry) {
3862 		u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3863 
3864 		if (ether_addr_equal(existing_mac, umac)) {
3865 			mutex_unlock(rule_lock);
3866 			return true;
3867 		}
3868 	}
3869 
3870 	mutex_unlock(rule_lock);
3871 
3872 	return false;
3873 }
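/* Illustrative, standalone sketch (not driver code) of the comparison the
 * loop above relies on: ether_addr_equal() amounts to comparing the six bytes
 * of two MAC addresses. The ex_ names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define EX_ETH_ALEN	6

static bool ex_ether_addr_equal(const uint8_t *a, const uint8_t *b)
{
	return memcmp(a, b, EX_ETH_ALEN) == 0;
}

static bool ex_mac_in_list(const uint8_t (*list)[EX_ETH_ALEN], size_t n,
			   const uint8_t *umac)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (ex_ether_addr_equal(list[i], umac))
			return true;	/* first match wins */
	return false;
}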
3874 
3875 /**
3876  * ice_set_vf_mac
3877  * @netdev: network interface device structure
3878  * @vf_id: VF identifier
3879  * @mac: MAC address
3880  *
3881  * program VF MAC address
3882  */
3883 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3884 {
3885 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3886 	struct ice_vf *vf;
3887 	int ret;
3888 
3889 	if (ice_validate_vf_id(pf, vf_id))
3890 		return -EINVAL;
3891 
3892 	if (is_multicast_ether_addr(mac)) {
3893 		netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3894 		return -EINVAL;
3895 	}
3896 
3897 	vf = &pf->vf[vf_id];
3898 	/* nothing left to do, unicast MAC already set */
3899 	if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
3900 		return 0;
3901 
3902 	ret = ice_check_vf_ready_for_cfg(vf);
3903 	if (ret)
3904 		return ret;
3905 
3906 	if (ice_unicast_mac_exists(pf, mac)) {
3907 		netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
3908 			   mac, vf_id, mac);
3909 		return -EINVAL;
3910 	}
3911 
3912 	/* VF is notified of its new MAC via the PF's response to the
3913 	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
3914 	 */
3915 	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3916 	if (is_zero_ether_addr(mac)) {
3917 		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
3918 		vf->pf_set_mac = false;
3919 		netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
3920 			    vf->vf_id);
3921 	} else {
3922 		/* PF will add MAC rule for the VF */
3923 		vf->pf_set_mac = true;
3924 		netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
3925 			    mac, vf_id);
3926 	}
3927 
3928 	ice_vc_reset_vf(vf);
3929 	return 0;
3930 }
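/* Illustrative, standalone sketch (not driver code) of the address checks
 * used above: a multicast MAC has the least-significant bit of its first
 * octet set, and an all-zero MAC means "clear the administratively set MAC".
 * The ex_ names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_ETH_ALEN	6

static bool ex_is_multicast_ether_addr(const uint8_t *mac)
{
	return mac[0] & 0x01;
}

static bool ex_is_zero_ether_addr(const uint8_t *mac)
{
	int i;

	for (i = 0; i < EX_ETH_ALEN; i++)
		if (mac[i])
			return false;
	return true;
}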
3931 
3932 /**
3933  * ice_set_vf_trust
3934  * @netdev: network interface device structure
3935  * @vf_id: VF identifier
3936  * @trusted: Boolean value to enable/disable trusted VF
3937  *
3938  * Enable or disable a given VF as trusted
3939  */
3940 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3941 {
3942 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3943 	struct ice_vf *vf;
3944 	int ret;
3945 
3946 	if (ice_validate_vf_id(pf, vf_id))
3947 		return -EINVAL;
3948 
3949 	vf = &pf->vf[vf_id];
3950 	ret = ice_check_vf_ready_for_cfg(vf);
3951 	if (ret)
3952 		return ret;
3953 
3954 	/* Check if already trusted */
3955 	if (trusted == vf->trusted)
3956 		return 0;
3957 
3958 	vf->trusted = trusted;
3959 	ice_vc_reset_vf(vf);
3960 	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
3961 		 vf_id, trusted ? "" : "un");
3962 
3963 	return 0;
3964 }
3965 
3966 /**
3967  * ice_set_vf_link_state
3968  * @netdev: network interface device structure
3969  * @vf_id: VF identifier
3970  * @link_state: required link state
3971  *
3972  * Set VF's link state, irrespective of physical link state status
3973  */
3974 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3975 {
3976 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
3977 	struct ice_vf *vf;
3978 	int ret;
3979 
3980 	if (ice_validate_vf_id(pf, vf_id))
3981 		return -EINVAL;
3982 
3983 	vf = &pf->vf[vf_id];
3984 	ret = ice_check_vf_ready_for_cfg(vf);
3985 	if (ret)
3986 		return ret;
3987 
3988 	switch (link_state) {
3989 	case IFLA_VF_LINK_STATE_AUTO:
3990 		vf->link_forced = false;
3991 		break;
3992 	case IFLA_VF_LINK_STATE_ENABLE:
3993 		vf->link_forced = true;
3994 		vf->link_up = true;
3995 		break;
3996 	case IFLA_VF_LINK_STATE_DISABLE:
3997 		vf->link_forced = true;
3998 		vf->link_up = false;
3999 		break;
4000 	default:
4001 		return -EINVAL;
4002 	}
4003 
4004 	ice_vc_notify_vf_link_state(vf);
4005 
4006 	return 0;
4007 }
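/* Illustrative, standalone sketch (not driver code) of the mapping applied
 * above: "auto" clears the forced flag, while "enable"/"disable" force the
 * link reported to the VF up or down regardless of the physical link. The
 * ex_ names are hypothetical stand-ins.
 */
#include <stdbool.h>

enum ex_vf_link { EX_LINK_AUTO, EX_LINK_ENABLE, EX_LINK_DISABLE };

static int ex_apply_link_state(enum ex_vf_link state, bool *forced, bool *up)
{
	switch (state) {
	case EX_LINK_AUTO:
		*forced = false;
		return 0;
	case EX_LINK_ENABLE:
		*forced = true;
		*up = true;
		return 0;
	case EX_LINK_DISABLE:
		*forced = true;
		*up = false;
		return 0;
	}
	return -1;	/* unknown state */
}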
4008 
4009 /**
4010  * ice_get_vf_stats - populate some stats for the VF
4011  * @netdev: the netdev of the PF
4012  * @vf_id: the host OS identifier (0-255)
4013  * @vf_stats: pointer to the OS memory to be initialized
4014  */
4015 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
4016 		     struct ifla_vf_stats *vf_stats)
4017 {
4018 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
4019 	struct ice_eth_stats *stats;
4020 	struct ice_vsi *vsi;
4021 	struct ice_vf *vf;
4022 	int ret;
4023 
4024 	if (ice_validate_vf_id(pf, vf_id))
4025 		return -EINVAL;
4026 
4027 	vf = &pf->vf[vf_id];
4028 	ret = ice_check_vf_ready_for_cfg(vf);
4029 	if (ret)
4030 		return ret;
4031 
4032 	vsi = pf->vsi[vf->lan_vsi_idx];
4033 	if (!vsi)
4034 		return -EINVAL;
4035 
4036 	ice_update_eth_stats(vsi);
4037 	stats = &vsi->eth_stats;
4038 
4039 	memset(vf_stats, 0, sizeof(*vf_stats));
4040 
4041 	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4042 		stats->rx_multicast;
4043 	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4044 		stats->tx_multicast;
4045 	vf_stats->rx_bytes   = stats->rx_bytes;
4046 	vf_stats->tx_bytes   = stats->tx_bytes;
4047 	vf_stats->broadcast  = stats->rx_broadcast;
4048 	vf_stats->multicast  = stats->rx_multicast;
4049 	vf_stats->rx_dropped = stats->rx_discards;
4050 	vf_stats->tx_dropped = stats->tx_discards;
4051 
4052 	return 0;
4053 }
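/* Illustrative, standalone sketch (not driver code) of the aggregation done
 * above: the VSI's unicast/broadcast/multicast counters are summed into the
 * per-VF packet totals reported to the stack. Field names are simplified
 * stand-ins for ice_eth_stats and ifla_vf_stats.
 */
#include <stdint.h>

struct ex_eth_stats {
	uint64_t rx_unicast, rx_broadcast, rx_multicast, rx_bytes, rx_discards;
	uint64_t tx_unicast, tx_broadcast, tx_multicast, tx_bytes, tx_discards;
};

struct ex_vf_stats {
	uint64_t rx_packets, tx_packets, rx_bytes, tx_bytes;
	uint64_t broadcast, multicast, rx_dropped, tx_dropped;
};

static void ex_fill_vf_stats(const struct ex_eth_stats *s,
			     struct ex_vf_stats *out)
{
	out->rx_packets = s->rx_unicast + s->rx_broadcast + s->rx_multicast;
	out->tx_packets = s->tx_unicast + s->tx_broadcast + s->tx_multicast;
	out->rx_bytes   = s->rx_bytes;
	out->tx_bytes   = s->tx_bytes;
	out->broadcast  = s->rx_broadcast;
	out->multicast  = s->rx_multicast;
	out->rx_dropped = s->rx_discards;
	out->tx_dropped = s->tx_discards;
}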
4054 
4055 /**
4056  * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4057  * @vf: pointer to the VF structure
4058  */
4059 void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4060 {
4061 	struct ice_pf *pf = vf->pf;
4062 	struct device *dev;
4063 
4064 	dev = ice_pf_to_dev(pf);
4065 
4066 	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4067 		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4068 		 vf->dflt_lan_addr.addr,
4069 		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4070 			  ? "on" : "off");
4071 }
4072 
4073 /**
4074  * ice_print_vfs_mdd_events - print VFs malicious driver detect events
4075  * @pf: pointer to the PF structure
4076  *
4077  * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
4078  */
4079 void ice_print_vfs_mdd_events(struct ice_pf *pf)
4080 {
4081 	struct device *dev = ice_pf_to_dev(pf);
4082 	struct ice_hw *hw = &pf->hw;
4083 	int i;
4084 
4085 	/* check that there are pending MDD events to print */
4086 	if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
4087 		return;
4088 
4089 	/* VF MDD event logs are rate limited to one print per second */
4090 	if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4091 		return;
4092 
4093 	pf->last_printed_mdd_jiffies = jiffies;
4094 
4095 	ice_for_each_vf(pf, i) {
4096 		struct ice_vf *vf = &pf->vf[i];
4097 
4098 		/* only print Rx MDD event message if there are new events */
4099 		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4100 			vf->mdd_rx_events.last_printed =
4101 							vf->mdd_rx_events.count;
4102 			ice_print_vf_rx_mdd_event(vf);
4103 		}
4104 
4105 		/* only print Tx MDD event message if there are new events */
4106 		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4107 			vf->mdd_tx_events.last_printed =
4108 							vf->mdd_tx_events.count;
4109 
4110 			dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4111 				 vf->mdd_tx_events.count, hw->pf_id, i,
4112 				 vf->dflt_lan_addr.addr);
4113 		}
4114 	}
4115 }
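/* Illustrative, standalone sketch (not driver code) of the two throttles used
 * above: print at most once per second, and only when an event counter has
 * advanced since the last print. The ex_ names are hypothetical stand-ins and
 * wall-clock seconds stand in for jiffies.
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static bool ex_should_print_mdd(time_t now, time_t *last_print,
				uint32_t count, uint32_t *last_printed)
{
	if (now - *last_print < 1)	/* rate limit: one print per second */
		return false;
	if (count == *last_printed)	/* no new events since the last print */
		return false;
	*last_print = now;
	*last_printed = count;
	return true;
}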
4116 
4117 /**
4118  * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
4119  * @pdev: pointer to a pci_dev structure
4120  *
4121  * Called when recovering from a PF FLR to restore interrupt capability to
4122  * the VFs.
4123  */
4124 void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
4125 {
4126 	struct pci_dev *vfdev;
4127 	u16 vf_id;
4128 	int pos;
4129 
4130 	if (!pci_num_vf(pdev))
4131 		return;
4132 
4133 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4134 	if (pos) {
4135 		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
4136 				     &vf_id);
4137 		vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
4138 		while (vfdev) {
4139 			if (vfdev->is_virtfn && vfdev->physfn == pdev)
4140 				pci_restore_msi_state(vfdev);
4141 			vfdev = pci_get_device(pdev->vendor, vf_id,
4142 					       vfdev);
4143 		}
4144 	}
4145 }
4146