// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
#include "i40e_lan_hmc.h"
#include "i40e_virtchnl_pf.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 int v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
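		/* VF ids are indexed per PF; add the function's vf_base_id
		 * to get the absolute VF id that the admin queue expects.
		 */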
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_link_speed2mbps
 * converts i40e_aq_link_speed to integer value of Mbps
 * @link_speed: the speed to convert
 *
 * return the speed as direct value of Mbps.
 **/
static u32
i40e_vc_link_speed2mbps(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_100MB:
		return SPEED_100;
	case I40E_LINK_SPEED_1GB:
		return SPEED_1000;
	case I40E_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case I40E_LINK_SPEED_5GB:
		return SPEED_5000;
	case I40E_LINK_SPEED_10GB:
		return SPEED_10000;
	case I40E_LINK_SPEED_20GB:
		return SPEED_20000;
	case I40E_LINK_SPEED_25GB:
		return SPEED_25000;
	case I40E_LINK_SPEED_40GB:
		return SPEED_40000;
	case I40E_LINK_SPEED_UNKNOWN:
		return SPEED_UNKNOWN;
	}
	return SPEED_UNKNOWN;
}

/**
 * i40e_set_vf_link_state
 * @vf: pointer to the VF structure
 * @pfe: pointer to PF event structure
 * @ls: pointer to link status structure
 *
 * set a link state on a single vf
 **/
static void i40e_set_vf_link_state(struct i40e_vf *vf,
				   struct virtchnl_pf_event *pfe, struct i40e_link_status *ls)
{
	u8 link_status = ls->link_info & I40E_AQ_LINK_UP;

	if (vf->link_forced)
		link_status = vf->link_up;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_speed = link_status ?
			i40e_vc_link_speed2mbps(ls->link_speed) : 0;
		pfe->event_data.link_event_adv.link_status = link_status;
	} else {
		pfe->event_data.link_event.link_speed = link_status ?
			i40e_virtchnl_link_speed(ls->link_speed) : 0;
		pfe->event_data.link_event.link_status = link_status;
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	i40e_set_vf_link_state(vf, &pfe, ls);

	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

#ifdef CONFIG_PCI_IOV
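/**
 * i40e_restore_all_vfs_msi_state
 * @pdev: pointer to a PCI device structure
 *
 * Walk all VFs attached to this physical function and restore their
 * PCI MSI state.
 **/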
void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	u16 pos;

	/* Continue only if this is a PF */
	if (!pdev->is_physfn)
		return;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vf_dev = NULL;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
				pci_restore_msi_state(vf_dev);
		}
	}
}
#endif /* CONFIG_PCI_IOV */

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF info
 * @notify_vf: whether to notify the VF of the reset
 *
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If the PF is releasing its VFs, resetting the VF is
		 * impossible, so bail out.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* The VF considers all of its queues (from 1 up to 16) as
		 * its own, but they may actually belong to different VSIs
		 * (up to 4). We need to find out which VSI each queue
		 * belongs to.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find the right queue id relative to the
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
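	/* Vector 0 has its own dedicated VPINT_LNKLST0 register; vectors
	 * 1..n index the shared VPINT_LNKLSTN array with a per-VF stride of
	 * (num_msix_vectors_vf - 1) entries.
	 */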
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
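	/* Each VSI queue pair contributes I40E_VIRTCHNL_SUPPORTED_QTYPES
	 * bits to linklistmap: the Rx queue at the even bit and the Tx
	 * queue at the odd bit, so walking the set bits below visits the
	 * queues in hardware list order.
	 */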
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is the same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
		wr32(hw, reg_idx, reg);
	}

	/* If the VF is running in polling mode and using interrupt zero,
	 * we need to disable auto-masking on enabling interrupt zero for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

/**
 * i40e_release_rdma_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_rdma_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK,
						 reg);
			next_q_type = FIELD_GET(I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK,
						reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_rdma_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int
i40e_config_rdma_qvlist(struct i40e_vf *vf,
			struct virtchnl_rdma_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_rdma_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	size_t size;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	size = virtchnl_struct_size(vf->qvlist_info, qv_info,
				    qvlist_info->num_vectors);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
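	/* For each vector: chain the CEQ (if present) into the vector's
	 * queue list via CEQCTL/LNKLSTN, and map the AEQ (if present)
	 * directly through AEQCTL.
	 */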
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];

		/* Validate that the vector id belongs to this VF */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK,
				       reg);
		next_q_type = FIELD_GET(I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK,
					reg);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			       (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			       (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			       (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			       (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
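	/* the HMC context keeps the ring base address in 128-byte units */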
	tx_ctx.base = info->dma_ring_addr / 128;

	/* ring_len has to be multiple of 8 */
	if (!IS_ALIGNED(info->ring_len, 8) ||
	    info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
		ret = -EINVAL;
		goto error_context;
	}
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id);
	qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_VFVM_INDX_MASK,
			      vf->vf_id + hw->func_caps.vf_base_id);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;

	/* ring_len has to be multiple of 32 */
	if (!IS_ALIGNED(info->ring_len, 32) ||
	    info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
				  I40E_RX_SPLIT_IP |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_vsi *main_vsi, *vsi;
	struct i40e_pf *pf = vf->pf;
	u64 max_tx_rate = 0;
	int ret = 0;

	main_vsi = i40e_pf_get_main_vsi(pf);
	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* store the VSI index and id for ADq; the MAC filter is not
	 * applied for these additional VSIs
	 */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

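		/* Each VSILAN_QTABLE register carries two queue indexes,
		 * one per 16-bit half; 0x7FF in a half marks end-of-list.
		 */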
		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

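		/* VPLAN_QTABLE translates VF-relative queue indexes into
		 * absolute PF queue numbers, one register per VF queue.
		 */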
		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again; only clear its values in the
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						      + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						      + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

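	/* Poll the VF's PCI Device Status register through the
	 * PF_PCI_CIAA/CIAD indirect access pair until the
	 * transactions-pending bit clears.
	 */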
	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

/**
 * __i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	int num_vlans;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return num_vlans;
}

/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed in caller.
 *
 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}

/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *                for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *                  for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int aq_ret, aq_tmp = 0;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);

		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
				vf->vf_id,
				ERR_PTR(aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF VSIs and from the VF reset path to reset promiscuous mode.
 **/
static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
					   u16 vsi_id,
					   bool allmulti,
					   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int aq_ret = 0;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return -EINVAL;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return -ENOMEM;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}

/**
 * i40e_sync_vfr_reset
 * @hw: pointer to hw struct
 * @vf_id: VF identifier
 *
 * Before triggering a hardware reset, we need to know that no other process
 * has reserved the hardware for any reset operations. This check is done by
 * examining the status of the RSTAT1 register used to signal the reset.
 **/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
	u32 reg;
	int i;

	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
			   I40E_VFINT_ICR0_ADMINQ_MASK;
		if (reg)
			return 0;

		usleep_range(100, 200);
	}

	return -EAGAIN;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	bool vf_active;
	u32 radq;

	/* warn the VF */
	vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
	clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* Sync any previous VFR reset before triggering this one */
		radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
		       I40E_VFINT_ICR0_ADMINQ_MASK;
		if (vf_active && !radq)
			/* wait for the VF driver to finish the previous reset */
			if (i40e_sync_vfr_reset(hw, vf->vf_id))
				dev_info(&pf->pdev->dev,
					 "Reset VF %d never finished\n",
					 vf->vf_id);

		/* Reset VF using VPGEN_VFRTRIG reg. This also sets the
		 * reset-in-progress state in the RSTAT1 register.
		 */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
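	/* each GLGEN_VFLRSTAT register tracks 32 VFs; locate this VF's
	 * register and bit using its absolute id
	 */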
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Return: True if reset was performed successfully or if resets are disabled.
 * False if reset is already in progress.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* Bail out if VFs are disabled. */
	if (test_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	/* If VF is being reset already we don't need to continue. */
	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
		/* If VF is being reset no need to trigger reset again */
		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			i40e_trigger_vf_reset(vf, flr);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF which
		 * failed the previous check.
		 */
		while (vf < &pf->vf[pf->num_alloc_vfs]) {
			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
					break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			++vf;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (vf < &pf->vf[pf->num_alloc_vfs])
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
		/* On initial reset, we don't have any queues to disable */
		if (vf->lan_vsi_idx == 0)
			continue;

		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
		/* On initial reset, we don't have any queues to disable */
		if (vf->lan_vsi_idx == 0)
			continue;

		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
	}

	/* HW may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_cleanup_reset_vf(vf);
	}

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	set_bit(__I40E_VFS_RELEASING, pf->state);
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
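	/* num_alloc_vfs is cleared before freeing so that nothing iterating
	 * over the VF array can see entries that are being torn down
	 */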
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
	clear_bit(__I40E_VFS_RELEASING, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

#endif
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) {
			set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags);
		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
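/* Illustrative note (not driver code): this is the driver's
 * .sriov_configure callback, so it is typically reached through the
 * standard SR-IOV sysfs attribute, e.g.:
 *
 *	echo 4 > /sys/bus/pci/devices/<pf-bdf>/sriov_numvfs
 *
 * where <pf-bdf> stands in for the PF's PCI address; writing 0 releases
 * the VFs.
 */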

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	int aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
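/* Typical use, mirroring the handlers later in this file: reply with an
 * opcode-specific payload, or use the i40e_vc_send_resp_to_vf() wrapper
 * below when only a return code is needed, e.g.:
 *
 *	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
 *				      (u8 *)&stats, sizeof(stats));
 */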

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   int retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_sync_vf_state
 * @vf: pointer to the VF info
 * @state: VF state
 *
 * Called from a VF message to synchronize the service with a potential
 * VF reset state
 **/
static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
{
	int i;

	/* Some messages can only be handled while a given VF state bit is
	 * set. The bit may be cleared during a VF reset, so wait until the
	 * end of the reset to handle the request message correctly.
	 */
	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
		if (test_bit(state, &vf->vf_states))
			return true;
		usleep_range(10000, 20000);
	}

	return test_bit(state, &vf->vf_states);
}
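/* Most of the message handlers below open with this guard (pattern taken
 * from this file):
 *
 *	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
 *		aq_ret = -EINVAL;
 *		goto error_param;
 *	}
 */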

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      0, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* The first element in the array belongs to the primary VF VSI and
	 * we shouldn't delete it. We should, however, delete the rest of
	 * the VSIs that were created.
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_max_frame_size
 * @vf: pointer to the VF
 *
 * Max frame size is determined based on the current port's max frame size
 * and whether a port VLAN is configured on this VF. The VF is not aware
 * whether it's in a port VLAN, so the PF needs to account for this both
 * when checking the max frame size and when reporting it to the VF.
 **/
static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
{
	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;

	if (vf->port_vlan_id)
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}
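/* Worked example (port max frame size chosen for illustration): with a
 * port max frame size of 9728 bytes and a port VLAN configured, the VF is
 * told 9724, since VLAN_HLEN is the 4-byte 802.1Q tag inserted by the
 * port VLAN.
 */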

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int aq_ret = 0;
	size_t len = 0;
	int ret;

	i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
	    test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto err;
	}

	len = virtchnl_struct_size(vfres, vsi_res, num_vsis);
	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = -ENOMEM;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;
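	/* (Pre-1.1 VFs cannot report their capabilities, hence the fixed
	 * legacy set above.)
	 */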
	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
		set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, pf->hw.caps)) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = -EINVAL;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (test_bit(I40E_HW_CAP_WB_ON_ITR, pf->hw.caps)) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
				= le16_to_cpu(vsi->info.qs_handle[0]);
		if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
			spin_lock_bh(&vsi->mac_filter_hash_lock);
			i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
			eth_zero_addr(vf->default_lan_addr.addr);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
		}
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
	set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	bool allmulti = false;
	bool alluni = false;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err_out;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);

		/* Lie to the VF on purpose, because this is an error we can
		 * ignore. An unprivileged request is not a virtual channel
		 * error.
		 */
		aq_ret = 0;
		goto err_out;
	}

	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		aq_ret = -EINVAL;
		goto err_out;
	}

	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
						 alluni);
	if (aq_ret)
		goto err_out;

	if (allmulti) {
		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set multicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset multicast promiscuous mode\n",
			 vf->vf_id);

	if (alluni) {
		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set unicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset unicast promiscuous mode\n",
			 vf->vf_id);

err_out:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	u16 vsi_id, vsi_queue_id = 0;
	struct i40e_pf *pf = vf->pf;
	int i, j = 0, idx = 0;
	struct i40e_vsi *vsi;
	u16 num_qps_all = 0;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (vf->adq_enabled) {
		for (i = 0; i < vf->num_tc; i++)
			num_qps_all += vf->ch[i].num_qps;
		if (num_qps_all != qci->num_queue_pairs) {
			aq_ret = -EINVAL;
			goto error_param;
		}
	}

	vsi_id = qci->vsi_id;

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];

		if (!vf->adq_enabled) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      qpi->txq.queue_id)) {
				aq_ret = -EINVAL;
				goto error_param;
			}

			vsi_queue_id = qpi->txq.queue_id;

			if (qpi->txq.vsi_id != qci->vsi_id ||
			    qpi->rxq.vsi_id != qci->vsi_id ||
			    qpi->rxq.queue_id != vsi_queue_id) {
				aq_ret = -EINVAL;
				goto error_param;
			}
		}

		if (vf->adq_enabled) {
			if (idx >= vf->num_tc) {
				aq_ret = -ENODEV;
				goto error_param;
			}
			vsi_id = vf->ch[idx].vsi_id;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		/* For ADq there can be up to 4 VSIs with a maximum of 4
		 * queues each. The VF does not know about these additional
		 * VSIs; all it cares about is its own queues. The PF
		 * configures these queues on the appropriate VSIs based on
		 * the TC mapping.
		 */
		if (vf->adq_enabled) {
			if (idx >= vf->num_tc) {
				aq_ret = -ENODEV;
				goto error_param;
			}
			if (j == (vf->ch[idx].num_qps - 1)) {
				idx++;
				j = 0; /* resetting the queue count */
				vsi_queue_id = 0;
			} else {
				j++;
				vsi_queue_id++;
			}
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	if (!vf->adq_enabled) {
		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
			qci->num_queue_pairs;
	} else {
		for (i = 0; i < vf->num_tc; i++) {
			vsi = pf->vsi[vf->ch[i].vsi_idx];
			vsi->num_queue_pairs = vf->ch[i].num_qps;

			if (i40e_update_adq_vsi_queues(vsi, i)) {
				aq_ret = -EIO;
				goto error_param;
			}
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}
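/* Illustrative ADq mapping, assuming two TCs with 4 and 2 queue pairs
 * respectively: the idx/j walk above programs the VF's queue pairs 0-3 as
 * queues 0-3 of the TC0 VSI, and queue pairs 4-5 as queues 0-1 of the TC1
 * VSI.
 */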

/**
 * i40e_validate_queue_map - check queue map is valid
 * @vf: the VF structure pointer
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/
static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long queuemap)
{
	u16 vsi_queue_id, queue_id;

	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
		u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;

		if (vf->adq_enabled && idx < vf->num_tc) {
			vsi_id = vf->ch[idx].vsi_id;
			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
		} else {
			queue_id = vsi_queue_id;
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
			return -EINVAL;
	}

	return 0;
}
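/* Sketch of the ADq index math above, assuming I40E_MAX_VF_VSI and
 * I40E_DEFAULT_QUEUES_PER_VF are both 4: VF queue 5 maps to channel
 * idx 1 (5 / 4) and queue 1 (5 % 4) of that channel's VSI.
 */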

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	int aq_ret = 0;
	u16 vsi_id;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (irqmap_info->num_vectors >
	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
			aq_ret = -EINVAL;
			goto error_param;
		}
		vsi_id = map->vsi_id;

		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
			aq_ret = -EINVAL;
			goto error_param;
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_ctrl_vf_tx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     vsi->base_queue + q_id,
					     false /*is xdp*/, enable);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_ctrl_vf_rx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
					     enable);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
 *
 * Returns true if validation was successful, else false.
 */
static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
{
	if ((!vqs->rx_queues && !vqs->tx_queues) ||
	    vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
	    vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
		return false;

	return true;
}
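/* For example, assuming I40E_MAX_VF_QUEUES is 16, a request selecting
 * queue 16 or above (rx_queues or tx_queues >= BIT(16)) is rejected, as
 * is a request that selects no queues at all.
 */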

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;
	int i;

	if (vf->is_disabled_from_host) {
		aq_ret = -EPERM;
		dev_info(&pf->pdev->dev,
			 "Admin has disabled VF %d, will not enable queues\n",
			 vf->vf_id);
		goto error_param;
	}

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  true)) {
		aq_ret = -EIO;
		goto error_param;
	}
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  true)) {
		aq_ret = -EIO;
		goto error_param;
	}

	/* need to start the rings for additional ADq VSIs as well */
	if (vf->adq_enabled) {
		/* index zero belongs to the LAN VSI */
		for (i = 1; i < vf->num_tc; i++) {
			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
				aq_ret = -EIO;
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  false)) {
		aq_ret = -EIO;
		goto error_param;
	}
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  false)) {
		aq_ret = -EIO;
		goto error_param;
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_check_enough_queue - find big enough queue number
 * @vf: pointer to the VF info
 * @needed: the number of items needed
 *
 * Returns the base item index of the queue, or negative for error
 **/
static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
{
	unsigned int i, cur_queues, more, pool_size;
	struct i40e_lump_tracking *pile;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;

	vsi = pf->vsi[vf->lan_vsi_idx];
	cur_queues = vsi->alloc_queue_pairs;

	/* if the currently allocated queues already cover the need */
	if (cur_queues >= needed)
		return vsi->base_queue;

	pile = pf->qp_pile;
	if (cur_queues > 0) {
		/* if the current allocation is non-zero, just check
		 * whether there are enough free queues directly behind
		 * the allocated ones.
		 */
		more = needed - cur_queues;
		for (i = vsi->base_queue + cur_queues;
		     i < pile->num_entries; i++) {
			if (pile->list[i] & I40E_PILE_VALID_BIT)
				break;

			if (more-- == 1)
				/* there is enough */
				return vsi->base_queue;
		}
	}

	pool_size = 0;
	for (i = 0; i < pile->num_entries; i++) {
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			pool_size = 0;
			continue;
		}
		if (needed <= ++pool_size)
			/* there is enough */
			return i;
	}

	return -ENOMEM;
}
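/* Illustrative walk of the logic above: a VF with 4 queues at base 12
 * asking for 8 first checks pile entries 16-19 for a free run directly
 * behind its current allocation; failing that, it scans the whole pile
 * for any free run of 8 entries.
 */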

/**
 * i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queues and return result of sending VF a message.
 **/
static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_pairs = vfres->num_queue_pairs;
	u8 cur_pairs = vf->num_queue_pairs;
	struct i40e_pf *pf = vf->pf;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
		return -EINVAL;

	if (req_pairs > I40E_MAX_VF_QUEUES) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request more than %d queues.\n",
			vf->vf_id,
			I40E_MAX_VF_QUEUES);
		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
	} else if (req_pairs - cur_pairs > pf->queues_left) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but only %d left.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs,
			 pf->queues_left);
		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
	} else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but there is not enough for it.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs);
		vfres->num_queue_pairs = cur_pairs;
	} else {
		/* successful request */
		vf->num_req_queues = req_pairs;
		i40e_vc_reset_vf(vf, true);
		return 0;
	}

	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
				      (u8 *)vfres, sizeof(*vfres));
}

/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	int aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = -EINVAL;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW /	\
	(num_ports))
/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
 * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast.
 */
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 16

#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports)		\
({	typeof(vf_num) vf_num_ = (vf_num);				\
	typeof(num_ports) num_ports_ = (num_ports);			\
	((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ *		\
	I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) +			\
	I40E_VC_MAX_MAC_ADDR_PER_VF; })
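/* Worked example with the macros above (port and VF counts chosen for
 * illustration): with 2 ports, each PF may use 3072 / 2 = 1536 filters.
 * With 16 VFs, an untrusted VF is capped at 16 + 1 + 1 = 18 filters,
 * while a trusted VF may use ((1536 - 16 * 18) / 16) + 18 = 96.
 */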
/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
 *
 * Check that the given list of MAC addresses is allowed. Returns -EINVAL
 * if any address is malformed, or -EPERM if the VF is not allowed to
 * program it. Checks the following conditions:
 *
 * 1) broadcast and zero addresses are never valid
 * 2) unicast addresses are not allowed if the VMM has administratively set
 *    the VF MAC address, unless the VF is marked as privileged.
 * 3) There is enough space to add all the addresses.
 *
 * Note that to guarantee consistency, it is expected this function be called
 * while holding the mac_filter_hash_lock, as otherwise the current number of
 * addresses might not be accurate.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
					   struct virtchnl_ether_addr_list *al)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	int i, mac_add_max, mac_add_cnt = 0;
	bool vf_trusted;

	vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;
		u8 *addr = al->list[i].addr;

		if (is_broadcast_ether_addr(addr) ||
		    is_zero_ether_addr(addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				addr);
			return -EINVAL;
		}

		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * Unless the VF is privileged and then it can do whatever.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		if (!vf_trusted && !is_multicast_ether_addr(addr) &&
		    vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			dev_err(&pf->pdev->dev,
				"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
			return -EPERM;
		}

		/* count filters that will really be added */
		f = i40e_find_mac(vsi, addr);
		if (!f)
			++mac_add_cnt;
	}

	/* If this VF is not privileged, then we can't add more than a limited
	 * number of addresses.
	 *
	 * If this VF is trusted, it can use more resources than untrusted.
	 * However, to ensure that every trusted VF gets an appropriate number
	 * of resources, divide the whole pool of resources per port and then
	 * across all VFs.
	 */
	if (!vf_trusted)
		mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
	else
		mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);

	/* A VF can replace all its filters in one step; in that case
	 * mac_add_max filters will be added as active and another
	 * mac_add_max will be in a to-be-removed state. Account for that.
	 */
	if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
	    (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
		if (!vf_trusted) {
			dev_err(&pf->pdev->dev,
				"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
			return -EPERM;
		} else {
			dev_err(&pf->pdev->dev,
				"Cannot add more MAC addresses, trusted VF exhausted its resources\n");
			return -EPERM;
		}
	}
	return 0;
}

/**
 * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
 * @vc_ether_addr: used to extract the type
 **/
static u8
i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
{
	return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
}

/**
 * i40e_is_vc_addr_legacy
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 *
 * check if the MAC address is from an older VF
 **/
static bool
i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
{
	return i40e_vc_ether_addr_type(vc_ether_addr) ==
		VIRTCHNL_ETHER_ADDR_LEGACY;
}

/**
 * i40e_is_vc_addr_primary
 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
 *
 * check if the MAC address is the VF's primary MAC
 * This function should only be called when the MAC address in
 * virtchnl_ether_addr is a valid unicast MAC
 **/
static bool
i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
{
	return i40e_vc_ether_addr_type(vc_ether_addr) ==
		VIRTCHNL_ETHER_ADDR_PRIMARY;
}

/**
 * i40e_update_vf_mac_addr
 * @vf: VF to update
 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
 *
 * update the VF's cached hardware MAC if allowed
 **/
static void
i40e_update_vf_mac_addr(struct i40e_vf *vf,
			struct virtchnl_ether_addr *vc_ether_addr)
{
	u8 *mac_addr = vc_ether_addr->addr;

	if (!is_valid_ether_addr(mac_addr))
		return;

	/* If the request to add a MAC filter is a primary request, update
	 * the cached default MAC address with the requested one. If it is a
	 * legacy request, update the default MAC only if the current default
	 * is empty.
	 */
	if (i40e_is_vc_addr_primary(vc_ether_addr)) {
		ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
	} else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
		if (is_zero_ether_addr(vf->default_lan_addr.addr))
			ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
	}
}
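/* Example of the rule above: a legacy (pre-typed) request only seeds an
 * empty default MAC, while a VIRTCHNL_ETHER_ADDR_PRIMARY request always
 * replaces the cached default with the new address.
 */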

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		ret = -EINVAL;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because every function inside the for loop accesses the
	 * VSI's MAC filter list, which needs to be protected by the same
	 * lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	ret = i40e_check_vf_permission(vf, al);
	if (ret) {
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		goto error_param;
	}

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f) {
			f = i40e_add_mac_filter(vsi, al->list[i].addr);

			if (!f) {
				dev_err(&pf->pdev->dev,
					"Unable to add MAC filter %pM for VF %d\n",
					al->list[i].addr, vf->vf_id);
				ret = -EINVAL;
				spin_unlock_bh(&vsi->mac_filter_hash_lock);
				goto error_param;
			}
		}
		i40e_update_vf_mac_addr(vf, &al->list[i]);
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				      ret, NULL, 0);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	bool was_unimac_deleted = false;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = -EINVAL;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++) {
		const u8 *addr = al->list[i].addr;

		/* Allow deleting the VF's primary MAC only if it was not set
		 * administratively by the PF.
		 */
		if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			if (!vf->pf_set_mac)
				was_unimac_deleted = true;
			else
				continue;
		}

		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = -EINVAL;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		}
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (was_unimac_deleted)
		eth_zero_addr(vf->default_lan_addr.addr);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

	if (vf->trusted && was_unimac_deleted) {
		struct i40e_mac_filter *f;
		struct hlist_node *h;
		u8 *macaddr = NULL;
		int bkt;

		/* set last unicast mac address as default */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (is_valid_ether_addr(f->macaddr))
				macaddr = f->macaddr;
		}
		if (macaddr)
			ether_addr_copy(vf->default_lan_addr.addr, macaddr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	int i;

	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		goto error_param;
	}
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = -EINVAL;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);

		if (!ret)
			vf->num_vlan++;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	int i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = -EINVAL;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		if (vfl->num_elements > 1 || vfl->vlan_id[0])
			aq_ret = -EINVAL;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		vf->num_vlan--;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_rdma_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the RDMA (iWARP) msgs
 **/
static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *main_vsi;
	int aq_ret = 0;
	int abs_vf_id;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	main_vsi = i40e_pf_get_main_vsi(pf);
	abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
	i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
				       aq_ret);
}

/**
 * i40e_vc_rdma_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @config: config qvmap or release it
 *
 * called from the VF for the RDMA (iWARP) msgs
 **/
static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
	struct virtchnl_rdma_qvlist_info *qvlist_info =
				(struct virtchnl_rdma_qvlist_info *)msg;
	int aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
		aq_ret = -EINVAL;
		goto error_param;
	}

	if (config) {
		if (i40e_config_rdma_qvlist(vf, qvlist_info))
			aq_ret = -EINVAL;
	} else {
		i40e_release_rdma_qvlist(vf);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
				       VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS key
 **/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
	    vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Configure the VF's RSS LUT
 **/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_lut *vrl =
		(struct virtchnl_rss_lut *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int aq_ret = 0;
	u16 i;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
	    !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
	    vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
		aq_ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < vrl->lut_entries; i++)
		if (vrl->lut[i] >= vf->num_queue_pairs) {
			aq_ret = -EINVAL;
			goto err;
		}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
				       aq_ret);
}
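/* Example of the LUT check above: a VF with 4 queue pairs must submit a
 * LUT whose every entry is in the range 0-3; any entry at or above
 * num_queue_pairs fails the request with -EINVAL.
 */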

/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh = NULL;
	struct i40e_pf *pf = vf->pf;
	int aq_ret = 0;
	int len = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}
	len = sizeof(struct virtchnl_rss_hena);

	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		aq_ret = -ENOMEM;
		len = 0;
		goto err;
	}
	vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
	/* send the response back to the VF */
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
	return aq_ret;
}

/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Set the RSS HENA bits for the VF
 **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_rss_hena *vrh =
		(struct virtchnl_rss_hena *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}

/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Enable vlan header stripping for the VF
 **/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_vsi *vsi;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_vlan_stripping_enable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Disable vlan header stripping for the VF
 **/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_vsi *vsi;
	int aq_ret = 0;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = -EINVAL;
		goto err;
	}

	vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_vlan_stripping_disable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_validate_cloud_filter
 * @vf: pointer to VF structure
 * @tc_filter: pointer to filter requested
 *
 * This function validates a cloud filter programmed as a TC filter for ADq
 **/
static int i40e_validate_cloud_filter(struct i40e_vf *vf,
				      struct virtchnl_filter *tc_filter)
{
	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
		dev_info(&pf->pdev->dev,
			 "VF %d: ADQ doesn't support this action (%d)\n",
			 vf->vf_id, tc_filter->action);
		goto err;
	}

	/* action_meta is the TC number to which the filter is applied */
	if (!tc_filter->action_meta ||
	    tc_filter->action_meta >= vf->num_tc) {
		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
			 vf->vf_id, tc_filter->action_meta);
		goto err;
	}

	/* Check whether the filter is programmed for advanced mode or basic
	 * mode. There are two ADq modes (for VFs only):
	 * 1. Basic mode: intended to allow as many filter options as
	 *    possible to be added to a VF in Non-trusted mode. The main
	 *    goal is to add filters on the VF's own MAC and VLAN id.
	 * 2. Advanced mode: allows filters to be applied to other than the
	 *    VF's own MAC or VLAN. This mode requires the VF to be Trusted.
	 */
	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
		vsi = pf->vsi[vf->lan_vsi_idx];
		f = i40e_find_mac(vsi, data.dst_mac);

		if (!f) {
			dev_info(&pf->pdev->dev,
				 "Destination MAC %pM doesn't belong to VF %d\n",
				 data.dst_mac, vf->vf_id);
			goto err;
		}

		if (mask.vlan_id) {
			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
					   hlist) {
				if (f->vlan == ntohs(data.vlan_id)) {
					found = true;
					break;
				}
			}
			if (!found) {
				dev_info(&pf->pdev->dev,
					 "VF %d doesn't have any VLAN id %u\n",
					 vf->vf_id, ntohs(data.vlan_id));
				goto err;
			}
		}
	} else {
		/* Check if VF is trusted */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
			dev_err(&pf->pdev->dev,
				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
				vf->vf_id);
			return -EIO;
		}
	}

	if (mask.dst_mac[0] & data.dst_mac[0]) {
		if (is_broadcast_ether_addr(data.dst_mac) ||
		    is_zero_ether_addr(data.dst_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
				 vf->vf_id, data.dst_mac);
			goto err;
		}
	}

	if (mask.src_mac[0] & data.src_mac[0]) {
		if (is_broadcast_ether_addr(data.src_mac) ||
		    is_zero_ether_addr(data.src_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
				 vf->vf_id, data.src_mac);
			goto err;
		}
	}

	if (mask.dst_port & data.dst_port) {
		if (!data.dst_port) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (mask.src_port & data.src_port) {
		if (!data.src_port) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
			 vf->vf_id);
		goto err;
	}

	if (mask.vlan_id & data.vlan_id) {
		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
				 vf->vf_id);
			goto err;
		}
	}

	return 0;
err:
	return -EIO;
}
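/* Example of the two modes above: an untrusted VF may redirect, say, TCP
 * traffic matching its own destination MAC plus a destination port (basic
 * mode), but a filter that matches on IP addresses, or on a MAC the VF
 * does not own, requires the VF to be trusted (advanced mode).
 */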
3704
3705 /**
3706 * i40e_find_vsi_from_seid - searches for the VSI with the given SEID
3707 * @vf: pointer to the VF info
3708 * @seid: SEID of the VSI being searched for
3709 **/
3710 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3711 {
3712 struct i40e_pf *pf = vf->pf;
3713 struct i40e_vsi *vsi = NULL;
3714 int i;
3715
3716 for (i = 0; i < vf->num_tc; i++) {
3717 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3718 if (vsi && vsi->seid == seid)
3719 return vsi;
3720 }
3721 return NULL;
3722 }
3723
3724 /**
3725 * i40e_del_all_cloud_filters
3726 * @vf: pointer to the VF info
3727 *
3728 * This function deletes all cloud filters configured for the given VF
3729 **/
3730 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3731 {
3732 struct i40e_cloud_filter *cfilter = NULL;
3733 struct i40e_pf *pf = vf->pf;
3734 struct i40e_vsi *vsi = NULL;
3735 struct hlist_node *node;
3736 int ret;
3737
3738 hlist_for_each_entry_safe(cfilter, node,
3739 &vf->cloud_filter_list, cloud_node) {
3740 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3741
3742 if (!vsi) {
3743 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3744 vf->vf_id, cfilter->seid);
3745 continue;
3746 }
3747
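		/* Filters that match on an L4 port must use the "big buffer"
		 * admin queue variant; all other cloud filters use the
		 * regular command.
		 */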
3748 if (cfilter->dst_port)
3749 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3750 false);
3751 else
3752 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3753 if (ret)
3754 dev_err(&pf->pdev->dev,
3755 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3756 vf->vf_id, ERR_PTR(ret),
3757 i40e_aq_str(&pf->hw,
3758 pf->hw.aq.asq_last_status));
3759
3760 hlist_del(&cfilter->cloud_node);
3761 kfree(cfilter);
3762 vf->num_cloud_filters--;
3763 }
3764 }
3765
3766 /**
3767 * i40e_vc_del_cloud_filter
3768 * @vf: pointer to the VF info
3769 * @msg: pointer to the msg buffer
3770 *
3771 * This function deletes a cloud filter programmed as TC filter for ADq
3772 **/
3773 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3774 {
3775 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3776 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3777 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3778 struct i40e_cloud_filter cfilter, *cf = NULL;
3779 struct i40e_pf *pf = vf->pf;
3780 struct i40e_vsi *vsi = NULL;
3781 struct hlist_node *node;
3782 int aq_ret = 0;
3783 int i, ret;
3784
3785 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3786 aq_ret = -EINVAL;
3787 goto err;
3788 }
3789
3790 if (!vf->adq_enabled) {
3791 dev_info(&pf->pdev->dev,
3792 "VF %d: ADq not enabled, can't apply cloud filter\n",
3793 vf->vf_id);
3794 aq_ret = -EINVAL;
3795 goto err;
3796 }
3797
3798 if (i40e_validate_cloud_filter(vf, vcf)) {
3799 dev_info(&pf->pdev->dev,
3800 "VF %d: Invalid input, can't apply cloud filter\n",
3801 vf->vf_id);
3802 aq_ret = -EINVAL;
3803 goto err;
3804 }
3805
3806 memset(&cfilter, 0, sizeof(cfilter));
3807 /* parse destination mac address */
3808 for (i = 0; i < ETH_ALEN; i++)
3809 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3810
3811 /* parse source mac address */
3812 for (i = 0; i < ETH_ALEN; i++)
3813 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3814
3815 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3816 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3817 cfilter.src_port = mask.src_port & tcf.src_port;
3818
3819 switch (vcf->flow_type) {
3820 case VIRTCHNL_TCP_V4_FLOW:
3821 cfilter.n_proto = ETH_P_IP;
3822 if (mask.dst_ip[0] & tcf.dst_ip[0])
3823 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3824 ARRAY_SIZE(tcf.dst_ip));
3825 else if (mask.src_ip[0] & tcf.src_ip[0])
3826 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3827 ARRAY_SIZE(tcf.dst_ip));
3828 break;
3829 case VIRTCHNL_TCP_V6_FLOW:
3830 cfilter.n_proto = ETH_P_IPV6;
3831 if (mask.dst_ip[3] & tcf.dst_ip[3])
3832 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3833 sizeof(cfilter.ip.v6.dst_ip6));
3834 if (mask.src_ip[3] & tcf.src_ip[3])
3835 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3836 sizeof(cfilter.ip.v6.src_ip6));
3837 break;
3838 default:
3839 /* TC filter can be configured based on different combinations
3840 * and in this case IP is not a part of filter config
3841 */
3842 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3843 vf->vf_id);
3844 }
3845
3846 /* get the VSI to which the TC belongs */
3847 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3848 cfilter.seid = vsi->seid;
3849 cfilter.flags = vcf->field_flags;
3850
3851 /* Deleting TC filter */
3852 if (tcf.dst_port)
3853 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3854 else
3855 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3856 if (ret) {
3857 dev_err(&pf->pdev->dev,
3858 "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
3859 vf->vf_id, ERR_PTR(ret),
3860 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3861 goto err;
3862 }
3863
3864 hlist_for_each_entry_safe(cf, node,
3865 &vf->cloud_filter_list, cloud_node) {
3866 if (cf->seid != cfilter.seid)
3867 continue;
3868 if (mask.dst_port)
3869 if (cfilter.dst_port != cf->dst_port)
3870 continue;
3871 if (mask.dst_mac[0])
3872 if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
3873 continue;
3874 /* for ipv4 data to be valid, only the first word of the mask is set */
3875 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3876 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3877 ARRAY_SIZE(tcf.dst_ip)))
3878 continue;
3879 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3880 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3881 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3882 sizeof(cfilter.ip.v6.dst_ip6)))
3883 continue;
3884 if (mask.vlan_id)
3885 if (cfilter.vlan_id != cf->vlan_id)
3886 continue;
3887
3888 hlist_del(&cf->cloud_node);
3889 kfree(cf);
3890 vf->num_cloud_filters--;
3891 }
3892
3893 err:
3894 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3895 aq_ret);
3896 }
3897
3898 #define I40E_MAX_VF_CLOUD_FILTER 0xFF00
3899
3900 /**
3901 * i40e_vc_add_cloud_filter
3902 * @vf: pointer to the VF info
3903 * @msg: pointer to the msg buffer
3904 *
3905 * This function adds a cloud filter programmed as TC filter for ADq
3906 **/
3907 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3908 {
3909 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3910 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3911 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3912 struct i40e_cloud_filter *cfilter = NULL;
3913 struct i40e_pf *pf = vf->pf;
3914 struct i40e_vsi *vsi = NULL;
3915 int aq_ret = 0;
3916 int i;
3917
3918 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3919 aq_ret = -EINVAL;
3920 goto err_out;
3921 }
3922
3923 if (!vf->adq_enabled) {
3924 dev_info(&pf->pdev->dev,
3925 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3926 vf->vf_id);
3927 aq_ret = -EINVAL;
3928 goto err_out;
3929 }
3930
3931 if (i40e_validate_cloud_filter(vf, vcf)) {
3932 dev_info(&pf->pdev->dev,
3933 "VF %d: Invalid input/s, can't apply cloud filter\n",
3934 vf->vf_id);
3935 aq_ret = -EINVAL;
3936 goto err_out;
3937 }
3938
3939 if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
3940 dev_warn(&pf->pdev->dev,
3941 "VF %d: Max number of filters reached, can't apply cloud filter\n",
3942 vf->vf_id);
3943 aq_ret = -ENOSPC;
3944 goto err_out;
3945 }
3946
3947 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3948 if (!cfilter) {
3949 aq_ret = -ENOMEM;
3950 goto err_out;
3951 }
3952
3953 /* parse destination mac address */
3954 for (i = 0; i < ETH_ALEN; i++)
3955 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3956
3957 /* parse source mac address */
3958 for (i = 0; i < ETH_ALEN; i++)
3959 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3960
3961 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3962 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3963 cfilter->src_port = mask.src_port & tcf.src_port;
3964
3965 switch (vcf->flow_type) {
3966 case VIRTCHNL_TCP_V4_FLOW:
3967 cfilter->n_proto = ETH_P_IP;
3968 if (mask.dst_ip[0] & tcf.dst_ip[0])
3969 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3970 ARRAY_SIZE(tcf.dst_ip));
3971 else if (mask.src_ip[0] & tcf.src_ip[0])
3972 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3973 ARRAY_SIZE(tcf.dst_ip));
3974 break;
3975 case VIRTCHNL_TCP_V6_FLOW:
3976 cfilter->n_proto = ETH_P_IPV6;
3977 if (mask.dst_ip[3] & tcf.dst_ip[3])
3978 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3979 sizeof(cfilter->ip.v6.dst_ip6));
3980 if (mask.src_ip[3] & tcf.src_ip[3])
3981 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3982 sizeof(cfilter->ip.v6.src_ip6));
3983 break;
3984 default:
3985 /* TC filter can be configured based on different combinations
3986 * and in this case IP is not a part of filter config
3987 */
3988 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3989 vf->vf_id);
3990 }
3991
3992 /* get the VSI to which the TC belongs */
3993 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3994 cfilter->seid = vsi->seid;
3995 cfilter->flags = vcf->field_flags;
3996
3997 /* Adding cloud filter programmed as TC filter */
3998 if (tcf.dst_port)
3999 aq_ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
4000 else
4001 aq_ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
4002 if (aq_ret) {
4003 dev_err(&pf->pdev->dev,
4004 "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
4005 vf->vf_id, ERR_PTR(aq_ret),
4006 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4007 goto err_free;
4008 }
4009
4010 INIT_HLIST_NODE(&cfilter->cloud_node);
4011 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
4012 /* ownership passed to the list; clear the pointer so the error-path kfree() is a no-op */
4013 cfilter = NULL;
4014 vf->num_cloud_filters++;
4015 err_free:
4016 kfree(cfilter);
4017 err_out:
4018 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
4019 aq_ret);
4020 }
4021
4022 /**
4023 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
4024 * @vf: pointer to the VF info
4025 * @msg: pointer to the msg buffer
4026 **/
4027 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
4028 {
4029 struct virtchnl_tc_info *tci =
4030 (struct virtchnl_tc_info *)msg;
4031 struct i40e_pf *pf = vf->pf;
4032 struct i40e_link_status *ls = &pf->hw.phy.link_info;
4033 int i, adq_request_qps = 0;
4034 int aq_ret = 0;
4035 u64 speed = 0;
4036
4037 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4038 aq_ret = -EINVAL;
4039 goto err;
4040 }
4041
4042 /* ADq cannot be applied if spoof check is ON */
4043 if (vf->spoofchk) {
4044 dev_err(&pf->pdev->dev,
4045 "Spoof check is ON, turn it OFF to enable ADq\n");
4046 aq_ret = -EINVAL;
4047 goto err;
4048 }
4049
4050 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
4051 dev_err(&pf->pdev->dev,
4052 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
4053 vf->vf_id);
4054 aq_ret = -EINVAL;
4055 goto err;
4056 }
4057
4058 /* max number of traffic classes for VF currently capped at 4 */
4059 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
4060 dev_err(&pf->pdev->dev,
4061 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
4062 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
4063 aq_ret = -EINVAL;
4064 goto err;
4065 }
4066
4067 /* validate queues for each TC */
4068 for (i = 0; i < tci->num_tc; i++)
4069 if (!tci->list[i].count ||
4070 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
4071 dev_err(&pf->pdev->dev,
4072 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
4073 vf->vf_id, i, tci->list[i].count,
4074 I40E_DEFAULT_QUEUES_PER_VF);
4075 aq_ret = -EINVAL;
4076 goto err;
4077 }
4078
4079 /* need Max VF queues but already have default number of queues */
4080 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
4081
4082 if (pf->queues_left < adq_request_qps) {
4083 dev_err(&pf->pdev->dev,
4084 "No queues left to allocate to VF %d\n",
4085 vf->vf_id);
4086 aq_ret = -EINVAL;
4087 goto err;
4088 } else {
4089 /* we need to allocate max VF queues to enable ADq so as to
4090 * make sure ADq enabled VF always gets back queues when it
4091 * goes through a reset.
4092 */
4093 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
4094 }
4095
4096 /* get link speed in Mbps to validate rate limit */
4097 speed = i40e_vc_link_speed2mbps(ls->link_speed);
4098 if (speed == SPEED_UNKNOWN) {
4099 dev_err(&pf->pdev->dev,
4100 "Cannot detect link speed\n");
4101 aq_ret = -EINVAL;
4102 goto err;
4103 }
4104
4105 /* parse data from the queue channel info */
4106 vf->num_tc = tci->num_tc;
4107 for (i = 0; i < vf->num_tc; i++) {
4108 if (tci->list[i].max_tx_rate) {
4109 if (tci->list[i].max_tx_rate > speed) {
4110 dev_err(&pf->pdev->dev,
4111 "Invalid max tx rate %llu specified for VF %d.",
4112 tci->list[i].max_tx_rate,
4113 vf->vf_id);
4114 aq_ret = -EINVAL;
4115 goto err;
4116 } else {
4117 vf->ch[i].max_tx_rate =
4118 tci->list[i].max_tx_rate;
4119 }
4120 }
4121 vf->ch[i].num_qps = tci->list[i].count;
4122 }
4123
4124 /* set this flag only after making sure all inputs are sane */
4125 vf->adq_enabled = true;
4126
4127 /* reset the VF in order to allocate resources */
4128 i40e_vc_reset_vf(vf, true);
4129
4130 return 0;
4131
4132 /* send the response to the VF */
4133 err:
4134 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
4135 aq_ret);
4136 }
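
/* Illustrative sketch only (not driver code): the VF-side payload this
 * handler parses. A request for two TCs with 4 queue pairs each, capping
 * TC 1 at 100 Mbps (rates here are compared against the link speed in Mbps),
 * could be built roughly like this, using types from
 * include/linux/avf/virtchnl.h:
 *
 *	struct virtchnl_tc_info *tci;
 *
 *	tci = kzalloc(struct_size(tci, list, 2), GFP_KERNEL);
 *	tci->num_tc = 2;
 *	tci->list[0].count = 4;			// queue pairs for TC 0
 *	tci->list[1].count = 4;			// queue pairs for TC 1
 *	tci->list[1].max_tx_rate = 100;		// Mbps, validated above
 */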
4137
4138 /**
4139 * i40e_vc_del_qch_msg
4140 * @vf: pointer to the VF info
4141 * @msg: pointer to the msg buffer
4142 **/
4143 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
4144 {
4145 struct i40e_pf *pf = vf->pf;
4146 int aq_ret = 0;
4147
4148 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
4149 aq_ret = -EINVAL;
4150 goto err;
4151 }
4152
4153 if (vf->adq_enabled) {
4154 i40e_del_all_cloud_filters(vf);
4155 i40e_del_qch(vf);
4156 vf->adq_enabled = false;
4157 vf->num_tc = 0;
4158 dev_info(&pf->pdev->dev,
4159 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
4160 vf->vf_id);
4161 } else {
4162 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
4163 vf->vf_id);
4164 aq_ret = -EINVAL;
4165 }
4166
4167 /* reset the VF in order to allocate resources */
4168 i40e_vc_reset_vf(vf, true);
4169
4170 return 0;
4171
4172 err:
4173 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
4174 aq_ret);
4175 }
4176
4177 /**
4178 * i40e_vc_process_vf_msg
4179 * @pf: pointer to the PF structure
4180 * @vf_id: source VF id
4181 * @v_opcode: operation code
4182 * @v_retval: unused return value code
4183 * @msg: pointer to the msg buffer
4184 * @msglen: msg length
4185 *
4186 * called from the common AEQ/ARQ handler to
4187 * process a request from a VF
4188 **/
4189 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4190 u32 __always_unused v_retval, u8 *msg, u16 msglen)
4191 {
4192 struct i40e_hw *hw = &pf->hw;
4193 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4194 struct i40e_vf *vf;
4195 int ret;
4196
4197 pf->vf_aq_requests++;
4198 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4199 return -EINVAL;
4200 vf = &(pf->vf[local_vf_id]);
4201
4202 /* Check if VF is disabled. */
4203 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4204 return -EINVAL;
4205
4206 /* perform basic checks on the msg */
4207 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4208
4209 if (ret) {
4210 i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
4211 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4212 local_vf_id, v_opcode, msglen);
4213 return ret;
4214 }
4215
4216 switch (v_opcode) {
4217 case VIRTCHNL_OP_VERSION:
4218 ret = i40e_vc_get_version_msg(vf, msg);
4219 break;
4220 case VIRTCHNL_OP_GET_VF_RESOURCES:
4221 ret = i40e_vc_get_vf_resources_msg(vf, msg);
4222 i40e_vc_notify_vf_link_state(vf);
4223 break;
4224 case VIRTCHNL_OP_RESET_VF:
4225 i40e_vc_reset_vf(vf, false);
4226 ret = 0;
4227 break;
4228 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4229 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4230 break;
4231 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4232 ret = i40e_vc_config_queues_msg(vf, msg);
4233 break;
4234 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4235 ret = i40e_vc_config_irq_map_msg(vf, msg);
4236 break;
4237 case VIRTCHNL_OP_ENABLE_QUEUES:
4238 ret = i40e_vc_enable_queues_msg(vf, msg);
4239 i40e_vc_notify_vf_link_state(vf);
4240 break;
4241 case VIRTCHNL_OP_DISABLE_QUEUES:
4242 ret = i40e_vc_disable_queues_msg(vf, msg);
4243 break;
4244 case VIRTCHNL_OP_ADD_ETH_ADDR:
4245 ret = i40e_vc_add_mac_addr_msg(vf, msg);
4246 break;
4247 case VIRTCHNL_OP_DEL_ETH_ADDR:
4248 ret = i40e_vc_del_mac_addr_msg(vf, msg);
4249 break;
4250 case VIRTCHNL_OP_ADD_VLAN:
4251 ret = i40e_vc_add_vlan_msg(vf, msg);
4252 break;
4253 case VIRTCHNL_OP_DEL_VLAN:
4254 ret = i40e_vc_remove_vlan_msg(vf, msg);
4255 break;
4256 case VIRTCHNL_OP_GET_STATS:
4257 ret = i40e_vc_get_stats_msg(vf, msg);
4258 break;
4259 case VIRTCHNL_OP_RDMA:
4260 ret = i40e_vc_rdma_msg(vf, msg, msglen);
4261 break;
4262 case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
4263 ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
4264 break;
4265 case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
4266 ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
4267 break;
4268 case VIRTCHNL_OP_CONFIG_RSS_KEY:
4269 ret = i40e_vc_config_rss_key(vf, msg);
4270 break;
4271 case VIRTCHNL_OP_CONFIG_RSS_LUT:
4272 ret = i40e_vc_config_rss_lut(vf, msg);
4273 break;
4274 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4275 ret = i40e_vc_get_rss_hena(vf, msg);
4276 break;
4277 case VIRTCHNL_OP_SET_RSS_HENA:
4278 ret = i40e_vc_set_rss_hena(vf, msg);
4279 break;
4280 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4281 ret = i40e_vc_enable_vlan_stripping(vf, msg);
4282 break;
4283 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4284 ret = i40e_vc_disable_vlan_stripping(vf, msg);
4285 break;
4286 case VIRTCHNL_OP_REQUEST_QUEUES:
4287 ret = i40e_vc_request_queues_msg(vf, msg);
4288 break;
4289 case VIRTCHNL_OP_ENABLE_CHANNELS:
4290 ret = i40e_vc_add_qch_msg(vf, msg);
4291 break;
4292 case VIRTCHNL_OP_DISABLE_CHANNELS:
4293 ret = i40e_vc_del_qch_msg(vf, msg);
4294 break;
4295 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4296 ret = i40e_vc_add_cloud_filter(vf, msg);
4297 break;
4298 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4299 ret = i40e_vc_del_cloud_filter(vf, msg);
4300 break;
4301 case VIRTCHNL_OP_UNKNOWN:
4302 default:
4303 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4304 v_opcode, local_vf_id);
4305 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4306 -EOPNOTSUPP);
4307 break;
4308 }
4309
4310 return ret;
4311 }
4312
4313 /**
4314 * i40e_vc_process_vflr_event
4315 * @pf: pointer to the PF structure
4316 *
4317 * called from the VFLR IRQ handler to
4318 * free up VF resources and state variables
4319 **/
4320 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4321 {
4322 struct i40e_hw *hw = &pf->hw;
4323 u32 reg, reg_idx, bit_idx;
4324 struct i40e_vf *vf;
4325 int vf_id;
4326
4327 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4328 return 0;
4329
4330 /* Re-enable the VFLR interrupt cause here, before looking for which
4331 * VF got reset. Otherwise, if another VF gets a reset while the
4332 * first one is being processed, that interrupt will be lost, and
4333 * that VF will be stuck in reset forever.
4334 */
4335 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4336 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4337 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4338 i40e_flush(hw);
4339
4340 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4341 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4342 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4343 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
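		/* e.g. with vf_base_id 64 and vf_id 10, absolute VF 74 maps
		 * to GLGEN_VFLRSTAT[2], bit 10 (74 / 32 == 2, 74 % 32 == 10)
		 */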
4344 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
4345 vf = &pf->vf[vf_id];
4346 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4347 if (reg & BIT(bit_idx))
4348 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4349 if (!i40e_reset_vf(vf, true)) {
4350 /* At least one VF did not finish resetting, retry next time */
4351 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4352 }
4353 }
4354
4355 return 0;
4356 }
4357
4358 /**
4359 * i40e_validate_vf
4360 * @pf: the physical function
4361 * @vf_id: VF identifier
4362 *
4363 * Check that the VF is enabled and the VSI exists.
4364 *
4365 * Returns 0 on success, negative on failure
4366 **/
4367 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4368 {
4369 struct i40e_vsi *vsi;
4370 struct i40e_vf *vf;
4371 int ret = 0;
4372
4373 if (vf_id >= pf->num_alloc_vfs) {
4374 dev_err(&pf->pdev->dev,
4375 "Invalid VF Identifier %d\n", vf_id);
4376 ret = -EINVAL;
4377 goto err_out;
4378 }
4379 vf = &pf->vf[vf_id];
4380 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4381 if (!vsi)
4382 ret = -EINVAL;
4383 err_out:
4384 return ret;
4385 }
4386
4387 /**
4388 * i40e_check_vf_init_timeout
4389 * @vf: the virtual function
4390 *
4391 * Check that the VF's initialization was successfully done and if not
4392 * wait up to 300ms for its finish.
4393 *
4394 * Returns true when VF is initialized, false on timeout
4395 **/
4396 static bool i40e_check_vf_init_timeout(struct i40e_vf *vf)
4397 {
4398 int i;
4399
4400 /* When the VF is resetting, wait until it is done.
4401 * It can take up to 200 milliseconds, but wait for
4402 * up to 300 milliseconds to be safe.
4403 */
4404 for (i = 0; i < 15; i++) {
4405 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4406 return true;
4407 msleep(20);
4408 }
4409
4410 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4411 dev_err(&vf->pf->pdev->dev,
4412 "VF %d still in reset. Try again.\n", vf->vf_id);
4413 return false;
4414 }
4415
4416 return true;
4417 }
4418
4419 /**
4420 * i40e_ndo_set_vf_mac
4421 * @netdev: network interface device structure
4422 * @vf_id: VF identifier
4423 * @mac: mac address
4424 *
4425 * program VF mac address
4426 **/
4427 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4428 {
4429 struct i40e_netdev_priv *np = netdev_priv(netdev);
4430 struct i40e_vsi *vsi = np->vsi;
4431 struct i40e_pf *pf = vsi->back;
4432 struct i40e_mac_filter *f;
4433 struct i40e_vf *vf;
4434 int ret = 0;
4435 struct hlist_node *h;
4436 int bkt;
4437
4438 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4439 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4440 return -EAGAIN;
4441 }
4442
4443 /* validate the request */
4444 ret = i40e_validate_vf(pf, vf_id);
4445 if (ret)
4446 goto error_param;
4447
4448 vf = &pf->vf[vf_id];
4449 if (!i40e_check_vf_init_timeout(vf)) {
4450 ret = -EAGAIN;
4451 goto error_param;
4452 }
4453 vsi = pf->vsi[vf->lan_vsi_idx];
4454
4455 if (is_multicast_ether_addr(mac)) {
4456 dev_err(&pf->pdev->dev,
4457 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4458 ret = -EINVAL;
4459 goto error_param;
4460 }
4461
4462 /* Lock once here because the add/del_filter functions invoked
4463 * below require mac_filter_hash_lock to be held
4464 */
4465 spin_lock_bh(&vsi->mac_filter_hash_lock);
4466
4467 /* delete the temporary mac address */
4468 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4469 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4470
4471 /* Delete all the filters for this VSI - we're going to kill it
4472 * anyway.
4473 */
4474 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4475 __i40e_del_filter(vsi, f);
4476
4477 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4478
4479 /* program mac filter */
4480 if (i40e_sync_vsi_filters(vsi)) {
4481 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4482 ret = -EIO;
4483 goto error_param;
4484 }
4485 ether_addr_copy(vf->default_lan_addr.addr, mac);
4486
4487 if (is_zero_ether_addr(mac)) {
4488 vf->pf_set_mac = false;
4489 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4490 } else {
4491 vf->pf_set_mac = true;
4492 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4493 mac, vf_id);
4494 }
4495
4496 /* Force the VF interface down so it has to come back up with the
4497 * new MAC address
4498 */
4499 i40e_vc_reset_vf(vf, true);
4500 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4501
4502 error_param:
4503 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4504 return ret;
4505 }
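
/* Typical usage from the host, via the .ndo_set_vf_mac hook (device name is
 * a placeholder):
 *
 *	ip link set dev enp2s0f0 vf 3 mac 02:11:22:33:44:55
 */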
4506
4507 /**
4508 * i40e_ndo_set_vf_port_vlan
4509 * @netdev: network interface device structure
4510 * @vf_id: VF identifier
4511 * @vlan_id: VLAN ID
4512 * @qos: priority setting
4513 * @vlan_proto: vlan protocol
4514 *
4515 * program VF vlan id and/or qos
4516 **/
4517 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4518 u16 vlan_id, u8 qos, __be16 vlan_proto)
4519 {
4520 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4521 struct i40e_netdev_priv *np = netdev_priv(netdev);
4522 bool allmulti = false, alluni = false;
4523 struct i40e_pf *pf = np->vsi->back;
4524 struct i40e_vsi *vsi;
4525 struct i40e_vf *vf;
4526 int ret = 0;
4527
4528 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4529 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4530 return -EAGAIN;
4531 }
4532
4533 /* validate the request */
4534 ret = i40e_validate_vf(pf, vf_id);
4535 if (ret)
4536 goto error_pvid;
4537
4538 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4539 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4540 ret = -EINVAL;
4541 goto error_pvid;
4542 }
4543
4544 if (vlan_proto != htons(ETH_P_8021Q)) {
4545 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4546 ret = -EPROTONOSUPPORT;
4547 goto error_pvid;
4548 }
4549
4550 vf = &pf->vf[vf_id];
4551 if (!i40e_check_vf_init_timeout(vf)) {
4552 ret = -EAGAIN;
4553 goto error_pvid;
4554 }
4555 vsi = pf->vsi[vf->lan_vsi_idx];
4556
4557 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4558 /* duplicate request, so just return success */
4559 goto error_pvid;
4560
4561 i40e_vlan_stripping_enable(vsi);
4562
4563 /* Locked once because multiple functions below iterate list */
4564 spin_lock_bh(&vsi->mac_filter_hash_lock);
4565
4566 /* Check for condition where there was already a port VLAN ID
4567 * filter set and now it is being deleted by setting it to zero.
4568 * Additionally check for the condition where there was a port
4569 * VLAN but now there is a new and different port VLAN being set.
4570 * Before deleting all the old VLAN filters we must add new ones
4571 * with -1 (I40E_VLAN_ANY), otherwise we're left with all our
4572 * MAC addresses deleted.
4573 */
4574 if ((!(vlan_id || qos) ||
4575 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4576 vsi->info.pvid) {
4577 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4578 if (ret) {
4579 dev_info(&vsi->back->pdev->dev,
4580 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4581 vsi->back->hw.aq.asq_last_status);
4582 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4583 goto error_pvid;
4584 }
4585 }
4586
4587 if (vsi->info.pvid) {
4588 /* remove all filters on the old VLAN */
4589 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4590 VLAN_VID_MASK));
4591 }
4592
4593 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4594
4595 /* disable promisc modes in case they were enabled */
4596 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4597 allmulti, alluni);
4598 if (ret) {
4599 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4600 goto error_pvid;
4601 }
4602
4603 if (vlan_id || qos)
4604 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4605 else
4606 i40e_vsi_remove_pvid(vsi);
4607 spin_lock_bh(&vsi->mac_filter_hash_lock);
4608
4609 if (vlan_id) {
4610 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4611 vlan_id, qos, vf_id);
4612
4613 /* add new VLAN filter for each MAC */
4614 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4615 if (ret) {
4616 dev_info(&vsi->back->pdev->dev,
4617 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4618 vsi->back->hw.aq.asq_last_status);
4619 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4620 goto error_pvid;
4621 }
4622
4623 /* remove the previously added non-VLAN MAC filters */
4624 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4625 }
4626
4627 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4628
4629 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4630 alluni = true;
4631
4632 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4633 allmulti = true;
4634
4635 /* Schedule the worker thread to take care of applying changes */
4636 i40e_service_event_schedule(vsi->back);
4637
4638 if (ret) {
4639 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4640 goto error_pvid;
4641 }
4642
4643 /* The Port VLAN needs to be saved across resets the same as the
4644 * default LAN MAC address.
4645 */
4646 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4647
4648 i40e_vc_reset_vf(vf, true);
4649 /* During reset the VF got a new VSI, so refresh the pointer. */
4650 vsi = pf->vsi[vf->lan_vsi_idx];
4651
4652 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4653 if (ret) {
4654 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4655 goto error_pvid;
4656 }
4657
4658 ret = 0;
4659
4660 error_pvid:
4661 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4662 return ret;
4663 }
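
/* The port VLAN packs the 802.1p priority above the VLAN ID in the same TCI
 * word built here (vlan_id | qos << I40E_VLAN_PRIORITY_SHIFT). Typical usage
 * from the host (device name is a placeholder):
 *
 *	ip link set dev enp2s0f0 vf 0 vlan 100 qos 5
 */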
4664
4665 /**
4666 * i40e_ndo_set_vf_bw
4667 * @netdev: network interface device structure
4668 * @vf_id: VF identifier
4669 * @min_tx_rate: Minimum Tx rate
4670 * @max_tx_rate: Maximum Tx rate
4671 *
4672 * configure VF Tx rate
4673 **/
4674 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4675 int max_tx_rate)
4676 {
4677 struct i40e_netdev_priv *np = netdev_priv(netdev);
4678 struct i40e_pf *pf = np->vsi->back;
4679 struct i40e_vsi *vsi;
4680 struct i40e_vf *vf;
4681 int ret = 0;
4682
4683 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4684 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4685 return -EAGAIN;
4686 }
4687
4688 /* validate the request */
4689 ret = i40e_validate_vf(pf, vf_id);
4690 if (ret)
4691 goto error;
4692
4693 if (min_tx_rate) {
4694 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4695 min_tx_rate, vf_id);
4696 ret = -EINVAL;
4697 goto error;
4698 }
4699
4700 vf = &pf->vf[vf_id];
4701 if (!i40e_check_vf_init_timeout(vf)) {
4702 ret = -EAGAIN;
4703 goto error;
4704 }
4705 vsi = pf->vsi[vf->lan_vsi_idx];
4706
4707 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4708 if (ret)
4709 goto error;
4710
4711 vf->tx_rate = max_tx_rate;
4712 error:
4713 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4714 return ret;
4715 }
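
/* Typical usage from the host; the rate is in Mbps and a nonzero
 * min_tx_rate is rejected above (device name is a placeholder):
 *
 *	ip link set dev enp2s0f0 vf 0 max_tx_rate 1000
 */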
4716
4717 /**
4718 * i40e_ndo_get_vf_config
4719 * @netdev: network interface device structure
4720 * @vf_id: VF identifier
4721 * @ivi: VF configuration structure
4722 *
4723 * return VF configuration
4724 **/
4725 int i40e_ndo_get_vf_config(struct net_device *netdev,
4726 int vf_id, struct ifla_vf_info *ivi)
4727 {
4728 struct i40e_netdev_priv *np = netdev_priv(netdev);
4729 struct i40e_vsi *vsi = np->vsi;
4730 struct i40e_pf *pf = vsi->back;
4731 struct i40e_vf *vf;
4732 int ret = 0;
4733
4734 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4735 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4736 return -EAGAIN;
4737 }
4738
4739 /* validate the request */
4740 ret = i40e_validate_vf(pf, vf_id);
4741 if (ret)
4742 goto error_param;
4743
4744 vf = &pf->vf[vf_id];
4745 /* first VSI is always the LAN VSI */
4746 vsi = pf->vsi[vf->lan_vsi_idx];
4747 if (!vsi) {
4748 ret = -ENOENT;
4749 goto error_param;
4750 }
4751
4752 ivi->vf = vf_id;
4753
4754 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4755
4756 ivi->max_tx_rate = vf->tx_rate;
4757 ivi->min_tx_rate = 0;
4758 ivi->vlan = le16_get_bits(vsi->info.pvid, I40E_VLAN_MASK);
4759 ivi->qos = le16_get_bits(vsi->info.pvid, I40E_PRIORITY_MASK);
4760 if (!vf->link_forced)
4761 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4762 else if (vf->link_up)
4763 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4764 else
4765 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4766 ivi->spoofchk = vf->spoofchk;
4767 ivi->trusted = vf->trusted;
4768 ret = 0;
4769
4770 error_param:
4771 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4772 return ret;
4773 }
4774
4775 /**
4776 * i40e_ndo_set_vf_link_state
4777 * @netdev: network interface device structure
4778 * @vf_id: VF identifier
4779 * @link: required link state
4780 *
4781 * Set the link state of a specified VF, regardless of physical link state
4782 **/
4783 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4784 {
4785 struct i40e_netdev_priv *np = netdev_priv(netdev);
4786 struct i40e_pf *pf = np->vsi->back;
4787 struct i40e_link_status *ls = &pf->hw.phy.link_info;
4788 struct virtchnl_pf_event pfe;
4789 struct i40e_hw *hw = &pf->hw;
4790 struct i40e_vsi *vsi;
4791 unsigned long q_map;
4792 struct i40e_vf *vf;
4793 int abs_vf_id;
4794 int ret = 0;
4795 int tmp;
4796
4797 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4798 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4799 return -EAGAIN;
4800 }
4801
4802 /* validate the request */
4803 if (vf_id >= pf->num_alloc_vfs) {
4804 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4805 ret = -EINVAL;
4806 goto error_out;
4807 }
4808
4809 vf = &pf->vf[vf_id];
4810 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4811
4812 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4813 pfe.severity = PF_EVENT_SEVERITY_INFO;
4814
4815 switch (link) {
4816 case IFLA_VF_LINK_STATE_AUTO:
4817 vf->link_forced = false;
4818 vf->is_disabled_from_host = false;
4819 /* reset needed to reinit VF resources */
4820 i40e_vc_reset_vf(vf, true);
4821 i40e_set_vf_link_state(vf, &pfe, ls);
4822 break;
4823 case IFLA_VF_LINK_STATE_ENABLE:
4824 vf->link_forced = true;
4825 vf->link_up = true;
4826 vf->is_disabled_from_host = false;
4827 /* reset needed to reinit VF resources */
4828 i40e_vc_reset_vf(vf, true);
4829 i40e_set_vf_link_state(vf, &pfe, ls);
4830 break;
4831 case IFLA_VF_LINK_STATE_DISABLE:
4832 vf->link_forced = true;
4833 vf->link_up = false;
4834 i40e_set_vf_link_state(vf, &pfe, ls);
4835
4836 vsi = pf->vsi[vf->lan_vsi_idx];
4837 q_map = BIT(vsi->num_queue_pairs) - 1;
4838
4839 vf->is_disabled_from_host = true;
4840
4841 /* Try to stop both Tx&Rx rings even if one of the calls fails
4842 * to ensure we stop the rings even in case of errors.
4843 * If any of them returns with an error then the first
4844 * error that occurred will be returned.
4845 */
4846 tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
4847 ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);
4848
4849 ret = tmp ? tmp : ret;
4850 break;
4851 default:
4852 ret = -EINVAL;
4853 goto error_out;
4854 }
4855 /* Notify the VF of its new link state */
4856 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4857 0, (u8 *)&pfe, sizeof(pfe), NULL);
4858
4859 error_out:
4860 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4861 return ret;
4862 }
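
/* Typical usage from the host (device name is a placeholder):
 *
 *	ip link set dev enp2s0f0 vf 0 state auto	// follow PF link
 *	ip link set dev enp2s0f0 vf 0 state enable	// force link up
 *	ip link set dev enp2s0f0 vf 0 state disable	// force link down
 */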
4863
4864 /**
4865 * i40e_ndo_set_vf_spoofchk
4866 * @netdev: network interface device structure
4867 * @vf_id: VF identifier
4868 * @enable: flag to enable or disable feature
4869 *
4870 * Enable or disable VF spoof checking
4871 **/
4872 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4873 {
4874 struct i40e_netdev_priv *np = netdev_priv(netdev);
4875 struct i40e_vsi *vsi = np->vsi;
4876 struct i40e_pf *pf = vsi->back;
4877 struct i40e_vsi_context ctxt;
4878 struct i40e_hw *hw = &pf->hw;
4879 struct i40e_vf *vf;
4880 int ret = 0;
4881
4882 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4883 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4884 return -EAGAIN;
4885 }
4886
4887 /* validate the request */
4888 if (vf_id >= pf->num_alloc_vfs) {
4889 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4890 ret = -EINVAL;
4891 goto out;
4892 }
4893
4894 vf = &(pf->vf[vf_id]);
4895 if (!i40e_check_vf_init_timeout(vf)) {
4896 ret = -EAGAIN;
4897 goto out;
4898 }
4899
4900 if (enable == vf->spoofchk)
4901 goto out;
4902
4903 vf->spoofchk = enable;
4904 memset(&ctxt, 0, sizeof(ctxt));
4905 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4906 ctxt.pf_num = pf->hw.pf_id;
4907 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4908 if (enable)
4909 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4910 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4911 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4912 if (ret) {
4913 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4914 ret);
4915 ret = -EIO;
4916 }
4917 out:
4918 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4919 return ret;
4920 }
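
/* Typical usage from the host; note that ADq (handled above) requires spoof
 * checking to be off (device name is a placeholder):
 *
 *	ip link set dev enp2s0f0 vf 0 spoofchk off
 */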
4921
4922 /**
4923 * i40e_ndo_set_vf_trust
4924 * @netdev: network interface device structure of the pf
4925 * @vf_id: VF identifier
4926 * @setting: trust setting
4927 *
4928 * Enable or disable VF trust setting
4929 **/
4930 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4931 {
4932 struct i40e_netdev_priv *np = netdev_priv(netdev);
4933 struct i40e_pf *pf = np->vsi->back;
4934 struct i40e_vf *vf;
4935 int ret = 0;
4936
4937 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4938 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4939 return -EAGAIN;
4940 }
4941
4942 /* validate the request */
4943 if (vf_id >= pf->num_alloc_vfs) {
4944 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4945 ret = -EINVAL;
4946 goto out;
4947 }
4948
4949 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
4950 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4951 ret = -EINVAL;
4952 goto out;
4953 }
4954
4955 vf = &pf->vf[vf_id];
4956
4957 if (setting == vf->trusted)
4958 goto out;
4959
4960 vf->trusted = setting;
4961
4962 /* request PF to sync mac/vlan filters for the VF */
4963 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4964 pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
4965
4966 i40e_vc_reset_vf(vf, true);
4967 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4968 vf_id, setting ? "" : "un");
4969
4970 if (vf->adq_enabled) {
4971 if (!vf->trusted) {
4972 dev_info(&pf->pdev->dev,
4973 "VF %u no longer Trusted, deleting all cloud filters\n",
4974 vf_id);
4975 i40e_del_all_cloud_filters(vf);
4976 }
4977 }
4978
4979 out:
4980 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4981 return ret;
4982 }
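
/* Typical usage from the host; as handled above, revoking trust also drops
 * any advanced-mode ADq cloud filters (device name is a placeholder):
 *
 *	ip link set dev enp2s0f0 vf 0 trust on
 */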
4983
4984 /**
4985 * i40e_get_vf_stats - populate some stats for the VF
4986 * @netdev: the netdev of the PF
4987 * @vf_id: the host OS identifier (0-127)
4988 * @vf_stats: pointer to the OS memory to be initialized
4989 */
4990 int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4991 struct ifla_vf_stats *vf_stats)
4992 {
4993 struct i40e_netdev_priv *np = netdev_priv(netdev);
4994 struct i40e_pf *pf = np->vsi->back;
4995 struct i40e_eth_stats *stats;
4996 struct i40e_vsi *vsi;
4997 struct i40e_vf *vf;
4998
4999 /* validate the request */
5000 if (i40e_validate_vf(pf, vf_id))
5001 return -EINVAL;
5002
5003 vf = &pf->vf[vf_id];
5004 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
5005 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
5006 return -EBUSY;
5007 }
5008
5009 vsi = pf->vsi[vf->lan_vsi_idx];
5010 if (!vsi)
5011 return -EINVAL;
5012
5013 i40e_update_eth_stats(vsi);
5014 stats = &vsi->eth_stats;
5015
5016 memset(vf_stats, 0, sizeof(*vf_stats));
5017
5018 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
5019 stats->rx_multicast;
5020 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
5021 stats->tx_multicast;
5022 vf_stats->rx_bytes = stats->rx_bytes;
5023 vf_stats->tx_bytes = stats->tx_bytes;
5024 vf_stats->broadcast = stats->rx_broadcast;
5025 vf_stats->multicast = stats->rx_multicast;
5026 vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
5027 vf_stats->tx_dropped = stats->tx_errors;
5028
5029 return 0;
5030 }
5031