1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 
8 /* busy wait delay in msec */
9 #define IAVF_BUSY_WAIT_DELAY 10
10 #define IAVF_BUSY_WAIT_COUNT 50
11 
12 /**
13  * iavf_send_pf_msg
14  * @adapter: adapter structure
15  * @op: virtual channel opcode
16  * @msg: pointer to message buffer
17  * @len: message length
18  *
19  * Send message to PF and print status if it fails.
20  **/
21 static int iavf_send_pf_msg(struct iavf_adapter *adapter,
22 			    enum virtchnl_ops op, u8 *msg, u16 len)
23 {
24 	struct iavf_hw *hw = &adapter->hw;
25 	enum iavf_status err;
26 
27 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
28 		return 0; /* nothing to see here, move along */
29 
30 	err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
31 	if (err)
32 		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
33 			op, iavf_stat_str(hw, err),
34 			iavf_aq_str(hw, hw->aq.asq_last_status));
35 	return err;
36 }
37 
38 /**
39  * iavf_send_api_ver
40  * @adapter: adapter structure
41  *
42  * Send API version admin queue message to the PF. The reply is not checked
43  * in this function. Returns 0 if the message was successfully
44  * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
45  **/
46 int iavf_send_api_ver(struct iavf_adapter *adapter)
47 {
48 	struct virtchnl_version_info vvi;
49 
50 	vvi.major = VIRTCHNL_VERSION_MAJOR;
51 	vvi.minor = VIRTCHNL_VERSION_MINOR;
52 
53 	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
54 				sizeof(vvi));
55 }
56 
57 /**
58  * iavf_verify_api_ver
59  * @adapter: adapter structure
60  *
61  * Compare API versions with the PF. Must be called after admin queue is
62  * initialized. Returns 0 if API versions match, -EIO if they do not,
63  * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
64  * from the firmware are propagated.
65  **/
66 int iavf_verify_api_ver(struct iavf_adapter *adapter)
67 {
68 	struct virtchnl_version_info *pf_vvi;
69 	struct iavf_hw *hw = &adapter->hw;
70 	struct iavf_arq_event_info event;
71 	enum virtchnl_ops op;
72 	enum iavf_status err;
73 
74 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
75 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
76 	if (!event.msg_buf) {
77 		err = -ENOMEM;
78 		goto out;
79 	}
80 
81 	while (1) {
82 		err = iavf_clean_arq_element(hw, &event, NULL);
83 		/* When the AQ is empty, iavf_clean_arq_element will return
84 		 * nonzero and this loop will terminate.
85 		 */
86 		if (err)
87 			goto out_alloc;
88 		op =
89 		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
90 		if (op == VIRTCHNL_OP_VERSION)
91 			break;
92 	}
93 
95 	err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
96 	if (err)
97 		goto out_alloc;
98 
99 	if (op != VIRTCHNL_OP_VERSION) {
100 		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
101 			op);
102 		err = -EIO;
103 		goto out_alloc;
104 	}
105 
106 	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
107 	adapter->pf_version = *pf_vvi;
108 
109 	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
110 	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
111 	     (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
112 		err = -EIO;
113 
114 out_alloc:
115 	kfree(event.msg_buf);
116 out:
117 	return err;
118 }
119 
120 /**
121  * iavf_send_vf_config_msg
122  * @adapter: adapter structure
123  *
124  * Send VF configuration request admin queue message to the PF. The reply
125  * is not checked in this function. Returns 0 if the message was
126  * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
127  **/
128 int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
129 {
130 	u32 caps;
131 
132 	caps = VIRTCHNL_VF_OFFLOAD_L2 |
133 	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
134 	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
135 	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
136 	       VIRTCHNL_VF_OFFLOAD_VLAN |
137 	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
138 	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
139 	       VIRTCHNL_VF_OFFLOAD_ENCAP |
140 	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
141 	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
142 	       VIRTCHNL_VF_OFFLOAD_ADQ |
143 	       VIRTCHNL_VF_OFFLOAD_USO |
144 	       VIRTCHNL_VF_OFFLOAD_FDIR_PF |
145 	       VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
146 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
147 
148 	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
149 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
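	/* a PF that only speaks virtchnl 1.0 does not take capability flags with this request */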
150 	if (PF_IS_V11(adapter))
151 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
152 					(u8 *)&caps, sizeof(caps));
153 	else
154 		return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
155 					NULL, 0);
156 }
157 
158 /**
159  * iavf_validate_num_queues
160  * @adapter: adapter structure
161  *
162  * Validate that the number of queues the PF has sent in
163  * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
164  **/
165 static void iavf_validate_num_queues(struct iavf_adapter *adapter)
166 {
167 	if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
168 		struct virtchnl_vsi_resource *vsi_res;
169 		int i;
170 
171 		dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
172 			 adapter->vf_res->num_queue_pairs,
173 			 IAVF_MAX_REQ_QUEUES);
174 		dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
175 			 IAVF_MAX_REQ_QUEUES);
176 		adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
177 		for (i = 0; i < adapter->vf_res->num_vsis; i++) {
178 			vsi_res = &adapter->vf_res->vsi_res[i];
179 			vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
180 		}
181 	}
182 }
183 
184 /**
185  * iavf_get_vf_config
186  * @adapter: private adapter structure
187  *
188  * Get VF configuration from PF and populate hw structure. Must be called after
189  * admin queue is initialized. Busy waits until response is received from PF,
190  * with maximum timeout. Response from PF is returned in the buffer for further
191  * processing by the caller.
192  **/
193 int iavf_get_vf_config(struct iavf_adapter *adapter)
194 {
195 	struct iavf_hw *hw = &adapter->hw;
196 	struct iavf_arq_event_info event;
197 	enum virtchnl_ops op;
198 	enum iavf_status err;
199 	u16 len;
200 
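	/* size the reply buffer for the worst case: resource header plus IAVF_MAX_VF_VSI VSI entries */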
201 	len =  sizeof(struct virtchnl_vf_resource) +
202 		IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
203 	event.buf_len = len;
204 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
205 	if (!event.msg_buf) {
206 		err = -ENOMEM;
207 		goto out;
208 	}
209 
210 	while (1) {
211 		/* When the AQ is empty, iavf_clean_arq_element will return
212 		 * nonzero and this loop will terminate.
213 		 */
214 		err = iavf_clean_arq_element(hw, &event, NULL);
215 		if (err)
216 			goto out_alloc;
217 		op =
218 		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
219 		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
220 			break;
221 	}
222 
223 	err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
224 	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
225 
226 	/* some PFs send more queues than we should have so validate that
227 	 * we aren't getting too many queues
228 	 */
229 	if (!err)
230 		iavf_validate_num_queues(adapter);
231 	iavf_vf_parse_hw_config(hw, adapter->vf_res);
232 out_alloc:
233 	kfree(event.msg_buf);
234 out:
235 	return err;
236 }
237 
238 /**
239  * iavf_configure_queues
240  * @adapter: adapter structure
241  *
242  * Request that the PF set up our (previously allocated) queues.
243  **/
244 void iavf_configure_queues(struct iavf_adapter *adapter)
245 {
246 	struct virtchnl_vsi_queue_config_info *vqci;
247 	int i, max_frame = adapter->vf_res->max_mtu;
248 	int pairs = adapter->num_active_queues;
249 	struct virtchnl_queue_pair_info *vqpi;
250 	size_t len;
251 
252 	if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
253 		max_frame = IAVF_MAX_RXBUFFER;
254 
255 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
256 		/* bail because we already have a command pending */
257 		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
258 			adapter->current_op);
259 		return;
260 	}
261 	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
262 	len = struct_size(vqci, qpair, pairs);
263 	vqci = kzalloc(len, GFP_KERNEL);
264 	if (!vqci)
265 		return;
266 
267 	/* Limit maximum frame size when jumbo frames are not enabled */
268 	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
269 	    (adapter->netdev->mtu <= ETH_DATA_LEN))
270 		max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
271 
272 	vqci->vsi_id = adapter->vsi_res->vsi_id;
273 	vqci->num_queue_pairs = pairs;
274 	vqpi = vqci->qpair;
275 	/* Size check is not needed here - HW max is 16 queue pairs, and we
276 	 * can fit info for 31 of them into the AQ buffer before it overflows.
277 	 */
278 	for (i = 0; i < pairs; i++) {
279 		vqpi->txq.vsi_id = vqci->vsi_id;
280 		vqpi->txq.queue_id = i;
281 		vqpi->txq.ring_len = adapter->tx_rings[i].count;
282 		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
283 		vqpi->rxq.vsi_id = vqci->vsi_id;
284 		vqpi->rxq.queue_id = i;
285 		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
286 		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
287 		vqpi->rxq.max_pkt_size = max_frame;
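		/* the Rx data buffer size must be a multiple of 1 << IAVF_RXQ_CTX_DBUFF_SHIFT */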
288 		vqpi->rxq.databuffer_size =
289 			ALIGN(adapter->rx_rings[i].rx_buf_len,
290 			      BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
291 		vqpi++;
292 	}
293 
294 	adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
295 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
296 			 (u8 *)vqci, len);
297 	kfree(vqci);
298 }
299 
300 /**
301  * iavf_enable_queues
302  * @adapter: adapter structure
303  *
304  * Request that the PF enable all of our queues.
305  **/
306 void iavf_enable_queues(struct iavf_adapter *adapter)
307 {
308 	struct virtchnl_queue_select vqs;
309 
310 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
311 		/* bail because we already have a command pending */
312 		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
313 			adapter->current_op);
314 		return;
315 	}
316 	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
317 	vqs.vsi_id = adapter->vsi_res->vsi_id;
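	/* build a bitmap selecting queues 0 through (num_active_queues - 1) */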
318 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
319 	vqs.rx_queues = vqs.tx_queues;
320 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
321 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
322 			 (u8 *)&vqs, sizeof(vqs));
323 }
324 
325 /**
326  * iavf_disable_queues
327  * @adapter: adapter structure
328  *
329  * Request that the PF disable all of our queues.
330  **/
331 void iavf_disable_queues(struct iavf_adapter *adapter)
332 {
333 	struct virtchnl_queue_select vqs;
334 
335 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
336 		/* bail because we already have a command pending */
337 		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
338 			adapter->current_op);
339 		return;
340 	}
341 	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
342 	vqs.vsi_id = adapter->vsi_res->vsi_id;
343 	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
344 	vqs.rx_queues = vqs.tx_queues;
345 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
346 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
347 			 (u8 *)&vqs, sizeof(vqs));
348 }
349 
350 /**
351  * iavf_map_queues
352  * @adapter: adapter structure
353  *
354  * Request that the PF map queues to interrupt vectors. Misc causes, including
355  * admin queue, are always mapped to vector 0.
356  **/
357 void iavf_map_queues(struct iavf_adapter *adapter)
358 {
359 	struct virtchnl_irq_map_info *vimi;
360 	struct virtchnl_vector_map *vecmap;
361 	struct iavf_q_vector *q_vector;
362 	int v_idx, q_vectors;
363 	size_t len;
364 
365 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
366 		/* bail because we already have a command pending */
367 		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
368 			adapter->current_op);
369 		return;
370 	}
371 	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
372 
373 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
374 
375 	len = struct_size(vimi, vecmap, adapter->num_msix_vectors);
376 	vimi = kzalloc(len, GFP_KERNEL);
377 	if (!vimi)
378 		return;
379 
380 	vimi->num_vectors = adapter->num_msix_vectors;
381 	/* Queue vectors first */
382 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
383 		q_vector = &adapter->q_vectors[v_idx];
384 		vecmap = &vimi->vecmap[v_idx];
385 
386 		vecmap->vsi_id = adapter->vsi_res->vsi_id;
387 		vecmap->vector_id = v_idx + NONQ_VECS;
388 		vecmap->txq_map = q_vector->ring_mask;
389 		vecmap->rxq_map = q_vector->ring_mask;
390 		vecmap->rxitr_idx = IAVF_RX_ITR;
391 		vecmap->txitr_idx = IAVF_TX_ITR;
392 	}
393 	/* Misc vector last - this is only for AdminQ messages */
394 	vecmap = &vimi->vecmap[v_idx];
395 	vecmap->vsi_id = adapter->vsi_res->vsi_id;
396 	vecmap->vector_id = 0;
397 	vecmap->txq_map = 0;
398 	vecmap->rxq_map = 0;
399 
400 	adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
401 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
402 			 (u8 *)vimi, len);
403 	kfree(vimi);
404 }
405 
406 /**
407  * iavf_add_ether_addrs
408  * @adapter: adapter structure
409  *
410  * Request that the PF add one or more addresses to our filters.
411  **/
412 void iavf_add_ether_addrs(struct iavf_adapter *adapter)
413 {
414 	struct virtchnl_ether_addr_list *veal;
415 	struct iavf_mac_filter *f;
416 	int i = 0, count = 0;
417 	bool more = false;
418 	size_t len;
419 
420 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
421 		/* bail because we already have a command pending */
422 		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
423 			adapter->current_op);
424 		return;
425 	}
426 
427 	spin_lock_bh(&adapter->mac_vlan_list_lock);
428 
429 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
430 		if (f->add)
431 			count++;
432 	}
433 	if (!count) {
434 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
435 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
436 		return;
437 	}
438 	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
439 
440 	len = struct_size(veal, list, count);
441 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
442 		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
443 		count = (IAVF_MAX_AQ_BUF_SIZE -
444 			 sizeof(struct virtchnl_ether_addr_list)) /
445 			sizeof(struct virtchnl_ether_addr);
446 		len = struct_size(veal, list, count);
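		/* not everything fits in one AQ buffer; keep the AQ flag set so the rest go in a later request */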
447 		more = true;
448 	}
449 
450 	veal = kzalloc(len, GFP_ATOMIC);
451 	if (!veal) {
452 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
453 		return;
454 	}
455 
456 	veal->vsi_id = adapter->vsi_res->vsi_id;
457 	veal->num_elements = count;
458 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
459 		if (f->add) {
460 			ether_addr_copy(veal->list[i].addr, f->macaddr);
461 			i++;
462 			f->add = false;
463 			if (i == count)
464 				break;
465 		}
466 	}
467 	if (!more)
468 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
469 
470 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
471 
472 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
473 	kfree(veal);
474 }
475 
476 /**
477  * iavf_del_ether_addrs
478  * @adapter: adapter structure
479  *
480  * Request that the PF remove one or more addresses from our filters.
481  **/
482 void iavf_del_ether_addrs(struct iavf_adapter *adapter)
483 {
484 	struct virtchnl_ether_addr_list *veal;
485 	struct iavf_mac_filter *f, *ftmp;
486 	int i = 0, count = 0;
487 	bool more = false;
488 	size_t len;
489 
490 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
491 		/* bail because we already have a command pending */
492 		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
493 			adapter->current_op);
494 		return;
495 	}
496 
497 	spin_lock_bh(&adapter->mac_vlan_list_lock);
498 
499 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
500 		if (f->remove)
501 			count++;
502 	}
503 	if (!count) {
504 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
505 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
506 		return;
507 	}
508 	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
509 
510 	len = struct_size(veal, list, count);
511 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
512 		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
513 		count = (IAVF_MAX_AQ_BUF_SIZE -
514 			 sizeof(struct virtchnl_ether_addr_list)) /
515 			sizeof(struct virtchnl_ether_addr);
516 		len = struct_size(veal, list, count);
517 		more = true;
518 	}
519 	veal = kzalloc(len, GFP_ATOMIC);
520 	if (!veal) {
521 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
522 		return;
523 	}
524 
525 	veal->vsi_id = adapter->vsi_res->vsi_id;
526 	veal->num_elements = count;
527 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
528 		if (f->remove) {
529 			ether_addr_copy(veal->list[i].addr, f->macaddr);
530 			i++;
531 			list_del(&f->list);
532 			kfree(f);
533 			if (i == count)
534 				break;
535 		}
536 	}
537 	if (!more)
538 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
539 
540 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
541 
542 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
543 	kfree(veal);
544 }
545 
546 /**
547  * iavf_mac_add_ok
548  * @adapter: adapter structure
549  *
550  * Submit list of filters based on PF response.
551  **/
552 static void iavf_mac_add_ok(struct iavf_adapter *adapter)
553 {
554 	struct iavf_mac_filter *f, *ftmp;
555 
556 	spin_lock_bh(&adapter->mac_vlan_list_lock);
557 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
558 		f->is_new_mac = false;
559 	}
560 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
561 }
562 
563 /**
564  * iavf_mac_add_reject
565  * @adapter: adapter structure
566  *
567  * Remove filters from list based on PF response.
568  **/
569 static void iavf_mac_add_reject(struct iavf_adapter *adapter)
570 {
571 	struct net_device *netdev = adapter->netdev;
572 	struct iavf_mac_filter *f, *ftmp;
573 
574 	spin_lock_bh(&adapter->mac_vlan_list_lock);
575 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
576 		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
577 			f->remove = false;
578 
579 		if (f->is_new_mac) {
580 			list_del(&f->list);
581 			kfree(f);
582 		}
583 	}
584 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
585 }
586 
587 /**
588  * iavf_add_vlans
589  * @adapter: adapter structure
590  *
591  * Request that the PF add one or more VLAN filters to our VSI.
592  **/
593 void iavf_add_vlans(struct iavf_adapter *adapter)
594 {
595 	struct virtchnl_vlan_filter_list *vvfl;
596 	int len, i = 0, count = 0;
597 	struct iavf_vlan_filter *f;
598 	bool more = false;
599 
600 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
601 		/* bail because we already have a command pending */
602 		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
603 			adapter->current_op);
604 		return;
605 	}
606 
607 	spin_lock_bh(&adapter->mac_vlan_list_lock);
608 
609 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
610 		if (f->add)
611 			count++;
612 	}
613 	if (!count || !VLAN_ALLOWED(adapter)) {
614 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
615 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
616 		return;
617 	}
618 	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
619 
620 	len = sizeof(struct virtchnl_vlan_filter_list) +
621 	      (count * sizeof(u16));
622 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
623 		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
624 		count = (IAVF_MAX_AQ_BUF_SIZE -
625 			 sizeof(struct virtchnl_vlan_filter_list)) /
626 			sizeof(u16);
627 		len = sizeof(struct virtchnl_vlan_filter_list) +
628 		      (count * sizeof(u16));
629 		more = true;
630 	}
631 	vvfl = kzalloc(len, GFP_ATOMIC);
632 	if (!vvfl) {
633 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
634 		return;
635 	}
636 
637 	vvfl->vsi_id = adapter->vsi_res->vsi_id;
638 	vvfl->num_elements = count;
639 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
640 		if (f->add) {
641 			vvfl->vlan_id[i] = f->vlan;
642 			i++;
643 			f->add = false;
644 			if (i == count)
645 				break;
646 		}
647 	}
648 	if (!more)
649 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
650 
651 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
652 
653 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
654 	kfree(vvfl);
655 }
656 
657 /**
658  * iavf_del_vlans
659  * @adapter: adapter structure
660  *
661  * Request that the PF remove one or more VLAN filters from our VSI.
662  **/
663 void iavf_del_vlans(struct iavf_adapter *adapter)
664 {
665 	struct virtchnl_vlan_filter_list *vvfl;
666 	struct iavf_vlan_filter *f, *ftmp;
667 	int len, i = 0, count = 0;
668 	bool more = false;
669 
670 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
671 		/* bail because we already have a command pending */
672 		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
673 			adapter->current_op);
674 		return;
675 	}
676 
677 	spin_lock_bh(&adapter->mac_vlan_list_lock);
678 
679 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
680 		/* since VLAN capabilities are not allowed, we don't want to send
681 		 * a VLAN delete request because it will most likely fail and
682 		 * create unnecessary errors/noise, so just free the VLAN
683 		 * filters marked for removal to enable bailing out before
684 		 * sending a virtchnl message
685 		 */
686 		if (f->remove && !VLAN_ALLOWED(adapter)) {
687 			list_del(&f->list);
688 			kfree(f);
689 		} else if (f->remove) {
690 			count++;
691 		}
692 	}
693 	if (!count) {
694 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
695 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
696 		return;
697 	}
698 	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
699 
700 	len = sizeof(struct virtchnl_vlan_filter_list) +
701 	      (count * sizeof(u16));
702 	if (len > IAVF_MAX_AQ_BUF_SIZE) {
703 		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
704 		count = (IAVF_MAX_AQ_BUF_SIZE -
705 			 sizeof(struct virtchnl_vlan_filter_list)) /
706 			sizeof(u16);
707 		len = sizeof(struct virtchnl_vlan_filter_list) +
708 		      (count * sizeof(u16));
709 		more = true;
710 	}
711 	vvfl = kzalloc(len, GFP_ATOMIC);
712 	if (!vvfl) {
713 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
714 		return;
715 	}
716 
717 	vvfl->vsi_id = adapter->vsi_res->vsi_id;
718 	vvfl->num_elements = count;
719 	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
720 		if (f->remove) {
721 			vvfl->vlan_id[i] = f->vlan;
722 			i++;
723 			list_del(&f->list);
724 			kfree(f);
725 			if (i == count)
726 				break;
727 		}
728 	}
729 	if (!more)
730 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
731 
732 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
733 
734 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
735 	kfree(vvfl);
736 }
737 
738 /**
739  * iavf_set_promiscuous
740  * @adapter: adapter structure
741  * @flags: bitmask to control unicast/multicast promiscuous.
742  *
743  * Request that the PF enable promiscuous mode for our VSI.
744  **/
745 void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
746 {
747 	struct virtchnl_promisc_info vpi;
748 	int promisc_all;
749 
750 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
751 		/* bail because we already have a command pending */
752 		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
753 			adapter->current_op);
754 		return;
755 	}
756 
757 	promisc_all = FLAG_VF_UNICAST_PROMISC |
758 		      FLAG_VF_MULTICAST_PROMISC;
759 	if ((flags & promisc_all) == promisc_all) {
760 		adapter->flags |= IAVF_FLAG_PROMISC_ON;
761 		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
762 		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
763 	}
764 
765 	if (flags & FLAG_VF_MULTICAST_PROMISC) {
766 		adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
767 		adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
768 		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
769 	}
770 
771 	if (!flags) {
772 		adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
773 				    IAVF_FLAG_ALLMULTI_ON);
774 		adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
775 					  IAVF_FLAG_AQ_RELEASE_ALLMULTI);
776 		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
777 	}
778 
779 	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
780 	vpi.vsi_id = adapter->vsi_res->vsi_id;
781 	vpi.flags = flags;
782 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
783 			 (u8 *)&vpi, sizeof(vpi));
784 }
785 
786 /**
787  * iavf_request_stats
788  * @adapter: adapter structure
789  *
790  * Request VSI statistics from PF.
791  **/
792 void iavf_request_stats(struct iavf_adapter *adapter)
793 {
794 	struct virtchnl_queue_select vqs;
795 
796 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
797 		/* no error message, this isn't crucial */
798 		return;
799 	}
800 
801 	adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
802 	adapter->current_op = VIRTCHNL_OP_GET_STATS;
803 	vqs.vsi_id = adapter->vsi_res->vsi_id;
804 	/* queue maps are ignored for this message - only the vsi is used */
805 	if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
806 			     sizeof(vqs)))
807 		/* if the request failed, don't lock out others */
808 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
809 }
810 
811 /**
812  * iavf_get_hena
813  * @adapter: adapter structure
814  *
815  * Request hash enable capabilities from PF
816  **/
817 void iavf_get_hena(struct iavf_adapter *adapter)
818 {
819 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
820 		/* bail because we already have a command pending */
821 		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
822 			adapter->current_op);
823 		return;
824 	}
825 	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
826 	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
827 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, 0);
828 }
829 
830 /**
831  * iavf_set_hena
832  * @adapter: adapter structure
833  *
834  * Request the PF to set our RSS hash capabilities
835  **/
836 void iavf_set_hena(struct iavf_adapter *adapter)
837 {
838 	struct virtchnl_rss_hena vrh;
839 
840 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
841 		/* bail because we already have a command pending */
842 		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
843 			adapter->current_op);
844 		return;
845 	}
846 	vrh.hena = adapter->hena;
847 	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
848 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
849 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&vrh,
850 			 sizeof(vrh));
851 }
852 
853 /**
854  * iavf_set_rss_key
855  * @adapter: adapter structure
856  *
857  * Request the PF to set our RSS hash key
858  **/
859 void iavf_set_rss_key(struct iavf_adapter *adapter)
860 {
861 	struct virtchnl_rss_key *vrk;
862 	int len;
863 
864 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
865 		/* bail because we already have a command pending */
866 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
867 			adapter->current_op);
868 		return;
869 	}
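	/* sizeof(struct virtchnl_rss_key) already counts one key byte, hence the -1 */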
870 	len = sizeof(struct virtchnl_rss_key) +
871 	      (adapter->rss_key_size * sizeof(u8)) - 1;
872 	vrk = kzalloc(len, GFP_KERNEL);
873 	if (!vrk)
874 		return;
875 	vrk->vsi_id = adapter->vsi.id;
876 	vrk->key_len = adapter->rss_key_size;
877 	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
878 
879 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
880 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
881 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)vrk, len);
882 	kfree(vrk);
883 }
884 
885 /**
886  * iavf_set_rss_lut
887  * @adapter: adapter structure
888  *
889  * Request the PF to set our RSS lookup table
890  **/
891 void iavf_set_rss_lut(struct iavf_adapter *adapter)
892 {
893 	struct virtchnl_rss_lut *vrl;
894 	int len;
895 
896 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
897 		/* bail because we already have a command pending */
898 		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
899 			adapter->current_op);
900 		return;
901 	}
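	/* likewise, the one-byte lut[] placeholder in the struct accounts for the -1 */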
902 	len = sizeof(struct virtchnl_rss_lut) +
903 	      (adapter->rss_lut_size * sizeof(u8)) - 1;
904 	vrl = kzalloc(len, GFP_KERNEL);
905 	if (!vrl)
906 		return;
907 	vrl->vsi_id = adapter->vsi.id;
908 	vrl->lut_entries = adapter->rss_lut_size;
909 	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
910 	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
911 	adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
912 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)vrl, len);
913 	kfree(vrl);
914 }
915 
916 /**
917  * iavf_enable_vlan_stripping
918  * @adapter: adapter structure
919  *
920  * Request VLAN header stripping to be enabled
921  **/
922 void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
923 {
924 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
925 		/* bail because we already have a command pending */
926 		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
927 			adapter->current_op);
928 		return;
929 	}
930 	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
931 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
932 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, 0);
933 }
934 
935 /**
936  * iavf_disable_vlan_stripping
937  * @adapter: adapter structure
938  *
939  * Request VLAN header stripping to be disabled
940  **/
941 void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
942 {
943 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
944 		/* bail because we already have a command pending */
945 		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
946 			adapter->current_op);
947 		return;
948 	}
949 	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
950 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
951 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
952 }
953 
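/* room for the longest speed string, "Unknown Mbps", plus the terminating NUL */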
954 #define IAVF_MAX_SPEED_STRLEN	13
955 
956 /**
957  * iavf_print_link_message - print link up or down
958  * @adapter: adapter structure
959  *
960  * Log a message telling the world of our wondrous link status
961  */
962 static void iavf_print_link_message(struct iavf_adapter *adapter)
963 {
964 	struct net_device *netdev = adapter->netdev;
965 	int link_speed_mbps;
966 	char *speed;
967 
968 	if (!adapter->link_up) {
969 		netdev_info(netdev, "NIC Link is Down\n");
970 		return;
971 	}
972 
973 	speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
974 	if (!speed)
975 		return;
976 
977 	if (ADV_LINK_SUPPORT(adapter)) {
978 		link_speed_mbps = adapter->link_speed_mbps;
979 		goto print_link_msg;
980 	}
981 
982 	switch (adapter->link_speed) {
983 	case VIRTCHNL_LINK_SPEED_40GB:
984 		link_speed_mbps = SPEED_40000;
985 		break;
986 	case VIRTCHNL_LINK_SPEED_25GB:
987 		link_speed_mbps = SPEED_25000;
988 		break;
989 	case VIRTCHNL_LINK_SPEED_20GB:
990 		link_speed_mbps = SPEED_20000;
991 		break;
992 	case VIRTCHNL_LINK_SPEED_10GB:
993 		link_speed_mbps = SPEED_10000;
994 		break;
995 	case VIRTCHNL_LINK_SPEED_5GB:
996 		link_speed_mbps = SPEED_5000;
997 		break;
998 	case VIRTCHNL_LINK_SPEED_2_5GB:
999 		link_speed_mbps = SPEED_2500;
1000 		break;
1001 	case VIRTCHNL_LINK_SPEED_1GB:
1002 		link_speed_mbps = SPEED_1000;
1003 		break;
1004 	case VIRTCHNL_LINK_SPEED_100MB:
1005 		link_speed_mbps = SPEED_100;
1006 		break;
1007 	default:
1008 		link_speed_mbps = SPEED_UNKNOWN;
1009 		break;
1010 	}
1011 
1012 print_link_msg:
1013 	if (link_speed_mbps > SPEED_1000) {
1014 		if (link_speed_mbps == SPEED_2500)
1015 			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
1016 		else
1017 			/* convert to Gbps inline */
1018 			snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
1019 				 link_speed_mbps / 1000, "Gbps");
1020 	} else if (link_speed_mbps == SPEED_UNKNOWN) {
1021 		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
1022 	} else {
1023 		snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s",
1024 			 link_speed_mbps, "Mbps");
1025 	}
1026 
1027 	netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
1028 	kfree(speed);
1029 }
1030 
1031 /**
1032  * iavf_get_vpe_link_status
1033  * @adapter: adapter structure
1034  * @vpe: virtchnl_pf_event structure
1035  *
1036  * Helper function for determining the link status
1037  **/
1038 static bool
1039 iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1040 			 struct virtchnl_pf_event *vpe)
1041 {
1042 	if (ADV_LINK_SUPPORT(adapter))
1043 		return vpe->event_data.link_event_adv.link_status;
1044 	else
1045 		return vpe->event_data.link_event.link_status;
1046 }
1047 
1048 /**
1049  * iavf_set_adapter_link_speed_from_vpe
1050  * @adapter: adapter structure for which we are setting the link speed
1051  * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
1052  *
1053  * Helper function for setting iavf_adapter link speed
1054  **/
1055 static void
1056 iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1057 				     struct virtchnl_pf_event *vpe)
1058 {
1059 	if (ADV_LINK_SUPPORT(adapter))
1060 		adapter->link_speed_mbps =
1061 			vpe->event_data.link_event_adv.link_speed;
1062 	else
1063 		adapter->link_speed = vpe->event_data.link_event.link_speed;
1064 }
1065 
1066 /**
1067  * iavf_enable_channels
1068  * @adapter: adapter structure
1069  *
1070  * Request that the PF enable channels as specified by
1071  * the user via tc tool.
1072  **/
1073 void iavf_enable_channels(struct iavf_adapter *adapter)
1074 {
1075 	struct virtchnl_tc_info *vti = NULL;
1076 	size_t len;
1077 	int i;
1078 
1079 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1080 		/* bail because we already have a command pending */
1081 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1082 			adapter->current_op);
1083 		return;
1084 	}
1085 
1086 	len = struct_size(vti, list, adapter->num_tc - 1);
1087 	vti = kzalloc(len, GFP_KERNEL);
1088 	if (!vti)
1089 		return;
1090 	vti->num_tc = adapter->num_tc;
1091 	for (i = 0; i < vti->num_tc; i++) {
1092 		vti->list[i].count = adapter->ch_config.ch_info[i].count;
1093 		vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1094 		vti->list[i].pad = 0;
1095 		vti->list[i].max_tx_rate =
1096 				adapter->ch_config.ch_info[i].max_tx_rate;
1097 	}
1098 
1099 	adapter->ch_config.state = __IAVF_TC_RUNNING;
1100 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1101 	adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1102 	adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1103 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS, (u8 *)vti, len);
1104 	kfree(vti);
1105 }
1106 
1107 /**
1108  * iavf_disable_channels
1109  * @adapter: adapter structure
1110  *
1111  * Request that the PF disable channels that are configured
1112  **/
1113 void iavf_disable_channels(struct iavf_adapter *adapter)
1114 {
1115 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1116 		/* bail because we already have a command pending */
1117 		dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1118 			adapter->current_op);
1119 		return;
1120 	}
1121 
1122 	adapter->ch_config.state = __IAVF_TC_INVALID;
1123 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1124 	adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1125 	adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1126 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS, NULL, 0);
1127 }
1128 
1129 /**
1130  * iavf_print_cloud_filter
1131  * @adapter: adapter structure
1132  * @f: cloud filter to print
1133  *
1134  * Print the cloud filter
1135  **/
1136 static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
1137 				    struct virtchnl_filter *f)
1138 {
1139 	switch (f->flow_type) {
1140 	case VIRTCHNL_TCP_V4_FLOW:
1141 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
1142 			 &f->data.tcp_spec.dst_mac,
1143 			 &f->data.tcp_spec.src_mac,
1144 			 ntohs(f->data.tcp_spec.vlan_id),
1145 			 &f->data.tcp_spec.dst_ip[0],
1146 			 &f->data.tcp_spec.src_ip[0],
1147 			 ntohs(f->data.tcp_spec.dst_port),
1148 			 ntohs(f->data.tcp_spec.src_port));
1149 		break;
1150 	case VIRTCHNL_TCP_V6_FLOW:
1151 		dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
1152 			 &f->data.tcp_spec.dst_mac,
1153 			 &f->data.tcp_spec.src_mac,
1154 			 ntohs(f->data.tcp_spec.vlan_id),
1155 			 &f->data.tcp_spec.dst_ip,
1156 			 &f->data.tcp_spec.src_ip,
1157 			 ntohs(f->data.tcp_spec.dst_port),
1158 			 ntohs(f->data.tcp_spec.src_port));
1159 		break;
1160 	}
1161 }
1162 
1163 /**
1164  * iavf_add_cloud_filter
1165  * @adapter: adapter structure
1166  *
1167  * Request that the PF add cloud filters as specified
1168  * by the user via tc tool.
1169  **/
1170 void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1171 {
1172 	struct iavf_cloud_filter *cf;
1173 	struct virtchnl_filter *f;
1174 	int len = 0, count = 0;
1175 
1176 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1177 		/* bail because we already have a command pending */
1178 		dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1179 			adapter->current_op);
1180 		return;
1181 	}
1182 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1183 		if (cf->add) {
1184 			count++;
1185 			break;
1186 		}
1187 	}
1188 	if (!count) {
1189 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1190 		return;
1191 	}
1192 	adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1193 
1194 	len = sizeof(struct virtchnl_filter);
1195 	f = kzalloc(len, GFP_KERNEL);
1196 	if (!f)
1197 		return;
1198 
1199 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1200 		if (cf->add) {
1201 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1202 			cf->add = false;
1203 			cf->state = __IAVF_CF_ADD_PENDING;
1204 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_CLOUD_FILTER,
1205 					 (u8 *)f, len);
1206 		}
1207 	}
1208 	kfree(f);
1209 }
1210 
1211 /**
1212  * iavf_del_cloud_filter
1213  * @adapter: adapter structure
1214  *
1215  * Request that the PF delete cloud filters as specified
1216  * by the user via tc tool.
1217  **/
1218 void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1219 {
1220 	struct iavf_cloud_filter *cf, *cftmp;
1221 	struct virtchnl_filter *f;
1222 	int len = 0, count = 0;
1223 
1224 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1225 		/* bail because we already have a command pending */
1226 		dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1227 			adapter->current_op);
1228 		return;
1229 	}
1230 	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1231 		if (cf->del) {
1232 			count++;
1233 			break;
1234 		}
1235 	}
1236 	if (!count) {
1237 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1238 		return;
1239 	}
1240 	adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1241 
1242 	len = sizeof(struct virtchnl_filter);
1243 	f = kzalloc(len, GFP_KERNEL);
1244 	if (!f)
1245 		return;
1246 
1247 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1248 		if (cf->del) {
1249 			memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1250 			cf->del = false;
1251 			cf->state = __IAVF_CF_DEL_PENDING;
1252 			iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_CLOUD_FILTER,
1253 					 (u8 *)f, len);
1254 		}
1255 	}
1256 	kfree(f);
1257 }
1258 
1259 /**
1260  * iavf_add_fdir_filter
1261  * @adapter: the VF adapter structure
1262  *
1263  * Request that the PF add Flow Director filters as specified
1264  * by the user via ethtool.
1265  **/
1266 void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1267 {
1268 	struct iavf_fdir_fltr *fdir;
1269 	struct virtchnl_fdir_add *f;
1270 	bool process_fltr = false;
1271 	int len;
1272 
1273 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1274 		/* bail because we already have a command pending */
1275 		dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1276 			adapter->current_op);
1277 		return;
1278 	}
1279 
1280 	len = sizeof(struct virtchnl_fdir_add);
1281 	f = kzalloc(len, GFP_KERNEL);
1282 	if (!f)
1283 		return;
1284 
1285 	spin_lock_bh(&adapter->fdir_fltr_lock);
1286 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1287 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1288 			process_fltr = true;
1289 			fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
1290 			memcpy(f, &fdir->vc_add_msg, len);
1291 			break;
1292 		}
1293 	}
1294 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1295 
1296 	if (!process_fltr) {
1297 		/* prevent iavf_add_fdir_filter() from being called when there
1298 		 * are no filters to add
1299 		 */
1300 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1301 		kfree(f);
1302 		return;
1303 	}
1304 	adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
1305 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
1306 	kfree(f);
1307 }
1308 
1309 /**
1310  * iavf_del_fdir_filter
1311  * @adapter: the VF adapter structure
1312  *
1313  * Request that the PF delete Flow Director filters as specified
1314  * by the user via ethtool.
1315  **/
1316 void iavf_del_fdir_filter(struct iavf_adapter *adapter)
1317 {
1318 	struct iavf_fdir_fltr *fdir;
1319 	struct virtchnl_fdir_del f;
1320 	bool process_fltr = false;
1321 	int len;
1322 
1323 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1324 		/* bail because we already have a command pending */
1325 		dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
1326 			adapter->current_op);
1327 		return;
1328 	}
1329 
1330 	len = sizeof(struct virtchnl_fdir_del);
1331 
1332 	spin_lock_bh(&adapter->fdir_fltr_lock);
1333 	list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1334 		if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
1335 			process_fltr = true;
1336 			memset(&f, 0, len);
1337 			f.vsi_id = fdir->vc_add_msg.vsi_id;
1338 			f.flow_id = fdir->flow_id;
1339 			fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
1340 			break;
1341 		}
1342 	}
1343 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1344 
1345 	if (!process_fltr) {
1346 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1347 		return;
1348 	}
1349 
1350 	adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
1351 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
1352 }
1353 
1354 /**
1355  * iavf_add_adv_rss_cfg
1356  * @adapter: the VF adapter structure
1357  *
1358  * Request that the PF add RSS configuration as specified
1359  * by the user via ethtool.
1360  **/
1361 void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
1362 {
1363 	struct virtchnl_rss_cfg *rss_cfg;
1364 	struct iavf_adv_rss *rss;
1365 	bool process_rss = false;
1366 	int len;
1367 
1368 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1369 		/* bail because we already have a command pending */
1370 		dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
1371 			adapter->current_op);
1372 		return;
1373 	}
1374 
1375 	len = sizeof(struct virtchnl_rss_cfg);
1376 	rss_cfg = kzalloc(len, GFP_KERNEL);
1377 	if (!rss_cfg)
1378 		return;
1379 
1380 	spin_lock_bh(&adapter->adv_rss_lock);
1381 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1382 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1383 			process_rss = true;
1384 			rss->state = IAVF_ADV_RSS_ADD_PENDING;
1385 			memcpy(rss_cfg, &rss->cfg_msg, len);
1386 			iavf_print_adv_rss_cfg(adapter, rss,
1387 					       "Input set change for",
1388 					       "is pending");
1389 			break;
1390 		}
1391 	}
1392 	spin_unlock_bh(&adapter->adv_rss_lock);
1393 
1394 	if (process_rss) {
1395 		adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
1396 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
1397 				 (u8 *)rss_cfg, len);
1398 	} else {
1399 		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1400 	}
1401 
1402 	kfree(rss_cfg);
1403 }
1404 
1405 /**
1406  * iavf_del_adv_rss_cfg
1407  * @adapter: the VF adapter structure
1408  *
1409  * Request that the PF delete RSS configuration as specified
1410  * by the user via ethtool.
1411  **/
1412 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
1413 {
1414 	struct virtchnl_rss_cfg *rss_cfg;
1415 	struct iavf_adv_rss *rss;
1416 	bool process_rss = false;
1417 	int len;
1418 
1419 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1420 		/* bail because we already have a command pending */
1421 		dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
1422 			adapter->current_op);
1423 		return;
1424 	}
1425 
1426 	len = sizeof(struct virtchnl_rss_cfg);
1427 	rss_cfg = kzalloc(len, GFP_KERNEL);
1428 	if (!rss_cfg)
1429 		return;
1430 
1431 	spin_lock_bh(&adapter->adv_rss_lock);
1432 	list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1433 		if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
1434 			process_rss = true;
1435 			rss->state = IAVF_ADV_RSS_DEL_PENDING;
1436 			memcpy(rss_cfg, &rss->cfg_msg, len);
1437 			break;
1438 		}
1439 	}
1440 	spin_unlock_bh(&adapter->adv_rss_lock);
1441 
1442 	if (process_rss) {
1443 		adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
1444 		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
1445 				 (u8 *)rss_cfg, len);
1446 	} else {
1447 		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1448 	}
1449 
1450 	kfree(rss_cfg);
1451 }
1452 
1453 /**
1454  * iavf_request_reset
1455  * @adapter: adapter structure
1456  *
1457  * Request that the PF reset this VF. No response is expected.
1458  **/
1459 void iavf_request_reset(struct iavf_adapter *adapter)
1460 {
1461 	/* Don't check CURRENT_OP - this is always higher priority */
1462 	iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
1463 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1464 }
1465 
1466 /**
1467  * iavf_netdev_features_vlan_strip_set - update vlan strip status
1468  * @netdev: ptr to netdev being adjusted
1469  * @enable: enable or disable vlan strip
1470  *
1471  * Helper function to change vlan strip status in netdev->features.
1472  */
1473 static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
1474 						const bool enable)
1475 {
1476 	if (enable)
1477 		netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1478 	else
1479 		netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
1480 }
1481 
1482 /**
1483  * iavf_virtchnl_completion
1484  * @adapter: adapter structure
1485  * @v_opcode: opcode sent by PF
1486  * @v_retval: retval sent by PF
1487  * @msg: message sent by PF
1488  * @msglen: message length
1489  *
1490  * Asynchronous completion function for admin queue messages. Rather than busy
1491  * wait, we fire off our requests and assume that no errors will be returned.
1492  * This function handles the reply messages.
1493  **/
1494 void iavf_virtchnl_completion(struct iavf_adapter *adapter,
1495 			      enum virtchnl_ops v_opcode,
1496 			      enum iavf_status v_retval, u8 *msg, u16 msglen)
1497 {
1498 	struct net_device *netdev = adapter->netdev;
1499 
1500 	if (v_opcode == VIRTCHNL_OP_EVENT) {
1501 		struct virtchnl_pf_event *vpe =
1502 			(struct virtchnl_pf_event *)msg;
1503 		bool link_up = iavf_get_vpe_link_status(adapter, vpe);
1504 
1505 		switch (vpe->event) {
1506 		case VIRTCHNL_EVENT_LINK_CHANGE:
1507 			iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
1508 
1509 			/* we've already got the right link status, bail */
1510 			if (adapter->link_up == link_up)
1511 				break;
1512 
1513 			if (link_up) {
1514 				/* If we get link up message and start queues
1515 				 * before our queues are configured it will
1516 				 * trigger a TX hang. In that case, just ignore
1517 				 * the link status message; we'll get another one
1518 				 * after we enable queues and are actually prepared
1519 				 * to send traffic.
1520 				 */
1521 				if (adapter->state != __IAVF_RUNNING)
1522 					break;
1523 
1524 				/* For ADq enabled VF, we reconfigure VSIs and
1525 				 * re-allocate queues. Hence wait till all
1526 				 * queues are enabled.
1527 				 */
1528 				if (adapter->flags &
1529 				    IAVF_FLAG_QUEUES_DISABLED)
1530 					break;
1531 			}
1532 
1533 			adapter->link_up = link_up;
1534 			if (link_up) {
1535 				netif_tx_start_all_queues(netdev);
1536 				netif_carrier_on(netdev);
1537 			} else {
1538 				netif_tx_stop_all_queues(netdev);
1539 				netif_carrier_off(netdev);
1540 			}
1541 			iavf_print_link_message(adapter);
1542 			break;
1543 		case VIRTCHNL_EVENT_RESET_IMPENDING:
1544 			dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
1545 			if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
1546 				adapter->flags |= IAVF_FLAG_RESET_PENDING;
1547 				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1548 				queue_work(iavf_wq, &adapter->reset_task);
1549 			}
1550 			break;
1551 		default:
1552 			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
1553 				vpe->event);
1554 			break;
1555 		}
1556 		return;
1557 	}
1558 	if (v_retval) {
1559 		switch (v_opcode) {
1560 		case VIRTCHNL_OP_ADD_VLAN:
1561 			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
1562 				iavf_stat_str(&adapter->hw, v_retval));
1563 			break;
1564 		case VIRTCHNL_OP_ADD_ETH_ADDR:
1565 			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
1566 				iavf_stat_str(&adapter->hw, v_retval));
1567 			iavf_mac_add_reject(adapter);
1568 			/* restore administratively set MAC address */
1569 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1570 			break;
1571 		case VIRTCHNL_OP_DEL_VLAN:
1572 			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
1573 				iavf_stat_str(&adapter->hw, v_retval));
1574 			break;
1575 		case VIRTCHNL_OP_DEL_ETH_ADDR:
1576 			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
1577 				iavf_stat_str(&adapter->hw, v_retval));
1578 			break;
1579 		case VIRTCHNL_OP_ENABLE_CHANNELS:
1580 			dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
1581 				iavf_stat_str(&adapter->hw, v_retval));
1582 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
1583 			adapter->ch_config.state = __IAVF_TC_INVALID;
1584 			netdev_reset_tc(netdev);
1585 			netif_tx_start_all_queues(netdev);
1586 			break;
1587 		case VIRTCHNL_OP_DISABLE_CHANNELS:
1588 			dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
1589 				iavf_stat_str(&adapter->hw, v_retval));
1590 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
1591 			adapter->ch_config.state = __IAVF_TC_RUNNING;
1592 			netif_tx_start_all_queues(netdev);
1593 			break;
1594 		case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
1595 			struct iavf_cloud_filter *cf, *cftmp;
1596 
1597 			list_for_each_entry_safe(cf, cftmp,
1598 						 &adapter->cloud_filter_list,
1599 						 list) {
1600 				if (cf->state == __IAVF_CF_ADD_PENDING) {
1601 					cf->state = __IAVF_CF_INVALID;
1602 					dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
1603 						 iavf_stat_str(&adapter->hw,
1604 							       v_retval));
1605 					iavf_print_cloud_filter(adapter,
1606 								&cf->f);
1607 					list_del(&cf->list);
1608 					kfree(cf);
1609 					adapter->num_cloud_filters--;
1610 				}
1611 			}
1612 			}
1613 			break;
1614 		case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
1615 			struct iavf_cloud_filter *cf;
1616 
1617 			list_for_each_entry(cf, &adapter->cloud_filter_list,
1618 					    list) {
1619 				if (cf->state == __IAVF_CF_DEL_PENDING) {
1620 					cf->state = __IAVF_CF_ACTIVE;
1621 					dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
1622 						 iavf_stat_str(&adapter->hw,
1623 							       v_retval));
1624 					iavf_print_cloud_filter(adapter,
1625 								&cf->f);
1626 				}
1627 			}
1628 			}
1629 			break;
1630 		case VIRTCHNL_OP_ADD_FDIR_FILTER: {
1631 			struct iavf_fdir_fltr *fdir, *fdir_tmp;
1632 
1633 			spin_lock_bh(&adapter->fdir_fltr_lock);
1634 			list_for_each_entry_safe(fdir, fdir_tmp,
1635 						 &adapter->fdir_list_head,
1636 						 list) {
1637 				if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
1638 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
1639 						 iavf_stat_str(&adapter->hw,
1640 							       v_retval));
1641 					iavf_print_fdir_fltr(adapter, fdir);
1642 					if (msglen)
1643 						dev_err(&adapter->pdev->dev,
1644 							"%s\n", msg);
1645 					list_del(&fdir->list);
1646 					kfree(fdir);
1647 					adapter->fdir_active_fltr--;
1648 				}
1649 			}
1650 			spin_unlock_bh(&adapter->fdir_fltr_lock);
1651 			}
1652 			break;
1653 		case VIRTCHNL_OP_DEL_FDIR_FILTER: {
1654 			struct iavf_fdir_fltr *fdir;
1655 
1656 			spin_lock_bh(&adapter->fdir_fltr_lock);
1657 			list_for_each_entry(fdir, &adapter->fdir_list_head,
1658 					    list) {
1659 				if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
1660 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
1661 					dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
1662 						 iavf_stat_str(&adapter->hw,
1663 							       v_retval));
1664 					iavf_print_fdir_fltr(adapter, fdir);
1665 				}
1666 			}
1667 			spin_unlock_bh(&adapter->fdir_fltr_lock);
1668 			}
1669 			break;
1670 		case VIRTCHNL_OP_ADD_RSS_CFG: {
1671 			struct iavf_adv_rss *rss, *rss_tmp;
1672 
1673 			spin_lock_bh(&adapter->adv_rss_lock);
1674 			list_for_each_entry_safe(rss, rss_tmp,
1675 						 &adapter->adv_rss_list_head,
1676 						 list) {
1677 				if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
1678 					iavf_print_adv_rss_cfg(adapter, rss,
1679 							       "Failed to change the input set for",
1680 							       NULL);
1681 					list_del(&rss->list);
1682 					kfree(rss);
1683 				}
1684 			}
1685 			spin_unlock_bh(&adapter->adv_rss_lock);
1686 			}
1687 			break;
1688 		case VIRTCHNL_OP_DEL_RSS_CFG: {
1689 			struct iavf_adv_rss *rss;
1690 
1691 			spin_lock_bh(&adapter->adv_rss_lock);
1692 			list_for_each_entry(rss, &adapter->adv_rss_list_head,
1693 					    list) {
1694 				if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
1695 					rss->state = IAVF_ADV_RSS_ACTIVE;
1696 					dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
1697 						iavf_stat_str(&adapter->hw,
1698 							      v_retval));
1699 				}
1700 			}
1701 			spin_unlock_bh(&adapter->adv_rss_lock);
1702 			}
1703 			break;
1704 		case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1705 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
1706 			/* VLAN stripping could not be enabled by ethtool.
1707 			 * Disable it in netdev->features.
1708 			 */
1709 			iavf_netdev_features_vlan_strip_set(netdev, false);
1710 			break;
1711 		case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1712 			dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
1713 			/* VLAN stripping could not be disabled by ethtool.
1714 			 * Enable it in netdev->features.
1715 			 */
1716 			iavf_netdev_features_vlan_strip_set(netdev, true);
1717 			break;
1718 		default:
1719 			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
1720 				v_retval, iavf_stat_str(&adapter->hw, v_retval),
1721 				v_opcode);
1722 		}
1723 	}
1724 	switch (v_opcode) {
1725 	case VIRTCHNL_OP_ADD_ETH_ADDR:
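		/* On success, note that the MAC filter was added. Either way,
		 * keep netdev's address in sync with the address the HW uses.
		 */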
1726 		if (!v_retval)
1727 			iavf_mac_add_ok(adapter);
1728 		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
1729 			eth_hw_addr_set(netdev, adapter->hw.mac.addr);
1730 		break;
1731 	case VIRTCHNL_OP_GET_STATS: {
1732 		struct iavf_eth_stats *stats =
1733 			(struct iavf_eth_stats *)msg;
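		/* Mirror the PF-reported counters into netdev stats and cache
		 * a copy in the adapter.
		 */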
1734 		netdev->stats.rx_packets = stats->rx_unicast +
1735 					   stats->rx_multicast +
1736 					   stats->rx_broadcast;
1737 		netdev->stats.tx_packets = stats->tx_unicast +
1738 					   stats->tx_multicast +
1739 					   stats->tx_broadcast;
1740 		netdev->stats.rx_bytes = stats->rx_bytes;
1741 		netdev->stats.tx_bytes = stats->tx_bytes;
1742 		netdev->stats.tx_errors = stats->tx_errors;
1743 		netdev->stats.rx_dropped = stats->rx_discards;
1744 		netdev->stats.tx_dropped = stats->tx_discards;
1745 		adapter->current_stats = *stats;
1746 		}
1747 		break;
1748 	case VIRTCHNL_OP_GET_VF_RESOURCES: {
1749 		u16 len = sizeof(struct virtchnl_vf_resource) +
1750 			  IAVF_MAX_VF_VSI *
1751 			  sizeof(struct virtchnl_vsi_resource);
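		/* Copy no more than the expected resource structure size,
		 * even if the PF sent a larger message.
		 */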
1752 		memcpy(adapter->vf_res, msg, min(msglen, len));
1753 		iavf_validate_num_queues(adapter);
1754 		iavf_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
1755 		if (is_zero_ether_addr(adapter->hw.mac.addr)) {
1756 			/* restore current mac address */
1757 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1758 		} else {
1759 			/* refresh current mac address if changed */
1760 			eth_hw_addr_set(netdev, adapter->hw.mac.addr);
1761 			ether_addr_copy(netdev->perm_addr,
1762 					adapter->hw.mac.addr);
1763 		}
1764 		spin_lock_bh(&adapter->mac_vlan_list_lock);
1765 		iavf_add_filter(adapter, adapter->hw.mac.addr);
1766 
1767 		if (VLAN_ALLOWED(adapter)) {
1768 			if (!list_empty(&adapter->vlan_filter_list)) {
1769 				struct iavf_vlan_filter *vlf;
1770 
1771 				/* re-add all VLAN filters over virtchnl */
1772 				list_for_each_entry(vlf,
1773 						    &adapter->vlan_filter_list,
1774 						    list)
1775 					vlf->add = true;
1776 
1777 				adapter->aq_required |=
1778 					IAVF_FLAG_AQ_ADD_VLAN_FILTER;
1779 			}
1780 		}
1781 
1782 		spin_unlock_bh(&adapter->mac_vlan_list_lock);
1783 		iavf_process_config(adapter);
1784 		adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
1785 		}
1786 		break;
1787 	case VIRTCHNL_OP_ENABLE_QUEUES:
1788 		/* enable transmits */
1789 		iavf_irq_enable(adapter, true);
1790 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
1791 		break;
1792 	case VIRTCHNL_OP_DISABLE_QUEUES:
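		/* The rings can be freed now that the queues are disabled;
		 * complete a pending transition to the DOWN state.
		 */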
1793 		iavf_free_all_tx_resources(adapter);
1794 		iavf_free_all_rx_resources(adapter);
1795 		if (adapter->state == __IAVF_DOWN_PENDING) {
1796 			iavf_change_state(adapter, __IAVF_DOWN);
1797 			wake_up(&adapter->down_waitqueue);
1798 		}
1799 		break;
1800 	case VIRTCHNL_OP_VERSION:
1801 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1802 		/* Don't display an error if we get these out of sequence.
1803 		 * If the firmware needed to get kicked, we'll get these and
1804 		 * it's no problem.
1805 		 */
1806 		if (v_opcode != adapter->current_op)
1807 			return;
1808 		break;
1809 	case VIRTCHNL_OP_IWARP:
1810 		/* Gobble zero-length replies from the PF. They indicate that
1811 		 * a previous message was received OK, and the client doesn't
1812 		 * care about that.
1813 		 */
1814 		if (msglen && CLIENT_ENABLED(adapter))
1815 			iavf_notify_client_message(&adapter->vsi, msg, msglen);
1816 		break;
1817 
1818 	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
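		/* The iWARP IRQ map request has completed; clear its pending
		 * bit.
		 */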
1819 		adapter->client_pending &=
1820 				~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
1821 		break;
1822 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
1823 		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
1824 
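		/* Only accept the hena value if the payload has the expected
		 * size.
		 */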
1825 		if (msglen == sizeof(*vrh))
1826 			adapter->hena = vrh->hena;
1827 		else
1828 			dev_warn(&adapter->pdev->dev,
1829 				 "Invalid message %d from PF\n", v_opcode);
1830 		}
1831 		break;
1832 	case VIRTCHNL_OP_REQUEST_QUEUES: {
1833 		struct virtchnl_vf_res_request *vfres =
1834 			(struct virtchnl_vf_res_request *)msg;
1835 
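		/* If the PF could not grant the requested queue count,
		 * abandon the request and skip the ITR reinit.
		 */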
1836 		if (vfres->num_queue_pairs != adapter->num_req_queues) {
1837 			dev_info(&adapter->pdev->dev,
1838 				 "Requested %d queues, PF can support %d\n",
1839 				 adapter->num_req_queues,
1840 				 vfres->num_queue_pairs);
1841 			adapter->num_req_queues = 0;
1842 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
1843 		}
1844 		}
1845 		break;
1846 	case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
1847 		struct iavf_cloud_filter *cf;
1848 
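		/* The PF accepted the filters; promote them from pending to
		 * active.
		 */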
1849 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1850 			if (cf->state == __IAVF_CF_ADD_PENDING)
1851 				cf->state = __IAVF_CF_ACTIVE;
1852 		}
1853 		}
1854 		break;
1855 	case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
1856 		struct iavf_cloud_filter *cf, *cftmp;
1857 
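		/* The PF removed the filters; free the local entries. */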
1858 		list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1859 					 list) {
1860 			if (cf->state == __IAVF_CF_DEL_PENDING) {
1861 				cf->state = __IAVF_CF_INVALID;
1862 				list_del(&cf->list);
1863 				kfree(cf);
1864 				adapter->num_cloud_filters--;
1865 			}
1866 		}
1867 		}
1868 		break;
1869 	case VIRTCHNL_OP_ADD_FDIR_FILTER: {
1870 		struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
1871 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
1872 
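		/* Activate pending filters that the PF accepted; log and drop
		 * any it rejected.
		 */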
1873 		spin_lock_bh(&adapter->fdir_fltr_lock);
1874 		list_for_each_entry_safe(fdir, fdir_tmp,
1875 					 &adapter->fdir_list_head,
1876 					 list) {
1877 			if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
1878 				if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
1879 					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
1880 						 fdir->loc);
1881 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
1882 					fdir->flow_id = add_fltr->flow_id;
1883 				} else {
1884 					dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
1885 						 add_fltr->status);
1886 					iavf_print_fdir_fltr(adapter, fdir);
1887 					list_del(&fdir->list);
1888 					kfree(fdir);
1889 					adapter->fdir_active_fltr--;
1890 				}
1891 			}
1892 		}
1893 		spin_unlock_bh(&adapter->fdir_fltr_lock);
1894 		}
1895 		break;
1896 	case VIRTCHNL_OP_DEL_FDIR_FILTER: {
1897 		struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
1898 		struct iavf_fdir_fltr *fdir, *fdir_tmp;
1899 
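		/* Free filters the PF confirmed as deleted; keep active any
		 * whose delete request failed.
		 */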
1900 		spin_lock_bh(&adapter->fdir_fltr_lock);
1901 		list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
1902 					 list) {
1903 			if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
1904 				if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
1905 					dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
1906 						 fdir->loc);
1907 					list_del(&fdir->list);
1908 					kfree(fdir);
1909 					adapter->fdir_active_fltr--;
1910 				} else {
1911 					fdir->state = IAVF_FDIR_FLTR_ACTIVE;
1912 					dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
1913 						 del_fltr->status);
1914 					iavf_print_fdir_fltr(adapter, fdir);
1915 				}
1916 			}
1917 		}
1918 		spin_unlock_bh(&adapter->fdir_fltr_lock);
1919 		}
1920 		break;
1921 	case VIRTCHNL_OP_ADD_RSS_CFG: {
1922 		struct iavf_adv_rss *rss;
1923 
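		/* The PF accepted the input set changes; mark them active. */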
1924 		spin_lock_bh(&adapter->adv_rss_lock);
1925 		list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1926 			if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
1927 				iavf_print_adv_rss_cfg(adapter, rss,
1928 						       "Input set change for",
1929 						       "successful");
1930 				rss->state = IAVF_ADV_RSS_ACTIVE;
1931 			}
1932 		}
1933 		spin_unlock_bh(&adapter->adv_rss_lock);
1934 		}
1935 		break;
1936 	case VIRTCHNL_OP_DEL_RSS_CFG: {
1937 		struct iavf_adv_rss *rss, *rss_tmp;
1938 
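		/* The PF removed the RSS configurations; free the local
		 * entries.
		 */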
1939 		spin_lock_bh(&adapter->adv_rss_lock);
1940 		list_for_each_entry_safe(rss, rss_tmp,
1941 					 &adapter->adv_rss_list_head, list) {
1942 			if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
1943 				list_del(&rss->list);
1944 				kfree(rss);
1945 			}
1946 		}
1947 		spin_unlock_bh(&adapter->adv_rss_lock);
1948 		}
1949 		break;
1950 	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
1951 		/* PF enabled VLAN stripping on this VF. Update
1952 		 * netdev->features if needed to keep it in sync with ethtool.
1953 		 */
1954 		if (!v_retval)
1955 			iavf_netdev_features_vlan_strip_set(netdev, true);
1956 		break;
1957 	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
1958 		/* PF disabled VLAN stripping on this VF. Update
1959 		 * netdev->features if needed to keep it in sync with ethtool.
1960 		 */
1961 		if (!v_retval)
1962 			iavf_netdev_features_vlan_strip_set(netdev, false);
1963 		break;
1964 	default:
1965 		if (adapter->current_op && (v_opcode != adapter->current_op))
1966 			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
1967 				 adapter->current_op, v_opcode);
1968 		break;
1969 	} /* switch v_opcode */
1970 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1971 }
1972