/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40evf.h"
#include "i40e_prototype.h"
#include "i40evf_client.h"

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50

/**
 * i40evf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to PF and print status if failure.
 **/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
			      enum virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw *hw = &adapter->hw;
	i40e_status err;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (err)
		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
			op, i40evf_stat_str(hw, err),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
	return err;
}

/**
 * i40evf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
	struct virtchnl_version_info vvi;

	vvi.major = VIRTCHNL_VERSION_MAJOR;
	vvi.minor = VIRTCHNL_VERSION_MINOR;

	return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				  sizeof(vvi));
}

/**
 * i40evf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if they do not,
 * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
 * from the firmware are propagated.
 **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
	struct virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops op;
	i40e_status err;

	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	while (1) {
		err = i40evf_clean_arq_element(hw, &event, NULL);
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		if (err)
			goto out_alloc;
		op =
		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_VERSION)
			break;
	}

	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err)
		goto out_alloc;

	if (op != VIRTCHNL_OP_VERSION) {
		dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
			 op);
		err = -EIO;
		goto out_alloc;
	}

	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
	adapter->pf_version = *pf_vvi;

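	/* The PF is incompatible only if it reports a newer virtchnl API
	 * version than this driver supports; an older PF version is fine.
	 */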
	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
	     (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
		err = -EIO;

out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * i40evf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
	u32 caps;

	caps = VIRTCHNL_VF_OFFLOAD_L2 |
	       VIRTCHNL_VF_OFFLOAD_RSS_PF |
	       VIRTCHNL_VF_OFFLOAD_RSS_AQ |
	       VIRTCHNL_VF_OFFLOAD_RSS_REG |
	       VIRTCHNL_VF_OFFLOAD_VLAN |
	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
	       VIRTCHNL_VF_OFFLOAD_ENCAP |
	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
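	/* Capability flags are only understood by PFs speaking virtchnl
	 * API 1.1 or later; a 1.0 PF gets the request with no payload.
	 */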
	if (PF_IS_V11(adapter))
		return i40evf_send_pf_msg(adapter,
					  VIRTCHNL_OP_GET_VF_RESOURCES,
					  (u8 *)&caps, sizeof(caps));
	else
		return i40evf_send_pf_msg(adapter,
					  VIRTCHNL_OP_GET_VF_RESOURCES,
					  NULL, 0);
}

/**
 * i40evf_get_vf_config
 * @adapter: adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called after
 * admin queue is initialized. Busy waits until response is received from PF,
 * with maximum timeout. Response from PF is returned in the buffer for further
 * processing by the caller.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	enum virtchnl_ops op;
	i40e_status err;
	u16 len;

	len = sizeof(struct virtchnl_vf_resource) +
	      I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
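	/* Size the reply buffer for the resource structure plus per-VSI
	 * info for the maximum number of VSIs a VF can be given.
	 */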
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	while (1) {
		/* When the AQ is empty, i40evf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = i40evf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op =
		    (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
			break;
	}

	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

	i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_vsi_queue_config_info *vqci;
	struct virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->num_active_queues;
	int i, len, max_frame = I40E_MAX_RXBUFFER;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = sizeof(struct virtchnl_vsi_queue_config_info) +
	      (sizeof(struct virtchnl_queue_pair_info) * pairs);
	vqci = kzalloc(len, GFP_KERNEL);
	if (!vqci)
		return;

	/* Limit maximum frame size when jumbo frames is not enabled */
	if (!(adapter->flags & I40EVF_FLAG_LEGACY_RX) &&
	    (adapter->netdev->mtu <= ETH_DATA_LEN))
		max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;

	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i].count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
		vqpi->rxq.max_pkt_size = max_frame;
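		/* Report the Rx buffer length rounded up to the granularity
		 * the Rx queue context expects (1 << I40E_RXQ_CTX_DBUFF_SHIFT
		 * bytes), hence the ALIGN below.
		 */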
		vqpi->rxq.databuffer_size =
			ALIGN(adapter->rx_rings[i].rx_buf_len,
			      BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
		vqpi++;
	}

	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	kfree(vqci);
}

/**
 * i40evf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
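	/* Select every active queue: one bit per queue in the bitmap. */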
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/**
 * i40evf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
	vqs.rx_queues = vqs.tx_queues;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
}

/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
	struct virtchnl_irq_map_info *vimi;
	int v_idx, q_vectors, len;
	struct i40e_q_vector *q_vector;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;

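	/* num_msix_vectors includes the misc/AdminQ vector (vector 0); only
	 * the remaining vectors carry queue interrupts.
	 */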
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	len = sizeof(struct virtchnl_irq_map_info) +
	      (adapter->num_msix_vectors *
	       sizeof(struct virtchnl_vector_map));
	vimi = kzalloc(len, GFP_KERNEL);
	if (!vimi)
		return;

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vectors + v_idx;
		vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
		vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
		vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
		vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
	vimi->vecmap[v_idx].vector_id = 0;
	vimi->vecmap[v_idx].txq_map = 0;
	vimi->vecmap[v_idx].rxq_map = 0;

	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
			   (u8 *)vimi, len);
	kfree(vimi);
}

/**
 * i40evf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	int len, i = 0, count = 0;
	struct i40evf_mac_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}

	veal = kzalloc(len, GFP_KERNEL);
	if (!veal)
		return;

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
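	/* If the request was truncated above, keep the flag set so the
	 * remaining filters are sent in a later pass.
	 */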
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
			   (u8 *)veal, len);
	kfree(veal);
}

/**
 * i40evf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
	struct virtchnl_ether_addr_list *veal;
	struct i40evf_mac_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
			adapter->current_op);
		return;
	}
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_ether_addr_list)) /
			sizeof(struct virtchnl_ether_addr);
		len = sizeof(struct virtchnl_ether_addr_list) +
		      (count * sizeof(struct virtchnl_ether_addr));
		more = true;
	}
	veal = kzalloc(len, GFP_KERNEL);
	if (!veal)
		return;

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
			   (u8 *)veal, len);
	kfree(veal);
}

/**
 * i40evf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct i40evf_vlan_filter *f;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}
	vvfl = kzalloc(len, GFP_KERNEL);
	if (!vvfl)
		return;

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}

/**
 * i40evf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	struct i40evf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
			adapter->current_op);
		return;
	}

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
		more = true;
	}
	vvfl = kzalloc(len, GFP_KERNEL);
	if (!vvfl)
		return;

	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	if (!more)
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
}

/**
 * i40evf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous.
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
	struct virtchnl_promisc_info vpi;
	int promisc_all;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
			adapter->current_op);
		return;
	}

	promisc_all = FLAG_VF_UNICAST_PROMISC |
		      FLAG_VF_MULTICAST_PROMISC;
	if ((flags & promisc_all) == promisc_all) {
		adapter->flags |= I40EVF_FLAG_PROMISC_ON;
		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
		dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
	}

	if (flags & FLAG_VF_MULTICAST_PROMISC) {
		adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
		adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
		dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
	}

	if (!flags) {
		adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
		adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
		dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
	}

	adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			   (u8 *)&vpi, sizeof(vpi));
}

/**
 * i40evf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
	struct virtchnl_queue_select vqs;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* no error message, this isn't crucial */
		return;
	}
	adapter->current_op = VIRTCHNL_OP_GET_STATS;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* queue maps are ignored for this message - only the vsi is used */
	if (i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
			       (u8 *)&vqs, sizeof(vqs)))
		/* if the request failed, don't lock out others */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_get_hena
 * @adapter: adapter structure
 *
 * Request hash enable capabilities from PF
 **/
void i40evf_get_hena(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
			   NULL, 0);
}

/**
 * i40evf_set_hena
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash capabilities
 **/
void i40evf_set_hena(struct i40evf_adapter *adapter)
{
	struct virtchnl_rss_hena vrh;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
			adapter->current_op);
		return;
	}
	vrh.hena = adapter->hena;
	adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_SET_RSS_HENA,
			   (u8 *)&vrh, sizeof(vrh));
}

/**
 * i40evf_set_rss_key
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS hash key
 **/
void i40evf_set_rss_key(struct i40evf_adapter *adapter)
{
	struct virtchnl_rss_key *vrk;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
			adapter->current_op);
		return;
	}
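	/* struct virtchnl_rss_key already includes one key byte, hence
	 * the -1 in the length calculation below.
	 */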
	len = sizeof(struct virtchnl_rss_key) +
	      (adapter->rss_key_size * sizeof(u8)) - 1;
	vrk = kzalloc(len, GFP_KERNEL);
	if (!vrk)
		return;
	vrk->vsi_id = adapter->vsi.id;
	vrk->key_len = adapter->rss_key_size;
	memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);

	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_KEY,
			   (u8 *)vrk, len);
	kfree(vrk);
}

/**
 * i40evf_set_rss_lut
 * @adapter: adapter structure
 *
 * Request the PF to set our RSS lookup table
 **/
void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
{
	struct virtchnl_rss_lut *vrl;
	int len;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
			adapter->current_op);
		return;
	}
	len = sizeof(struct virtchnl_rss_lut) +
	      (adapter->rss_lut_size * sizeof(u8)) - 1;
	vrl = kzalloc(len, GFP_KERNEL);
	if (!vrl)
		return;
	vrl->vsi_id = adapter->vsi.id;
	vrl->lut_entries = adapter->rss_lut_size;
	memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS_LUT,
			   (u8 *)vrl, len);
	kfree(vrl);
}

/**
 * i40evf_enable_vlan_stripping
 * @adapter: adapter structure
 *
 * Request VLAN header stripping to be enabled
 **/
void i40evf_enable_vlan_stripping(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
			   NULL, 0);
}

/**
 * i40evf_disable_vlan_stripping
 * @adapter: adapter structure
 *
 * Request VLAN header stripping to be disabled
 **/
void i40evf_disable_vlan_stripping(struct i40evf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
			adapter->current_op);
		return;
	}
	adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
			   NULL, 0);
}

/**
 * i40evf_print_link_message - print link up or down
 * @adapter: adapter structure
 *
 * Log a message telling the world of our wondrous link status
 */
static void i40evf_print_link_message(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	char *speed = "Unknown ";

	if (!adapter->link_up) {
		netdev_info(netdev, "NIC Link is Down\n");
		return;
	}

	switch (adapter->link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	netdev_info(netdev, "NIC Link is Up %sbps Full Duplex\n", speed);
}

/**
 * i40evf_request_reset
 * @adapter: adapter structure
 *
 * Request that the PF reset this VF. No response is expected.
 **/
void i40evf_request_reset(struct i40evf_adapter *adapter)
{
	/* Don't check CURRENT_OP - this is always higher priority */
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

	if (v_opcode == VIRTCHNL_OP_EVENT) {
		struct virtchnl_pf_event *vpe =
			(struct virtchnl_pf_event *)msg;
		bool link_up = vpe->event_data.link_event.link_status;

		switch (vpe->event) {
		case VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_speed =
				vpe->event_data.link_event.link_speed;

			/* we've already got the right link status, bail */
			if (adapter->link_up == link_up)
				break;

			/* If we get link up message and start queues before
			 * our queues are configured it will trigger a TX hang.
			 * In that case, just ignore the link status message,
			 * we'll get another one after we enable queues and are
			 * actually prepared to send traffic.
			 */
			if (link_up && adapter->state != __I40EVF_RUNNING)
				break;

			adapter->link_up = link_up;
			if (link_up) {
				netif_tx_start_all_queues(netdev);
				netif_carrier_on(netdev);
			} else {
				netif_tx_stop_all_queues(netdev);
				netif_carrier_off(netdev);
			}
			i40evf_print_link_message(adapter);
			break;
		case VIRTCHNL_EVENT_RESET_IMPENDING:
			dev_info(&adapter->pdev->dev, "PF reset warning received\n");
			if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
				adapter->flags |= I40EVF_FLAG_RESET_PENDING;
				dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
				schedule_work(&adapter->reset_task);
			}
			break;
		default:
			dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
				vpe->event);
			break;
		}
		return;
	}
	if (v_retval) {
		switch (v_opcode) {
		case VIRTCHNL_OP_ADD_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_ADD_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_VLAN:
			dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		case VIRTCHNL_OP_DEL_ETH_ADDR:
			dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
				i40evf_stat_str(&adapter->hw, v_retval));
			break;
		default:
			dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
				v_retval,
				i40evf_stat_str(&adapter->hw, v_retval),
				v_opcode);
		}
	}
	switch (v_opcode) {
	case VIRTCHNL_OP_GET_STATS: {
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;
		netdev->stats.rx_packets = stats->rx_unicast +
					   stats->rx_multicast +
					   stats->rx_broadcast;
		netdev->stats.tx_packets = stats->tx_unicast +
					   stats->tx_multicast +
					   stats->tx_broadcast;
		netdev->stats.rx_bytes = stats->rx_bytes;
		netdev->stats.tx_bytes = stats->tx_bytes;
		netdev->stats.tx_errors = stats->tx_errors;
		netdev->stats.rx_dropped = stats->rx_discards;
		netdev->stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES: {
		u16 len = sizeof(struct virtchnl_vf_resource) +
			  I40E_MAX_VF_VSI *
			  sizeof(struct virtchnl_vsi_resource);
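		/* Never copy more than our preallocated vf_res buffer can
		 * hold, even if the PF's reply is longer.
		 */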
		memcpy(adapter->vf_res, msg, min(msglen, len));
		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
		/* restore current mac address */
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
		i40evf_process_config(adapter);
		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
		if (adapter->state == __I40EVF_DOWN_PENDING) {
			adapter->state = __I40EVF_DOWN;
			wake_up(&adapter->down_waitqueue);
		}
		break;
	case VIRTCHNL_OP_VERSION:
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		/* Don't display an error if we get these out of sequence.
		 * If the firmware needed to get kicked, we'll get these and
		 * it's no problem.
		 */
		if (v_opcode != adapter->current_op)
			return;
		break;
	case VIRTCHNL_OP_IWARP:
		/* Gobble zero-length replies from the PF. They indicate that
		 * a previous message was received OK, and the client doesn't
		 * care about that.
		 */
		if (msglen && CLIENT_ENABLED(adapter))
			i40evf_notify_client_message(&adapter->vsi,
						     msg, msglen);
		break;

	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		adapter->client_pending &=
			~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
		struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;

		if (msglen == sizeof(*vrh))
			adapter->hena = vrh->hena;
		else
			dev_warn(&adapter->pdev->dev,
				 "Invalid message %d from PF\n", v_opcode);
		}
		break;
	default:
		if (adapter->current_op && (v_opcode != adapter->current_op))
			dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
				 adapter->current_op, v_opcode);
		break;
	} /* switch v_opcode */
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
1090