1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 /* All iavf tracepoints are defined by the include below, which must
8  * be included exactly once across the whole kernel with
9  * CREATE_TRACE_POINTS defined
10  */
11 #define CREATE_TRACE_POINTS
12 #include "iavf_trace.h"
13 
14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
16 static int iavf_close(struct net_device *netdev);
17 static void iavf_init_get_resources(struct iavf_adapter *adapter);
18 static int iavf_check_reset_complete(struct iavf_hw *hw);
19 
20 char iavf_driver_name[] = "iavf";
21 static const char iavf_driver_string[] =
22 	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";
23 
24 static const char iavf_copyright[] =
25 	"Copyright (c) 2013 - 2018 Intel Corporation.";
26 
27 /* iavf_pci_tbl - PCI Device ID Table
28  *
29  * Wildcard entries (PCI_ANY_ID) should come last
30  * Last entry must be all 0s
31  *
32  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33  *   Class, Class Mask, private data (not used) }
34  */
35 static const struct pci_device_id iavf_pci_tbl[] = {
36 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
37 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
38 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
39 	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
40 	/* required last entry */
41 	{0, }
42 };
43 
44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
45 
46 MODULE_ALIAS("i40evf");
47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
49 MODULE_LICENSE("GPL v2");
50 
51 static const struct net_device_ops iavf_netdev_ops;
52 struct workqueue_struct *iavf_wq;
53 
54 /**
55  * iavf_pdev_to_adapter - go from pci_dev to adapter
56  * @pdev: pci_dev pointer
57  */
58 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
59 {
60 	return netdev_priv(pci_get_drvdata(pdev));
61 }
62 
63 /**
64  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
65  * @hw:   pointer to the HW structure
66  * @mem:  ptr to mem struct to fill out
67  * @size: size of memory requested
68  * @alignment: what to align the allocation to
69  **/
70 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
71 					 struct iavf_dma_mem *mem,
72 					 u64 size, u32 alignment)
73 {
74 	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
75 
76 	if (!mem)
77 		return IAVF_ERR_PARAM;
78 
79 	mem->size = ALIGN(size, alignment);
80 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
81 				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
82 	if (mem->va)
83 		return 0;
84 	else
85 		return IAVF_ERR_NO_MEMORY;
86 }
87 
88 /**
89  * iavf_free_dma_mem_d - OS specific memory free for shared code
90  * @hw:   pointer to the HW structure
91  * @mem:  ptr to mem struct to free
92  **/
93 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
94 				     struct iavf_dma_mem *mem)
95 {
96 	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
97 
98 	if (!mem || !mem->va)
99 		return IAVF_ERR_PARAM;
100 	dma_free_coherent(&adapter->pdev->dev, mem->size,
101 			  mem->va, (dma_addr_t)mem->pa);
102 	return 0;
103 }
104 
105 /**
106  * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
107  * @hw:   pointer to the HW structure
108  * @mem:  ptr to mem struct to fill out
109  * @size: size of memory requested
110  **/
111 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
112 					  struct iavf_virt_mem *mem, u32 size)
113 {
114 	if (!mem)
115 		return IAVF_ERR_PARAM;
116 
117 	mem->size = size;
118 	mem->va = kzalloc(size, GFP_KERNEL);
119 
120 	if (mem->va)
121 		return 0;
122 	else
123 		return IAVF_ERR_NO_MEMORY;
124 }
125 
126 /**
127  * iavf_free_virt_mem_d - OS specific memory free for shared code
128  * @hw:   pointer to the HW structure
129  * @mem:  ptr to mem struct to free
130  **/
131 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
132 				      struct iavf_virt_mem *mem)
133 {
134 	if (!mem)
135 		return IAVF_ERR_PARAM;
136 
137 	/* it's ok to kfree a NULL pointer */
138 	kfree(mem->va);
139 
140 	return 0;
141 }
142 
143 /**
144  * iavf_lock_timeout - try to lock mutex but give up after timeout
145  * @lock: mutex that should be locked
146  * @msecs: timeout in msecs
147  *
148  * Returns 0 on success, negative on failure
149  **/
150 int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
151 {
152 	unsigned int wait, delay = 10;
153 
154 	for (wait = 0; wait < msecs; wait += delay) {
155 		if (mutex_trylock(lock))
156 			return 0;
157 
158 		msleep(delay);
159 	}
160 
161 	return -1;
162 }
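
/* Minimal usage sketch (hypothetical caller, not taken from this driver):
 * bound the wait for a contended mutex instead of blocking forever, e.g.
 *
 *	if (iavf_lock_timeout(&some_lock, 5000))
 *		return;			(gave up after roughly 5 seconds)
 *	... critical section ...
 *	mutex_unlock(&some_lock);
 *
 * Note the failure return is a bare -1, not a -E errno value.
 */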
163 
164 /**
165  * iavf_schedule_reset - Set the flags and schedule a reset event
166  * @adapter: board private structure
167  **/
168 void iavf_schedule_reset(struct iavf_adapter *adapter)
169 {
170 	if (!(adapter->flags &
171 	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
172 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
173 		queue_work(iavf_wq, &adapter->reset_task);
174 	}
175 }
176 
177 /**
178  * iavf_schedule_request_stats - Set the flags and schedule statistics request
179  * @adapter: board private structure
180  *
181  * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
182  * request and refresh ethtool stats
183  **/
184 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
185 {
186 	adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
187 	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
188 }
189 
190 /**
191  * iavf_tx_timeout - Respond to a Tx Hang
192  * @netdev: network interface device structure
193  * @txqueue: queue number that is timing out
194  **/
195 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
196 {
197 	struct iavf_adapter *adapter = netdev_priv(netdev);
198 
199 	adapter->tx_timeout_count++;
200 	iavf_schedule_reset(adapter);
201 }
202 
203 /**
204  * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
205  * @adapter: board private structure
206  **/
207 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
208 {
209 	struct iavf_hw *hw = &adapter->hw;
210 
211 	if (!adapter->msix_entries)
212 		return;
213 
214 	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
215 
216 	iavf_flush(hw);
217 
218 	synchronize_irq(adapter->msix_entries[0].vector);
219 }
220 
221 /**
222  * iavf_misc_irq_enable - Enable default interrupt generation settings
223  * @adapter: board private structure
224  **/
225 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
226 {
227 	struct iavf_hw *hw = &adapter->hw;
228 
229 	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
230 				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
231 	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
232 
233 	iavf_flush(hw);
234 }
235 
236 /**
237  * iavf_irq_disable - Mask off interrupt generation on the NIC
238  * @adapter: board private structure
239  **/
240 static void iavf_irq_disable(struct iavf_adapter *adapter)
241 {
242 	int i;
243 	struct iavf_hw *hw = &adapter->hw;
244 
245 	if (!adapter->msix_entries)
246 		return;
247 
248 	for (i = 1; i < adapter->num_msix_vectors; i++) {
249 		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
250 		synchronize_irq(adapter->msix_entries[i].vector);
251 	}
252 	iavf_flush(hw);
253 }
254 
255 /**
256  * iavf_irq_enable_queues - Enable interrupt for all queues
257  * @adapter: board private structure
258  **/
259 void iavf_irq_enable_queues(struct iavf_adapter *adapter)
260 {
261 	struct iavf_hw *hw = &adapter->hw;
262 	int i;
263 
264 	for (i = 1; i < adapter->num_msix_vectors; i++) {
265 		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
266 		     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
267 		     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
268 	}
269 }
270 
271 /**
272  * iavf_irq_enable - Enable default interrupt generation settings
273  * @adapter: board private structure
274  * @flush: boolean value whether to run rd32()
275  **/
276 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
277 {
278 	struct iavf_hw *hw = &adapter->hw;
279 
280 	iavf_misc_irq_enable(adapter);
281 	iavf_irq_enable_queues(adapter);
282 
283 	if (flush)
284 		iavf_flush(hw);
285 }
286 
287 /**
288  * iavf_msix_aq - Interrupt handler for vector 0
289  * @irq: interrupt number
290  * @data: pointer to netdev
291  **/
292 static irqreturn_t iavf_msix_aq(int irq, void *data)
293 {
294 	struct net_device *netdev = data;
295 	struct iavf_adapter *adapter = netdev_priv(netdev);
296 	struct iavf_hw *hw = &adapter->hw;
297 
298 	/* handle non-queue interrupts, these reads clear the registers */
299 	rd32(hw, IAVF_VFINT_ICR01);
300 	rd32(hw, IAVF_VFINT_ICR0_ENA1);
301 
302 	if (adapter->state != __IAVF_REMOVE)
303 		/* schedule work on the private workqueue */
304 		queue_work(iavf_wq, &adapter->adminq_task);
305 
306 	return IRQ_HANDLED;
307 }
308 
309 /**
310  * iavf_msix_clean_rings - MSIX mode Interrupt Handler
311  * @irq: interrupt number
312  * @data: pointer to a q_vector
313  **/
314 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
315 {
316 	struct iavf_q_vector *q_vector = data;
317 
318 	if (!q_vector->tx.ring && !q_vector->rx.ring)
319 		return IRQ_HANDLED;
320 
321 	napi_schedule_irqoff(&q_vector->napi);
322 
323 	return IRQ_HANDLED;
324 }
325 
326 /**
327  * iavf_map_vector_to_rxq - associate irqs with rx queues
328  * @adapter: board private structure
329  * @v_idx: interrupt number
330  * @r_idx: queue number
331  **/
332 static void
333 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
334 {
335 	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
336 	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
337 	struct iavf_hw *hw = &adapter->hw;
338 
339 	rx_ring->q_vector = q_vector;
340 	rx_ring->next = q_vector->rx.ring;
341 	rx_ring->vsi = &adapter->vsi;
342 	q_vector->rx.ring = rx_ring;
343 	q_vector->rx.count++;
344 	q_vector->rx.next_update = jiffies + 1;
345 	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
346 	q_vector->ring_mask |= BIT(r_idx);
347 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
348 	     q_vector->rx.current_itr >> 1);
349 	q_vector->rx.current_itr = q_vector->rx.target_itr;
350 }
351 
352 /**
353  * iavf_map_vector_to_txq - associate irqs with tx queues
354  * @adapter: board private structure
355  * @v_idx: interrupt number
356  * @t_idx: queue number
357  **/
358 static void
359 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
360 {
361 	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
362 	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
363 	struct iavf_hw *hw = &adapter->hw;
364 
365 	tx_ring->q_vector = q_vector;
366 	tx_ring->next = q_vector->tx.ring;
367 	tx_ring->vsi = &adapter->vsi;
368 	q_vector->tx.ring = tx_ring;
369 	q_vector->tx.count++;
370 	q_vector->tx.next_update = jiffies + 1;
371 	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
372 	q_vector->num_ringpairs++;
373 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
374 	     q_vector->tx.target_itr >> 1);
375 	q_vector->tx.current_itr = q_vector->tx.target_itr;
376 }
377 
378 /**
379  * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
380  * @adapter: board private structure to initialize
381  *
382  * This function maps descriptor rings to the queue-specific vectors
383  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
384  * one vector per ring/queue, but on a constrained vector budget, we
385  * group the rings as "efficiently" as possible.  You would add new
386  * mapping configurations in here.
387  **/
388 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
389 {
390 	int rings_remaining = adapter->num_active_queues;
391 	int ridx = 0, vidx = 0;
392 	int q_vectors;
393 
394 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
395 
396 	for (; ridx < rings_remaining; ridx++) {
397 		iavf_map_vector_to_rxq(adapter, vidx, ridx);
398 		iavf_map_vector_to_txq(adapter, vidx, ridx);
399 
400 		/* In the case where we have more queues than vectors, continue
401 		 * round-robin on vectors until all queues are mapped.
402 		 */
403 		if (++vidx >= q_vectors)
404 			vidx = 0;
405 	}
406 
407 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
408 }
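
/* Worked example of the round-robin mapping above (illustrative numbers
 * only): with 8 active queues and 4 queue vectors, rings 0-3 land on
 * vectors 0-3 and rings 4-7 wrap around onto vectors 0-3 again, so each
 * vector ends up servicing two Tx/Rx ring pairs.
 */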
409 
410 /**
411  * iavf_irq_affinity_notify - Callback for affinity changes
412  * @notify: context as to what irq was changed
413  * @mask: the new affinity mask
414  *
415  * This is a callback function used by the irq_set_affinity_notifier function
416  * so that we may register to receive changes to the irq affinity masks.
417  **/
418 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
419 				     const cpumask_t *mask)
420 {
421 	struct iavf_q_vector *q_vector =
422 		container_of(notify, struct iavf_q_vector, affinity_notify);
423 
424 	cpumask_copy(&q_vector->affinity_mask, mask);
425 }
426 
427 /**
428  * iavf_irq_affinity_release - Callback for affinity notifier release
429  * @ref: internal core kernel usage
430  *
431  * This is a callback function used by the irq_set_affinity_notifier function
432  * to inform the current notification subscriber that they will no longer
433  * receive notifications.
434  **/
435 static void iavf_irq_affinity_release(struct kref *ref) {}
436 
437 /**
438  * iavf_request_traffic_irqs - Initialize MSI-X interrupts
439  * @adapter: board private structure
440  * @basename: device basename
441  *
442  * Allocates MSI-X vectors for tx and rx handling, and requests
443  * interrupts from the kernel.
444  **/
445 static int
446 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
447 {
448 	unsigned int vector, q_vectors;
449 	unsigned int rx_int_idx = 0, tx_int_idx = 0;
450 	int irq_num, err;
451 	int cpu;
452 
453 	iavf_irq_disable(adapter);
454 	/* Decrement for Other and TCP Timer vectors */
455 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
456 
457 	for (vector = 0; vector < q_vectors; vector++) {
458 		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
459 
460 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
461 
462 		if (q_vector->tx.ring && q_vector->rx.ring) {
463 			snprintf(q_vector->name, sizeof(q_vector->name),
464 				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
465 			tx_int_idx++;
466 		} else if (q_vector->rx.ring) {
467 			snprintf(q_vector->name, sizeof(q_vector->name),
468 				 "iavf-%s-rx-%d", basename, rx_int_idx++);
469 		} else if (q_vector->tx.ring) {
470 			snprintf(q_vector->name, sizeof(q_vector->name),
471 				 "iavf-%s-tx-%d", basename, tx_int_idx++);
472 		} else {
473 			/* skip this unused q_vector */
474 			continue;
475 		}
476 		err = request_irq(irq_num,
477 				  iavf_msix_clean_rings,
478 				  0,
479 				  q_vector->name,
480 				  q_vector);
481 		if (err) {
482 			dev_info(&adapter->pdev->dev,
483 				 "Request_irq failed, error: %d\n", err);
484 			goto free_queue_irqs;
485 		}
486 		/* register for affinity change notifications */
487 		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
488 		q_vector->affinity_notify.release =
489 						   iavf_irq_affinity_release;
490 		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
491 		/* Spread the IRQ affinity hints across online CPUs. Note that
492 		 * get_cpu_mask returns a mask with a permanent lifetime so
493 		 * it's safe to use as a hint for irq_set_affinity_hint.
494 		 */
495 		cpu = cpumask_local_spread(q_vector->v_idx, -1);
496 		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
497 	}
498 
499 	return 0;
500 
501 free_queue_irqs:
502 	while (vector) {
503 		vector--;
504 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
505 		irq_set_affinity_notifier(irq_num, NULL);
506 		irq_set_affinity_hint(irq_num, NULL);
507 		free_irq(irq_num, &adapter->q_vectors[vector]);
508 	}
509 	return err;
510 }
511 
512 /**
513  * iavf_request_misc_irq - Initialize MSI-X interrupts
514  * @adapter: board private structure
515  *
516  * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
517  * vector is only for the admin queue, and stays active even when the netdev
518  * is closed.
519  **/
520 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
521 {
522 	struct net_device *netdev = adapter->netdev;
523 	int err;
524 
525 	snprintf(adapter->misc_vector_name,
526 		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
527 		 dev_name(&adapter->pdev->dev));
528 	err = request_irq(adapter->msix_entries[0].vector,
529 			  &iavf_msix_aq, 0,
530 			  adapter->misc_vector_name, netdev);
531 	if (err) {
532 		dev_err(&adapter->pdev->dev,
533 			"request_irq for %s failed: %d\n",
534 			adapter->misc_vector_name, err);
535 		free_irq(adapter->msix_entries[0].vector, netdev);
536 	}
537 	return err;
538 }
539 
540 /**
541  * iavf_free_traffic_irqs - Free MSI-X interrupts
542  * @adapter: board private structure
543  *
544  * Frees all MSI-X vectors other than 0.
545  **/
546 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
547 {
548 	int vector, irq_num, q_vectors;
549 
550 	if (!adapter->msix_entries)
551 		return;
552 
553 	q_vectors = adapter->num_msix_vectors - NONQ_VECS;
554 
555 	for (vector = 0; vector < q_vectors; vector++) {
556 		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
557 		irq_set_affinity_notifier(irq_num, NULL);
558 		irq_set_affinity_hint(irq_num, NULL);
559 		free_irq(irq_num, &adapter->q_vectors[vector]);
560 	}
561 }
562 
563 /**
564  * iavf_free_misc_irq - Free MSI-X miscellaneous vector
565  * @adapter: board private structure
566  *
567  * Frees MSI-X vector 0.
568  **/
569 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
570 {
571 	struct net_device *netdev = adapter->netdev;
572 
573 	if (!adapter->msix_entries)
574 		return;
575 
576 	free_irq(adapter->msix_entries[0].vector, netdev);
577 }
578 
579 /**
580  * iavf_configure_tx - Configure Transmit Unit after Reset
581  * @adapter: board private structure
582  *
583  * Configure the Tx unit of the MAC after a reset.
584  **/
585 static void iavf_configure_tx(struct iavf_adapter *adapter)
586 {
587 	struct iavf_hw *hw = &adapter->hw;
588 	int i;
589 
590 	for (i = 0; i < adapter->num_active_queues; i++)
591 		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
592 }
593 
594 /**
595  * iavf_configure_rx - Configure Receive Unit after Reset
596  * @adapter: board private structure
597  *
598  * Configure the Rx unit of the MAC after a reset.
599  **/
600 static void iavf_configure_rx(struct iavf_adapter *adapter)
601 {
602 	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
603 	struct iavf_hw *hw = &adapter->hw;
604 	int i;
605 
606 	/* Legacy Rx will always default to a 2048 buffer size. */
607 #if (PAGE_SIZE < 8192)
608 	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
609 		struct net_device *netdev = adapter->netdev;
610 
611 		/* For jumbo frames on systems with 4K pages we have to use
612 		 * an order 1 page, so we might as well increase the size
613 		 * of our Rx buffer to make better use of the available space
614 		 */
615 		rx_buf_len = IAVF_RXBUFFER_3072;
616 
617 		/* We use a 1536 buffer size for configurations with
618 		 * standard Ethernet mtu.  On x86 this gives us enough room
619 		 * for shared info and 192 bytes of padding.
620 		 */
621 		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
622 		    (netdev->mtu <= ETH_DATA_LEN))
623 			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
624 	}
625 #endif
626 
627 	for (i = 0; i < adapter->num_active_queues; i++) {
628 		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
629 		adapter->rx_rings[i].rx_buf_len = rx_buf_len;
630 
631 		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
632 			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
633 		else
634 			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
635 	}
636 }
637 
638 /**
639  * iavf_find_vlan - Search filter list for specific vlan filter
640  * @adapter: board private structure
641  * @vlan: vlan tag
642  *
643  * Returns ptr to the filter object or NULL. Must be called while holding the
644  * mac_vlan_list_lock.
645  **/
646 static struct
647 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
648 {
649 	struct iavf_vlan_filter *f;
650 
651 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
652 		if (vlan == f->vlan)
653 			return f;
654 	}
655 	return NULL;
656 }
657 
658 /**
659  * iavf_add_vlan - Add a vlan filter to the list
660  * @adapter: board private structure
661  * @vlan: VLAN tag
662  *
663  * Returns ptr to the filter object or NULL when no memory available.
664  **/
665 static struct
666 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
667 {
668 	struct iavf_vlan_filter *f = NULL;
669 
670 	spin_lock_bh(&adapter->mac_vlan_list_lock);
671 
672 	f = iavf_find_vlan(adapter, vlan);
673 	if (!f) {
674 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
675 		if (!f)
676 			goto clearout;
677 
678 		f->vlan = vlan;
679 
680 		list_add_tail(&f->list, &adapter->vlan_filter_list);
681 		f->add = true;
682 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
683 	}
684 
685 clearout:
686 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
687 	return f;
688 }
689 
690 /**
691  * iavf_del_vlan - Remove a vlan filter from the list
692  * @adapter: board private structure
693  * @vlan: VLAN tag
694  **/
695 static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
696 {
697 	struct iavf_vlan_filter *f;
698 
699 	spin_lock_bh(&adapter->mac_vlan_list_lock);
700 
701 	f = iavf_find_vlan(adapter, vlan);
702 	if (f) {
703 		f->remove = true;
704 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
705 	}
706 
707 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
708 }
709 
710 /**
711  * iavf_restore_filters
712  * @adapter: board private structure
713  *
714  * Restore existing non-MAC filters when VF netdev comes back up
715  **/
716 static void iavf_restore_filters(struct iavf_adapter *adapter)
717 {
718 	u16 vid;
719 
720 	/* re-add all VLAN filters */
721 	for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
722 		iavf_add_vlan(adapter, vid);
723 }
724 
725 /**
726  * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
727  * @netdev: network device struct
728  * @proto: unused protocol data
729  * @vid: VLAN tag
730  **/
731 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
732 				__always_unused __be16 proto, u16 vid)
733 {
734 	struct iavf_adapter *adapter = netdev_priv(netdev);
735 
736 	if (!VLAN_ALLOWED(adapter))
737 		return -EIO;
738 
739 	if (iavf_add_vlan(adapter, vid) == NULL)
740 		return -ENOMEM;
741 
742 	set_bit(vid, adapter->vsi.active_vlans);
743 	return 0;
744 }
745 
746 /**
747  * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
748  * @netdev: network device struct
749  * @proto: unused protocol data
750  * @vid: VLAN tag
751  **/
752 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
753 				 __always_unused __be16 proto, u16 vid)
754 {
755 	struct iavf_adapter *adapter = netdev_priv(netdev);
756 
757 	iavf_del_vlan(adapter, vid);
758 	clear_bit(vid, adapter->vsi.active_vlans);
759 
760 	return 0;
761 }
762 
763 /**
764  * iavf_find_filter - Search filter list for specific mac filter
765  * @adapter: board private structure
766  * @macaddr: the MAC address
767  *
768  * Returns ptr to the filter object or NULL. Must be called while holding the
769  * mac_vlan_list_lock.
770  **/
771 static struct
772 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
773 				  const u8 *macaddr)
774 {
775 	struct iavf_mac_filter *f;
776 
777 	if (!macaddr)
778 		return NULL;
779 
780 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
781 		if (ether_addr_equal(macaddr, f->macaddr))
782 			return f;
783 	}
784 	return NULL;
785 }
786 
787 /**
788  * iavf_add_filter - Add a mac filter to the filter list
789  * @adapter: board private structure
790  * @macaddr: the MAC address
791  *
792  * Returns ptr to the filter object or NULL when no memory available.
793  **/
794 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
795 					const u8 *macaddr)
796 {
797 	struct iavf_mac_filter *f;
798 
799 	if (!macaddr)
800 		return NULL;
801 
802 	f = iavf_find_filter(adapter, macaddr);
803 	if (!f) {
804 		f = kzalloc(sizeof(*f), GFP_ATOMIC);
805 		if (!f)
806 			return f;
807 
808 		ether_addr_copy(f->macaddr, macaddr);
809 
810 		list_add_tail(&f->list, &adapter->mac_filter_list);
811 		f->add = true;
812 		f->is_new_mac = true;
813 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
814 	} else {
815 		f->remove = false;
816 	}
817 
818 	return f;
819 }
820 
821 /**
822  * iavf_set_mac - NDO callback to set port mac address
823  * @netdev: network interface device structure
824  * @p: pointer to an address structure
825  *
826  * Returns 0 on success, negative on failure
827  **/
828 static int iavf_set_mac(struct net_device *netdev, void *p)
829 {
830 	struct iavf_adapter *adapter = netdev_priv(netdev);
831 	struct iavf_hw *hw = &adapter->hw;
832 	struct iavf_mac_filter *f;
833 	struct sockaddr *addr = p;
834 
835 	if (!is_valid_ether_addr(addr->sa_data))
836 		return -EADDRNOTAVAIL;
837 
838 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
839 		return 0;
840 
841 	spin_lock_bh(&adapter->mac_vlan_list_lock);
842 
843 	f = iavf_find_filter(adapter, hw->mac.addr);
844 	if (f) {
845 		f->remove = true;
846 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
847 	}
848 
849 	f = iavf_add_filter(adapter, addr->sa_data);
850 
851 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
852 
853 	if (f) {
854 		ether_addr_copy(hw->mac.addr, addr->sa_data);
855 	}
856 
857 	return (f == NULL) ? -ENOMEM : 0;
858 }
859 
860 /**
861  * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
862  * @netdev: the netdevice
863  * @addr: address to add
864  *
865  * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
866  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
867  */
868 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
869 {
870 	struct iavf_adapter *adapter = netdev_priv(netdev);
871 
872 	if (iavf_add_filter(adapter, addr))
873 		return 0;
874 	else
875 		return -ENOMEM;
876 }
877 
878 /**
879  * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
880  * @netdev: the netdevice
881  * @addr: address to remove
882  *
883  * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
884  * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
885  */
886 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
887 {
888 	struct iavf_adapter *adapter = netdev_priv(netdev);
889 	struct iavf_mac_filter *f;
890 
891 	/* Under some circumstances, we might receive a request to delete
892 	 * our own device address from our uc list. Because we store the
893 	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
894 	 * such requests and not delete our device address from this list.
895 	 */
896 	if (ether_addr_equal(addr, netdev->dev_addr))
897 		return 0;
898 
899 	f = iavf_find_filter(adapter, addr);
900 	if (f) {
901 		f->remove = true;
902 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
903 	}
904 	return 0;
905 }
906 
907 /**
908  * iavf_set_rx_mode - NDO callback to set the netdev filters
909  * @netdev: network interface device structure
910  **/
911 static void iavf_set_rx_mode(struct net_device *netdev)
912 {
913 	struct iavf_adapter *adapter = netdev_priv(netdev);
914 
915 	spin_lock_bh(&adapter->mac_vlan_list_lock);
916 	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
917 	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
918 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
919 
920 	if (netdev->flags & IFF_PROMISC &&
921 	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
922 		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
923 	else if (!(netdev->flags & IFF_PROMISC) &&
924 		 adapter->flags & IAVF_FLAG_PROMISC_ON)
925 		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
926 
927 	if (netdev->flags & IFF_ALLMULTI &&
928 	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
929 		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
930 	else if (!(netdev->flags & IFF_ALLMULTI) &&
931 		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
932 		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
933 }
934 
935 /**
936  * iavf_napi_enable_all - enable NAPI on all queue vectors
937  * @adapter: board private structure
938  **/
939 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
940 {
941 	int q_idx;
942 	struct iavf_q_vector *q_vector;
943 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
944 
945 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
946 		struct napi_struct *napi;
947 
948 		q_vector = &adapter->q_vectors[q_idx];
949 		napi = &q_vector->napi;
950 		napi_enable(napi);
951 	}
952 }
953 
954 /**
955  * iavf_napi_disable_all - disable NAPI on all queue vectors
956  * @adapter: board private structure
957  **/
958 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
959 {
960 	int q_idx;
961 	struct iavf_q_vector *q_vector;
962 	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
963 
964 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
965 		q_vector = &adapter->q_vectors[q_idx];
966 		napi_disable(&q_vector->napi);
967 	}
968 }
969 
970 /**
971  * iavf_configure - set up transmit and receive data structures
972  * @adapter: board private structure
973  **/
974 static void iavf_configure(struct iavf_adapter *adapter)
975 {
976 	struct net_device *netdev = adapter->netdev;
977 	int i;
978 
979 	iavf_set_rx_mode(netdev);
980 
981 	iavf_configure_tx(adapter);
982 	iavf_configure_rx(adapter);
983 	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
984 
985 	for (i = 0; i < adapter->num_active_queues; i++) {
986 		struct iavf_ring *ring = &adapter->rx_rings[i];
987 
988 		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
989 	}
990 }
991 
992 /**
993  * iavf_up_complete - Finish the last steps of bringing up a connection
994  * @adapter: board private structure
995  *
996  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
997  **/
998 static void iavf_up_complete(struct iavf_adapter *adapter)
999 {
1000 	iavf_change_state(adapter, __IAVF_RUNNING);
1001 	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1002 
1003 	iavf_napi_enable_all(adapter);
1004 
1005 	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
1006 	if (CLIENT_ENABLED(adapter))
1007 		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
1008 	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
1009 }
1010 
1011 /**
1012  * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
1013  * yet and mark others to be removed.
1014  * @adapter: board private structure
1015  **/
1016 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1017 {
1018 	struct iavf_vlan_filter *vlf, *vlftmp;
1019 	struct iavf_mac_filter *f, *ftmp;
1020 
1021 	spin_lock_bh(&adapter->mac_vlan_list_lock);
1022 	/* clear the sync flag on all filters */
1023 	__dev_uc_unsync(adapter->netdev, NULL);
1024 	__dev_mc_unsync(adapter->netdev, NULL);
1025 
1026 	/* remove all MAC filters */
1027 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1028 				 list) {
1029 		if (f->add) {
1030 			list_del(&f->list);
1031 			kfree(f);
1032 		} else {
1033 			f->remove = true;
1034 		}
1035 	}
1036 
1037 	/* remove all VLAN filters */
1038 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1039 				 list) {
1040 		if (vlf->add) {
1041 			list_del(&vlf->list);
1042 			kfree(vlf);
1043 		} else {
1044 			vlf->remove = true;
1045 		}
1046 	}
1047 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
1048 }
1049 
1050 /**
1051  * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
1052  * mark others to be removed.
1053  * @adapter: board private structure
1054  **/
1055 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1056 {
1057 	struct iavf_cloud_filter *cf, *cftmp;
1058 
1059 	/* remove all cloud filters */
1060 	spin_lock_bh(&adapter->cloud_filter_list_lock);
1061 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1062 				 list) {
1063 		if (cf->add) {
1064 			list_del(&cf->list);
1065 			kfree(cf);
1066 			adapter->num_cloud_filters--;
1067 		} else {
1068 			cf->del = true;
1069 		}
1070 	}
1071 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
1072 }
1073 
1074 /**
1075  * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
1076  * others to be removed.
1077  * @adapter: board private structure
1078  **/
1079 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1080 {
1081 	struct iavf_fdir_fltr *fdir, *fdirtmp;
1082 
1083 	/* remove all Flow Director filters */
1084 	spin_lock_bh(&adapter->fdir_fltr_lock);
1085 	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
1086 				 list) {
1087 		if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1088 			list_del(&fdir->list);
1089 			kfree(fdir);
1090 			adapter->fdir_active_fltr--;
1091 		} else {
1092 			fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1093 		}
1094 	}
1095 	spin_unlock_bh(&adapter->fdir_fltr_lock);
1096 }
1097 
1098 /**
1099  * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
1100  * others to be removed.
1101  * @adapter: board private structure
1102  **/
1103 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1104 {
1105 	struct iavf_adv_rss *rss, *rsstmp;
1106 
1107 	/* remove all advanced RSS configuration */
1108 	spin_lock_bh(&adapter->adv_rss_lock);
1109 	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1110 				 list) {
1111 		if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1112 			list_del(&rss->list);
1113 			kfree(rss);
1114 		} else {
1115 			rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1116 		}
1117 	}
1118 	spin_unlock_bh(&adapter->adv_rss_lock);
1119 }
1120 
1121 /**
1122  * iavf_down - Shutdown the connection processing
1123  * @adapter: board private structure
1124  *
1125  * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1126  **/
1127 void iavf_down(struct iavf_adapter *adapter)
1128 {
1129 	struct net_device *netdev = adapter->netdev;
1130 
1131 	if (adapter->state <= __IAVF_DOWN_PENDING)
1132 		return;
1133 
1134 	netif_carrier_off(netdev);
1135 	netif_tx_disable(netdev);
1136 	adapter->link_up = false;
1137 	iavf_napi_disable_all(adapter);
1138 	iavf_irq_disable(adapter);
1139 
1140 	iavf_clear_mac_vlan_filters(adapter);
1141 	iavf_clear_cloud_filters(adapter);
1142 	iavf_clear_fdir_filters(adapter);
1143 	iavf_clear_adv_rss_conf(adapter);
1144 
1145 	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
1146 	    !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
1147 		/* cancel any current operation */
1148 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1149 		/* Schedule operations to close down the HW. Don't wait
1150 		 * here for this to complete. The watchdog is still running
1151 		 * and it will take care of this.
1152 		 */
1153 		if (!list_empty(&adapter->mac_filter_list))
1154 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1155 		if (!list_empty(&adapter->vlan_filter_list))
1156 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1157 		if (!list_empty(&adapter->cloud_filter_list))
1158 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1159 		if (!list_empty(&adapter->fdir_list_head))
1160 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1161 		if (!list_empty(&adapter->adv_rss_list_head))
1162 			adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1163 		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
1164 	}
1165 
1166 	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
1167 }
1168 
1169 /**
1170  * iavf_acquire_msix_vectors - Setup the MSIX capability
1171  * @adapter: board private structure
1172  * @vectors: number of vectors to request
1173  *
1174  * Work with the OS to set up the MSIX vectors needed.
1175  *
1176  * Returns 0 on success, negative on failure
1177  **/
1178 static int
1179 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1180 {
1181 	int err, vector_threshold;
1182 
1183 	/* We'll want at least 3 (vector_threshold):
1184 	 * 0) Other (Admin Queue and link, mostly)
1185 	 * 1) TxQ[0] Cleanup
1186 	 * 2) RxQ[0] Cleanup
1187 	 */
1188 	vector_threshold = MIN_MSIX_COUNT;
1189 
1190 	/* The more we get, the more we will assign to Tx/Rx Cleanup
1191 	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1192 	 * Right now, we simply care about how many we'll get; we'll
1193 	 * set them up later while requesting irq's.
1194 	 */
1195 	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1196 				    vector_threshold, vectors);
1197 	if (err < 0) {
1198 		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1199 		kfree(adapter->msix_entries);
1200 		adapter->msix_entries = NULL;
1201 		return err;
1202 	}
1203 
1204 	/* Adjust for only the vectors we'll use, which is minimum
1205 	 * of max_msix_q_vectors + NONQ_VECS, or the number of
1206 	 * vectors we were allocated.
1207 	 */
1208 	adapter->num_msix_vectors = err;
1209 	return 0;
1210 }
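
/* Worked example of the vector budget (illustrative numbers, assuming
 * NONQ_VECS is 1 for the admin-queue vector): with 4 active queue pairs,
 * iavf_set_interrupt_capability() asks for 5 vectors. If the host grants
 * only the minimum of 3, the request still succeeds; the 4 queue pairs are
 * then shared round-robin across the 2 remaining traffic vectors by
 * iavf_map_rings_to_vectors().
 */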
1211 
1212 /**
1213  * iavf_free_queues - Free memory for all rings
1214  * @adapter: board private structure to initialize
1215  *
1216  * Free all of the memory associated with queue pairs.
1217  **/
1218 static void iavf_free_queues(struct iavf_adapter *adapter)
1219 {
1220 	if (!adapter->vsi_res)
1221 		return;
1222 	adapter->num_active_queues = 0;
1223 	kfree(adapter->tx_rings);
1224 	adapter->tx_rings = NULL;
1225 	kfree(adapter->rx_rings);
1226 	adapter->rx_rings = NULL;
1227 }
1228 
1229 /**
1230  * iavf_alloc_queues - Allocate memory for all rings
1231  * @adapter: board private structure to initialize
1232  *
1233  * We allocate one ring per queue at run-time since we don't know the
1234  * number of queues at compile-time.  The polling_netdev array is
1235  * intended for Multiqueue, but should work fine with a single queue.
1236  **/
1237 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1238 {
1239 	int i, num_active_queues;
1240 
1241 	/* If we're reallocating queues during a reset, we don't yet know for
1242 	 * certain that the PF gave us the number of queues we asked for, but
1243 	 * we'll assume it did.  Once the basic reset is finished we'll confirm
1244 	 * this when we start negotiating the config with the PF.
1245 	 */
1246 	if (adapter->num_req_queues)
1247 		num_active_queues = adapter->num_req_queues;
1248 	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1249 		 adapter->num_tc)
1250 		num_active_queues = adapter->ch_config.total_qps;
1251 	else
1252 		num_active_queues = min_t(int,
1253 					  adapter->vsi_res->num_queue_pairs,
1254 					  (int)(num_online_cpus()));
1255 
1256 
1257 	adapter->tx_rings = kcalloc(num_active_queues,
1258 				    sizeof(struct iavf_ring), GFP_KERNEL);
1259 	if (!adapter->tx_rings)
1260 		goto err_out;
1261 	adapter->rx_rings = kcalloc(num_active_queues,
1262 				    sizeof(struct iavf_ring), GFP_KERNEL);
1263 	if (!adapter->rx_rings)
1264 		goto err_out;
1265 
1266 	for (i = 0; i < num_active_queues; i++) {
1267 		struct iavf_ring *tx_ring;
1268 		struct iavf_ring *rx_ring;
1269 
1270 		tx_ring = &adapter->tx_rings[i];
1271 
1272 		tx_ring->queue_index = i;
1273 		tx_ring->netdev = adapter->netdev;
1274 		tx_ring->dev = &adapter->pdev->dev;
1275 		tx_ring->count = adapter->tx_desc_count;
1276 		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1277 		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1278 			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1279 
1280 		rx_ring = &adapter->rx_rings[i];
1281 		rx_ring->queue_index = i;
1282 		rx_ring->netdev = adapter->netdev;
1283 		rx_ring->dev = &adapter->pdev->dev;
1284 		rx_ring->count = adapter->rx_desc_count;
1285 		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1286 	}
1287 
1288 	adapter->num_active_queues = num_active_queues;
1289 
1290 	return 0;
1291 
1292 err_out:
1293 	iavf_free_queues(adapter);
1294 	return -ENOMEM;
1295 }
1296 
1297 /**
1298  * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1299  * @adapter: board private structure to initialize
1300  *
1301  * Attempt to configure the interrupts using the best available
1302  * capabilities of the hardware and the kernel.
1303  **/
1304 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1305 {
1306 	int vector, v_budget;
1307 	int pairs = 0;
1308 	int err = 0;
1309 
1310 	if (!adapter->vsi_res) {
1311 		err = -EIO;
1312 		goto out;
1313 	}
1314 	pairs = adapter->num_active_queues;
1315 
1316 	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1317 	 * us much good if we have more vectors than CPUs. However, we already
1318 	 * limit the total number of queues by the number of CPUs so we do not
1319 	 * need any further limiting here.
1320 	 */
1321 	v_budget = min_t(int, pairs + NONQ_VECS,
1322 			 (int)adapter->vf_res->max_vectors);
1323 
1324 	adapter->msix_entries = kcalloc(v_budget,
1325 					sizeof(struct msix_entry), GFP_KERNEL);
1326 	if (!adapter->msix_entries) {
1327 		err = -ENOMEM;
1328 		goto out;
1329 	}
1330 
1331 	for (vector = 0; vector < v_budget; vector++)
1332 		adapter->msix_entries[vector].entry = vector;
1333 
1334 	err = iavf_acquire_msix_vectors(adapter, v_budget);
1335 
1336 out:
1337 	netif_set_real_num_rx_queues(adapter->netdev, pairs);
1338 	netif_set_real_num_tx_queues(adapter->netdev, pairs);
1339 	return err;
1340 }
1341 
1342 /**
1343  * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1344  * @adapter: board private structure
1345  *
1346  * Return 0 on success, negative on failure
1347  **/
1348 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1349 {
1350 	struct iavf_aqc_get_set_rss_key_data *rss_key =
1351 		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1352 	struct iavf_hw *hw = &adapter->hw;
1353 	int ret = 0;
1354 
1355 	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1356 		/* bail because we already have a command pending */
1357 		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1358 			adapter->current_op);
1359 		return -EBUSY;
1360 	}
1361 
1362 	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1363 	if (ret) {
1364 		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1365 			iavf_stat_str(hw, ret),
1366 			iavf_aq_str(hw, hw->aq.asq_last_status));
1367 		return ret;
1368 
1369 	}
1370 
1371 	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1372 				  adapter->rss_lut, adapter->rss_lut_size);
1373 	if (ret) {
1374 		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1375 			iavf_stat_str(hw, ret),
1376 			iavf_aq_str(hw, hw->aq.asq_last_status));
1377 	}
1378 
1379 	return ret;
1380 
1381 }
1382 
1383 /**
1384  * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1385  * @adapter: board private structure
1386  *
1387  * Returns 0 on success, negative on failure
1388  **/
1389 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1390 {
1391 	struct iavf_hw *hw = &adapter->hw;
1392 	u32 *dw;
1393 	u16 i;
1394 
1395 	dw = (u32 *)adapter->rss_key;
1396 	for (i = 0; i <= adapter->rss_key_size / 4; i++)
1397 		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1398 
1399 	dw = (u32 *)adapter->rss_lut;
1400 	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1401 		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1402 
1403 	iavf_flush(hw);
1404 
1405 	return 0;
1406 }
1407 
1408 /**
1409  * iavf_config_rss - Configure RSS keys and lut
1410  * @adapter: board private structure
1411  *
1412  * Returns 0 on success, negative on failure
1413  **/
1414 int iavf_config_rss(struct iavf_adapter *adapter)
1415 {
1416 
1417 	if (RSS_PF(adapter)) {
1418 		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1419 					IAVF_FLAG_AQ_SET_RSS_KEY;
1420 		return 0;
1421 	} else if (RSS_AQ(adapter)) {
1422 		return iavf_config_rss_aq(adapter);
1423 	} else {
1424 		return iavf_config_rss_reg(adapter);
1425 	}
1426 }
1427 
1428 /**
1429  * iavf_fill_rss_lut - Fill the lut with default values
1430  * @adapter: board private structure
1431  **/
1432 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1433 {
1434 	u16 i;
1435 
1436 	for (i = 0; i < adapter->rss_lut_size; i++)
1437 		adapter->rss_lut[i] = i % adapter->num_active_queues;
1438 }
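
/* Example of the default LUT produced above (illustrative): with 4 active
 * queues and a 64-entry LUT the table becomes 0, 1, 2, 3, 0, 1, 2, 3, ...
 * so RSS hash buckets are spread evenly across the enabled queues.
 */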
1439 
1440 /**
1441  * iavf_init_rss - Prepare for RSS
1442  * @adapter: board private structure
1443  *
1444  * Return 0 on success, negative on failure
1445  **/
1446 static int iavf_init_rss(struct iavf_adapter *adapter)
1447 {
1448 	struct iavf_hw *hw = &adapter->hw;
1449 
1450 	if (!RSS_PF(adapter)) {
1451 		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1452 		if (adapter->vf_res->vf_cap_flags &
1453 		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1454 			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1455 		else
1456 			adapter->hena = IAVF_DEFAULT_RSS_HENA;
1457 
1458 		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1459 		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1460 	}
1461 
1462 	iavf_fill_rss_lut(adapter);
1463 	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1464 
1465 	return iavf_config_rss(adapter);
1466 }
1467 
1468 /**
1469  * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1470  * @adapter: board private structure to initialize
1471  *
1472  * We allocate one q_vector per queue interrupt.  If allocation fails we
1473  * return -ENOMEM.
1474  **/
1475 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1476 {
1477 	int q_idx = 0, num_q_vectors;
1478 	struct iavf_q_vector *q_vector;
1479 
1480 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1481 	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1482 				     GFP_KERNEL);
1483 	if (!adapter->q_vectors)
1484 		return -ENOMEM;
1485 
1486 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1487 		q_vector = &adapter->q_vectors[q_idx];
1488 		q_vector->adapter = adapter;
1489 		q_vector->vsi = &adapter->vsi;
1490 		q_vector->v_idx = q_idx;
1491 		q_vector->reg_idx = q_idx;
1492 		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1493 		netif_napi_add(adapter->netdev, &q_vector->napi,
1494 			       iavf_napi_poll, NAPI_POLL_WEIGHT);
1495 	}
1496 
1497 	return 0;
1498 }
1499 
1500 /**
1501  * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1502  * @adapter: board private structure to initialize
1503  *
1504  * This function frees the memory allocated to the q_vectors.  In addition if
1505  * NAPI is enabled it will delete any references to the NAPI struct prior
1506  * to freeing the q_vector.
1507  **/
1508 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1509 {
1510 	int q_idx, num_q_vectors;
1511 
1512 	if (!adapter->q_vectors)
1513 		return;
1514 
1515 	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1516 
1517 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1518 		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1519 
1520 		netif_napi_del(&q_vector->napi);
1521 	}
1522 	kfree(adapter->q_vectors);
1523 	adapter->q_vectors = NULL;
1524 }
1525 
1526 /**
1527  * iavf_reset_interrupt_capability - Reset MSIX setup
1528  * @adapter: board private structure
1529  *
1530  **/
1531 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1532 {
1533 	if (!adapter->msix_entries)
1534 		return;
1535 
1536 	pci_disable_msix(adapter->pdev);
1537 	kfree(adapter->msix_entries);
1538 	adapter->msix_entries = NULL;
1539 }
1540 
1541 /**
1542  * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1543  * @adapter: board private structure to initialize
1544  *
1545  **/
1546 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1547 {
1548 	int err;
1549 
1550 	err = iavf_alloc_queues(adapter);
1551 	if (err) {
1552 		dev_err(&adapter->pdev->dev,
1553 			"Unable to allocate memory for queues\n");
1554 		goto err_alloc_queues;
1555 	}
1556 
1557 	rtnl_lock();
1558 	err = iavf_set_interrupt_capability(adapter);
1559 	rtnl_unlock();
1560 	if (err) {
1561 		dev_err(&adapter->pdev->dev,
1562 			"Unable to setup interrupt capabilities\n");
1563 		goto err_set_interrupt;
1564 	}
1565 
1566 	err = iavf_alloc_q_vectors(adapter);
1567 	if (err) {
1568 		dev_err(&adapter->pdev->dev,
1569 			"Unable to allocate memory for queue vectors\n");
1570 		goto err_alloc_q_vectors;
1571 	}
1572 
1573 	/* If we've made it this far with the ADq flag on, then we haven't
1574 	 * bailed out anywhere along the way. ADq isn't just enabled; its
1575 	 * actual resources have been allocated in the reset path.
1576 	 * Now we can truly claim that ADq is enabled.
1577 	 */
1578 	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1579 	    adapter->num_tc)
1580 		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1581 			 adapter->num_tc);
1582 
1583 	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1584 		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1585 		 adapter->num_active_queues);
1586 
1587 	return 0;
1588 err_alloc_q_vectors:
1589 	iavf_reset_interrupt_capability(adapter);
1590 err_set_interrupt:
1591 	iavf_free_queues(adapter);
1592 err_alloc_queues:
1593 	return err;
1594 }
1595 
1596 /**
1597  * iavf_free_rss - Free memory used by RSS structs
1598  * @adapter: board private structure
1599  **/
1600 static void iavf_free_rss(struct iavf_adapter *adapter)
1601 {
1602 	kfree(adapter->rss_key);
1603 	adapter->rss_key = NULL;
1604 
1605 	kfree(adapter->rss_lut);
1606 	adapter->rss_lut = NULL;
1607 }
1608 
1609 /**
1610  * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1611  * @adapter: board private structure
1612  *
1613  * Returns 0 on success, negative on failure
1614  **/
1615 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1616 {
1617 	struct net_device *netdev = adapter->netdev;
1618 	int err;
1619 
1620 	if (netif_running(netdev))
1621 		iavf_free_traffic_irqs(adapter);
1622 	iavf_free_misc_irq(adapter);
1623 	iavf_reset_interrupt_capability(adapter);
1624 	iavf_free_q_vectors(adapter);
1625 	iavf_free_queues(adapter);
1626 
1627 	err =  iavf_init_interrupt_scheme(adapter);
1628 	if (err)
1629 		goto err;
1630 
1631 	netif_tx_stop_all_queues(netdev);
1632 
1633 	err = iavf_request_misc_irq(adapter);
1634 	if (err)
1635 		goto err;
1636 
1637 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1638 
1639 	iavf_map_rings_to_vectors(adapter);
1640 err:
1641 	return err;
1642 }
1643 
1644 /**
1645  * iavf_process_aq_command - process aq_required flags
1646  * and sends aq command
1647  * @adapter: pointer to iavf adapter structure
1648  *
1649  * Returns 0 on success
1650  * Returns error code if no command was sent
1651  * or error code if the command failed.
1652  **/
1653 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1654 {
1655 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1656 		return iavf_send_vf_config_msg(adapter);
1657 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1658 		iavf_disable_queues(adapter);
1659 		return 0;
1660 	}
1661 
1662 	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
1663 		iavf_map_queues(adapter);
1664 		return 0;
1665 	}
1666 
1667 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
1668 		iavf_add_ether_addrs(adapter);
1669 		return 0;
1670 	}
1671 
1672 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
1673 		iavf_add_vlans(adapter);
1674 		return 0;
1675 	}
1676 
1677 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
1678 		iavf_del_ether_addrs(adapter);
1679 		return 0;
1680 	}
1681 
1682 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
1683 		iavf_del_vlans(adapter);
1684 		return 0;
1685 	}
1686 
1687 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
1688 		iavf_enable_vlan_stripping(adapter);
1689 		return 0;
1690 	}
1691 
1692 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
1693 		iavf_disable_vlan_stripping(adapter);
1694 		return 0;
1695 	}
1696 
1697 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
1698 		iavf_configure_queues(adapter);
1699 		return 0;
1700 	}
1701 
1702 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
1703 		iavf_enable_queues(adapter);
1704 		return 0;
1705 	}
1706 
1707 	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
1708 		/* This message goes straight to the firmware, not the
1709 		 * PF, so we don't have to set current_op as we will
1710 		 * not get a response through the ARQ.
1711 		 */
1712 		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
1713 		return 0;
1714 	}
1715 	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
1716 		iavf_get_hena(adapter);
1717 		return 0;
1718 	}
1719 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
1720 		iavf_set_hena(adapter);
1721 		return 0;
1722 	}
1723 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
1724 		iavf_set_rss_key(adapter);
1725 		return 0;
1726 	}
1727 	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
1728 		iavf_set_rss_lut(adapter);
1729 		return 0;
1730 	}
1731 
1732 	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
1733 		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
1734 				       FLAG_VF_MULTICAST_PROMISC);
1735 		return 0;
1736 	}
1737 
1738 	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
1739 		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
1740 		return 0;
1741 	}
1742 	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
1743 	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1744 		iavf_set_promiscuous(adapter, 0);
1745 		return 0;
1746 	}
1747 
1748 	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
1749 		iavf_enable_channels(adapter);
1750 		return 0;
1751 	}
1752 
1753 	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
1754 		iavf_disable_channels(adapter);
1755 		return 0;
1756 	}
1757 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
1758 		iavf_add_cloud_filter(adapter);
1759 		return 0;
1760 	}
1761 
1762 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
1763 		iavf_del_cloud_filter(adapter);
1764 		return 0;
1765 	}
1774 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
1775 		iavf_add_fdir_filter(adapter);
1776 		return IAVF_SUCCESS;
1777 	}
1778 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
1779 		iavf_del_fdir_filter(adapter);
1780 		return IAVF_SUCCESS;
1781 	}
1782 	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
1783 		iavf_add_adv_rss_cfg(adapter);
1784 		return 0;
1785 	}
1786 	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
1787 		iavf_del_adv_rss_cfg(adapter);
1788 		return 0;
1789 	}
1790 	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
1791 		iavf_request_stats(adapter);
1792 		return 0;
1793 	}
1794 
1795 	return -EAGAIN;
1796 }
1797 
1798 /**
1799  * iavf_startup - first step of driver startup
1800  * @adapter: board private structure
1801  *
1802  * Function processes the __IAVF_STARTUP driver state.
1803  * On success the state is changed to __IAVF_INIT_VERSION_CHECK,
1804  * on failure the state is changed to __IAVF_INIT_FAILED.
1805  **/
1806 static void iavf_startup(struct iavf_adapter *adapter)
1807 {
1808 	struct pci_dev *pdev = adapter->pdev;
1809 	struct iavf_hw *hw = &adapter->hw;
1810 	int err;
1811 
1812 	WARN_ON(adapter->state != __IAVF_STARTUP);
1813 
1814 	/* driver loaded, probe complete */
1815 	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
1816 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
1817 	err = iavf_set_mac_type(hw);
1818 	if (err) {
1819 		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
1820 		goto err;
1821 	}
1822 
1823 	err = iavf_check_reset_complete(hw);
1824 	if (err) {
1825 		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
1826 			 err);
1827 		goto err;
1828 	}
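	/* Size the admin send/receive queues and their buffers before
	 * bringing the admin queue up.
	 */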
1829 	hw->aq.num_arq_entries = IAVF_AQ_LEN;
1830 	hw->aq.num_asq_entries = IAVF_AQ_LEN;
1831 	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1832 	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1833 
1834 	err = iavf_init_adminq(hw);
1835 	if (err) {
1836 		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
1837 		goto err;
1838 	}
1839 	err = iavf_send_api_ver(adapter);
1840 	if (err) {
1841 		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
1842 		iavf_shutdown_adminq(hw);
1843 		goto err;
1844 	}
1845 	iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
1846 	return;
1847 err:
1848 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
1849 }
1850 
1851 /**
1852  * iavf_init_version_check - second step of driver startup
1853  * @adapter: board private structure
1854  *
1855  * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
1856  * On success the state is changed to __IAVF_INIT_GET_RESOURCES,
1857  * on failure the state is changed to __IAVF_INIT_FAILED.
1858  **/
1859 static void iavf_init_version_check(struct iavf_adapter *adapter)
1860 {
1861 	struct pci_dev *pdev = adapter->pdev;
1862 	struct iavf_hw *hw = &adapter->hw;
1863 	int err = -EAGAIN;
1864 
1865 	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
1866 
1867 	if (!iavf_asq_done(hw)) {
1868 		dev_err(&pdev->dev, "Admin queue command never completed\n");
1869 		iavf_shutdown_adminq(hw);
1870 		iavf_change_state(adapter, __IAVF_STARTUP);
1871 		goto err;
1872 	}
1873 
1874 	/* aq msg sent, awaiting reply */
1875 	err = iavf_verify_api_ver(adapter);
1876 	if (err) {
1877 		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
1878 			err = iavf_send_api_ver(adapter);
1879 		else
1880 			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
1881 				adapter->pf_version.major,
1882 				adapter->pf_version.minor,
1883 				VIRTCHNL_VERSION_MAJOR,
1884 				VIRTCHNL_VERSION_MINOR);
1885 		goto err;
1886 	}
1887 	err = iavf_send_vf_config_msg(adapter);
1888 	if (err) {
1889 		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
1890 			err);
1891 		goto err;
1892 	}
1893 	iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
1894 	return;
1895 err:
1896 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
1897 }
1898 
1899 /**
1900  * iavf_init_get_resources - third step of driver startup
1901  * @adapter: board private structure
1902  *
1903  * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
1904  * finishes the driver initialization procedure.
1905  * On success the state is changed to __IAVF_DOWN,
1906  * on failure the state is changed to __IAVF_INIT_FAILED.
1907  **/
1908 static void iavf_init_get_resources(struct iavf_adapter *adapter)
1909 {
1910 	struct net_device *netdev = adapter->netdev;
1911 	struct pci_dev *pdev = adapter->pdev;
1912 	struct iavf_hw *hw = &adapter->hw;
1913 	int err;
1914 
1915 	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
1916 	/* aq msg sent, awaiting reply */
1917 	if (!adapter->vf_res) {
1918 		adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
1919 					  GFP_KERNEL);
1920 		if (!adapter->vf_res) {
1921 			err = -ENOMEM;
1922 			goto err;
1923 		}
1924 	}
1925 	err = iavf_get_vf_config(adapter);
1926 	if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
1927 		err = iavf_send_vf_config_msg(adapter);
1928 		goto err;
1929 	} else if (err == IAVF_ERR_PARAM) {
1930 		/* We only get ERR_PARAM if the device is in a very bad
1931 		 * state or if we've been disabled for previous bad
1932 		 * behavior. Either way, we're done now.
1933 		 */
1934 		iavf_shutdown_adminq(hw);
1935 		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
1936 		return;
1937 	}
1938 	if (err) {
1939 		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
1940 		goto err_alloc;
1941 	}
1942 
1943 	err = iavf_process_config(adapter);
1944 	if (err)
1945 		goto err_alloc;
1946 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1947 
1948 	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
1949 
1950 	netdev->netdev_ops = &iavf_netdev_ops;
1951 	iavf_set_ethtool_ops(netdev);
1952 	netdev->watchdog_timeo = 5 * HZ;
1953 
1954 	/* MTU range: 68 - 9710 */
1955 	netdev->min_mtu = ETH_MIN_MTU;
1956 	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
1957 
1958 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1959 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
1960 			 adapter->hw.mac.addr);
1961 		eth_hw_addr_random(netdev);
1962 		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1963 	} else {
1964 		eth_hw_addr_set(netdev, adapter->hw.mac.addr);
1965 		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
1966 	}
1967 
1968 	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
1969 	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
1970 	err = iavf_init_interrupt_scheme(adapter);
1971 	if (err)
1972 		goto err_sw_init;
1973 	iavf_map_rings_to_vectors(adapter);
1974 	if (adapter->vf_res->vf_cap_flags &
1975 		VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1976 		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
1977 
1978 	err = iavf_request_misc_irq(adapter);
1979 	if (err)
1980 		goto err_sw_init;
1981 
1982 	netif_carrier_off(netdev);
1983 	adapter->link_up = false;
1984 
1985 	/* hold the rtnl lock to prevent any callbacks from running after
1986 	 * device registration until the driver state is set to __IAVF_DOWN
1987 	 */
1988 	rtnl_lock();
1989 	if (!adapter->netdev_registered) {
1990 		err = register_netdevice(netdev);
1991 		if (err) {
1992 			rtnl_unlock();
1993 			goto err_register;
1994 		}
1995 	}
1996 
1997 	adapter->netdev_registered = true;
1998 
1999 	netif_tx_stop_all_queues(netdev);
2000 	if (CLIENT_ALLOWED(adapter)) {
2001 		err = iavf_lan_add_device(adapter);
2002 		if (err)
2003 			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2004 				 err);
2005 	}
2006 	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2007 	if (netdev->features & NETIF_F_GRO)
2008 		dev_info(&pdev->dev, "GRO is enabled\n");
2009 
2010 	iavf_change_state(adapter, __IAVF_DOWN);
2011 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2012 	rtnl_unlock();
2013 
2014 	iavf_misc_irq_enable(adapter);
2015 	wake_up(&adapter->down_waitqueue);
2016 
2017 	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2018 	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2019 	if (!adapter->rss_key || !adapter->rss_lut) {
2020 		err = -ENOMEM;
2021 		goto err_mem;
2022 	}
2023 	if (RSS_AQ(adapter))
2024 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2025 	else
2026 		iavf_init_rss(adapter);
2027 
2028 	return;
2029 err_mem:
2030 	iavf_free_rss(adapter);
2031 err_register:
2032 	iavf_free_misc_irq(adapter);
2033 err_sw_init:
2034 	iavf_reset_interrupt_capability(adapter);
2035 err_alloc:
2036 	kfree(adapter->vf_res);
2037 	adapter->vf_res = NULL;
2038 err:
2039 	iavf_change_state(adapter, __IAVF_INIT_FAILED);
2040 }
2041 
2042 /**
2043  * iavf_watchdog_task - Periodic call-back task
2044  * @work: pointer to work_struct
2045  **/
2046 static void iavf_watchdog_task(struct work_struct *work)
2047 {
2048 	struct iavf_adapter *adapter = container_of(work,
2049 						    struct iavf_adapter,
2050 						    watchdog_task.work);
2051 	struct iavf_hw *hw = &adapter->hw;
2052 	u32 reg_val;
2053 
2054 	if (!mutex_trylock(&adapter->crit_lock)) {
2055 		if (adapter->state == __IAVF_REMOVE)
2056 			return;
2057 
2058 		goto restart_watchdog;
2059 	}
2060 
2061 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2062 		iavf_change_state(adapter, __IAVF_COMM_FAILED);
2063 
2064 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2065 		adapter->aq_required = 0;
2066 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2067 		mutex_unlock(&adapter->crit_lock);
2068 		queue_work(iavf_wq, &adapter->reset_task);
2069 		return;
2070 	}
2071 
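	/* Drive the driver state machine: init-time states are handled by
	 * their own helpers and reschedule this watchdog with short delays,
	 * while steady states fall through to the admin queue and hardware
	 * reset checks below.
	 */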
2072 	switch (adapter->state) {
2073 	case __IAVF_STARTUP:
2074 		iavf_startup(adapter);
2075 		mutex_unlock(&adapter->crit_lock);
2076 		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2077 				   msecs_to_jiffies(30));
2078 		return;
2079 	case __IAVF_INIT_VERSION_CHECK:
2080 		iavf_init_version_check(adapter);
2081 		mutex_unlock(&adapter->crit_lock);
2082 		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2083 				   msecs_to_jiffies(30));
2084 		return;
2085 	case __IAVF_INIT_GET_RESOURCES:
2086 		iavf_init_get_resources(adapter);
2087 		mutex_unlock(&adapter->crit_lock);
2088 		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2089 				   msecs_to_jiffies(1));
2090 		return;
2091 	case __IAVF_INIT_FAILED:
2092 		if (test_bit(__IAVF_IN_REMOVE_TASK,
2093 			     &adapter->crit_section)) {
2094 			/* Do not update the state and do not reschedule
2095 			 * watchdog task, iavf_remove should handle this state
2096 			 * as it can loop forever
2097 			 */
2098 			mutex_unlock(&adapter->crit_lock);
2099 			return;
2100 		}
2101 		if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2102 			dev_err(&adapter->pdev->dev,
2103 				"Failed to communicate with PF; waiting before retry\n");
2104 			adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2105 			iavf_shutdown_adminq(hw);
2106 			mutex_unlock(&adapter->crit_lock);
2107 			queue_delayed_work(iavf_wq,
2108 					   &adapter->watchdog_task, (5 * HZ));
2109 			return;
2110 		}
2111 		/* Try again from failed step */
2112 		iavf_change_state(adapter, adapter->last_state);
2113 		mutex_unlock(&adapter->crit_lock);
2114 		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
2115 		return;
2116 	case __IAVF_COMM_FAILED:
2117 		if (test_bit(__IAVF_IN_REMOVE_TASK,
2118 			     &adapter->crit_section)) {
2119 			/* Set state to __IAVF_INIT_FAILED and perform remove
2120 			 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2121 			 * doesn't bring the state back to __IAVF_COMM_FAILED.
2122 			 */
2123 			iavf_change_state(adapter, __IAVF_INIT_FAILED);
2124 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2125 			mutex_unlock(&adapter->crit_lock);
2126 			return;
2127 		}
2128 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2129 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2130 		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2131 		    reg_val == VIRTCHNL_VFR_COMPLETED) {
2132 			/* A chance for redemption! */
2133 			dev_err(&adapter->pdev->dev,
2134 				"Hardware came out of reset. Attempting reinit.\n");
2135 			/* When init task contacts the PF and
2136 			 * gets everything set up again, it'll restart the
2137 			 * watchdog for us. Down, boy. Sit. Stay. Woof.
2138 			 */
2139 			iavf_change_state(adapter, __IAVF_STARTUP);
2140 			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2141 		}
2142 		adapter->aq_required = 0;
2143 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2144 		mutex_unlock(&adapter->crit_lock);
2145 		queue_delayed_work(iavf_wq,
2146 				   &adapter->watchdog_task,
2147 				   msecs_to_jiffies(10));
2148 		return;
2149 	case __IAVF_RESETTING:
2150 		mutex_unlock(&adapter->crit_lock);
2151 		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2152 		return;
2153 	case __IAVF_DOWN:
2154 	case __IAVF_DOWN_PENDING:
2155 	case __IAVF_TESTING:
2156 	case __IAVF_RUNNING:
2157 		if (adapter->current_op) {
2158 			if (!iavf_asq_done(hw)) {
2159 				dev_dbg(&adapter->pdev->dev,
2160 					"Admin queue timeout\n");
2161 				iavf_send_api_ver(adapter);
2162 			}
2163 		} else {
2164 			/* An error will be returned if no commands were
2165 			 * processed; use this opportunity to update stats
2166 			 */
2167 			if (iavf_process_aq_command(adapter) &&
2168 			    adapter->state == __IAVF_RUNNING)
2169 				iavf_request_stats(adapter);
2170 		}
2171 		if (adapter->state == __IAVF_RUNNING)
2172 			iavf_detect_recover_hung(&adapter->vsi);
2173 		break;
2174 	case __IAVF_REMOVE:
2175 	default:
2176 		mutex_unlock(&adapter->crit_lock);
2177 		return;
2178 	}
2179 
2180 	/* check for hw reset */
2181 	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2182 	if (!reg_val) {
2183 		adapter->flags |= IAVF_FLAG_RESET_PENDING;
2184 		adapter->aq_required = 0;
2185 		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2186 		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2187 		queue_work(iavf_wq, &adapter->reset_task);
2188 		mutex_unlock(&adapter->crit_lock);
2189 		queue_delayed_work(iavf_wq,
2190 				   &adapter->watchdog_task, HZ * 2);
2191 		return;
2192 	}
2193 
2194 	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2195 	mutex_unlock(&adapter->crit_lock);
2196 restart_watchdog:
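	/* Only kick the adminq task once the driver has reached __IAVF_DOWN
	 * or a later state, i.e. after initialization has completed and the
	 * admin queue is usable.
	 */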
2197 	if (adapter->state >= __IAVF_DOWN)
2198 		queue_work(iavf_wq, &adapter->adminq_task);
2199 	if (adapter->aq_required)
2200 		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2201 				   msecs_to_jiffies(20));
2202 	else
2203 		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2204 }
2205 
2206 /**
2207  * iavf_disable_vf - disable VF
2208  * @adapter: board private structure
2209  *
2210  * Set communication failed flag and free all resources.
2211  * NOTE: This function is expected to be called with crit_lock being held.
2212  **/
2213 static void iavf_disable_vf(struct iavf_adapter *adapter)
2214 {
2215 	struct iavf_mac_filter *f, *ftmp;
2216 	struct iavf_vlan_filter *fv, *fvtmp;
2217 	struct iavf_cloud_filter *cf, *cftmp;
2218 
2219 	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2220 
2221 	/* We don't use netif_running() because it may be true prior to
2222 	 * ndo_open() returning, so we can't assume it means all our open
2223 	 * tasks have finished, since we're not holding the rtnl_lock here.
2224 	 */
2225 	if (adapter->state == __IAVF_RUNNING) {
2226 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2227 		netif_carrier_off(adapter->netdev);
2228 		netif_tx_disable(adapter->netdev);
2229 		adapter->link_up = false;
2230 		iavf_napi_disable_all(adapter);
2231 		iavf_irq_disable(adapter);
2232 		iavf_free_traffic_irqs(adapter);
2233 		iavf_free_all_tx_resources(adapter);
2234 		iavf_free_all_rx_resources(adapter);
2235 	}
2236 
2237 	spin_lock_bh(&adapter->mac_vlan_list_lock);
2238 
2239 	/* Delete all of the filters */
2240 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2241 		list_del(&f->list);
2242 		kfree(f);
2243 	}
2244 
2245 	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2246 		list_del(&fv->list);
2247 		kfree(fv);
2248 	}
2249 
2250 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
2251 
2252 	spin_lock_bh(&adapter->cloud_filter_list_lock);
2253 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2254 		list_del(&cf->list);
2255 		kfree(cf);
2256 		adapter->num_cloud_filters--;
2257 	}
2258 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
2259 
2260 	iavf_free_misc_irq(adapter);
2261 	iavf_reset_interrupt_capability(adapter);
2262 	iavf_free_q_vectors(adapter);
2263 	iavf_free_queues(adapter);
2264 	memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2265 	iavf_shutdown_adminq(&adapter->hw);
2266 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2267 	iavf_change_state(adapter, __IAVF_DOWN);
2268 	wake_up(&adapter->down_waitqueue);
2269 	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2270 }
2271 
2272 /**
2273  * iavf_reset_task - Call-back task to handle hardware reset
2274  * @work: pointer to work_struct
2275  *
2276  * During reset we need to shut down and reinitialize the admin queue
2277  * before we can use it to communicate with the PF again. We also clear
2278  * and reinit the rings because that context is lost as well.
2279  **/
2280 static void iavf_reset_task(struct work_struct *work)
2281 {
2282 	struct iavf_adapter *adapter = container_of(work,
2283 						      struct iavf_adapter,
2284 						      reset_task);
2285 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
2286 	struct net_device *netdev = adapter->netdev;
2287 	struct iavf_hw *hw = &adapter->hw;
2288 	struct iavf_mac_filter *f, *ftmp;
2289 	struct iavf_cloud_filter *cf;
2290 	u32 reg_val;
2291 	int i = 0, err;
2292 	bool running;
2293 
2294 	/* Detach interface to avoid subsequent NDO callbacks */
2295 	rtnl_lock();
2296 	netif_device_detach(netdev);
2297 	rtnl_unlock();
2298 
2299 	/* When the device is being removed it doesn't make sense to run the
2300 	 * reset task; just return in that case.
2301 	 */
2302 	if (!mutex_trylock(&adapter->crit_lock)) {
2303 		if (adapter->state != __IAVF_REMOVE)
2304 			queue_work(iavf_wq, &adapter->reset_task);
2305 
2306 		goto reset_finish;
2307 	}
2308 
2309 	while (!mutex_trylock(&adapter->client_lock))
2310 		usleep_range(500, 1000);
2311 	if (CLIENT_ENABLED(adapter)) {
2312 		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2313 				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2314 				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2315 				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2316 		cancel_delayed_work_sync(&adapter->client_task);
2317 		iavf_notify_client_close(&adapter->vsi, true);
2318 	}
2319 	iavf_misc_irq_disable(adapter);
2320 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2321 		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2322 		/* Restart the AQ here. If we have been reset but didn't
2323 		 * detect it, or if the PF had to reinit, our AQ will be hosed.
2324 		 */
2325 		iavf_shutdown_adminq(hw);
2326 		iavf_init_adminq(hw);
2327 		iavf_request_reset(adapter);
2328 	}
2329 	adapter->flags |= IAVF_FLAG_RESET_PENDING;
2330 
2331 	/* poll until we see the reset actually happen */
2332 	for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2333 		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2334 			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2335 		if (!reg_val)
2336 			break;
2337 		usleep_range(5000, 10000);
2338 	}
2339 	if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2340 		dev_info(&adapter->pdev->dev, "Never saw reset\n");
2341 		goto continue_reset; /* act like the reset happened */
2342 	}
2343 
2344 	/* wait until the reset is complete and the PF is responding to us */
2345 	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2346 		/* sleep first to make sure a minimum wait time is met */
2347 		msleep(IAVF_RESET_WAIT_MS);
2348 
2349 		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2350 			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2351 		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
2352 			break;
2353 	}
2354 
2355 	pci_set_master(adapter->pdev);
2356 	pci_restore_msi_state(adapter->pdev);
2357 
2358 	if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2359 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
2360 			reg_val);
2361 		iavf_disable_vf(adapter);
2362 		mutex_unlock(&adapter->client_lock);
2363 		mutex_unlock(&adapter->crit_lock);
2364 		if (netif_running(netdev)) {
2365 			rtnl_lock();
2366 			dev_close(netdev);
2367 			rtnl_unlock();
2368 		}
2369 		return; /* Do not attempt to reinit. It's dead, Jim. */
2370 	}
2371 
2372 continue_reset:
2373 	/* We don't use netif_running() because it may be true prior to
2374 	 * ndo_open() returning, so we can't assume it means all our open
2375 	 * tasks have finished, since we're not holding the rtnl_lock here.
2376 	 */
2377 	running = adapter->state == __IAVF_RUNNING;
2378 
2379 	if (running) {
2380 		netif_carrier_off(netdev);
2381 		netif_tx_stop_all_queues(netdev);
2382 		adapter->link_up = false;
2383 		iavf_napi_disable_all(adapter);
2384 	}
2385 	iavf_irq_disable(adapter);
2386 
2387 	iavf_change_state(adapter, __IAVF_RESETTING);
2388 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2389 
2390 	/* free the Tx/Rx rings and descriptors, might be better to just
2391 	 * re-use them sometime in the future
2392 	 */
2393 	iavf_free_all_rx_resources(adapter);
2394 	iavf_free_all_tx_resources(adapter);
2395 
2396 	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
2397 	/* kill and reinit the admin queue */
2398 	iavf_shutdown_adminq(hw);
2399 	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2400 	err = iavf_init_adminq(hw);
2401 	if (err)
2402 		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2403 			 err);
2404 	adapter->aq_required = 0;
2405 
2406 	if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2407 		err = iavf_reinit_interrupt_scheme(adapter);
2408 		if (err)
2409 			goto reset_err;
2410 	}
2411 
2412 	if (RSS_AQ(adapter)) {
2413 		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2414 	} else {
2415 		err = iavf_init_rss(adapter);
2416 		if (err)
2417 			goto reset_err;
2418 	}
2419 
2420 	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
2421 	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
2422 
2423 	spin_lock_bh(&adapter->mac_vlan_list_lock);
2424 
2425 	/* Delete the filter for the current MAC address; it could have
2426 	 * been changed by the PF via an administratively set MAC.
2427 	 * It will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
2428 	 */
2429 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2430 		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
2431 			list_del(&f->list);
2432 			kfree(f);
2433 		}
2434 	}
2435 	/* re-add all MAC filters */
2436 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
2437 		f->add = true;
2438 	}
2439 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
2440 
2441 	/* check if TCs are running and re-add all cloud filters */
2442 	spin_lock_bh(&adapter->cloud_filter_list_lock);
2443 	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2444 	    adapter->num_tc) {
2445 		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2446 			cf->add = true;
2447 		}
2448 	}
2449 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
2450 
2451 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2452 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2453 	iavf_misc_irq_enable(adapter);
2454 
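	/* Schedule the watchdog almost immediately (2 jiffies) so that the
	 * aq_required flags set above are acted on without waiting for the
	 * normal polling interval.
	 */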
2455 	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
2456 
2457 	/* We were running when the reset started, so we need to restore some
2458 	 * state here.
2459 	 */
2460 	if (running) {
2461 		/* allocate transmit descriptors */
2462 		err = iavf_setup_all_tx_resources(adapter);
2463 		if (err)
2464 			goto reset_err;
2465 
2466 		/* allocate receive descriptors */
2467 		err = iavf_setup_all_rx_resources(adapter);
2468 		if (err)
2469 			goto reset_err;
2470 
2471 		if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2472 			err = iavf_request_traffic_irqs(adapter, netdev->name);
2473 			if (err)
2474 				goto reset_err;
2475 
2476 			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2477 		}
2478 
2479 		iavf_configure(adapter);
2480 
2481 		/* iavf_up_complete() will switch device back
2482 		 * to __IAVF_RUNNING
2483 		 */
2484 		iavf_up_complete(adapter);
2485 
2486 		iavf_irq_enable(adapter, true);
2487 	} else {
2488 		iavf_change_state(adapter, __IAVF_DOWN);
2489 		wake_up(&adapter->down_waitqueue);
2490 	}
2491 	mutex_unlock(&adapter->client_lock);
2492 	mutex_unlock(&adapter->crit_lock);
2493 
2494 	goto reset_finish;
2495 reset_err:
2496 	if (running) {
2497 		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2498 		iavf_free_traffic_irqs(adapter);
2499 	}
2500 	iavf_disable_vf(adapter);
2501 
2502 	mutex_unlock(&adapter->client_lock);
2503 	mutex_unlock(&adapter->crit_lock);
2504 
2505 	if (netif_running(netdev)) {
2506 		/* Close device to ensure that Tx queues will not be started
2507 		 * during netif_device_attach() at the end of the reset task.
2508 		 */
2509 		rtnl_lock();
2510 		dev_close(netdev);
2511 		rtnl_unlock();
2512 	}
2513 
2514 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
2515 reset_finish:
2516 	rtnl_lock();
2517 	netif_device_attach(netdev);
2518 	rtnl_unlock();
2519 }
2520 
2521 /**
2522  * iavf_adminq_task - worker thread to clean the admin queue
2523  * @work: pointer to work_struct containing our data
2524  **/
2525 static void iavf_adminq_task(struct work_struct *work)
2526 {
2527 	struct iavf_adapter *adapter =
2528 		container_of(work, struct iavf_adapter, adminq_task);
2529 	struct iavf_hw *hw = &adapter->hw;
2530 	struct iavf_arq_event_info event;
2531 	enum virtchnl_ops v_op;
2532 	enum iavf_status ret, v_ret;
2533 	u32 val, oldval;
2534 	u16 pending;
2535 
2536 	if (!mutex_trylock(&adapter->crit_lock)) {
2537 		if (adapter->state == __IAVF_REMOVE)
2538 			return;
2539 
2540 		queue_work(iavf_wq, &adapter->adminq_task);
2541 		goto out;
2542 	}
2543 
2544 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2545 		goto unlock;
2546 
2547 	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
2548 	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
2549 	if (!event.msg_buf)
2550 		goto unlock;
2551 
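	/* Drain the admin receive queue: each cleaned element carries the
	 * virtchnl opcode in cookie_high and the status in cookie_low, plus
	 * an optional message buffer passed to the completion handler.
	 */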
2552 	do {
2553 		ret = iavf_clean_arq_element(hw, &event, &pending);
2554 		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2555 		v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
2556 
2557 		if (ret || !v_op)
2558 			break; /* No event to process or error cleaning ARQ */
2559 
2560 		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2561 					 event.msg_len);
2562 		if (pending != 0)
2563 			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
2564 	} while (pending);
2565 
2566 	if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
2567 		if (adapter->netdev_registered ||
2568 		    !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
2569 			struct net_device *netdev = adapter->netdev;
2570 
2571 			rtnl_lock();
2572 			netdev_update_features(netdev);
2573 			rtnl_unlock();
2574 		}
2575 
2576 		adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
2577 	}
2578 	if ((adapter->flags &
2579 	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
2580 	    adapter->state == __IAVF_RESETTING)
2581 		goto freedom;
2582 
2583 	/* check for error indications */
2584 	val = rd32(hw, hw->aq.arq.len);
2585 	if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
2586 		goto freedom;
2587 	oldval = val;
2588 	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
2589 		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2590 		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
2591 	}
2592 	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
2593 		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2594 		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
2595 	}
2596 	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
2597 		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2598 		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
2599 	}
2600 	if (oldval != val)
2601 		wr32(hw, hw->aq.arq.len, val);
2602 
2603 	val = rd32(hw, hw->aq.asq.len);
2604 	oldval = val;
2605 	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
2606 		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2607 		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
2608 	}
2609 	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
2610 		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2611 		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
2612 	}
2613 	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
2614 		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2615 		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
2616 	}
2617 	if (oldval != val)
2618 		wr32(hw, hw->aq.asq.len, val);
2619 
2620 freedom:
2621 	kfree(event.msg_buf);
2622 unlock:
2623 	mutex_unlock(&adapter->crit_lock);
2624 out:
2625 	/* re-enable Admin queue interrupt cause */
2626 	iavf_misc_irq_enable(adapter);
2627 }
2628 
2629 /**
2630  * iavf_client_task - worker thread to perform client work
2631  * @work: pointer to work_struct containing our data
2632  *
2633  * This task handles client interactions. Because client calls can be
2634  * reentrant, we can't handle them in the watchdog.
2635  **/
2636 static void iavf_client_task(struct work_struct *work)
2637 {
2638 	struct iavf_adapter *adapter =
2639 		container_of(work, struct iavf_adapter, client_task.work);
2640 
2641 	/* If we can't get the client bit, just give up. We'll be rescheduled
2642 	 * later.
2643 	 */
2644 
2645 	if (!mutex_trylock(&adapter->client_lock))
2646 		return;
2647 
2648 	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2649 		iavf_client_subtask(adapter);
2650 		adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2651 		goto out;
2652 	}
2653 	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2654 		iavf_notify_client_l2_params(&adapter->vsi);
2655 		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2656 		goto out;
2657 	}
2658 	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2659 		iavf_notify_client_close(&adapter->vsi, false);
2660 		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2661 		goto out;
2662 	}
2663 	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2664 		iavf_notify_client_open(&adapter->vsi);
2665 		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2666 	}
2667 out:
2668 	mutex_unlock(&adapter->client_lock);
2669 }
2670 
2671 /**
2672  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
2673  * @adapter: board private structure
2674  *
2675  * Free all transmit software resources
2676  **/
2677 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2678 {
2679 	int i;
2680 
2681 	if (!adapter->tx_rings)
2682 		return;
2683 
2684 	for (i = 0; i < adapter->num_active_queues; i++)
2685 		if (adapter->tx_rings[i].desc)
2686 			iavf_free_tx_resources(&adapter->tx_rings[i]);
2687 }
2688 
2689 /**
2690  * iavf_setup_all_tx_resources - allocate all queues Tx resources
2691  * @adapter: board private structure
2692  *
2693  * If this function returns with an error, then it's possible one or
2694  * more of the rings is populated (while the rest are not).  It is the
2695  * caller's duty to clean those orphaned rings.
2696  *
2697  * Return 0 on success, negative on failure
2698  **/
2699 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2700 {
2701 	int i, err = 0;
2702 
2703 	for (i = 0; i < adapter->num_active_queues; i++) {
2704 		adapter->tx_rings[i].count = adapter->tx_desc_count;
2705 		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2706 		if (!err)
2707 			continue;
2708 		dev_err(&adapter->pdev->dev,
2709 			"Allocation for Tx Queue %u failed\n", i);
2710 		break;
2711 	}
2712 
2713 	return err;
2714 }
2715 
2716 /**
2717  * iavf_setup_all_rx_resources - allocate all queues Rx resources
2718  * @adapter: board private structure
2719  *
2720  * If this function returns with an error, then it's possible one or
2721  * more of the rings is populated (while the rest are not).  It is the
2722  * caller's duty to clean those orphaned rings.
2723  *
2724  * Return 0 on success, negative on failure
2725  **/
2726 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
2727 {
2728 	int i, err = 0;
2729 
2730 	for (i = 0; i < adapter->num_active_queues; i++) {
2731 		adapter->rx_rings[i].count = adapter->rx_desc_count;
2732 		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
2733 		if (!err)
2734 			continue;
2735 		dev_err(&adapter->pdev->dev,
2736 			"Allocation for Rx Queue %u failed\n", i);
2737 		break;
2738 	}
2739 	return err;
2740 }
2741 
2742 /**
2743  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
2744  * @adapter: board private structure
2745  *
2746  * Free all receive software resources
2747  **/
2748 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
2749 {
2750 	int i;
2751 
2752 	if (!adapter->rx_rings)
2753 		return;
2754 
2755 	for (i = 0; i < adapter->num_active_queues; i++)
2756 		if (adapter->rx_rings[i].desc)
2757 			iavf_free_rx_resources(&adapter->rx_rings[i]);
2758 }
2759 
2760 /**
2761  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
2762  * @adapter: board private structure
2763  * @max_tx_rate: max Tx bw for a tc
2764  **/
2765 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
2766 				      u64 max_tx_rate)
2767 {
2768 	int speed = 0, ret = 0;
2769 
2770 	if (ADV_LINK_SUPPORT(adapter)) {
2771 		if (adapter->link_speed_mbps < U32_MAX) {
2772 			speed = adapter->link_speed_mbps;
2773 			goto validate_bw;
2774 		} else {
2775 			dev_err(&adapter->pdev->dev, "Unknown link speed\n");
2776 			return -EINVAL;
2777 		}
2778 	}
2779 
2780 	switch (adapter->link_speed) {
2781 	case VIRTCHNL_LINK_SPEED_40GB:
2782 		speed = SPEED_40000;
2783 		break;
2784 	case VIRTCHNL_LINK_SPEED_25GB:
2785 		speed = SPEED_25000;
2786 		break;
2787 	case VIRTCHNL_LINK_SPEED_20GB:
2788 		speed = SPEED_20000;
2789 		break;
2790 	case VIRTCHNL_LINK_SPEED_10GB:
2791 		speed = SPEED_10000;
2792 		break;
2793 	case VIRTCHNL_LINK_SPEED_5GB:
2794 		speed = SPEED_5000;
2795 		break;
2796 	case VIRTCHNL_LINK_SPEED_2_5GB:
2797 		speed = SPEED_2500;
2798 		break;
2799 	case VIRTCHNL_LINK_SPEED_1GB:
2800 		speed = SPEED_1000;
2801 		break;
2802 	case VIRTCHNL_LINK_SPEED_100MB:
2803 		speed = SPEED_100;
2804 		break;
2805 	default:
2806 		break;
2807 	}
2808 
2809 validate_bw:
2810 	if (max_tx_rate > speed) {
2811 		dev_err(&adapter->pdev->dev,
2812 			"Invalid tx rate specified\n");
2813 		ret = -EINVAL;
2814 	}
2815 
2816 	return ret;
2817 }
2818 
2819 /**
2820  * iavf_validate_ch_config - validate queue mapping info
2821  * @adapter: board private structure
2822  * @mqprio_qopt: queue parameters
2823  *
2824  * This function checks whether the config provided by the user to
2825  * configure queue channels is valid. Returns 0 on a valid
2826  * config.
2827  **/
2828 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
2829 				   struct tc_mqprio_qopt_offload *mqprio_qopt)
2830 {
2831 	u64 total_max_rate = 0;
2832 	u32 tx_rate_rem = 0;
2833 	int i, num_qps = 0;
2834 	u64 tx_rate = 0;
2835 	int ret = 0;
2836 
2837 	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
2838 	    mqprio_qopt->qopt.num_tc < 1)
2839 		return -EINVAL;
2840 
2841 	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
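		/* Each TC must describe a non-empty queue range that starts
		 * exactly where the previous one ended, i.e. offsets must be
		 * contiguous.
		 */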
2842 		if (!mqprio_qopt->qopt.count[i] ||
2843 		    mqprio_qopt->qopt.offset[i] != num_qps)
2844 			return -EINVAL;
2845 		if (mqprio_qopt->min_rate[i]) {
2846 			dev_err(&adapter->pdev->dev,
2847 				"Invalid min tx rate (greater than 0) specified for TC%d\n",
2848 				i);
2849 			return -EINVAL;
2850 		}
2851 
2852 		/* convert to Mbps */
2853 		tx_rate = div_u64(mqprio_qopt->max_rate[i],
2854 				  IAVF_MBPS_DIVISOR);
2855 
2856 		if (mqprio_qopt->max_rate[i] &&
2857 		    tx_rate < IAVF_MBPS_QUANTA) {
2858 			dev_err(&adapter->pdev->dev,
2859 				"Invalid max tx rate for TC%d, minimum %dMbps\n",
2860 				i, IAVF_MBPS_QUANTA);
2861 			return -EINVAL;
2862 		}
2863 
2864 		(void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
2865 
2866 		if (tx_rate_rem != 0) {
2867 			dev_err(&adapter->pdev->dev,
2868 				"Invalid max tx rate for TC%d, not divisible by %d\n",
2869 				i, IAVF_MBPS_QUANTA);
2870 			return -EINVAL;
2871 		}
2872 
2873 		total_max_rate += tx_rate;
2874 		num_qps += mqprio_qopt->qopt.count[i];
2875 	}
2876 	if (num_qps > adapter->num_active_queues) {
2877 		dev_err(&adapter->pdev->dev,
2878 			"Cannot support requested number of queues\n");
2879 		return -EINVAL;
2880 	}
2881 
2882 	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
2883 	return ret;
2884 }
2885 
2886 /**
2887  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
2888  * @adapter: board private structure
2889  **/
2890 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
2891 {
2892 	struct iavf_cloud_filter *cf, *cftmp;
2893 
2894 	spin_lock_bh(&adapter->cloud_filter_list_lock);
2895 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2896 				 list) {
2897 		list_del(&cf->list);
2898 		kfree(cf);
2899 		adapter->num_cloud_filters--;
2900 	}
2901 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
2902 }
2903 
2904 /**
2905  * __iavf_setup_tc - configure multiple traffic classes
2906  * @netdev: network interface device structure
2907  * @type_data: tc offload data
2908  *
2909  * This function processes the config information provided by the
2910  * user to configure traffic classes/queue channels and packages the
2911  * information to request the PF to setup traffic classes.
2912  *
2913  * Returns 0 on success.
2914  **/
2915 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
2916 {
2917 	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2918 	struct iavf_adapter *adapter = netdev_priv(netdev);
2919 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
2920 	u8 num_tc = 0, total_qps = 0;
2921 	int ret = 0, netdev_tc = 0;
2922 	u64 max_tx_rate;
2923 	u16 mode;
2924 	int i;
2925 
2926 	num_tc = mqprio_qopt->qopt.num_tc;
2927 	mode = mqprio_qopt->mode;
2928 
2929 	/* delete queue_channel */
2930 	if (!mqprio_qopt->qopt.hw) {
2931 		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
2932 			/* reset the tc configuration */
2933 			netdev_reset_tc(netdev);
2934 			adapter->num_tc = 0;
2935 			netif_tx_stop_all_queues(netdev);
2936 			netif_tx_disable(netdev);
2937 			iavf_del_all_cloud_filters(adapter);
2938 			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
2939 			total_qps = adapter->orig_num_active_queues;
2940 			goto exit;
2941 		} else {
2942 			return -EINVAL;
2943 		}
2944 	}
2945 
2946 	/* add queue channel */
2947 	if (mode == TC_MQPRIO_MODE_CHANNEL) {
2948 		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2949 			dev_err(&adapter->pdev->dev, "ADq not supported\n");
2950 			return -EOPNOTSUPP;
2951 		}
2952 		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
2953 			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2954 			return -EINVAL;
2955 		}
2956 
2957 		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
2958 		if (ret)
2959 			return ret;
2960 		/* Return if same TC config is requested */
2961 		if (adapter->num_tc == num_tc)
2962 			return 0;
2963 		adapter->num_tc = num_tc;
2964 
2965 		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2966 			if (i < num_tc) {
2967 				adapter->ch_config.ch_info[i].count =
2968 					mqprio_qopt->qopt.count[i];
2969 				adapter->ch_config.ch_info[i].offset =
2970 					mqprio_qopt->qopt.offset[i];
2971 				total_qps += mqprio_qopt->qopt.count[i];
2972 				max_tx_rate = mqprio_qopt->max_rate[i];
2973 				/* convert to Mbps */
2974 				max_tx_rate = div_u64(max_tx_rate,
2975 						      IAVF_MBPS_DIVISOR);
2976 				adapter->ch_config.ch_info[i].max_tx_rate =
2977 					max_tx_rate;
2978 			} else {
2979 				adapter->ch_config.ch_info[i].count = 1;
2980 				adapter->ch_config.ch_info[i].offset = 0;
2981 			}
2982 		}
2983 
2984 		/* Take a snapshot of the original config, such as "num_active_queues".
2985 		 * It is used later when the delete ADQ flow is exercised, so that
2986 		 * once the delete ADQ flow completes, the VF goes back to its
2987 		 * original queue configuration.
2988 		 */
2989 
2990 		adapter->orig_num_active_queues = adapter->num_active_queues;
2991 
2992 		/* Store queue info based on TC so that VF gets configured
2993 		 * with correct number of queues when VF completes ADQ config
2994 		 * flow
2995 		 */
2996 		adapter->ch_config.total_qps = total_qps;
2997 
2998 		netif_tx_stop_all_queues(netdev);
2999 		netif_tx_disable(netdev);
3000 		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3001 		netdev_reset_tc(netdev);
3002 		/* Report the tc mapping up the stack */
3003 		netdev_set_num_tc(adapter->netdev, num_tc);
3004 		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3005 			u16 qcount = mqprio_qopt->qopt.count[i];
3006 			u16 qoffset = mqprio_qopt->qopt.offset[i];
3007 
3008 			if (i < num_tc)
3009 				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3010 						    qoffset);
3011 		}
3012 	}
3013 exit:
3014 	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3015 		return 0;
3016 
3017 	netif_set_real_num_rx_queues(netdev, total_qps);
3018 	netif_set_real_num_tx_queues(netdev, total_qps);
3019 
3020 	return ret;
3021 }
3022 
3023 /**
3024  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3025  * @adapter: board private structure
3026  * @f: pointer to struct flow_cls_offload
3027  * @filter: pointer to cloud filter structure
3028  */
3029 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3030 				 struct flow_cls_offload *f,
3031 				 struct iavf_cloud_filter *filter)
3032 {
3033 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3034 	struct flow_dissector *dissector = rule->match.dissector;
3035 	u16 n_proto_mask = 0;
3036 	u16 n_proto_key = 0;
3037 	u8 field_flags = 0;
3038 	u16 addr_type = 0;
3039 	u16 n_proto = 0;
3040 	int i = 0;
3041 	struct virtchnl_filter *vf = &filter->f;
3042 
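	/* Only the dissector keys listed below can be expressed in a
	 * virtchnl cloud filter; reject rules that match on anything else.
	 */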
3043 	if (dissector->used_keys &
3044 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3045 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
3046 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3047 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
3048 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3049 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3050 	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
3051 	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3052 		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3053 			dissector->used_keys);
3054 		return -EOPNOTSUPP;
3055 	}
3056 
3057 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3058 		struct flow_match_enc_keyid match;
3059 
3060 		flow_rule_match_enc_keyid(rule, &match);
3061 		if (match.mask->keyid != 0)
3062 			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3063 	}
3064 
3065 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3066 		struct flow_match_basic match;
3067 
3068 		flow_rule_match_basic(rule, &match);
3069 		n_proto_key = ntohs(match.key->n_proto);
3070 		n_proto_mask = ntohs(match.mask->n_proto);
3071 
3072 		if (n_proto_key == ETH_P_ALL) {
3073 			n_proto_key = 0;
3074 			n_proto_mask = 0;
3075 		}
3076 		n_proto = n_proto_key & n_proto_mask;
3077 		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3078 			return -EINVAL;
3079 		if (n_proto == ETH_P_IPV6) {
3080 			/* specify flow type as TCP IPv6 */
3081 			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3082 		}
3083 
3084 		if (match.key->ip_proto != IPPROTO_TCP) {
3085 			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3086 			return -EINVAL;
3087 		}
3088 	}
3089 
3090 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3091 		struct flow_match_eth_addrs match;
3092 
3093 		flow_rule_match_eth_addrs(rule, &match);
3094 
3095 		/* use is_broadcast and is_zero to check for all 0xff or all 0 */
3096 		if (!is_zero_ether_addr(match.mask->dst)) {
3097 			if (is_broadcast_ether_addr(match.mask->dst)) {
3098 				field_flags |= IAVF_CLOUD_FIELD_OMAC;
3099 			} else {
3100 				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3101 					match.mask->dst);
3102 				return -EINVAL;
3103 			}
3104 		}
3105 
3106 		if (!is_zero_ether_addr(match.mask->src)) {
3107 			if (is_broadcast_ether_addr(match.mask->src)) {
3108 				field_flags |= IAVF_CLOUD_FIELD_IMAC;
3109 			} else {
3110 				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3111 					match.mask->src);
3112 				return -EINVAL;
3113 			}
3114 		}
3115 
3116 		if (!is_zero_ether_addr(match.key->dst))
3117 			if (is_valid_ether_addr(match.key->dst) ||
3118 			    is_multicast_ether_addr(match.key->dst)) {
3119 				/* set the mask if a valid dst_mac address */
3120 				for (i = 0; i < ETH_ALEN; i++)
3121 					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3122 				ether_addr_copy(vf->data.tcp_spec.dst_mac,
3123 						match.key->dst);
3124 			}
3125 
3126 		if (!is_zero_ether_addr(match.key->src))
3127 			if (is_valid_ether_addr(match.key->src) ||
3128 			    is_multicast_ether_addr(match.key->src)) {
3129 				/* set the mask if a valid src_mac address */
3130 				for (i = 0; i < ETH_ALEN; i++)
3131 					vf->mask.tcp_spec.src_mac[i] |= 0xff;
3132 				ether_addr_copy(vf->data.tcp_spec.src_mac,
3133 						match.key->src);
3134 		}
3135 	}
3136 
3137 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3138 		struct flow_match_vlan match;
3139 
3140 		flow_rule_match_vlan(rule, &match);
3141 		if (match.mask->vlan_id) {
3142 			if (match.mask->vlan_id == VLAN_VID_MASK) {
3143 				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3144 			} else {
3145 				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3146 					match.mask->vlan_id);
3147 				return -EINVAL;
3148 			}
3149 		}
3150 		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3151 		vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3152 	}
3153 
3154 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3155 		struct flow_match_control match;
3156 
3157 		flow_rule_match_control(rule, &match);
3158 		addr_type = match.key->addr_type;
3159 	}
3160 
3161 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3162 		struct flow_match_ipv4_addrs match;
3163 
3164 		flow_rule_match_ipv4_addrs(rule, &match);
3165 		if (match.mask->dst) {
3166 			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3167 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3168 			} else {
3169 				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3170 					be32_to_cpu(match.mask->dst));
3171 				return -EINVAL;
3172 			}
3173 		}
3174 
3175 		if (match.mask->src) {
3176 			if (match.mask->src == cpu_to_be32(0xffffffff)) {
3177 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3178 			} else {
3179 				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3180 					be32_to_cpu(match.mask->src));
3181 				return -EINVAL;
3182 			}
3183 		}
3184 
3185 		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3186 			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3187 			return -EINVAL;
3188 		}
3189 		if (match.key->dst) {
3190 			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3191 			vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3192 		}
3193 		if (match.key->src) {
3194 			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3195 			vf->data.tcp_spec.src_ip[0] = match.key->src;
3196 		}
3197 	}
3198 
3199 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3200 		struct flow_match_ipv6_addrs match;
3201 
3202 		flow_rule_match_ipv6_addrs(rule, &match);
3203 
3204 		/* validate mask, make sure it is not IPV6_ADDR_ANY */
3205 		if (ipv6_addr_any(&match.mask->dst)) {
3206 			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3207 				IPV6_ADDR_ANY);
3208 			return -EINVAL;
3209 		}
3210 
3211 		/* src and dest IPv6 address should not be LOOPBACK
3212 		 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3213 		 */
3214 		if (ipv6_addr_loopback(&match.key->dst) ||
3215 		    ipv6_addr_loopback(&match.key->src)) {
3216 			dev_err(&adapter->pdev->dev,
3217 				"ipv6 addr should not be loopback\n");
3218 			return -EINVAL;
3219 		}
3220 		if (!ipv6_addr_any(&match.mask->dst) ||
3221 		    !ipv6_addr_any(&match.mask->src))
3222 			field_flags |= IAVF_CLOUD_FIELD_IIP;
3223 
3224 		for (i = 0; i < 4; i++)
3225 			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3226 		memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3227 		       sizeof(vf->data.tcp_spec.dst_ip));
3228 		for (i = 0; i < 4; i++)
3229 			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3230 		memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3231 		       sizeof(vf->data.tcp_spec.src_ip));
3232 	}
3233 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3234 		struct flow_match_ports match;
3235 
3236 		flow_rule_match_ports(rule, &match);
3237 		if (match.mask->src) {
3238 			if (match.mask->src == cpu_to_be16(0xffff)) {
3239 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3240 			} else {
3241 				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3242 					be16_to_cpu(match.mask->src));
3243 				return -EINVAL;
3244 			}
3245 		}
3246 
3247 		if (match.mask->dst) {
3248 			if (match.mask->dst == cpu_to_be16(0xffff)) {
3249 				field_flags |= IAVF_CLOUD_FIELD_IIP;
3250 			} else {
3251 				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3252 					be16_to_cpu(match.mask->dst));
3253 				return -EINVAL;
3254 			}
3255 		}
3256 		if (match.key->dst) {
3257 			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3258 			vf->data.tcp_spec.dst_port = match.key->dst;
3259 		}
3260 
3261 		if (match.key->src) {
3262 			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3263 			vf->data.tcp_spec.src_port = match.key->src;
3264 		}
3265 	}
3266 	vf->field_flags = field_flags;
3267 
3268 	return 0;
3269 }
3270 
3271 /**
3272  * iavf_handle_tclass - Forward to a traffic class on the device
3273  * @adapter: board private structure
3274  * @tc: traffic class index on the device
3275  * @filter: pointer to cloud filter structure
3276  */
3277 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3278 			      struct iavf_cloud_filter *filter)
3279 {
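	/* Traffic class 0 is the default class; traffic destined for it
	 * needs no explicit redirect action.
	 */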
3280 	if (tc == 0)
3281 		return 0;
3282 	if (tc < adapter->num_tc) {
3283 		if (!filter->f.data.tcp_spec.dst_port) {
3284 			dev_err(&adapter->pdev->dev,
3285 				"Specify destination port to redirect to traffic class other than TC0\n");
3286 			return -EINVAL;
3287 		}
3288 	}
3289 	/* redirect to a traffic class on the same device */
3290 	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3291 	filter->f.action_meta = tc;
3292 	return 0;
3293 }
3294 
3295 /**
3296  * iavf_configure_clsflower - Add tc flower filters
3297  * @adapter: board private structure
3298  * @cls_flower: Pointer to struct flow_cls_offload
3299  */
3300 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3301 				    struct flow_cls_offload *cls_flower)
3302 {
3303 	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3304 	struct iavf_cloud_filter *filter = NULL;
3305 	int err = -EINVAL, count = 50;
3306 
3307 	if (tc < 0) {
3308 		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3309 		return -EINVAL;
3310 	}
3311 
3312 	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3313 	if (!filter)
3314 		return -ENOMEM;
3315 
3316 	while (!mutex_trylock(&adapter->crit_lock)) {
3317 		if (--count == 0) {
3318 			kfree(filter);
3319 			return err;
3320 		}
3321 		udelay(1);
3322 	}
3323 
3324 	filter->cookie = cls_flower->cookie;
3325 
3326 	/* set the mask to all zeroes to begin with */
3327 	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
3328 	/* start out with flow type and eth type IPv4 to begin with */
3329 	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
3330 	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
3331 	if (err)
3332 		goto err;
3333 
3334 	err = iavf_handle_tclass(adapter, tc, filter);
3335 	if (err)
3336 		goto err;
3337 
3338 	/* add filter to the list */
3339 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3340 	list_add_tail(&filter->list, &adapter->cloud_filter_list);
3341 	adapter->num_cloud_filters++;
3342 	filter->add = true;
3343 	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3344 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3345 err:
3346 	if (err)
3347 		kfree(filter);
3348 
3349 	mutex_unlock(&adapter->crit_lock);
3350 	return err;
3351 }
3352 
3353 /* iavf_find_cf - Find the cloud filter in the list
3354  * @adapter: Board private structure
3355  * @cookie: filter specific cookie
3356  *
3357  * Returns ptr to the filter object or NULL. Must be called while holding the
3358  * cloud_filter_list_lock.
3359  */
3360 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3361 					      unsigned long *cookie)
3362 {
3363 	struct iavf_cloud_filter *filter = NULL;
3364 
3365 	if (!cookie)
3366 		return NULL;
3367 
3368 	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3369 		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3370 			return filter;
3371 	}
3372 	return NULL;
3373 }
3374 
3375 /**
3376  * iavf_delete_clsflower - Remove tc flower filters
3377  * @adapter: board private structure
3378  * @cls_flower: Pointer to struct flow_cls_offload
3379  */
3380 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3381 				 struct flow_cls_offload *cls_flower)
3382 {
3383 	struct iavf_cloud_filter *filter = NULL;
3384 	int err = 0;
3385 
3386 	spin_lock_bh(&adapter->cloud_filter_list_lock);
3387 	filter = iavf_find_cf(adapter, &cls_flower->cookie);
3388 	if (filter) {
3389 		filter->del = true;
3390 		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
3391 	} else {
3392 		err = -EINVAL;
3393 	}
3394 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
3395 
3396 	return err;
3397 }
3398 
3399 /**
3400  * iavf_setup_tc_cls_flower - flower classifier offloads
3401  * @adapter: board private structure
3402  * @cls_flower: pointer to flow_cls_offload struct with flow info
3403  */
3404 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
3405 				    struct flow_cls_offload *cls_flower)
3406 {
3407 	switch (cls_flower->command) {
3408 	case FLOW_CLS_REPLACE:
3409 		return iavf_configure_clsflower(adapter, cls_flower);
3410 	case FLOW_CLS_DESTROY:
3411 		return iavf_delete_clsflower(adapter, cls_flower);
3412 	case FLOW_CLS_STATS:
3413 		return -EOPNOTSUPP;
3414 	default:
3415 		return -EOPNOTSUPP;
3416 	}
3417 }
3418 
3419 /**
3420  * iavf_setup_tc_block_cb - block callback for tc
3421  * @type: type of offload
3422  * @type_data: offload data
3423  * @cb_priv: adapter private data registered with the block callback
3424  *
3425  * This function is the block callback for traffic classes
3426  **/
3427 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3428 				  void *cb_priv)
3429 {
3430 	struct iavf_adapter *adapter = cb_priv;
3431 
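	/* Only rules on chain 0 can be offloaded, and only while the netdev
	 * advertises NETIF_F_HW_TC; everything else stays in software.
	 */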
3432 	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
3433 		return -EOPNOTSUPP;
3434 
3435 	switch (type) {
3436 	case TC_SETUP_CLSFLOWER:
3437 		return iavf_setup_tc_cls_flower(cb_priv, type_data);
3438 	default:
3439 		return -EOPNOTSUPP;
3440 	}
3441 }
3442 
3443 static LIST_HEAD(iavf_block_cb_list);
3444 
3445 /**
3446  * iavf_setup_tc - configure multiple traffic classes
3447  * @netdev: network interface device structure
3448  * @type: type of offload
3449  * @type_data: tc offload data
3450  *
3451  * This function is the callback to ndo_setup_tc in the
3452  * netdev_ops.
3453  *
3454  * Returns 0 on success
3455  **/
3456 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
3457 			 void *type_data)
3458 {
3459 	struct iavf_adapter *adapter = netdev_priv(netdev);
3460 
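	/* MQPRIO qdisc offload configures the traffic classes directly, while
	 * TC_SETUP_BLOCK registers the flower classifier callback used for
	 * cloud filters.
	 */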
3461 	switch (type) {
3462 	case TC_SETUP_QDISC_MQPRIO:
3463 		return __iavf_setup_tc(netdev, type_data);
3464 	case TC_SETUP_BLOCK:
3465 		return flow_block_cb_setup_simple(type_data,
3466 						  &iavf_block_cb_list,
3467 						  iavf_setup_tc_block_cb,
3468 						  adapter, adapter, true);
3469 	default:
3470 		return -EOPNOTSUPP;
3471 	}
3472 }
3473 
3474 /**
3475  * iavf_open - Called when a network interface is made active
3476  * @netdev: network interface device structure
3477  *
3478  * Returns 0 on success, negative value on failure
3479  *
3480  * The open entry point is called when a network interface is made
3481  * active by the system (IFF_UP).  At this point all resources needed
3482  * for transmit and receive operations are allocated, the interrupt
3483  * handler is registered with the OS, the watchdog is started,
3484  * and the stack is notified that the interface is ready.
3485  **/
3486 static int iavf_open(struct net_device *netdev)
3487 {
3488 	struct iavf_adapter *adapter = netdev_priv(netdev);
3489 	int err;
3490 
3491 	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
3492 		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
3493 		return -EIO;
3494 	}
3495 
3496 	while (!mutex_trylock(&adapter->crit_lock))
3497 		usleep_range(500, 1000);
3498 
3499 	if (adapter->state != __IAVF_DOWN) {
3500 		err = -EBUSY;
3501 		goto err_unlock;
3502 	}
3503 
3504 	if (adapter->state == __IAVF_RUNNING &&
3505 	    !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
3506 		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
3507 		err = 0;
3508 		goto err_unlock;
3509 	}
3510 
3511 	/* allocate transmit descriptors */
3512 	err = iavf_setup_all_tx_resources(adapter);
3513 	if (err)
3514 		goto err_setup_tx;
3515 
3516 	/* allocate receive descriptors */
3517 	err = iavf_setup_all_rx_resources(adapter);
3518 	if (err)
3519 		goto err_setup_rx;
3520 
3521 	/* clear any pending interrupts, may auto mask */
3522 	err = iavf_request_traffic_irqs(adapter, netdev->name);
3523 	if (err)
3524 		goto err_req_irq;
3525 
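	/* Make sure the hardware MAC address has a filter entry before the
	 * interface is brought up.
	 */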
3526 	spin_lock_bh(&adapter->mac_vlan_list_lock);
3527 
3528 	iavf_add_filter(adapter, adapter->hw.mac.addr);
3529 
3530 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
3531 
3532 	/* Restore VLAN filters that were removed with IFF_DOWN */
3533 	iavf_restore_filters(adapter);
3534 
3535 	iavf_configure(adapter);
3536 
3537 	iavf_up_complete(adapter);
3538 
3539 	iavf_irq_enable(adapter, true);
3540 
3541 	mutex_unlock(&adapter->crit_lock);
3542 
3543 	return 0;
3544 
3545 err_req_irq:
3546 	iavf_down(adapter);
3547 	iavf_free_traffic_irqs(adapter);
3548 err_setup_rx:
3549 	iavf_free_all_rx_resources(adapter);
3550 err_setup_tx:
3551 	iavf_free_all_tx_resources(adapter);
3552 err_unlock:
3553 	mutex_unlock(&adapter->crit_lock);
3554 
3555 	return err;
3556 }
3557 
3558 /**
3559  * iavf_close - Disables a network interface
3560  * @netdev: network interface device structure
3561  *
3562  * Returns 0, this is not allowed to fail
3563  *
3564  * The close entry point is called when an interface is de-activated
3565  * by the OS.  The hardware is still under the driver's control, but
3566  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
3567  * are freed, along with all transmit and receive resources.
3568  **/
3569 static int iavf_close(struct net_device *netdev)
3570 {
3571 	struct iavf_adapter *adapter = netdev_priv(netdev);
3572 	u64 aq_to_restore;
3573 	int status;
3574 
3575 	mutex_lock(&adapter->crit_lock);
3576 
3577 	if (adapter->state <= __IAVF_DOWN_PENDING) {
3578 		mutex_unlock(&adapter->crit_lock);
3579 		return 0;
3580 	}
3581 
3582 	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3583 	if (CLIENT_ENABLED(adapter))
3584 		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3585 	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
3586 	 * IAVF_FLAG_AQ_DISABLE_QUEUES, because that would deadlock on rtnl
3587 	 * with adminq_task() until iavf_close() times out. We must send
3588 	 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES so the
3589 	 * VF can disable its queues. Give only the necessary flags to
3590 	 * iavf_down() and save the rest to be restored right before
3591 	 * iavf_close() returns, once IAVF_FLAG_AQ_DISABLE_QUEUES has been
3592 	 * sent and the VF is in the DOWN state.
3593 	 */
3594 	aq_to_restore = adapter->aq_required;
3595 	adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
3596 
3597 	/* Remove flags that we either do not want to send after close, or
3598 	 * that must be sent before the queues are disabled.
3599 	 */
3600 	aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG		|
3601 			   IAVF_FLAG_AQ_ENABLE_QUEUES		|
3602 			   IAVF_FLAG_AQ_CONFIGURE_QUEUES	|
3603 			   IAVF_FLAG_AQ_ADD_VLAN_FILTER		|
3604 			   IAVF_FLAG_AQ_ADD_MAC_FILTER		|
3605 			   IAVF_FLAG_AQ_ADD_CLOUD_FILTER	|
3606 			   IAVF_FLAG_AQ_ADD_FDIR_FILTER		|
3607 			   IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
3608 
3609 	iavf_down(adapter);
3610 	iavf_change_state(adapter, __IAVF_DOWN_PENDING);
3611 	iavf_free_traffic_irqs(adapter);
3612 
3613 	mutex_unlock(&adapter->crit_lock);
3614 
3615 	/* We explicitly don't free resources here because the hardware is
3616 	 * still active and can DMA into memory. Resources are cleared in
3617 	 * iavf_virtchnl_completion() after we get confirmation from the PF
3618 	 * driver that the rings have been stopped.
3619 	 *
3620 	 * Also, we wait for state to transition to __IAVF_DOWN before
3621 	 * returning. State change occurs in iavf_virtchnl_completion() after
3622 	 * VF resources are released (which occurs after the PF driver
3623 	 * processes and responds to admin queue commands).
3624 	 */
3625 
3626 	status = wait_event_timeout(adapter->down_waitqueue,
3627 				    adapter->state == __IAVF_DOWN,
3628 				    msecs_to_jiffies(500));
3629 	if (!status)
3630 		netdev_warn(netdev, "Device resources not yet released\n");
3631 
3632 	mutex_lock(&adapter->crit_lock);
3633 	adapter->aq_required |= aq_to_restore;
3634 	mutex_unlock(&adapter->crit_lock);
3635 	return 0;
3636 }
3637 
3638 /**
3639  * iavf_change_mtu - Change the Maximum Transfer Unit
3640  * @netdev: network interface device structure
3641  * @new_mtu: new value for maximum frame size
3642  *
3643  * Returns 0 on success, negative on failure
3644  **/
3645 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3646 {
3647 	struct iavf_adapter *adapter = netdev_priv(netdev);
3648 
3649 	netdev->mtu = new_mtu;
3650 	if (CLIENT_ENABLED(adapter)) {
3651 		iavf_notify_client_l2_params(&adapter->vsi);
3652 		adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3653 	}
3654 
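	/* A running interface needs a reset so the new MTU takes effect on
	 * the reconfigured queues.
	 */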
3655 	if (netif_running(netdev)) {
3656 		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3657 		queue_work(iavf_wq, &adapter->reset_task);
3658 	}
3659 
3660 	return 0;
3661 }
3662 
3663 /**
3664  * iavf_set_features - set the netdev feature flags
3665  * @netdev: ptr to the netdev being adjusted
3666  * @features: the feature set that the stack is suggesting
3667  * Note: expects to be called while under rtnl_lock()
3668  **/
3669 static int iavf_set_features(struct net_device *netdev,
3670 			     netdev_features_t features)
3671 {
3672 	struct iavf_adapter *adapter = netdev_priv(netdev);
3673 
3674 	/* Don't allow enabling VLAN features when adapter is not capable
3675 	 * of VLAN offload/filtering
3676 	 */
3677 	if (!VLAN_ALLOWED(adapter)) {
3678 		netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
3679 					 NETIF_F_HW_VLAN_CTAG_TX |
3680 					 NETIF_F_HW_VLAN_CTAG_FILTER);
3681 		if (features & (NETIF_F_HW_VLAN_CTAG_RX |
3682 				NETIF_F_HW_VLAN_CTAG_TX |
3683 				NETIF_F_HW_VLAN_CTAG_FILTER))
3684 			return -EINVAL;
3685 	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3686 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
3687 			adapter->aq_required |=
3688 				IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3689 		else
3690 			adapter->aq_required |=
3691 				IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
3692 	}
3693 
3694 	return 0;
3695 }
3696 
3697 /**
3698  * iavf_features_check - Validate encapsulated packet conforms to limits
3699  * @skb: skb buff
3700  * @dev: This physical port's netdev
3701  * @features: Offload features that the stack believes apply
3702  **/
3703 static netdev_features_t iavf_features_check(struct sk_buff *skb,
3704 					     struct net_device *dev,
3705 					     netdev_features_t features)
3706 {
3707 	size_t len;
3708 
3709 	/* No point in doing any of this if neither checksum nor GSO are
3710 	 * being requested for this frame.  We can rule out both by just
3711 	 * checking for CHECKSUM_PARTIAL
3712 	 */
3713 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3714 		return features;
3715 
3716 	/* We cannot support GSO if the MSS is going to be less than
3717 	 * 64 bytes; if it is, drop GSO support for this frame.
3718 	 */
3719 	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3720 		features &= ~NETIF_F_GSO_MASK;
3721 
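	/* Each limit below is expressed as "length must fit in N words" or
	 * "N dwords": masking off the allowed range and testing the remaining
	 * bits catches both oversized and misaligned headers.
	 */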
3722 	/* MACLEN can support at most 63 words */
3723 	len = skb_network_header(skb) - skb->data;
3724 	if (len & ~(63 * 2))
3725 		goto out_err;
3726 
3727 	/* IPLEN and EIPLEN can support at most 127 dwords */
3728 	len = skb_transport_header(skb) - skb_network_header(skb);
3729 	if (len & ~(127 * 4))
3730 		goto out_err;
3731 
3732 	if (skb->encapsulation) {
3733 		/* L4TUNLEN can support 127 words */
3734 		len = skb_inner_network_header(skb) - skb_transport_header(skb);
3735 		if (len & ~(127 * 2))
3736 			goto out_err;
3737 
3738 		/* IPLEN can support at most 127 dwords */
3739 		len = skb_inner_transport_header(skb) -
3740 		      skb_inner_network_header(skb);
3741 		if (len & ~(127 * 4))
3742 			goto out_err;
3743 	}
3744 
3745 	/* No need to validate L4LEN as TCP is the only protocol with a
3746 	 * flexible value and we support all possible values supported
3747 	 * by TCP, which is at most 15 dwords
3748 	 */
3749 
3750 	return features;
3751 out_err:
3752 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3753 }
3754 
3755 /**
3756  * iavf_fix_features - fix up the netdev feature bits
3757  * @netdev: our net device
3758  * @features: desired feature bits
3759  *
3760  * Returns fixed-up features bits
3761  **/
3762 static netdev_features_t iavf_fix_features(struct net_device *netdev,
3763 					   netdev_features_t features)
3764 {
3765 	struct iavf_adapter *adapter = netdev_priv(netdev);
3766 
3767 	if (adapter->vf_res &&
3768 	    !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3769 		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3770 			      NETIF_F_HW_VLAN_CTAG_RX |
3771 			      NETIF_F_HW_VLAN_CTAG_FILTER);
3772 
3773 	return features;
3774 }
3775 
3776 static const struct net_device_ops iavf_netdev_ops = {
3777 	.ndo_open		= iavf_open,
3778 	.ndo_stop		= iavf_close,
3779 	.ndo_start_xmit		= iavf_xmit_frame,
3780 	.ndo_set_rx_mode	= iavf_set_rx_mode,
3781 	.ndo_validate_addr	= eth_validate_addr,
3782 	.ndo_set_mac_address	= iavf_set_mac,
3783 	.ndo_change_mtu		= iavf_change_mtu,
3784 	.ndo_tx_timeout		= iavf_tx_timeout,
3785 	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
3786 	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
3787 	.ndo_features_check	= iavf_features_check,
3788 	.ndo_fix_features	= iavf_fix_features,
3789 	.ndo_set_features	= iavf_set_features,
3790 	.ndo_setup_tc		= iavf_setup_tc,
3791 };
3792 
3793 /**
3794  * iavf_check_reset_complete - check that VF reset is complete
3795  * @hw: pointer to hw struct
3796  *
3797  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3798  **/
3799 static int iavf_check_reset_complete(struct iavf_hw *hw)
3800 {
3801 	u32 rstat;
3802 	int i;
3803 
3804 	for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3805 		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3806 			     IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3807 		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3808 		    (rstat == VIRTCHNL_VFR_COMPLETED))
3809 			return 0;
3810 		usleep_range(10, 20);
3811 	}
3812 	return -EBUSY;
3813 }
3814 
3815 /**
3816  * iavf_process_config - Process the config information we got from the PF
3817  * @adapter: board private structure
3818  *
3819  * Verify that we have a valid config struct, and set up our netdev features
3820  * and our VSI struct.
3821  **/
3822 int iavf_process_config(struct iavf_adapter *adapter)
3823 {
3824 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
3825 	int i, num_req_queues = adapter->num_req_queues;
3826 	struct net_device *netdev = adapter->netdev;
3827 	struct iavf_vsi *vsi = &adapter->vsi;
3828 	netdev_features_t hw_enc_features;
3829 	netdev_features_t hw_features;
3830 
3831 	/* got VF config message back from PF, now we can parse it */
3832 	for (i = 0; i < vfres->num_vsis; i++) {
3833 		if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3834 			adapter->vsi_res = &vfres->vsi_res[i];
3835 	}
3836 	if (!adapter->vsi_res) {
3837 		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3838 		return -ENODEV;
3839 	}
3840 
3841 	if (num_req_queues &&
3842 	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
3843 		/* Problem.  The PF gave us fewer queues than what we had
3844 		 * negotiated in our request.  Need a reset to see if we can
3845 		 * get back to a working state.
3846 		 */
3847 		dev_err(&adapter->pdev->dev,
3848 			"Requested %d queues, but PF only gave us %d.\n",
3849 			num_req_queues,
3850 			adapter->vsi_res->num_queue_pairs);
3851 		adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
3852 		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3853 		iavf_schedule_reset(adapter);
3854 		return -ENODEV;
3855 	}
3856 	adapter->num_req_queues = 0;
3857 
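	/* Base feature set used for both regular and encapsulated traffic;
	 * encapsulation-specific GSO flags are added below only when the PF
	 * reports VIRTCHNL_VF_OFFLOAD_ENCAP.
	 */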
3858 	hw_enc_features = NETIF_F_SG			|
3859 			  NETIF_F_IP_CSUM		|
3860 			  NETIF_F_IPV6_CSUM		|
3861 			  NETIF_F_HIGHDMA		|
3862 			  NETIF_F_SOFT_FEATURES	|
3863 			  NETIF_F_TSO			|
3864 			  NETIF_F_TSO_ECN		|
3865 			  NETIF_F_TSO6			|
3866 			  NETIF_F_SCTP_CRC		|
3867 			  NETIF_F_RXHASH		|
3868 			  NETIF_F_RXCSUM		|
3869 			  0;
3870 
3871 	/* advertise to stack only if offloads for encapsulated packets are
3872 	 * supported
3873 	 */
3874 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3875 		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
3876 				   NETIF_F_GSO_GRE		|
3877 				   NETIF_F_GSO_GRE_CSUM		|
3878 				   NETIF_F_GSO_IPXIP4		|
3879 				   NETIF_F_GSO_IPXIP6		|
3880 				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
3881 				   NETIF_F_GSO_PARTIAL		|
3882 				   0;
3883 
3884 		if (!(vfres->vf_cap_flags &
3885 		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3886 			netdev->gso_partial_features |=
3887 				NETIF_F_GSO_UDP_TUNNEL_CSUM;
3888 
3889 		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3890 		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3891 		netdev->hw_enc_features |= hw_enc_features;
3892 	}
3893 	/* record features VLANs can make use of */
3894 	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3895 
3896 	/* Write features and hw_features separately to avoid polluting
3897 	 * with, or dropping, features that are set when we registered.
3898 	 */
3899 	hw_features = hw_enc_features;
3900 
3901 	/* Enable VLAN features if supported */
3902 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3903 		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3904 				NETIF_F_HW_VLAN_CTAG_RX);
3905 	/* Enable cloud filter if ADQ is supported */
3906 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3907 		hw_features |= NETIF_F_HW_TC;
3908 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
3909 		hw_features |= NETIF_F_GSO_UDP_L4;
3910 
3911 	netdev->hw_features |= hw_features;
3912 
3913 	netdev->features |= hw_features;
3914 
3915 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3916 		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3917 
3918 	netdev->priv_flags |= IFF_UNICAST_FLT;
3919 
3920 	/* Do not turn on offloads when they are requested to be turned off.
3921 	 * TSO needs an MTU of at least 576 bytes to work correctly.
3922 	 */
3923 	if (netdev->wanted_features) {
3924 		if (!(netdev->wanted_features & NETIF_F_TSO) ||
3925 		    netdev->mtu < 576)
3926 			netdev->features &= ~NETIF_F_TSO;
3927 		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3928 		    netdev->mtu < 576)
3929 			netdev->features &= ~NETIF_F_TSO6;
3930 		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3931 			netdev->features &= ~NETIF_F_TSO_ECN;
3932 		if (!(netdev->wanted_features & NETIF_F_GRO))
3933 			netdev->features &= ~NETIF_F_GRO;
3934 		if (!(netdev->wanted_features & NETIF_F_GSO))
3935 			netdev->features &= ~NETIF_F_GSO;
3936 	}
3937 
3938 	adapter->vsi.id = adapter->vsi_res->vsi_id;
3939 
3940 	adapter->vsi.back = adapter;
3941 	adapter->vsi.base_vector = 1;
3942 	adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
3943 	vsi->netdev = adapter->netdev;
3944 	vsi->qs_handle = adapter->vsi_res->qset_handle;
3945 	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
3946 		adapter->rss_key_size = vfres->rss_key_size;
3947 		adapter->rss_lut_size = vfres->rss_lut_size;
3948 	} else {
3949 		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
3950 		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
3951 	}
3952 
3953 	return 0;
3954 }
3955 
3956 /**
3957  * iavf_shutdown - Shutdown the device in preparation for a reboot
3958  * @pdev: pci device structure
3959  **/
3960 static void iavf_shutdown(struct pci_dev *pdev)
3961 {
3962 	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
3963 	struct net_device *netdev = adapter->netdev;
3964 
3965 	netif_device_detach(netdev);
3966 
3967 	if (netif_running(netdev))
3968 		iavf_close(netdev);
3969 
3970 	if (iavf_lock_timeout(&adapter->crit_lock, 5000))
3971 		dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
3972 	/* Prevent the watchdog from running. */
3973 	iavf_change_state(adapter, __IAVF_REMOVE);
3974 	adapter->aq_required = 0;
3975 	mutex_unlock(&adapter->crit_lock);
3976 
3977 #ifdef CONFIG_PM
3978 	pci_save_state(pdev);
3979 
3980 #endif
3981 	pci_disable_device(pdev);
3982 }
3983 
3984 /**
3985  * iavf_probe - Device Initialization Routine
3986  * @pdev: PCI device information struct
3987  * @ent: entry in iavf_pci_tbl
3988  *
3989  * Returns 0 on success, negative on failure
3990  *
3991  * iavf_probe initializes an adapter identified by a pci_dev structure.
3992  * The OS initialization, configuring of the adapter private structure,
3993  * and a hardware reset occur.
3994  **/
3995 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3996 {
3997 	struct net_device *netdev;
3998 	struct iavf_adapter *adapter = NULL;
3999 	struct iavf_hw *hw = NULL;
4000 	int err;
4001 
4002 	err = pci_enable_device(pdev);
4003 	if (err)
4004 		return err;
4005 
4006 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4007 	if (err) {
4008 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4009 		if (err) {
4010 			dev_err(&pdev->dev,
4011 				"DMA configuration failed: 0x%x\n", err);
4012 			goto err_dma;
4013 		}
4014 	}
4015 
4016 	err = pci_request_regions(pdev, iavf_driver_name);
4017 	if (err) {
4018 		dev_err(&pdev->dev,
4019 			"pci_request_regions failed 0x%x\n", err);
4020 		goto err_pci_reg;
4021 	}
4022 
4023 	pci_enable_pcie_error_reporting(pdev);
4024 
4025 	pci_set_master(pdev);
4026 
4027 	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4028 				   IAVF_MAX_REQ_QUEUES);
4029 	if (!netdev) {
4030 		err = -ENOMEM;
4031 		goto err_alloc_etherdev;
4032 	}
4033 
4034 	SET_NETDEV_DEV(netdev, &pdev->dev);
4035 
4036 	pci_set_drvdata(pdev, netdev);
4037 	adapter = netdev_priv(netdev);
4038 
4039 	adapter->netdev = netdev;
4040 	adapter->pdev = pdev;
4041 
4042 	hw = &adapter->hw;
4043 	hw->back = adapter;
4044 
4045 	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4046 	iavf_change_state(adapter, __IAVF_STARTUP);
4047 
4048 	/* Call save state here because it relies on the adapter struct. */
4049 	pci_save_state(pdev);
4050 
4051 	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4052 			      pci_resource_len(pdev, 0));
4053 	if (!hw->hw_addr) {
4054 		err = -EIO;
4055 		goto err_ioremap;
4056 	}
4057 	hw->vendor_id = pdev->vendor;
4058 	hw->device_id = pdev->device;
4059 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4060 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
4061 	hw->subsystem_device_id = pdev->subsystem_device;
4062 	hw->bus.device = PCI_SLOT(pdev->devfn);
4063 	hw->bus.func = PCI_FUNC(pdev->devfn);
4064 	hw->bus.bus_id = pdev->bus->number;
4065 
4066 	/* set up the locks for the AQ, do this only once in probe
4067 	 * and destroy them only once in remove
4068 	 */
4069 	mutex_init(&adapter->crit_lock);
4070 	mutex_init(&adapter->client_lock);
4071 	mutex_init(&hw->aq.asq_mutex);
4072 	mutex_init(&hw->aq.arq_mutex);
4073 
4074 	spin_lock_init(&adapter->mac_vlan_list_lock);
4075 	spin_lock_init(&adapter->cloud_filter_list_lock);
4076 	spin_lock_init(&adapter->fdir_fltr_lock);
4077 	spin_lock_init(&adapter->adv_rss_lock);
4078 
4079 	INIT_LIST_HEAD(&adapter->mac_filter_list);
4080 	INIT_LIST_HEAD(&adapter->vlan_filter_list);
4081 	INIT_LIST_HEAD(&adapter->cloud_filter_list);
4082 	INIT_LIST_HEAD(&adapter->fdir_list_head);
4083 	INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4084 
4085 	INIT_WORK(&adapter->reset_task, iavf_reset_task);
4086 	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4087 	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4088 	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
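	/* Stagger the first watchdog run by up to 35 ms based on the PCI
	 * function number, so many VFs probing at once do not contact the PF
	 * at the same time.
	 */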
4089 	queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4090 			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4091 
4092 	/* Setup the wait queue for indicating transition to down status */
4093 	init_waitqueue_head(&adapter->down_waitqueue);
4094 
4095 	return 0;
4096 
4097 err_ioremap:
4098 	free_netdev(netdev);
4099 err_alloc_etherdev:
4100 	pci_disable_pcie_error_reporting(pdev);
4101 	pci_release_regions(pdev);
4102 err_pci_reg:
4103 err_dma:
4104 	pci_disable_device(pdev);
4105 	return err;
4106 }
4107 
4108 /**
4109  * iavf_suspend - Power management suspend routine
4110  * @dev_d: device info pointer
4111  *
4112  * Called when the system (VM) is entering sleep/suspend.
4113  **/
4114 static int __maybe_unused iavf_suspend(struct device *dev_d)
4115 {
4116 	struct net_device *netdev = dev_get_drvdata(dev_d);
4117 	struct iavf_adapter *adapter = netdev_priv(netdev);
4118 
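	/* Quiesce the interface and release interrupt resources; they are
	 * re-acquired in iavf_resume().
	 */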
4119 	netif_device_detach(netdev);
4120 
4121 	while (!mutex_trylock(&adapter->crit_lock))
4122 		usleep_range(500, 1000);
4123 
4124 	if (netif_running(netdev)) {
4125 		rtnl_lock();
4126 		iavf_down(adapter);
4127 		rtnl_unlock();
4128 	}
4129 	iavf_free_misc_irq(adapter);
4130 	iavf_reset_interrupt_capability(adapter);
4131 
4132 	mutex_unlock(&adapter->crit_lock);
4133 
4134 	return 0;
4135 }
4136 
4137 /**
4138  * iavf_resume - Power management resume routine
4139  * @dev_d: device info pointer
4140  *
4141  * Called when the system (VM) is resumed from sleep/suspend.
4142  **/
4143 static int __maybe_unused iavf_resume(struct device *dev_d)
4144 {
4145 	struct pci_dev *pdev = to_pci_dev(dev_d);
4146 	struct iavf_adapter *adapter;
4147 	int err;
4148 
4149 	adapter = iavf_pdev_to_adapter(pdev);
4150 
4151 	pci_set_master(pdev);
4152 
4153 	rtnl_lock();
4154 	err = iavf_set_interrupt_capability(adapter);
4155 	if (err) {
4156 		rtnl_unlock();
4157 		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
4158 		return err;
4159 	}
4160 	err = iavf_request_misc_irq(adapter);
4161 	rtnl_unlock();
4162 	if (err) {
4163 		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
4164 		return err;
4165 	}
4166 
4167 	queue_work(iavf_wq, &adapter->reset_task);
4168 
4169 	netif_device_attach(adapter->netdev);
4170 
4171 	return err;
4172 }
4173 
4174 /**
4175  * iavf_remove - Device Removal Routine
4176  * @pdev: PCI device information struct
4177  *
4178  * iavf_remove is called by the PCI subsystem to alert the driver
4179  * that it should release a PCI device.  This could be caused by a
4180  * Hot-Plug event, or because the driver is going to be removed from
4181  * memory.
4182  **/
4183 static void iavf_remove(struct pci_dev *pdev)
4184 {
4185 	struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4186 	struct iavf_fdir_fltr *fdir, *fdirtmp;
4187 	struct iavf_vlan_filter *vlf, *vlftmp;
4188 	struct iavf_cloud_filter *cf, *cftmp;
4189 	struct iavf_adv_rss *rss, *rsstmp;
4190 	struct iavf_mac_filter *f, *ftmp;
4191 	struct net_device *netdev;
4192 	struct iavf_hw *hw;
4193 	int err;
4194 
4195 	netdev = adapter->netdev;
4196 	hw = &adapter->hw;
4197 
4198 	if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
4199 		return;
4200 
4201 	/* Wait until port initialization is complete.
4202 	 * There are flows where register/unregister netdev may race.
4203 	 */
4204 	while (1) {
4205 		mutex_lock(&adapter->crit_lock);
4206 		if (adapter->state == __IAVF_RUNNING ||
4207 		    adapter->state == __IAVF_DOWN ||
4208 		    adapter->state == __IAVF_INIT_FAILED) {
4209 			mutex_unlock(&adapter->crit_lock);
4210 			break;
4211 		}
4212 		/* Simply return if we already went through iavf_shutdown */
4213 		if (adapter->state == __IAVF_REMOVE) {
4214 			mutex_unlock(&adapter->crit_lock);
4215 			return;
4216 		}
4217 
4218 		mutex_unlock(&adapter->crit_lock);
4219 		usleep_range(500, 1000);
4220 	}
4221 	cancel_delayed_work_sync(&adapter->watchdog_task);
4222 
4223 	if (adapter->netdev_registered) {
4224 		rtnl_lock();
4225 		unregister_netdevice(netdev);
4226 		adapter->netdev_registered = false;
4227 		rtnl_unlock();
4228 	}
4229 	if (CLIENT_ALLOWED(adapter)) {
4230 		err = iavf_lan_del_device(adapter);
4231 		if (err)
4232 			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
4233 				 err);
4234 	}
4235 
4236 	mutex_lock(&adapter->crit_lock);
4237 	dev_info(&adapter->pdev->dev, "Remove device\n");
4238 	iavf_change_state(adapter, __IAVF_REMOVE);
4239 
4240 	iavf_request_reset(adapter);
4241 	msleep(50);
4242 	/* If the FW isn't responding, kick it once, but only once. */
4243 	if (!iavf_asq_done(hw)) {
4244 		iavf_request_reset(adapter);
4245 		msleep(50);
4246 	}
4247 
4248 	iavf_misc_irq_disable(adapter);
4249 	/* Shut down all the garbage mashers on the detention level */
4250 	cancel_work_sync(&adapter->reset_task);
4251 	cancel_delayed_work_sync(&adapter->watchdog_task);
4252 	cancel_work_sync(&adapter->adminq_task);
4253 	cancel_delayed_work_sync(&adapter->client_task);
4254 
4255 	adapter->aq_required = 0;
4256 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
4257 
4258 	iavf_free_all_tx_resources(adapter);
4259 	iavf_free_all_rx_resources(adapter);
4260 	iavf_free_misc_irq(adapter);
4261 
4262 	iavf_reset_interrupt_capability(adapter);
4263 	iavf_free_q_vectors(adapter);
4264 
4265 	iavf_free_rss(adapter);
4266 
4267 	if (hw->aq.asq.count)
4268 		iavf_shutdown_adminq(hw);
4269 
4270 	/* destroy the locks only once, here */
4271 	mutex_destroy(&hw->aq.arq_mutex);
4272 	mutex_destroy(&hw->aq.asq_mutex);
4273 	mutex_destroy(&adapter->client_lock);
4274 	mutex_unlock(&adapter->crit_lock);
4275 	mutex_destroy(&adapter->crit_lock);
4276 
4277 	iounmap(hw->hw_addr);
4278 	pci_release_regions(pdev);
4279 	iavf_free_queues(adapter);
4280 	kfree(adapter->vf_res);
4281 	spin_lock_bh(&adapter->mac_vlan_list_lock);
4282 	/* If we got removed before an up/down sequence, we've got a filter
4283 	 * hanging out there that we need to get rid of.
4284 	 */
4285 	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
4286 		list_del(&f->list);
4287 		kfree(f);
4288 	}
4289 	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
4290 				 list) {
4291 		list_del(&vlf->list);
4292 		kfree(vlf);
4293 	}
4294 
4295 	spin_unlock_bh(&adapter->mac_vlan_list_lock);
4296 
4297 	spin_lock_bh(&adapter->cloud_filter_list_lock);
4298 	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
4299 		list_del(&cf->list);
4300 		kfree(cf);
4301 	}
4302 	spin_unlock_bh(&adapter->cloud_filter_list_lock);
4303 
4304 	spin_lock_bh(&adapter->fdir_fltr_lock);
4305 	list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
4306 		list_del(&fdir->list);
4307 		kfree(fdir);
4308 	}
4309 	spin_unlock_bh(&adapter->fdir_fltr_lock);
4310 
4311 	spin_lock_bh(&adapter->adv_rss_lock);
4312 	list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
4313 				 list) {
4314 		list_del(&rss->list);
4315 		kfree(rss);
4316 	}
4317 	spin_unlock_bh(&adapter->adv_rss_lock);
4318 
4319 	free_netdev(netdev);
4320 
4321 	pci_disable_pcie_error_reporting(pdev);
4322 
4323 	pci_disable_device(pdev);
4324 }
4325 
4326 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
4327 
4328 static struct pci_driver iavf_driver = {
4329 	.name      = iavf_driver_name,
4330 	.id_table  = iavf_pci_tbl,
4331 	.probe     = iavf_probe,
4332 	.remove    = iavf_remove,
4333 	.driver.pm = &iavf_pm_ops,
4334 	.shutdown  = iavf_shutdown,
4335 };
4336 
4337 /**
4338  * iavf_init_module - Driver Registration Routine
4339  *
4340  * iavf_init_module is the first routine called when the driver is
4341  * loaded. All it does is register with the PCI subsystem.
4342  **/
4343 static int __init iavf_init_module(void)
4344 {
4345 	int ret;
4346 
4347 	pr_info("iavf: %s\n", iavf_driver_string);
4348 
4349 	pr_info("%s\n", iavf_copyright);
4350 
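	/* Workqueue shared by all iavf adapters, limited to a single active
	 * work item; WQ_MEM_RECLAIM guarantees forward progress during memory
	 * reclaim.
	 */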
4351 	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
4352 				  iavf_driver_name);
4353 	if (!iavf_wq) {
4354 		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
4355 		return -ENOMEM;
4356 	}
4357 
4358 	ret = pci_register_driver(&iavf_driver);
4359 	if (ret)
4360 		destroy_workqueue(iavf_wq);
4361 
4362 	return ret;
4363 }
4364 
4365 module_init(iavf_init_module);
4366 
4367 /**
4368  * iavf_exit_module - Driver Exit Cleanup Routine
4369  *
4370  * iavf_exit_module is called just before the driver is removed
4371  * from memory.
4372  **/
4373 static void __exit iavf_exit_module(void)
4374 {
4375 	pci_unregister_driver(&iavf_driver);
4376 	destroy_workqueue(iavf_wq);
4377 }
4378 
4379 module_exit(iavf_exit_module);
4380 
4381 /* iavf_main.c */
4382