1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3 
4 #include <linux/prefetch.h>
5 
6 #include "iavf.h"
7 #include "iavf_trace.h"
8 #include "iavf_prototype.h"
9 
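/* build_ctob() packs the command, header offsets, buffer size and L2 tag
 * fields, together with the DATA descriptor type, into the Tx descriptor's
 * cmd_type_offset_bsz quad word (QW1).
 */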
10 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
11 				u32 td_tag)
12 {
13 	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
14 			   ((u64)td_cmd  << IAVF_TXD_QW1_CMD_SHIFT) |
15 			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
16 			   ((u64)size  << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
17 			   ((u64)td_tag  << IAVF_TXD_QW1_L2TAG1_SHIFT));
18 }
19 
20 #define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)
21 
22 /**
23  * iavf_unmap_and_free_tx_resource - Release a Tx buffer
24  * @ring:      the ring that owns the buffer
25  * @tx_buffer: the buffer to free
26  **/
27 static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
28 					    struct iavf_tx_buffer *tx_buffer)
29 {
30 	if (tx_buffer->skb) {
31 		if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
32 			kfree(tx_buffer->raw_buf);
33 		else
34 			dev_kfree_skb_any(tx_buffer->skb);
35 		if (dma_unmap_len(tx_buffer, len))
36 			dma_unmap_single(ring->dev,
37 					 dma_unmap_addr(tx_buffer, dma),
38 					 dma_unmap_len(tx_buffer, len),
39 					 DMA_TO_DEVICE);
40 	} else if (dma_unmap_len(tx_buffer, len)) {
41 		dma_unmap_page(ring->dev,
42 			       dma_unmap_addr(tx_buffer, dma),
43 			       dma_unmap_len(tx_buffer, len),
44 			       DMA_TO_DEVICE);
45 	}
46 
47 	tx_buffer->next_to_watch = NULL;
48 	tx_buffer->skb = NULL;
49 	dma_unmap_len_set(tx_buffer, len, 0);
50 	/* tx_buffer must be completely set up in the transmit path */
51 }
52 
53 /**
54  * iavf_clean_tx_ring - Free all Tx buffers in a ring
55  * @tx_ring: ring to be cleaned
56  **/
57 void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
58 {
59 	unsigned long bi_size;
60 	u16 i;
61 
62 	/* ring already cleared, nothing to do */
63 	if (!tx_ring->tx_bi)
64 		return;
65 
66 	/* Free all the Tx ring sk_buffs */
67 	for (i = 0; i < tx_ring->count; i++)
68 		iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
69 
70 	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
71 	memset(tx_ring->tx_bi, 0, bi_size);
72 
73 	/* Zero out the descriptor ring */
74 	memset(tx_ring->desc, 0, tx_ring->size);
75 
76 	tx_ring->next_to_use = 0;
77 	tx_ring->next_to_clean = 0;
78 
79 	if (!tx_ring->netdev)
80 		return;
81 
82 	/* cleanup Tx queue statistics */
83 	netdev_tx_reset_queue(txring_txq(tx_ring));
84 }
85 
86 /**
87  * iavf_free_tx_resources - Free Tx resources per queue
88  * @tx_ring: Tx descriptor ring for a specific queue
89  *
90  * Free all transmit software resources
91  **/
92 void iavf_free_tx_resources(struct iavf_ring *tx_ring)
93 {
94 	iavf_clean_tx_ring(tx_ring);
95 	kfree(tx_ring->tx_bi);
96 	tx_ring->tx_bi = NULL;
97 
98 	if (tx_ring->desc) {
99 		dma_free_coherent(tx_ring->dev, tx_ring->size,
100 				  tx_ring->desc, tx_ring->dma);
101 		tx_ring->desc = NULL;
102 	}
103 }
104 
105 /**
106  * iavf_get_tx_pending - how many Tx descriptors not processed
107  * @ring: the ring of descriptors
108  * @in_sw: is tx_pending being checked in SW or HW
109  *
110  * Since there is no access to the ring head register
111  * in XL710, we need to use our local copies
112  **/
113 u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
114 {
115 	u32 head, tail;
116 
117 	/* underlying hardware might not allow access and/or always return
118 	 * 0 for the head/tail registers so just use the cached values
119 	 */
120 	head = ring->next_to_clean;
121 	tail = ring->next_to_use;
122 
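	/* The ring is circular: if tail has wrapped past head, the number of
	 * descriptors still outstanding is tail + count - head.
	 */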
123 	if (head != tail)
124 		return (head < tail) ?
125 			tail - head : (tail + ring->count - head);
126 
127 	return 0;
128 }
129 
130 /**
131  * iavf_detect_recover_hung - Function to detect and recover hung queues
132  * @vsi:  pointer to vsi struct with tx queues
133  *
134  * VSI has netdev and netdev has TX queues. This function checks each of
135  * those TX queues and, if a queue is hung, triggers recovery by issuing a SW interrupt.
136  **/
137 void iavf_detect_recover_hung(struct iavf_vsi *vsi)
138 {
139 	struct iavf_ring *tx_ring = NULL;
140 	struct net_device *netdev;
141 	unsigned int i;
142 	int packets;
143 
144 	if (!vsi)
145 		return;
146 
147 	if (test_bit(__IAVF_VSI_DOWN, vsi->state))
148 		return;
149 
150 	netdev = vsi->netdev;
151 	if (!netdev)
152 		return;
153 
154 	if (!netif_carrier_ok(netdev))
155 		return;
156 
157 	for (i = 0; i < vsi->back->num_active_queues; i++) {
158 		tx_ring = &vsi->back->tx_rings[i];
159 		if (tx_ring && tx_ring->desc) {
160 			/* If packet counter has not changed the queue is
161 			 * likely stalled, so force an interrupt for this
162 			 * queue.
163 			 *
164 			 * prev_pkt_ctr would be negative if there was no
165 			 * pending work.
166 			 */
167 			packets = tx_ring->stats.packets & INT_MAX;
168 			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
169 				iavf_force_wb(vsi, tx_ring->q_vector);
170 				continue;
171 			}
172 
173 			/* Memory barrier between read of packet count and call
174 			 * to iavf_get_tx_pending()
175 			 */
176 			smp_rmb();
177 			tx_ring->tx_stats.prev_pkt_ctr =
178 			  iavf_get_tx_pending(tx_ring, true) ? packets : -1;
179 		}
180 	}
181 }
182 
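/* Request a descriptor write-back when fewer than WB_STRIDE descriptors are pending */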
183 #define WB_STRIDE 4
184 
185 /**
186  * iavf_clean_tx_irq - Reclaim resources after transmit completes
187  * @vsi: the VSI we care about
188  * @tx_ring: Tx ring to clean
189  * @napi_budget: Used to determine if we are in netpoll
190  *
191  * Returns true if there's any budget left (i.e. the clean is finished)
192  **/
193 static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
194 			      struct iavf_ring *tx_ring, int napi_budget)
195 {
196 	int i = tx_ring->next_to_clean;
197 	struct iavf_tx_buffer *tx_buf;
198 	struct iavf_tx_desc *tx_desc;
199 	unsigned int total_bytes = 0, total_packets = 0;
200 	unsigned int budget = vsi->work_limit;
201 
202 	tx_buf = &tx_ring->tx_bi[i];
203 	tx_desc = IAVF_TX_DESC(tx_ring, i);
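	/* Bias the index by -count so the wrap-around checks below only have to
	 * test for i reaching zero instead of comparing against the ring size
	 * on every descriptor.
	 */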
204 	i -= tx_ring->count;
205 
206 	do {
207 		struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
208 
209 		/* if next_to_watch is not set then there is no work pending */
210 		if (!eop_desc)
211 			break;
212 
213 		/* prevent any other reads prior to eop_desc */
214 		smp_rmb();
215 
216 		iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
217 		/* if the descriptor isn't done, no work yet to do */
218 		if (!(eop_desc->cmd_type_offset_bsz &
219 		      cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
220 			break;
221 
222 		/* clear next_to_watch to prevent false hangs */
223 		tx_buf->next_to_watch = NULL;
224 
225 		/* update the statistics for this packet */
226 		total_bytes += tx_buf->bytecount;
227 		total_packets += tx_buf->gso_segs;
228 
229 		/* free the skb */
230 		napi_consume_skb(tx_buf->skb, napi_budget);
231 
232 		/* unmap skb header data */
233 		dma_unmap_single(tx_ring->dev,
234 				 dma_unmap_addr(tx_buf, dma),
235 				 dma_unmap_len(tx_buf, len),
236 				 DMA_TO_DEVICE);
237 
238 		/* clear tx_buffer data */
239 		tx_buf->skb = NULL;
240 		dma_unmap_len_set(tx_buf, len, 0);
241 
242 		/* unmap remaining buffers */
243 		while (tx_desc != eop_desc) {
244 			iavf_trace(clean_tx_irq_unmap,
245 				   tx_ring, tx_desc, tx_buf);
246 
247 			tx_buf++;
248 			tx_desc++;
249 			i++;
250 			if (unlikely(!i)) {
251 				i -= tx_ring->count;
252 				tx_buf = tx_ring->tx_bi;
253 				tx_desc = IAVF_TX_DESC(tx_ring, 0);
254 			}
255 
256 			/* unmap any remaining paged data */
257 			if (dma_unmap_len(tx_buf, len)) {
258 				dma_unmap_page(tx_ring->dev,
259 					       dma_unmap_addr(tx_buf, dma),
260 					       dma_unmap_len(tx_buf, len),
261 					       DMA_TO_DEVICE);
262 				dma_unmap_len_set(tx_buf, len, 0);
263 			}
264 		}
265 
266 		/* move us one more past the eop_desc for start of next pkt */
267 		tx_buf++;
268 		tx_desc++;
269 		i++;
270 		if (unlikely(!i)) {
271 			i -= tx_ring->count;
272 			tx_buf = tx_ring->tx_bi;
273 			tx_desc = IAVF_TX_DESC(tx_ring, 0);
274 		}
275 
276 		prefetch(tx_desc);
277 
278 		/* update budget accounting */
279 		budget--;
280 	} while (likely(budget));
281 
282 	i += tx_ring->count;
283 	tx_ring->next_to_clean = i;
284 	u64_stats_update_begin(&tx_ring->syncp);
285 	tx_ring->stats.bytes += total_bytes;
286 	tx_ring->stats.packets += total_packets;
287 	u64_stats_update_end(&tx_ring->syncp);
288 	tx_ring->q_vector->tx.total_bytes += total_bytes;
289 	tx_ring->q_vector->tx.total_packets += total_packets;
290 
291 	if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
292 		/* check to see if there are < 4 descriptors
293 		 * waiting to be written back; if so, kick the hardware to force
294 		 * them to be written back in case we stay in NAPI.
295 		 * In this mode on X722 we do not enable interrupts.
296 		 */
297 		unsigned int j = iavf_get_tx_pending(tx_ring, false);
298 
299 		if (budget &&
300 		    ((j / WB_STRIDE) == 0) && (j > 0) &&
301 		    !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
302 		    (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
303 			tx_ring->arm_wb = true;
304 	}
305 
306 	/* notify netdev of completed buffers */
307 	netdev_tx_completed_queue(txring_txq(tx_ring),
308 				  total_packets, total_bytes);
309 
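/* Only wake the queue once at least twice DESC_NEEDED descriptors are free again */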
310 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
311 	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
312 		     (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
313 		/* Make sure that anybody stopping the queue after this
314 		 * sees the new next_to_clean.
315 		 */
316 		smp_mb();
317 		if (__netif_subqueue_stopped(tx_ring->netdev,
318 					     tx_ring->queue_index) &&
319 		   !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
320 			netif_wake_subqueue(tx_ring->netdev,
321 					    tx_ring->queue_index);
322 			++tx_ring->tx_stats.restart_queue;
323 		}
324 	}
325 
326 	return !!budget;
327 }
328 
329 /**
330  * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
331  * @vsi: the VSI we care about
332  * @q_vector: the vector on which to enable writeback
333  *
334  **/
335 static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
336 				  struct iavf_q_vector *q_vector)
337 {
338 	u16 flags = q_vector->tx.ring[0].flags;
339 	u32 val;
340 
341 	if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
342 		return;
343 
344 	if (q_vector->arm_wb_state)
345 		return;
346 
347 	val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
348 	      IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
349 
350 	wr32(&vsi->back->hw,
351 	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
352 	q_vector->arm_wb_state = true;
353 }
354 
355 /**
356  * iavf_force_wb - Issue SW Interrupt so HW does a wb
357  * @vsi: the VSI we care about
358  * @q_vector: the vector on which to force writeback
359  *
360  **/
361 void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
362 {
363 	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
364 		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
365 		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
366 		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
367 		  /* allow 00 to be written to the index */;
368 
369 	wr32(&vsi->back->hw,
370 	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
371 	     val);
372 }
373 
374 static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
375 					struct iavf_ring_container *rc)
376 {
377 	return &q_vector->rx == rc;
378 }
379 
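/* Byte-count divisor used by the adaptive ITR math below; faster links get a
 * larger divisor, so the same average frame size yields a smaller ITR value.
 */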
380 static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
381 {
382 	unsigned int divisor;
383 
384 	switch (q_vector->adapter->link_speed) {
385 	case VIRTCHNL_LINK_SPEED_40GB:
386 		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
387 		break;
388 	case VIRTCHNL_LINK_SPEED_25GB:
389 	case VIRTCHNL_LINK_SPEED_20GB:
390 		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
391 		break;
392 	default:
393 	case VIRTCHNL_LINK_SPEED_10GB:
394 		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
395 		break;
396 	case VIRTCHNL_LINK_SPEED_1GB:
397 	case VIRTCHNL_LINK_SPEED_100MB:
398 		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
399 		break;
400 	}
401 
402 	return divisor;
403 }
404 
405 /**
406  * iavf_update_itr - update the dynamic ITR value based on statistics
407  * @q_vector: structure containing interrupt and ring information
408  * @rc: structure containing ring performance data
409  *
410  * Stores a new ITR value based on packets and byte
411  * counts during the last interrupt.  The advantage of per interrupt
412  * computation is faster updates and more accurate ITR for the current
413  * traffic pattern.  Constants in this function were computed
414  * based on theoretical maximum wire speed and thresholds were set based
415  * on testing data as well as attempting to minimize response time
416  * while increasing bulk throughput.
417  **/
418 static void iavf_update_itr(struct iavf_q_vector *q_vector,
419 			    struct iavf_ring_container *rc)
420 {
421 	unsigned int avg_wire_size, packets, bytes, itr;
422 	unsigned long next_update = jiffies;
423 
424 	/* If we don't have any rings just leave ourselves set for maximum
425 	 * possible latency so we take ourselves out of the equation.
426 	 */
427 	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
428 		return;
429 
430 	/* For Rx we want to push the delay up and default to low latency.
431 	 * for Tx we want to pull the delay down and default to high latency.
432 	 */
433 	itr = iavf_container_is_rx(q_vector, rc) ?
434 	      IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
435 	      IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
436 
437 	/* If we didn't update within up to 1 - 2 jiffies we can assume
438 	 * that either packets are coming in so slow there hasn't been
439 	 * any work, or that there is so much work that NAPI is dealing
440 	 * with interrupt moderation and we don't need to do anything.
441 	 */
442 	if (time_after(next_update, rc->next_update))
443 		goto clear_counts;
444 
445 	/* If itr_countdown is set it means we programmed an ITR within
446 	 * the last 4 interrupt cycles. This has a side effect of us
447 	 * potentially firing an early interrupt. In order to work around
448 	 * this we need to throw out any data received for a few
449 	 * interrupts following the update.
450 	 */
451 	if (q_vector->itr_countdown) {
452 		itr = rc->target_itr;
453 		goto clear_counts;
454 	}
455 
456 	packets = rc->total_packets;
457 	bytes = rc->total_bytes;
458 
459 	if (iavf_container_is_rx(q_vector, rc)) {
460 		/* If this is Rx and there are 1 to 4 packets and bytes are less
461 		 * than 9000, assume insufficient data to use the bulk rate limiting
462 		 * approach unless Tx is already in bulk rate limiting. We
463 		 * are likely latency driven.
464 		 */
465 		if (packets && packets < 4 && bytes < 9000 &&
466 		    (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
467 			itr = IAVF_ITR_ADAPTIVE_LATENCY;
468 			goto adjust_by_size;
469 		}
470 	} else if (packets < 4) {
471 		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
472 		 * bulk mode and we are receiving 4 or fewer packets just
473 		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
474 		 * that the Rx can relax.
475 		 */
476 		if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
477 		    (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
478 		     IAVF_ITR_ADAPTIVE_MAX_USECS)
479 			goto clear_counts;
480 	} else if (packets > 32) {
481 		/* If we have processed over 32 packets in a single interrupt
482 		 * for Tx assume we need to switch over to "bulk" mode.
483 		 */
484 		rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
485 	}
486 
487 	/* We have no packets to actually measure against. This means
488 	 * either one of the other queues on this vector is active or
489 	 * we are a Tx queue doing TSO with too high of an interrupt rate.
490 	 *
491 	 * Between 4 and 56 we can assume that our current interrupt delay
492 	 * is only slightly too low. As such we should increase it by a small
493 	 * fixed amount.
494 	 */
495 	if (packets < 56) {
496 		itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
497 		if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
498 			itr &= IAVF_ITR_ADAPTIVE_LATENCY;
499 			itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
500 		}
501 		goto clear_counts;
502 	}
503 
504 	if (packets <= 256) {
505 		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
506 		itr &= IAVF_ITR_MASK;
507 
508 		/* Between 56 and 112 is our "goldilocks" zone where we are
509 		 * working out "just right". Just report that our current
510 		 * ITR is good for us.
511 		 */
512 		if (packets <= 112)
513 			goto clear_counts;
514 
515 		/* If packet count is 128 or greater we are likely looking
516 		 * at a slight overrun of the delay we want. Try halving
517 		 * our delay to see if that will cut the number of packets
518 		 * in half per interrupt.
519 		 */
520 		itr /= 2;
521 		itr &= IAVF_ITR_MASK;
522 		if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
523 			itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
524 
525 		goto clear_counts;
526 	}
527 
528 	/* The paths below assume we are dealing with a bulk ITR since
529 	 * number of packets is greater than 256. We are just going to have
530 	 * to compute a value and try to bring the count under control,
531 	 * though for smaller packet sizes there isn't much we can do as
532 	 * NAPI polling will likely be kicking in sooner rather than later.
533 	 */
534 	itr = IAVF_ITR_ADAPTIVE_BULK;
535 
536 adjust_by_size:
537 	/* If packet counts are 256 or greater we can assume we have a gross
538 	 * overestimation of what the rate should be. Instead of trying to fine
539 	 * tune it just use the formula below to try and dial in an exact value
540 	 * given the current packet size of the frame.
541 	 */
542 	avg_wire_size = bytes / packets;
543 
544 	/* The following is a crude approximation of:
545 	 *  wmem_default / (size + overhead) = desired_pkts_per_int
546 	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
547 	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
548 	 *
549 	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
550 	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
551 	 * formula down to
552 	 *
553 	 *  (170 * (size + 24)) / (size + 640) = ITR
554 	 *
555 	 * We first do some math on the packet size and then finally bitshift
556 	 * by 8 after rounding up. We also have to account for PCIe link speed
557 	 * difference as ITR scales based on this.
558 	 */
559 	if (avg_wire_size <= 60) {
560 		/* Start at 250k ints/sec */
561 		avg_wire_size = 4096;
562 	} else if (avg_wire_size <= 380) {
563 		/* 250K ints/sec to 60K ints/sec */
564 		avg_wire_size *= 40;
565 		avg_wire_size += 1696;
566 	} else if (avg_wire_size <= 1084) {
567 		/* 60K ints/sec to 36K ints/sec */
568 		avg_wire_size *= 15;
569 		avg_wire_size += 11452;
570 	} else if (avg_wire_size <= 1980) {
571 		/* 36K ints/sec to 30K ints/sec */
572 		avg_wire_size *= 5;
573 		avg_wire_size += 22420;
574 	} else {
575 		/* plateau at a limit of 30K ints/sec */
576 		avg_wire_size = 32256;
577 	}
578 
579 	/* If we are in low latency mode halve our delay which doubles the
580 	 * rate to somewhere between 100K to 16K ints/sec
581 	 */
582 	if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
583 		avg_wire_size /= 2;
584 
585 	/* Resultant value is 256 times larger than it needs to be. This
586 	 * gives us room to adjust the value as needed to either increase
587 	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
588 	 *
589 	 * Use addition as we have already recorded the new latency flag
590 	 * for the ITR value.
591 	 */
592 	itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
593 	       IAVF_ITR_ADAPTIVE_MIN_INC;
594 
595 	if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
596 		itr &= IAVF_ITR_ADAPTIVE_LATENCY;
597 		itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
598 	}
599 
600 clear_counts:
601 	/* write back value */
602 	rc->target_itr = itr;
603 
604 	/* next update should occur within next jiffy */
605 	rc->next_update = next_update + 1;
606 
607 	rc->total_bytes = 0;
608 	rc->total_packets = 0;
609 }
610 
611 /**
612  * iavf_setup_tx_descriptors - Allocate the Tx descriptors
613  * @tx_ring: the tx ring to set up
614  *
615  * Return 0 on success, negative on error
616  **/
617 int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
618 {
619 	struct device *dev = tx_ring->dev;
620 	int bi_size;
621 
622 	if (!dev)
623 		return -ENOMEM;
624 
625 	/* warn if we are about to overwrite the pointer */
626 	WARN_ON(tx_ring->tx_bi);
627 	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
628 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
629 	if (!tx_ring->tx_bi)
630 		goto err;
631 
632 	/* round up to nearest 4K */
633 	tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
634 	tx_ring->size = ALIGN(tx_ring->size, 4096);
635 	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
636 					   &tx_ring->dma, GFP_KERNEL);
637 	if (!tx_ring->desc) {
638 		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
639 			 tx_ring->size);
640 		goto err;
641 	}
642 
643 	tx_ring->next_to_use = 0;
644 	tx_ring->next_to_clean = 0;
645 	tx_ring->tx_stats.prev_pkt_ctr = -1;
646 	return 0;
647 
648 err:
649 	kfree(tx_ring->tx_bi);
650 	tx_ring->tx_bi = NULL;
651 	return -ENOMEM;
652 }
653 
654 /**
655  * iavf_clean_rx_ring - Free Rx buffers
656  * @rx_ring: ring to be cleaned
657  **/
658 void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
659 {
660 	unsigned long bi_size;
661 	u16 i;
662 
663 	/* ring already cleared, nothing to do */
664 	if (!rx_ring->rx_bi)
665 		return;
666 
667 	if (rx_ring->skb) {
668 		dev_kfree_skb(rx_ring->skb);
669 		rx_ring->skb = NULL;
670 	}
671 
672 	/* Free all the Rx ring sk_buffs */
673 	for (i = 0; i < rx_ring->count; i++) {
674 		struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
675 
676 		if (!rx_bi->page)
677 			continue;
678 
679 		/* Invalidate cache lines that may have been written to by
680 		 * device so that we avoid corrupting memory.
681 		 */
682 		dma_sync_single_range_for_cpu(rx_ring->dev,
683 					      rx_bi->dma,
684 					      rx_bi->page_offset,
685 					      rx_ring->rx_buf_len,
686 					      DMA_FROM_DEVICE);
687 
688 		/* free resources associated with mapping */
689 		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
690 				     iavf_rx_pg_size(rx_ring),
691 				     DMA_FROM_DEVICE,
692 				     IAVF_RX_DMA_ATTR);
693 
694 		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
695 
696 		rx_bi->page = NULL;
697 		rx_bi->page_offset = 0;
698 	}
699 
700 	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
701 	memset(rx_ring->rx_bi, 0, bi_size);
702 
703 	/* Zero out the descriptor ring */
704 	memset(rx_ring->desc, 0, rx_ring->size);
705 
706 	rx_ring->next_to_alloc = 0;
707 	rx_ring->next_to_clean = 0;
708 	rx_ring->next_to_use = 0;
709 }
710 
711 /**
712  * iavf_free_rx_resources - Free Rx resources
713  * @rx_ring: ring to clean the resources from
714  *
715  * Free all receive software resources
716  **/
717 void iavf_free_rx_resources(struct iavf_ring *rx_ring)
718 {
719 	iavf_clean_rx_ring(rx_ring);
720 	kfree(rx_ring->rx_bi);
721 	rx_ring->rx_bi = NULL;
722 
723 	if (rx_ring->desc) {
724 		dma_free_coherent(rx_ring->dev, rx_ring->size,
725 				  rx_ring->desc, rx_ring->dma);
726 		rx_ring->desc = NULL;
727 	}
728 }
729 
730 /**
731  * iavf_setup_rx_descriptors - Allocate Rx descriptors
732  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
733  *
734  * Returns 0 on success, negative on failure
735  **/
736 int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
737 {
738 	struct device *dev = rx_ring->dev;
739 	int bi_size;
740 
741 	/* warn if we are about to overwrite the pointer */
742 	WARN_ON(rx_ring->rx_bi);
743 	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
744 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
745 	if (!rx_ring->rx_bi)
746 		goto err;
747 
748 	u64_stats_init(&rx_ring->syncp);
749 
750 	/* Round up to nearest 4K */
751 	rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
752 	rx_ring->size = ALIGN(rx_ring->size, 4096);
753 	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
754 					   &rx_ring->dma, GFP_KERNEL);
755 
756 	if (!rx_ring->desc) {
757 		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
758 			 rx_ring->size);
759 		goto err;
760 	}
761 
762 	rx_ring->next_to_alloc = 0;
763 	rx_ring->next_to_clean = 0;
764 	rx_ring->next_to_use = 0;
765 
766 	return 0;
767 err:
768 	kfree(rx_ring->rx_bi);
769 	rx_ring->rx_bi = NULL;
770 	return -ENOMEM;
771 }
772 
773 /**
774  * iavf_release_rx_desc - Store the new tail and head values
775  * @rx_ring: ring to bump
776  * @val: new head index
777  **/
778 static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
779 {
780 	rx_ring->next_to_use = val;
781 
782 	/* update next to alloc since we have filled the ring */
783 	rx_ring->next_to_alloc = val;
784 
785 	/* Force memory writes to complete before letting h/w
786 	 * know there are new descriptors to fetch.  (Only
787 	 * applicable for weak-ordered memory model archs,
788 	 * such as IA-64).
789 	 */
790 	wmb();
791 	writel(val, rx_ring->tail);
792 }
793 
794 /**
795  * iavf_rx_offset - Return expected offset into page to access data
796  * @rx_ring: Ring we are requesting offset of
797  *
798  * Returns the offset value for ring into the data buffer.
799  */
800 static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
801 {
802 	return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
803 }
804 
805 /**
806  * iavf_alloc_mapped_page - recycle or make a new page
807  * @rx_ring: ring to use
808  * @bi: rx_buffer struct to modify
809  *
810  * Returns true if the page was successfully allocated or
811  * reused.
812  **/
813 static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
814 				   struct iavf_rx_buffer *bi)
815 {
816 	struct page *page = bi->page;
817 	dma_addr_t dma;
818 
819 	/* since we are recycling buffers we should seldom need to alloc */
820 	if (likely(page)) {
821 		rx_ring->rx_stats.page_reuse_count++;
822 		return true;
823 	}
824 
825 	/* alloc new page for storage */
826 	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
827 	if (unlikely(!page)) {
828 		rx_ring->rx_stats.alloc_page_failed++;
829 		return false;
830 	}
831 
832 	/* map page for use */
833 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
834 				 iavf_rx_pg_size(rx_ring),
835 				 DMA_FROM_DEVICE,
836 				 IAVF_RX_DMA_ATTR);
837 
838 	/* if mapping failed free memory back to system since
839 	 * there isn't much point in holding memory we can't use
840 	 */
841 	if (dma_mapping_error(rx_ring->dev, dma)) {
842 		__free_pages(page, iavf_rx_pg_order(rx_ring));
843 		rx_ring->rx_stats.alloc_page_failed++;
844 		return false;
845 	}
846 
847 	bi->dma = dma;
848 	bi->page = page;
849 	bi->page_offset = iavf_rx_offset(rx_ring);
850 
851 	/* initialize pagecnt_bias to 1 representing we fully own page */
852 	bi->pagecnt_bias = 1;
853 
854 	return true;
855 }
856 
857 /**
858  * iavf_receive_skb - Send a completed packet up the stack
859  * @rx_ring:  rx ring in play
860  * @skb: packet to send up
861  * @vlan_tag: vlan tag for packet
862  **/
863 static void iavf_receive_skb(struct iavf_ring *rx_ring,
864 			     struct sk_buff *skb, u16 vlan_tag)
865 {
866 	struct iavf_q_vector *q_vector = rx_ring->q_vector;
867 
868 	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
869 	    (vlan_tag & VLAN_VID_MASK))
870 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
871 
872 	napi_gro_receive(&q_vector->napi, skb);
873 }
874 
875 /**
876  * iavf_alloc_rx_buffers - Replace used receive buffers
877  * @rx_ring: ring to place buffers on
878  * @cleaned_count: number of buffers to replace
879  *
880  * Returns false if all allocations were successful, true if any fail
881  **/
882 bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
883 {
884 	u16 ntu = rx_ring->next_to_use;
885 	union iavf_rx_desc *rx_desc;
886 	struct iavf_rx_buffer *bi;
887 
888 	/* do nothing if no valid netdev defined */
889 	if (!rx_ring->netdev || !cleaned_count)
890 		return false;
891 
892 	rx_desc = IAVF_RX_DESC(rx_ring, ntu);
893 	bi = &rx_ring->rx_bi[ntu];
894 
895 	do {
896 		if (!iavf_alloc_mapped_page(rx_ring, bi))
897 			goto no_buffers;
898 
899 		/* sync the buffer for use by the device */
900 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
901 						 bi->page_offset,
902 						 rx_ring->rx_buf_len,
903 						 DMA_FROM_DEVICE);
904 
905 		/* Refresh the desc even if buffer_addrs didn't change
906 		 * because each write-back erases this info.
907 		 */
908 		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
909 
910 		rx_desc++;
911 		bi++;
912 		ntu++;
913 		if (unlikely(ntu == rx_ring->count)) {
914 			rx_desc = IAVF_RX_DESC(rx_ring, 0);
915 			bi = rx_ring->rx_bi;
916 			ntu = 0;
917 		}
918 
919 		/* clear the status bits for the next_to_use descriptor */
920 		rx_desc->wb.qword1.status_error_len = 0;
921 
922 		cleaned_count--;
923 	} while (cleaned_count);
924 
925 	if (rx_ring->next_to_use != ntu)
926 		iavf_release_rx_desc(rx_ring, ntu);
927 
928 	return false;
929 
930 no_buffers:
931 	if (rx_ring->next_to_use != ntu)
932 		iavf_release_rx_desc(rx_ring, ntu);
933 
934 	/* make sure to come back via polling to try again after
935 	 * allocation failure
936 	 */
937 	return true;
938 }
939 
940 /**
941  * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
942  * @vsi: the VSI we care about
943  * @skb: skb currently being received and modified
944  * @rx_desc: the receive descriptor
945  **/
946 static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
947 				    struct sk_buff *skb,
948 				    union iavf_rx_desc *rx_desc)
949 {
950 	struct iavf_rx_ptype_decoded decoded;
951 	u32 rx_error, rx_status;
952 	bool ipv4, ipv6;
953 	u8 ptype;
954 	u64 qword;
955 
956 	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
957 	ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
958 	rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
959 		   IAVF_RXD_QW1_ERROR_SHIFT;
960 	rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
961 		    IAVF_RXD_QW1_STATUS_SHIFT;
962 	decoded = decode_rx_desc_ptype(ptype);
963 
964 	skb->ip_summed = CHECKSUM_NONE;
965 
966 	skb_checksum_none_assert(skb);
967 
968 	/* Rx csum enabled and ip headers found? */
969 	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
970 		return;
971 
972 	/* did the hardware decode the packet and checksum? */
973 	if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
974 		return;
975 
976 	/* both known and outer_ip must be set for the below code to work */
977 	if (!(decoded.known && decoded.outer_ip))
978 		return;
979 
980 	ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
981 	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
982 	ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
983 	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
984 
985 	if (ipv4 &&
986 	    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
987 			 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
988 		goto checksum_fail;
989 
990 	/* likely incorrect csum if alternate IP extension headers found */
991 	if (ipv6 &&
992 	    rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
993 		/* don't increment checksum err here, non-fatal err */
994 		return;
995 
996 	/* there was some L4 error, count error and punt packet to the stack */
997 	if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
998 		goto checksum_fail;
999 
1000 	/* handle packets that were not able to be checksummed due
1001 	 * to arrival speed, in this case the stack can compute
1002 	 * the csum.
1003 	 */
1004 	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
1005 		return;
1006 
1007 	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
1008 	switch (decoded.inner_prot) {
1009 	case IAVF_RX_PTYPE_INNER_PROT_TCP:
1010 	case IAVF_RX_PTYPE_INNER_PROT_UDP:
1011 	case IAVF_RX_PTYPE_INNER_PROT_SCTP:
1012 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1013 		fallthrough;
1014 	default:
1015 		break;
1016 	}
1017 
1018 	return;
1019 
1020 checksum_fail:
1021 	vsi->back->hw_csum_rx_error++;
1022 }
1023 
1024 /**
1025  * iavf_ptype_to_htype - get a hash type
1026  * @ptype: the ptype value from the descriptor
1027  *
1028  * Returns a hash type to be used by skb_set_hash
1029  **/
1030 static inline int iavf_ptype_to_htype(u8 ptype)
1031 {
1032 	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1033 
1034 	if (!decoded.known)
1035 		return PKT_HASH_TYPE_NONE;
1036 
1037 	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1038 	    decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1039 		return PKT_HASH_TYPE_L4;
1040 	else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1041 		 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1042 		return PKT_HASH_TYPE_L3;
1043 	else
1044 		return PKT_HASH_TYPE_L2;
1045 }
1046 
1047 /**
1048  * iavf_rx_hash - set the hash value in the skb
1049  * @ring: descriptor ring
1050  * @rx_desc: specific descriptor
1051  * @skb: skb currently being received and modified
1052  * @rx_ptype: Rx packet type
1053  **/
1054 static inline void iavf_rx_hash(struct iavf_ring *ring,
1055 				union iavf_rx_desc *rx_desc,
1056 				struct sk_buff *skb,
1057 				u8 rx_ptype)
1058 {
1059 	u32 hash;
1060 	const __le64 rss_mask =
1061 		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
1062 			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
1063 
1064 	if (!(ring->netdev->features & NETIF_F_RXHASH))
1065 		return;
1066 
1067 	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1068 		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1069 		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
1070 	}
1071 }
1072 
1073 /**
1074  * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
1075  * @rx_ring: rx descriptor ring packet is being transacted on
1076  * @rx_desc: pointer to the EOP Rx descriptor
1077  * @skb: pointer to current skb being populated
1078  * @rx_ptype: the packet type decoded by hardware
1079  *
1080  * This function checks the ring, descriptor, and packet information in
1081  * order to populate the hash, checksum, VLAN, protocol, and
1082  * other fields within the skb.
1083  **/
1084 static inline
1085 void iavf_process_skb_fields(struct iavf_ring *rx_ring,
1086 			     union iavf_rx_desc *rx_desc, struct sk_buff *skb,
1087 			     u8 rx_ptype)
1088 {
1089 	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1090 
1091 	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
1092 
1093 	skb_record_rx_queue(skb, rx_ring->queue_index);
1094 
1095 	/* modifies the skb - consumes the enet header */
1096 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1097 }
1098 
1099 /**
1100  * iavf_cleanup_headers - Correct empty headers
1101  * @rx_ring: rx descriptor ring packet is being transacted on
1102  * @skb: pointer to current skb being fixed
1103  *
1104  * Also address the case where we are pulling data in on pages only
1105  * and as such no data is present in the skb header.
1106  *
1107  * In addition if skb is not at least 60 bytes we need to pad it so that
1108  * it is large enough to qualify as a valid Ethernet frame.
1109  *
1110  * Returns true if an error was encountered and skb was freed.
1111  **/
1112 static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
1113 {
1114 	/* if eth_skb_pad returns an error the skb was freed */
1115 	if (eth_skb_pad(skb))
1116 		return true;
1117 
1118 	return false;
1119 }
1120 
1121 /**
1122  * iavf_reuse_rx_page - page flip buffer and store it back on the ring
1123  * @rx_ring: rx descriptor ring to store buffers on
1124  * @old_buff: donor buffer to have page reused
1125  *
1126  * Synchronizes page for reuse by the adapter
1127  **/
1128 static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
1129 			       struct iavf_rx_buffer *old_buff)
1130 {
1131 	struct iavf_rx_buffer *new_buff;
1132 	u16 nta = rx_ring->next_to_alloc;
1133 
1134 	new_buff = &rx_ring->rx_bi[nta];
1135 
1136 	/* update, and store next to alloc */
1137 	nta++;
1138 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1139 
1140 	/* transfer page from old buffer to new buffer */
1141 	new_buff->dma		= old_buff->dma;
1142 	new_buff->page		= old_buff->page;
1143 	new_buff->page_offset	= old_buff->page_offset;
1144 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
1145 }
1146 
1147 /**
1148  * iavf_can_reuse_rx_page - Determine if this page can be reused by
1149  * the adapter for another receive
1150  *
1151  * @rx_buffer: buffer containing the page
1152  *
1153  * If page is reusable, rx_buffer->page_offset is adjusted to point to
1154  * an unused region in the page.
1155  *
1156  * For small pages, @truesize will be a constant value, half the size
1157  * of the memory at page.  We'll attempt to alternate between high and
1158  * low halves of the page, with one half ready for use by the hardware
1159  * and the other half being consumed by the stack.  We use the page
1160  * ref count to determine whether the stack has finished consuming the
1161  * portion of this page that was passed up with a previous packet.  If
1162  * the page ref count is >1, we'll assume the "other" half page is
1163  * still busy, and this page cannot be reused.
1164  *
1165  * For larger pages, @truesize will be the actual space used by the
1166  * received packet (adjusted upward to an even multiple of the cache
1167  * line size).  This will advance through the page by the amount
1168  * actually consumed by the received packets while there is still
1169  * space for a buffer.  Each region of larger pages will be used at
1170  * most once, after which the page will not be reused.
1171  *
1172  * In either case, if the page is reusable its refcount is increased.
1173  **/
1174 static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
1175 {
1176 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1177 	struct page *page = rx_buffer->page;
1178 
1179 	/* Is any reuse possible? */
1180 	if (!dev_page_is_reusable(page))
1181 		return false;
1182 
1183 #if (PAGE_SIZE < 8192)
1184 	/* if we are only owner of page we can reuse it */
1185 	if (unlikely((page_count(page) - pagecnt_bias) > 1))
1186 		return false;
1187 #else
1188 #define IAVF_LAST_OFFSET \
1189 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
1190 	if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
1191 		return false;
1192 #endif
1193 
1194 	/* If we have drained the page fragment pool we need to update
1195 	 * the pagecnt_bias and page count so that we fully restock the
1196 	 * number of references the driver holds.
1197 	 */
1198 	if (unlikely(!pagecnt_bias)) {
1199 		page_ref_add(page, USHRT_MAX);
1200 		rx_buffer->pagecnt_bias = USHRT_MAX;
1201 	}
1202 
1203 	return true;
1204 }
1205 
1206 /**
1207  * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
1208  * @rx_ring: rx descriptor ring to transact packets on
1209  * @rx_buffer: buffer containing page to add
1210  * @skb: sk_buff to place the data into
1211  * @size: packet length from rx_desc
1212  *
1213  * This function will add the data contained in rx_buffer->page to the skb.
1214  * It will just attach the page as a frag to the skb.
1215  *
1216  * The function will then update the page offset.
1217  **/
1218 static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
1219 			     struct iavf_rx_buffer *rx_buffer,
1220 			     struct sk_buff *skb,
1221 			     unsigned int size)
1222 {
1223 #if (PAGE_SIZE < 8192)
1224 	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1225 #else
1226 	unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
1227 #endif
1228 
1229 	if (!size)
1230 		return;
1231 
1232 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1233 			rx_buffer->page_offset, size, truesize);
1234 
1235 	/* page is being used so we must update the page offset */
1236 #if (PAGE_SIZE < 8192)
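	/* With 4K pages each buffer is half a page, so XOR-ing the offset with
	 * truesize simply flips between the two halves of the page.
	 */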
1237 	rx_buffer->page_offset ^= truesize;
1238 #else
1239 	rx_buffer->page_offset += truesize;
1240 #endif
1241 }
1242 
1243 /**
1244  * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1245  * @rx_ring: rx descriptor ring to transact packets on
1246  * @size: size of buffer to add to skb
1247  *
1248  * This function will pull an Rx buffer from the ring and synchronize it
1249  * for use by the CPU.
1250  */
1251 static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
1252 						 const unsigned int size)
1253 {
1254 	struct iavf_rx_buffer *rx_buffer;
1255 
1256 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1257 	prefetchw(rx_buffer->page);
1258 	if (!size)
1259 		return rx_buffer;
1260 
1261 	/* we are reusing so sync this buffer for CPU use */
1262 	dma_sync_single_range_for_cpu(rx_ring->dev,
1263 				      rx_buffer->dma,
1264 				      rx_buffer->page_offset,
1265 				      size,
1266 				      DMA_FROM_DEVICE);
1267 
1268 	/* We have pulled a buffer for use, so decrement pagecnt_bias */
1269 	rx_buffer->pagecnt_bias--;
1270 
1271 	return rx_buffer;
1272 }
1273 
1274 /**
1275  * iavf_construct_skb - Allocate skb and populate it
1276  * @rx_ring: rx descriptor ring to transact packets on
1277  * @rx_buffer: rx buffer to pull data from
1278  * @size: size of buffer to add to skb
1279  *
1280  * This function allocates an skb.  It then populates it with the page
1281  * data from the current receive descriptor, taking care to set up the
1282  * skb correctly.
1283  */
1284 static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
1285 					  struct iavf_rx_buffer *rx_buffer,
1286 					  unsigned int size)
1287 {
1288 	void *va;
1289 #if (PAGE_SIZE < 8192)
1290 	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1291 #else
1292 	unsigned int truesize = SKB_DATA_ALIGN(size);
1293 #endif
1294 	unsigned int headlen;
1295 	struct sk_buff *skb;
1296 
1297 	if (!rx_buffer)
1298 		return NULL;
1299 	/* prefetch first cache line of first page */
1300 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1301 	net_prefetch(va);
1302 
1303 	/* allocate a skb to store the frags */
1304 	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1305 			       IAVF_RX_HDR_SIZE,
1306 			       GFP_ATOMIC | __GFP_NOWARN);
1307 	if (unlikely(!skb))
1308 		return NULL;
1309 
1310 	/* Determine available headroom for copy */
1311 	headlen = size;
1312 	if (headlen > IAVF_RX_HDR_SIZE)
1313 		headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
1314 
1315 	/* align pull length to size of long to optimize memcpy performance */
1316 	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1317 
1318 	/* update all of the pointers */
1319 	size -= headlen;
1320 	if (size) {
1321 		skb_add_rx_frag(skb, 0, rx_buffer->page,
1322 				rx_buffer->page_offset + headlen,
1323 				size, truesize);
1324 
1325 		/* buffer is used by skb, update page_offset */
1326 #if (PAGE_SIZE < 8192)
1327 		rx_buffer->page_offset ^= truesize;
1328 #else
1329 		rx_buffer->page_offset += truesize;
1330 #endif
1331 	} else {
1332 		/* buffer is unused, reset bias back to rx_buffer */
1333 		rx_buffer->pagecnt_bias++;
1334 	}
1335 
1336 	return skb;
1337 }
1338 
1339 /**
1340  * iavf_build_skb - Build skb around an existing buffer
1341  * @rx_ring: Rx descriptor ring to transact packets on
1342  * @rx_buffer: Rx buffer to pull data from
1343  * @size: size of buffer to add to skb
1344  *
1345  * This function builds an skb around an existing Rx buffer, taking care
1346  * to set up the skb correctly and avoid any memcpy overhead.
1347  */
1348 static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
1349 				      struct iavf_rx_buffer *rx_buffer,
1350 				      unsigned int size)
1351 {
1352 	void *va;
1353 #if (PAGE_SIZE < 8192)
1354 	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1355 #else
1356 	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1357 				SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
1358 #endif
1359 	struct sk_buff *skb;
1360 
1361 	if (!rx_buffer || !size)
1362 		return NULL;
1363 	/* prefetch first cache line of first page */
1364 	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1365 	net_prefetch(va);
1366 
1367 	/* build an skb around the page buffer */
1368 	skb = build_skb(va - IAVF_SKB_PAD, truesize);
1369 	if (unlikely(!skb))
1370 		return NULL;
1371 
1372 	/* update pointers within the skb to store the data */
1373 	skb_reserve(skb, IAVF_SKB_PAD);
1374 	__skb_put(skb, size);
1375 
1376 	/* buffer is used by skb, update page_offset */
1377 #if (PAGE_SIZE < 8192)
1378 	rx_buffer->page_offset ^= truesize;
1379 #else
1380 	rx_buffer->page_offset += truesize;
1381 #endif
1382 
1383 	return skb;
1384 }
1385 
1386 /**
1387  * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
1388  * @rx_ring: rx descriptor ring to transact packets on
1389  * @rx_buffer: rx buffer to pull data from
1390  *
1391  * This function will clean up the contents of the rx_buffer.  It will
1392  * either recycle the buffer or unmap it and free the associated resources.
1393  */
1394 static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
1395 			       struct iavf_rx_buffer *rx_buffer)
1396 {
1397 	if (!rx_buffer)
1398 		return;
1399 
1400 	if (iavf_can_reuse_rx_page(rx_buffer)) {
1401 		/* hand second half of page back to the ring */
1402 		iavf_reuse_rx_page(rx_ring, rx_buffer);
1403 		rx_ring->rx_stats.page_reuse_count++;
1404 	} else {
1405 		/* we are not reusing the buffer so unmap it */
1406 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1407 				     iavf_rx_pg_size(rx_ring),
1408 				     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
1409 		__page_frag_cache_drain(rx_buffer->page,
1410 					rx_buffer->pagecnt_bias);
1411 	}
1412 
1413 	/* clear contents of buffer_info */
1414 	rx_buffer->page = NULL;
1415 }
1416 
1417 /**
1418  * iavf_is_non_eop - process handling of non-EOP buffers
1419  * @rx_ring: Rx ring being processed
1420  * @rx_desc: Rx descriptor for current buffer
1421  * @skb: Current socket buffer containing buffer in progress
1422  *
1423  * This function updates next to clean.  If the buffer is an EOP buffer
1424  * this function exits returning false, otherwise it will place the
1425  * sk_buff in the next buffer to be chained and return true indicating
1426  * that this is in fact a non-EOP buffer.
1427  **/
1428 static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
1429 			    union iavf_rx_desc *rx_desc,
1430 			    struct sk_buff *skb)
1431 {
1432 	u32 ntc = rx_ring->next_to_clean + 1;
1433 
1434 	/* fetch, update, and store next to clean */
1435 	ntc = (ntc < rx_ring->count) ? ntc : 0;
1436 	rx_ring->next_to_clean = ntc;
1437 
1438 	prefetch(IAVF_RX_DESC(rx_ring, ntc));
1439 
1440 	/* if we are the last buffer then there is nothing else to do */
1441 #define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
1442 	if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
1443 		return false;
1444 
1445 	rx_ring->rx_stats.non_eop_descs++;
1446 
1447 	return true;
1448 }
1449 
1450 /**
1451  * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1452  * @rx_ring: rx descriptor ring to transact packets on
1453  * @budget: Total limit on number of packets to process
1454  *
1455  * This function provides a "bounce buffer" approach to Rx interrupt
1456  * processing.  The advantage to this is that on systems that have
1457  * expensive overhead for IOMMU access this provides a means of avoiding
1458  * it by maintaining the mapping of the page to the system.
1459  *
1460  * Returns amount of work completed
1461  **/
1462 static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1463 {
1464 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1465 	struct sk_buff *skb = rx_ring->skb;
1466 	u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
1467 	bool failure = false;
1468 
1469 	while (likely(total_rx_packets < (unsigned int)budget)) {
1470 		struct iavf_rx_buffer *rx_buffer;
1471 		union iavf_rx_desc *rx_desc;
1472 		unsigned int size;
1473 		u16 vlan_tag;
1474 		u8 rx_ptype;
1475 		u64 qword;
1476 
1477 		/* return some buffers to hardware, one at a time is too slow */
1478 		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
1479 			failure = failure ||
1480 				  iavf_alloc_rx_buffers(rx_ring, cleaned_count);
1481 			cleaned_count = 0;
1482 		}
1483 
1484 		rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1485 
1486 		/* status_error_len will always be zero for unused descriptors
1487 		 * because it's cleared in cleanup, and overlaps with hdr_addr
1488 		 * which is always zero because packet split isn't used, if the
1489 		 * hardware wrote DD then the length will be non-zero
1490 		 */
1491 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1492 
1493 		/* This memory barrier is needed to keep us from reading
1494 		 * any other fields out of the rx_desc until we have
1495 		 * verified the descriptor has been written back.
1496 		 */
1497 		dma_rmb();
1498 #define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
1499 		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
1500 			break;
1501 
1502 		size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1503 		       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1504 
1505 		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
1506 		rx_buffer = iavf_get_rx_buffer(rx_ring, size);
1507 
1508 		/* retrieve a buffer from the ring */
1509 		if (skb)
1510 			iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1511 		else if (ring_uses_build_skb(rx_ring))
1512 			skb = iavf_build_skb(rx_ring, rx_buffer, size);
1513 		else
1514 			skb = iavf_construct_skb(rx_ring, rx_buffer, size);
1515 
1516 		/* exit if we failed to retrieve a buffer */
1517 		if (!skb) {
1518 			rx_ring->rx_stats.alloc_buff_failed++;
1519 			if (rx_buffer && size)
1520 				rx_buffer->pagecnt_bias++;
1521 			break;
1522 		}
1523 
1524 		iavf_put_rx_buffer(rx_ring, rx_buffer);
1525 		cleaned_count++;
1526 
1527 		if (iavf_is_non_eop(rx_ring, rx_desc, skb))
1528 			continue;
1529 
1530 		/* ERR_MASK will only have valid bits if EOP set, and
1531 		 * what we are doing here is actually checking
1532 		 * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1533 		 * the error field
1534 		 */
1535 		if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
1536 			dev_kfree_skb_any(skb);
1537 			skb = NULL;
1538 			continue;
1539 		}
1540 
1541 		if (iavf_cleanup_headers(rx_ring, skb)) {
1542 			skb = NULL;
1543 			continue;
1544 		}
1545 
1546 		/* probably a little skewed due to removing CRC */
1547 		total_rx_bytes += skb->len;
1548 
1549 		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1550 		rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
1551 			   IAVF_RXD_QW1_PTYPE_SHIFT;
1552 
1553 		/* populate checksum, VLAN, and protocol */
1554 		iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1555 
1556 
1557 		vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1558 			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1559 
1560 		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
1561 		iavf_receive_skb(rx_ring, skb, vlan_tag);
1562 		skb = NULL;
1563 
1564 		/* update budget accounting */
1565 		total_rx_packets++;
1566 	}
1567 
1568 	rx_ring->skb = skb;
1569 
1570 	u64_stats_update_begin(&rx_ring->syncp);
1571 	rx_ring->stats.packets += total_rx_packets;
1572 	rx_ring->stats.bytes += total_rx_bytes;
1573 	u64_stats_update_end(&rx_ring->syncp);
1574 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
1575 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1576 
1577 	/* guarantee a trip back through this routine if there was a failure */
1578 	return failure ? budget : (int)total_rx_packets;
1579 }
1580 
1581 static inline u32 iavf_buildreg_itr(const int type, u16 itr)
1582 {
1583 	u32 val;
1584 
1585 	/* We don't bother with setting the CLEARPBA bit as the data sheet
1586 	 * points out doing so is "meaningless since it was already
1587 	 * auto-cleared". The auto-clearing happens when the interrupt is
1588 	 * asserted.
1589 	 *
1590 	 * Hardware errata 28 also indicates that writing to a
1591 	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
1592 	 * an event in the PBA anyway so we need to rely on the automask
1593 	 * to hold pending events for us until the interrupt is re-enabled
1594 	 *
1595 	 * The itr value is reported in microseconds, and the register
1596 	 * value is recorded in 2 microsecond units. For this reason we
1597 	 * only need to shift by the interval shift - 1 instead of the
1598 	 * full value.
1599 	 */
1600 	itr &= IAVF_ITR_MASK;
1601 
1602 	val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1603 	      (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1604 	      (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
1605 
1606 	return val;
1607 }
1608 
1609 /* a small macro to shorten up some long lines */
1610 #define INTREG IAVF_VFINT_DYN_CTLN1
1611 
1612 /* The act of updating the ITR will cause it to immediately trigger. In order
1613  * to prevent this from throwing off adaptive update statistics we defer the
1614  * update so that it can only happen so often. So after either Tx or Rx are
1615  * updated we make the adaptive scheme wait until either the ITR completely
1616  * expires via the next_update expiration or we have been through at least
1617  * 3 interrupts.
1618  */
1619 #define ITR_COUNTDOWN_START 3
1620 
1621 /**
1622  * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
1623  * @vsi: the VSI we care about
1624  * @q_vector: q_vector for which itr is being updated and interrupt enabled
1625  *
1626  **/
1627 static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
1628 					  struct iavf_q_vector *q_vector)
1629 {
1630 	struct iavf_hw *hw = &vsi->back->hw;
1631 	u32 intval;
1632 
1633 	/* These will do nothing if dynamic updates are not enabled */
1634 	iavf_update_itr(q_vector, &q_vector->tx);
1635 	iavf_update_itr(q_vector, &q_vector->rx);
1636 
1637 	/* This block of logic allows us to get away with only updating
1638 	 * one ITR value with each interrupt. The idea is to perform a
1639 	 * pseudo-lazy update with the following criteria.
1640 	 *
1641 	 * 1. Rx is given higher priority than Tx if both are in the same state
1642 	 * 2. If we must reduce an ITR, that reduction is given highest priority.
1643 	 * 3. We then give priority to the ITR that needs the larger increase.
1644 	 */
1645 	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1646 		/* Rx ITR needs to be reduced, this is highest priority */
1647 		intval = iavf_buildreg_itr(IAVF_RX_ITR,
1648 					   q_vector->rx.target_itr);
1649 		q_vector->rx.current_itr = q_vector->rx.target_itr;
1650 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1651 	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1652 		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1653 		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1654 		/* Either Tx ITR needs to be reduced (second priority), or Tx
1655 		 * ITR needs to be increased more than Rx (fourth priority).
1656 		 */
1657 		intval = iavf_buildreg_itr(IAVF_TX_ITR,
1658 					   q_vector->tx.target_itr);
1659 		q_vector->tx.current_itr = q_vector->tx.target_itr;
1660 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1661 	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1662 		/* Rx ITR needs to be increased, third priority */
1663 		intval = iavf_buildreg_itr(IAVF_RX_ITR,
1664 					   q_vector->rx.target_itr);
1665 		q_vector->rx.current_itr = q_vector->rx.target_itr;
1666 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1667 	} else {
1668 		/* No ITR update, lowest priority */
1669 		intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
1670 		if (q_vector->itr_countdown)
1671 			q_vector->itr_countdown--;
1672 	}
1673 
1674 	if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
1675 		wr32(hw, INTREG(q_vector->reg_idx), intval);
1676 }
1677 
1678 /**
1679  * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
1680  * @napi: napi struct with our device's info in it
1681  * @budget: amount of work driver is allowed to do this pass, in packets
1682  *
1683  * This function will clean all queues associated with a q_vector.
1684  *
1685  * Returns the amount of work done
1686  **/
1687 int iavf_napi_poll(struct napi_struct *napi, int budget)
1688 {
1689 	struct iavf_q_vector *q_vector =
1690 			       container_of(napi, struct iavf_q_vector, napi);
1691 	struct iavf_vsi *vsi = q_vector->vsi;
1692 	struct iavf_ring *ring;
1693 	bool clean_complete = true;
1694 	bool arm_wb = false;
1695 	int budget_per_ring;
1696 	int work_done = 0;
1697 
1698 	if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1699 		napi_complete(napi);
1700 		return 0;
1701 	}
1702 
1703 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1704 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1705 	 */
1706 	iavf_for_each_ring(ring, q_vector->tx) {
1707 		if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1708 			clean_complete = false;
1709 			continue;
1710 		}
1711 		arm_wb |= ring->arm_wb;
1712 		ring->arm_wb = false;
1713 	}
1714 
1715 	/* Handle case where we are called by netpoll with a budget of 0 */
1716 	if (budget <= 0)
1717 		goto tx_only;
1718 
1719 	/* We attempt to distribute budget to each Rx queue fairly, but don't
1720 	 * allow the budget to go below 1 because that would exit polling early.
1721 	 */
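	/* For illustration: with the usual NAPI budget of 64 and four ring
	 * pairs on this vector, each Rx ring gets a budget of 16; a single
	 * ring pair simply receives the full budget.
	 */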
1722 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1723 
1724 	iavf_for_each_ring(ring, q_vector->rx) {
1725 		int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1726 
1727 		work_done += cleaned;
1728 		/* if we clean as many as budgeted, we must not be done */
1729 		if (cleaned >= budget_per_ring)
1730 			clean_complete = false;
1731 	}
1732 
1733 	/* If work not completed, return budget and polling will return */
1734 	if (!clean_complete) {
1735 		int cpu_id = smp_processor_id();
1736 
1737 		/* It is possible that the interrupt affinity has changed but,
1738 		 * if the cpu is pegged at 100%, polling will never exit while
1739 		 * traffic continues and the interrupt will be stuck on this
1740 		 * cpu.  We check to make sure affinity is correct before we
1741 		 * continue to poll, otherwise we must stop polling so the
1742 		 * interrupt can move to the correct cpu.
1743 		 */
1744 		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1745 			/* Tell napi that we are done polling */
1746 			napi_complete_done(napi, work_done);
1747 
1748 			/* Force an interrupt */
1749 			iavf_force_wb(vsi, q_vector);
1750 
1751 			/* Return budget-1 so that polling stops */
1752 			return budget - 1;
1753 		}
1754 tx_only:
1755 		if (arm_wb) {
1756 			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1757 			iavf_enable_wb_on_itr(vsi, q_vector);
1758 		}
1759 		return budget;
1760 	}
1761 
1762 	if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
1763 		q_vector->arm_wb_state = false;
1764 
1765 	/* Exit the polling mode, but don't re-enable interrupts if stack might
1766 	 * poll us due to busy-polling
1767 	 */
1768 	if (likely(napi_complete_done(napi, work_done)))
1769 		iavf_update_enable_itr(vsi, q_vector);
1770 
1771 	return min(work_done, budget - 1);
1772 }
1773 
1774 /**
1775  * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1776  * @skb:     send buffer
1777  * @tx_ring: ring to send buffer on
1778  * @flags:   the tx flags to be set
1779  *
1780  * Checks the skb and sets up the corresponding generic transmit flags
1781  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1782  *
1783  * Returns an error code if the frame should be dropped, and otherwise
1784  * returns 0 to indicate the flags have been set properly.
1785  **/
1786 static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1787 					     struct iavf_ring *tx_ring,
1788 					     u32 *flags)
1789 {
1790 	__be16 protocol = skb->protocol;
1791 	u32  tx_flags = 0;
1792 
1793 	if (protocol == htons(ETH_P_8021Q) &&
1794 	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1795 		/* When HW VLAN acceleration is turned off by the user the
1796 		 * stack sets the protocol to 8021q so that the driver
1797 		 * can take any steps required to support the SW only
1798 		 * VLAN handling.  In our case the driver doesn't need
1799 		 * to take any further steps so just set the protocol
1800 		 * to the encapsulated ethertype.
1801 		 */
1802 		skb->protocol = vlan_get_protocol(skb);
1803 		goto out;
1804 	}
1805 
1806 	/* if we have a HW VLAN tag being added, default to the HW one */
1807 	if (skb_vlan_tag_present(skb)) {
1808 		tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1809 		tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1810 	/* else if it is a SW VLAN, check the next protocol and store the tag */
1811 	} else if (protocol == htons(ETH_P_8021Q)) {
1812 		struct vlan_hdr *vhdr, _vhdr;
1813 
1814 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1815 		if (!vhdr)
1816 			return -EINVAL;
1817 
1818 		protocol = vhdr->h_vlan_encapsulated_proto;
1819 		tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
1820 		tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
1821 	}
1822 
1823 out:
1824 	*flags = tx_flags;
1825 	return 0;
1826 }
1827 
1828 /**
1829  * iavf_tso - set up the tso context descriptor
1830  * @first:    pointer to first Tx buffer for xmit
1831  * @hdr_len:  ptr to the size of the packet header
1832  * @cd_type_cmd_tso_mss: Quad Word 1
1833  *
1834  * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error code
1835  **/
1836 static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1837 		    u64 *cd_type_cmd_tso_mss)
1838 {
1839 	struct sk_buff *skb = first->skb;
1840 	u64 cd_cmd, cd_tso_len, cd_mss;
1841 	union {
1842 		struct iphdr *v4;
1843 		struct ipv6hdr *v6;
1844 		unsigned char *hdr;
1845 	} ip;
1846 	union {
1847 		struct tcphdr *tcp;
1848 		struct udphdr *udp;
1849 		unsigned char *hdr;
1850 	} l4;
1851 	u32 paylen, l4_offset;
1852 	u16 gso_segs, gso_size;
1853 	int err;
1854 
1855 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1856 		return 0;
1857 
1858 	if (!skb_is_gso(skb))
1859 		return 0;
1860 
1861 	err = skb_cow_head(skb, 0);
1862 	if (err < 0)
1863 		return err;
1864 
1865 	ip.hdr = skb_network_header(skb);
1866 	l4.hdr = skb_transport_header(skb);
1867 
1868 	/* initialize outer IP header fields */
1869 	if (ip.v4->version == 4) {
1870 		ip.v4->tot_len = 0;
1871 		ip.v4->check = 0;
1872 	} else {
1873 		ip.v6->payload_len = 0;
1874 	}
1875 
1876 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1877 					 SKB_GSO_GRE_CSUM |
1878 					 SKB_GSO_IPXIP4 |
1879 					 SKB_GSO_IPXIP6 |
1880 					 SKB_GSO_UDP_TUNNEL |
1881 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
1882 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1883 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1884 			l4.udp->len = 0;
1885 
1886 			/* determine offset of outer transport header */
1887 			l4_offset = l4.hdr - skb->data;
1888 
1889 			/* remove payload length from outer checksum */
1890 			paylen = skb->len - l4_offset;
1891 			csum_replace_by_diff(&l4.udp->check,
1892 					     (__force __wsum)htonl(paylen));
1893 		}
1894 
1895 		/* reset pointers to inner headers */
1896 		ip.hdr = skb_inner_network_header(skb);
1897 		l4.hdr = skb_inner_transport_header(skb);
1898 
1899 		/* initialize inner IP header fields */
1900 		if (ip.v4->version == 4) {
1901 			ip.v4->tot_len = 0;
1902 			ip.v4->check = 0;
1903 		} else {
1904 			ip.v6->payload_len = 0;
1905 		}
1906 	}
1907 
1908 	/* determine offset of inner transport header */
1909 	l4_offset = l4.hdr - skb->data;
1910 	/* remove payload length from inner checksum */
1911 	paylen = skb->len - l4_offset;
1912 
1913 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1914 		csum_replace_by_diff(&l4.udp->check,
1915 				     (__force __wsum)htonl(paylen));
1916 		/* compute length of UDP segmentation header */
1917 		*hdr_len = (u8)sizeof(*l4.udp) + l4_offset;
1918 	} else {
1919 		csum_replace_by_diff(&l4.tcp->check,
1920 				     (__force __wsum)htonl(paylen));
1921 		/* compute length of TCP segmentation header */
1922 		*hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
1923 	}
1924 
1925 	/* pull values out of skb_shinfo */
1926 	gso_size = skb_shinfo(skb)->gso_size;
1927 	gso_segs = skb_shinfo(skb)->gso_segs;
1928 
1929 	/* update GSO size and bytecount with header size */
1930 	first->gso_segs = gso_segs;
1931 	first->bytecount += (first->gso_segs - 1) * *hdr_len;
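	/* Example of the accounting above: a 9000 byte payload with an MSS
	 * of 1500 produces 6 segments, so the wire bytecount grows by
	 * 5 * *hdr_len to cover the headers repeated on every segment
	 * after the first.
	 */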
1932 
1933 	/* find the field values */
1934 	cd_cmd = IAVF_TX_CTX_DESC_TSO;
1935 	cd_tso_len = skb->len - *hdr_len;
1936 	cd_mss = gso_size;
1937 	*cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1938 				(cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1939 				(cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1940 	return 1;
1941 }
1942 
1943 /**
1944  * iavf_tx_enable_csum - Enable Tx checksum offloads
1945  * @skb: send buffer
1946  * @tx_flags: pointer to Tx flags currently set
1947  * @td_cmd: Tx descriptor command bits to set
1948  * @td_offset: Tx descriptor header offsets to set
1949  * @tx_ring: Tx descriptor ring
1950  * @cd_tunneling: ptr to context desc bits
1951  **/
1952 static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1953 			       u32 *td_cmd, u32 *td_offset,
1954 			       struct iavf_ring *tx_ring,
1955 			       u32 *cd_tunneling)
1956 {
1957 	union {
1958 		struct iphdr *v4;
1959 		struct ipv6hdr *v6;
1960 		unsigned char *hdr;
1961 	} ip;
1962 	union {
1963 		struct tcphdr *tcp;
1964 		struct udphdr *udp;
1965 		unsigned char *hdr;
1966 	} l4;
1967 	unsigned char *exthdr;
1968 	u32 offset, cmd = 0;
1969 	__be16 frag_off;
1970 	u8 l4_proto = 0;
1971 
1972 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1973 		return 0;
1974 
1975 	ip.hdr = skb_network_header(skb);
1976 	l4.hdr = skb_transport_header(skb);
1977 
1978 	/* compute outer L2 header size */
1979 	offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
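	/* For a plain (untagged) Ethernet header this works out to
	 * 14 / 2 = 7, since the MACLEN field is expressed in 2-byte words,
	 * which is what the division by 2 above accounts for.
	 */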
1980 
1981 	if (skb->encapsulation) {
1982 		u32 tunnel = 0;
1983 		/* define outer network header type */
1984 		if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
1985 			tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
1986 				  IAVF_TX_CTX_EXT_IP_IPV4 :
1987 				  IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1988 
1989 			l4_proto = ip.v4->protocol;
1990 		} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
1991 			tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
1992 
1993 			exthdr = ip.hdr + sizeof(*ip.v6);
1994 			l4_proto = ip.v6->nexthdr;
1995 			if (l4.hdr != exthdr)
1996 				ipv6_skip_exthdr(skb, exthdr - skb->data,
1997 						 &l4_proto, &frag_off);
1998 		}
1999 
2000 		/* define outer transport */
2001 		switch (l4_proto) {
2002 		case IPPROTO_UDP:
2003 			tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
2004 			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2005 			break;
2006 		case IPPROTO_GRE:
2007 			tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
2008 			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2009 			break;
2010 		case IPPROTO_IPIP:
2011 		case IPPROTO_IPV6:
2012 			*tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2013 			l4.hdr = skb_inner_network_header(skb);
2014 			break;
2015 		default:
2016 			if (*tx_flags & IAVF_TX_FLAGS_TSO)
2017 				return -1;
2018 
2019 			skb_checksum_help(skb);
2020 			return 0;
2021 		}
2022 
2023 		/* compute outer L3 header size */
2024 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2025 			  IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2026 
2027 		/* switch IP header pointer from outer to inner header */
2028 		ip.hdr = skb_inner_network_header(skb);
2029 
2030 		/* compute tunnel header size */
2031 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2032 			  IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
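		/* As with MACLEN above, the units are implied by the divisors:
		 * the outer IP length is counted in 4-byte words and the
		 * tunnel (NAT) length in 2-byte words.
		 */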
2033 
2034 		/* indicate if we need to offload outer UDP header */
2035 		if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
2036 		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2037 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2038 			tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
2039 
2040 		/* record tunnel offload values */
2041 		*cd_tunneling |= tunnel;
2042 
2043 		/* switch L4 header pointer from outer to inner */
2044 		l4.hdr = skb_inner_transport_header(skb);
2045 		l4_proto = 0;
2046 
2047 		/* reset type as we transition from outer to inner headers */
2048 		*tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
2049 		if (ip.v4->version == 4)
2050 			*tx_flags |= IAVF_TX_FLAGS_IPV4;
2051 		if (ip.v6->version == 6)
2052 			*tx_flags |= IAVF_TX_FLAGS_IPV6;
2053 	}
2054 
2055 	/* Enable IP checksum offloads */
2056 	if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2057 		l4_proto = ip.v4->protocol;
2058 		/* the stack has already computed the IP header checksum, the
2059 		 * only time we need the hardware to recompute it is for TSO.
2060 		 */
2061 		cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2062 		       IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2063 		       IAVF_TX_DESC_CMD_IIPT_IPV4;
2064 	} else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2065 		cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2066 
2067 		exthdr = ip.hdr + sizeof(*ip.v6);
2068 		l4_proto = ip.v6->nexthdr;
2069 		if (l4.hdr != exthdr)
2070 			ipv6_skip_exthdr(skb, exthdr - skb->data,
2071 					 &l4_proto, &frag_off);
2072 	}
2073 
2074 	/* compute inner L3 header size */
2075 	offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
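	/* e.g. a bare IPv4 header of 20 bytes yields an IPLEN field of 5,
	 * again counted in 4-byte words.
	 */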
2076 
2077 	/* Enable L4 checksum offloads */
2078 	switch (l4_proto) {
2079 	case IPPROTO_TCP:
2080 		/* enable checksum offloads */
2081 		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2082 		offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2083 		break;
2084 	case IPPROTO_SCTP:
2085 		/* enable SCTP checksum offload */
2086 		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2087 		offset |= (sizeof(struct sctphdr) >> 2) <<
2088 			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2089 		break;
2090 	case IPPROTO_UDP:
2091 		/* enable UDP checksum offload */
2092 		cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2093 		offset |= (sizeof(struct udphdr) >> 2) <<
2094 			  IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2095 		break;
2096 	default:
2097 		if (*tx_flags & IAVF_TX_FLAGS_TSO)
2098 			return -1;
2099 		skb_checksum_help(skb);
2100 		return 0;
2101 	}
2102 
2103 	*td_cmd |= cmd;
2104 	*td_offset |= offset;
2105 
2106 	return 1;
2107 }
2108 
2109 /**
2110  * iavf_create_tx_ctx - Build the Tx context descriptor
2111  * @tx_ring:  ring to create the descriptor on
2112  * @cd_type_cmd_tso_mss: Quad Word 1
2113  * @cd_tunneling: Quad Word 0 - bits 0-31
2114  * @cd_l2tag2: Quad Word 0 - bits 32-63
2115  **/
2116 static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2117 			       const u64 cd_type_cmd_tso_mss,
2118 			       const u32 cd_tunneling, const u32 cd_l2tag2)
2119 {
2120 	struct iavf_tx_context_desc *context_desc;
2121 	int i = tx_ring->next_to_use;
2122 
2123 	if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2124 	    !cd_tunneling && !cd_l2tag2)
2125 		return;
2126 
2127 	/* grab the next descriptor */
2128 	context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2129 
2130 	i++;
2131 	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2132 
2133 	/* cpu_to_le32 and assign to struct fields */
2134 	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2135 	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2136 	context_desc->rsvd = cpu_to_le16(0);
2137 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2138 }
2139 
2140 /**
2141  * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
2142  * @skb:      send buffer
2143  *
2144  * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2145  * and so we need to figure out the cases where we need to linearize the skb.
2146  *
2147  * For TSO we need to count the TSO header and segment payload separately.
2148  * As such we need to check cases where we have 7 fragments or more as we
2149  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2150  * the segment payload in the first descriptor, and another 7 for the
2151  * fragments.
2152  **/
2153 bool __iavf_chk_linearize(struct sk_buff *skb)
2154 {
2155 	const skb_frag_t *frag, *stale;
2156 	int nr_frags, sum;
2157 
2158 	/* no need to check if number of frags is less than 7 */
2159 	nr_frags = skb_shinfo(skb)->nr_frags;
2160 	if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
2161 		return false;
2162 
2163 	/* We need to walk through the list and validate that each group
2164 	 * of 6 fragments totals at least gso_size.
2165 	 */
2166 	nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
2167 	frag = &skb_shinfo(skb)->frags[0];
2168 
2169 	/* Initialize size to the negative value of gso_size minus 1.  We
2170 	 * use this as the worst case scenario in which the frag ahead
2171 	 * of us only provides one byte which is why we are limited to 6
2172 	 * descriptors for a single transmit as the header and previous
2173 	 * fragment are already consuming 2 descriptors.
2174 	 */
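	/* Illustrative numbers (not tied to any particular device): with
	 * gso_size = 6002 and seven 1000-byte fragments, no window of six
	 * fragments reaches 6002 bytes, so sum goes negative on the sixth
	 * fragment below and we report that the skb must be linearized.
	 */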
2175 	sum = 1 - skb_shinfo(skb)->gso_size;
2176 
2177 	/* Add size of frags 0 through 4 to create our initial sum */
2178 	sum += skb_frag_size(frag++);
2179 	sum += skb_frag_size(frag++);
2180 	sum += skb_frag_size(frag++);
2181 	sum += skb_frag_size(frag++);
2182 	sum += skb_frag_size(frag++);
2183 
2184 	/* Walk through fragments adding latest fragment, testing it, and
2185 	 * then removing stale fragments from the sum.
2186 	 */
2187 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2188 		int stale_size = skb_frag_size(stale);
2189 
2190 		sum += skb_frag_size(frag++);
2191 
2192 		/* The stale fragment may present us with a smaller
2193 		 * descriptor than the actual fragment size. To account
2194 		 * for that we need to remove all the data on the front and
2195 		 * figure out what the remainder would be in the last
2196 		 * descriptor associated with the fragment.
2197 		 */
2198 		if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2199 			int align_pad = -(skb_frag_off(stale)) &
2200 					(IAVF_MAX_READ_REQ_SIZE - 1);
2201 
2202 			sum -= align_pad;
2203 			stale_size -= align_pad;
2204 
2205 			do {
2206 				sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2207 				stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2208 			} while (stale_size > IAVF_MAX_DATA_PER_TXD);
2209 		}
2210 
2211 		/* if sum is negative we failed to make sufficient progress */
2212 		if (sum < 0)
2213 			return true;
2214 
2215 		if (!nr_frags--)
2216 			break;
2217 
2218 		sum -= stale_size;
2219 	}
2220 
2221 	return false;
2222 }
2223 
2224 /**
2225  * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
2226  * @tx_ring: the ring to be checked
2227  * @size:    the size buffer we want to assure is available
2228  *
2229  * Returns -EBUSY if a stop is needed, else 0
2230  **/
2231 int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
2232 {
2233 	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2234 	/* Memory barrier before checking head and tail */
2235 	smp_mb();
2236 
2237 	/* Check again in case another CPU has just made room available. */
2238 	if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
2239 		return -EBUSY;
2240 
2241 	/* A reprieve! - use start_queue because it doesn't call schedule */
2242 	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2243 	++tx_ring->tx_stats.restart_queue;
2244 	return 0;
2245 }
2246 
2247 /**
2248  * iavf_tx_map - Build the Tx descriptor
2249  * @tx_ring:  ring to send buffer on
2250  * @skb:      send buffer
2251  * @first:    first buffer info buffer to use
2252  * @tx_flags: collected send information
2253  * @hdr_len:  size of the packet header
2254  * @td_cmd:   the command field in the descriptor
2255  * @td_offset: offset for checksum or crc
2256  **/
2257 static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2258 			       struct iavf_tx_buffer *first, u32 tx_flags,
2259 			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
2260 {
2261 	unsigned int data_len = skb->data_len;
2262 	unsigned int size = skb_headlen(skb);
2263 	skb_frag_t *frag;
2264 	struct iavf_tx_buffer *tx_bi;
2265 	struct iavf_tx_desc *tx_desc;
2266 	u16 i = tx_ring->next_to_use;
2267 	u32 td_tag = 0;
2268 	dma_addr_t dma;
2269 
2270 	if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2271 		td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2272 		td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2273 			 IAVF_TX_FLAGS_VLAN_SHIFT;
2274 	}
2275 
2276 	first->tx_flags = tx_flags;
2277 
2278 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2279 
2280 	tx_desc = IAVF_TX_DESC(tx_ring, i);
2281 	tx_bi = first;
2282 
2283 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2284 		unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2285 
2286 		if (dma_mapping_error(tx_ring->dev, dma))
2287 			goto dma_error;
2288 
2289 		/* record length, and DMA address */
2290 		dma_unmap_len_set(tx_bi, len, size);
2291 		dma_unmap_addr_set(tx_bi, dma, dma);
2292 
2293 		/* align size to end of page */
2294 		max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
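		/* max_data now reaches from this DMA address up to the next
		 * IAVF_MAX_READ_REQ_SIZE boundary, so every chunk carved off
		 * in the loop below ends read-request aligned.
		 */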
2295 		tx_desc->buffer_addr = cpu_to_le64(dma);
2296 
2297 		while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
2298 			tx_desc->cmd_type_offset_bsz =
2299 				build_ctob(td_cmd, td_offset,
2300 					   max_data, td_tag);
2301 
2302 			tx_desc++;
2303 			i++;
2304 
2305 			if (i == tx_ring->count) {
2306 				tx_desc = IAVF_TX_DESC(tx_ring, 0);
2307 				i = 0;
2308 			}
2309 
2310 			dma += max_data;
2311 			size -= max_data;
2312 
2313 			max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2314 			tx_desc->buffer_addr = cpu_to_le64(dma);
2315 		}
2316 
2317 		if (likely(!data_len))
2318 			break;
2319 
2320 		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2321 							  size, td_tag);
2322 
2323 		tx_desc++;
2324 		i++;
2325 
2326 		if (i == tx_ring->count) {
2327 			tx_desc = IAVF_TX_DESC(tx_ring, 0);
2328 			i = 0;
2329 		}
2330 
2331 		size = skb_frag_size(frag);
2332 		data_len -= size;
2333 
2334 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2335 				       DMA_TO_DEVICE);
2336 
2337 		tx_bi = &tx_ring->tx_bi[i];
2338 	}
2339 
2340 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2341 
2342 	i++;
2343 	if (i == tx_ring->count)
2344 		i = 0;
2345 
2346 	tx_ring->next_to_use = i;
2347 
2348 	iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2349 
2350 	/* write last descriptor with RS and EOP bits */
2351 	td_cmd |= IAVF_TXD_CMD;
2352 	tx_desc->cmd_type_offset_bsz =
2353 			build_ctob(td_cmd, td_offset, size, td_tag);
2354 
2355 	skb_tx_timestamp(skb);
2356 
2357 	/* Force memory writes to complete before letting h/w know there
2358 	 * are new descriptors to fetch.
2359 	 *
2360 	 * We also use this memory barrier to make certain all of the
2361 	 * status bits have been updated before next_to_watch is written.
2362 	 */
2363 	wmb();
2364 
2365 	/* set next_to_watch value indicating a packet is present */
2366 	first->next_to_watch = tx_desc;
2367 
2368 	/* notify HW of packet */
2369 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
2370 		writel(i, tx_ring->tail);
2371 	}
2372 
2373 	return;
2374 
2375 dma_error:
2376 	dev_info(tx_ring->dev, "TX DMA map failed\n");
2377 
2378 	/* clear dma mappings for failed tx_bi map */
2379 	for (;;) {
2380 		tx_bi = &tx_ring->tx_bi[i];
2381 		iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2382 		if (tx_bi == first)
2383 			break;
2384 		if (i == 0)
2385 			i = tx_ring->count;
2386 		i--;
2387 	}
2388 
2389 	tx_ring->next_to_use = i;
2390 }
2391 
2392 /**
2393  * iavf_xmit_frame_ring - Sends buffer on Tx ring
2394  * @skb:     send buffer
2395  * @tx_ring: ring to send buffer on
2396  *
2397  * Returns NETDEV_TX_OK if sent, else an error code
2398  **/
2399 static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
2400 					struct iavf_ring *tx_ring)
2401 {
2402 	u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
2403 	u32 cd_tunneling = 0, cd_l2tag2 = 0;
2404 	struct iavf_tx_buffer *first;
2405 	u32 td_offset = 0;
2406 	u32 tx_flags = 0;
2407 	__be16 protocol;
2408 	u32 td_cmd = 0;
2409 	u8 hdr_len = 0;
2410 	int tso, count;
2411 
2412 	/* prefetch the data, we'll need it later */
2413 	prefetch(skb->data);
2414 
2415 	iavf_trace(xmit_frame_ring, skb, tx_ring);
2416 
2417 	count = iavf_xmit_descriptor_count(skb);
2418 	if (iavf_chk_linearize(skb, count)) {
2419 		if (__skb_linearize(skb)) {
2420 			dev_kfree_skb_any(skb);
2421 			return NETDEV_TX_OK;
2422 		}
2423 		count = iavf_txd_use_count(skb->len);
2424 		tx_ring->tx_stats.tx_linearize++;
2425 	}
2426 
2427 	/* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
2428 	 *       + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
2429 	 *       + 4 desc gap to avoid the cache line where head is,
2430 	 *       + 1 desc for context descriptor,
2431 	 * otherwise try next time
2432 	 */
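	/* As a concrete example of the arithmetic above: an skb whose head
	 * and three page fragments each fit in one descriptor gives
	 * count = 4, so we ask for 4 + 4 + 1 = 9 free descriptors before
	 * proceeding.
	 */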
2433 	if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2434 		tx_ring->tx_stats.tx_busy++;
2435 		return NETDEV_TX_BUSY;
2436 	}
2437 
2438 	/* record the location of the first descriptor for this packet */
2439 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
2440 	first->skb = skb;
2441 	first->bytecount = skb->len;
2442 	first->gso_segs = 1;
2443 
2444 	/* prepare the xmit flags */
2445 	if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2446 		goto out_drop;
2447 
2448 	/* obtain protocol of skb */
2449 	protocol = vlan_get_protocol(skb);
2450 
2451 	/* setup IPv4/IPv6 offloads */
2452 	if (protocol == htons(ETH_P_IP))
2453 		tx_flags |= IAVF_TX_FLAGS_IPV4;
2454 	else if (protocol == htons(ETH_P_IPV6))
2455 		tx_flags |= IAVF_TX_FLAGS_IPV6;
2456 
2457 	tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
2458 
2459 	if (tso < 0)
2460 		goto out_drop;
2461 	else if (tso)
2462 		tx_flags |= IAVF_TX_FLAGS_TSO;
2463 
2464 	/* Always offload the checksum, since it's in the data descriptor */
2465 	tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2466 				  tx_ring, &cd_tunneling);
2467 	if (tso < 0)
2468 		goto out_drop;
2469 
2470 	/* always enable CRC insertion offload */
2471 	td_cmd |= IAVF_TX_DESC_CMD_ICRC;
2472 
2473 	iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2474 			   cd_tunneling, cd_l2tag2);
2475 
2476 	iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2477 		    td_cmd, td_offset);
2478 
2479 	return NETDEV_TX_OK;
2480 
2481 out_drop:
2482 	iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
2483 	dev_kfree_skb_any(first->skb);
2484 	first->skb = NULL;
2485 	return NETDEV_TX_OK;
2486 }
2487 
2488 /**
2489  * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2490  * @skb:    send buffer
2491  * @netdev: network interface device structure
2492  *
2493  * Returns NETDEV_TX_OK if sent, else an error code
2494  **/
2495 netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2496 {
2497 	struct iavf_adapter *adapter = netdev_priv(netdev);
2498 	struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
2499 
2500 	/* hardware can't handle really short frames, hardware padding works
2501 	 * beyond this point
2502 	 */
2503 	if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
2504 		if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
2505 			return NETDEV_TX_OK;
2506 		skb->len = IAVF_MIN_TX_LEN;
2507 		skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
2508 	}
2509 
2510 	return iavf_xmit_frame_ring(skb, tx_ring);
2511 }
2512