// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"

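/* max bytes of packet data copied into the skb linear part; see ice_construct_skb() */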
#define ICE_RX_HDR_SIZE		256

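/* RXDID that identifies Flow Director descriptors in ice_clean_rx_irq(), and
 * the number of 1 ms waits for free descriptors in ice_prgm_fdir_fltr()
 */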
#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

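/**
 * txring_txq - Return the netdev Tx queue backing this Tx ring
 * @ring: Tx ring to look up
 */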
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
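	/* keep i as a negative offset from the ring end so the wrap checks
	 * below reduce to "if (!i)"
	 */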
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

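/* wake the queue only once at least this many descriptors are free again */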
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

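/**
 * ice_rx_frame_truesize - estimate the truesize of an Rx frame
 * @rx_ring: Rx ring the frame arrived on
 * @size: packet length, only used when PAGE_SIZE >= 8192
 *
 * Returns half a page on 4K-page systems; otherwise the aligned data size
 * plus headroom and skb_shared_info overhead.
 */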
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ice_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog)
{
	struct ice_ring *xdp_ring;
	int err, result;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		return result;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns the number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (the caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *xdp_ring;
	int drops = 0, i;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = ice_rx_offset(rx_ring);
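	/* take a large refcount up front and track the driver's share in
	 * pagecnt_bias so recycling does not need atomic refcount updates
	 * per packet; see ice_can_reuse_rx_page()
	 */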
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size, int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
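	/* the page refcount is only tracked for PAGE_SIZE < 8192; larger
	 * pages rely on the page_offset check in ice_can_reuse_rx_page()
	 */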
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points to the same place as xdp->data;
	 * otherwise we likely have a consumer accessing the first few
	 * bytes of metadata, and then the actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void
ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
	       int rx_buf_pgcnt)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	xdp.rxq = &rx_ring->xdp_rxq;
	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		int rx_buf_pgcnt;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			ice_put_rx_buf(rx_ring, NULL, 0);
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
		xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
		xdp.data_meta = xdp.data;
		xdp.data_end = xdp.data + size;
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depends on the packet size */
		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		rcu_read_lock();
		xdp_prog = READ_ONCE(rx_ring->xdp_prog);
		if (!xdp_prog) {
			rcu_read_unlock();
			goto construct_skb;
		}

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
		rcu_read_unlock();
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		} else if (likely(xdp.data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		cleaned_count++;

		/* skip if it is a non-EOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(rx_ring, xdp_xmit);

	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: ITR value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on wmem_default, SKB overhead, ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 *  wmem_default / (size + overhead) = desired_pkts_per_int
 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
 * formula down to:
 *
 *	 wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
 * ITR = -------------------------------------------- * --------------
 *			     rate			pkt_size + 640
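 *
 * As a sanity check on the constants below: at 10 Gbps this works out to
 * 212992 * 8 * 10^6 / 10^10 ~= 170, the multiplier used in the 10GB case;
 * likewise ~17 at 100GB, ~34 at 50GB, ~43 at 40GB, ~68 at 25GB and ~85
 * at 20GB.
 */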
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}

/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * For Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	prefetch(q_vector->vsi->port_info);

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx has 1 to 4 packets and fewer than 9000 bytes, assume
		 * insufficient data to use the bulk rate limiting approach
		 * unless Tx is already in bulk rate limiting. We are likely
		 * latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	struct ice_vsi *vsi = q_vector->vsi;
	u32 itr_val;
	/* when exiting WB_ON_ITR, let's set a low ITR value and trigger
	 * interrupts to expire right away in case we have more work ready to go
	 * already
	 */
1513 	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1514 		itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1515 		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1516 		/* set target back to last user set value */
1517 		rx->target_itr = rx->itr_setting;
1518 		/* set current to what we just wrote and dynamic if needed */
1519 		rx->current_itr = ICE_WB_ON_ITR_USECS |
1520 			(rx->itr_setting & ICE_ITR_DYNAMIC);
1521 		/* allow normal interrupt flow to start */
1522 		q_vector->itr_countdown = 0;
1523 		return;
1524 	}
1525 
1526 	/* This will do nothing if dynamic updates are not enabled */
1527 	ice_update_itr(q_vector, tx);
1528 	ice_update_itr(q_vector, rx);
1529 
1530 	/* This block of logic allows us to get away with only updating
1531 	 * one ITR value with each interrupt. The idea is to perform a
1532 	 * pseudo-lazy update with the following criteria.
1533 	 *
1534 	 * 1. Rx is given higher priority than Tx if both are in same state
1535 	 * 2. If we must reduce an ITR that is given highest priority.
1536 	 * 3. We then give priority to increasing ITR based on amount.
1537 	 */
1538 	if (rx->target_itr < rx->current_itr) {
1539 		/* Rx ITR needs to be reduced, this is highest priority */
1540 		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1541 		rx->current_itr = rx->target_itr;
1542 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1543 	} else if ((tx->target_itr < tx->current_itr) ||
1544 		   ((rx->target_itr - rx->current_itr) <
1545 		    (tx->target_itr - tx->current_itr))) {
1546 		/* Tx ITR needs to be reduced, this is second priority
1547 		 * Tx ITR needs to be increased more than Rx, fourth priority
1548 		 */
1549 		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1550 		tx->current_itr = tx->target_itr;
1551 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1552 	} else if (rx->current_itr != rx->target_itr) {
1553 		/* Rx ITR needs to be increased, third priority */
1554 		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1555 		rx->current_itr = rx->target_itr;
1556 		q_vector->itr_countdown = ITR_COUNTDOWN_START;
1557 	} else {
1558 		/* Still have to re-enable the interrupts */
1559 		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1560 		if (q_vector->itr_countdown)
1561 			q_vector->itr_countdown--;
1562 	}
1563 
1564 	if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
1565 		wr32(&q_vector->vsi->back->hw,
1566 		     GLINT_DYN_CTL(q_vector->reg_idx),
1567 		     itr_val);
1568 }
1569 
1570 /**
1571  * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1572  * @q_vector: q_vector to set WB_ON_ITR on
1573  *
1574  * We need to tell hardware to write-back completed descriptors even when
1575  * interrupts are disabled. Descriptors will be written back on cache line
1576  * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1577  * descriptors may not be written back if they don't fill a cache line until the
1578  * next interrupt.
1579  *
1580  * This sets the write-back frequency to 2 microseconds as that is the minimum
1581  * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
1582  * make sure hardware knows we aren't meddling with the INTENA_M bit.
1583  */
ice_set_wb_on_itr(struct ice_q_vector * q_vector)1584 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1585 {
1586 	struct ice_vsi *vsi = q_vector->vsi;
1587 
1588 	/* already in WB_ON_ITR mode no need to change it */
1589 	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1590 		return;
1591 
1592 	if (q_vector->num_ring_rx)
1593 		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1594 		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1595 						 ICE_RX_ITR));
1596 
1597 	if (q_vector->num_ring_tx)
1598 		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1599 		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1600 						 ICE_TX_ITR));
1601 
1602 	q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1603 }
1604 
1605 /**
1606  * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1607  * @napi: napi struct with our devices info in it
1608  * @budget: amount of work driver is allowed to do this pass, in packets
1609  *
1610  * This function will clean all queues associated with a q_vector.
1611  *
1612  * Returns the amount of work done
1613  */
ice_napi_poll(struct napi_struct * napi,int budget)1614 int ice_napi_poll(struct napi_struct *napi, int budget)
1615 {
1616 	struct ice_q_vector *q_vector =
1617 				container_of(napi, struct ice_q_vector, napi);
1618 	bool clean_complete = true;
1619 	struct ice_ring *ring;
1620 	int budget_per_ring;
1621 	int work_done = 0;
1622 
1623 	/* Since the actual Tx work is minimal, we can give the Tx a larger
1624 	 * budget and be more aggressive about cleaning up the Tx descriptors.
1625 	 */
1626 	ice_for_each_ring(ring, q_vector->tx) {
1627 		bool wd = ring->xsk_pool ?
1628 			  ice_clean_tx_irq_zc(ring, budget) :
1629 			  ice_clean_tx_irq(ring, budget);
1630 
1631 		if (!wd)
1632 			clean_complete = false;
1633 	}
1634 
1635 	/* Handle case where we are called by netpoll with a budget of 0 */
1636 	if (unlikely(budget <= 0))
1637 		return budget;
1638 
1639 	/* normally we have 1 Rx ring per q_vector */
1640 	if (unlikely(q_vector->num_ring_rx > 1))
1641 		/* We attempt to distribute budget to each Rx queue fairly, but
1642 		 * don't allow the budget to go below 1 because that would exit
1643 		 * polling early.
1644 		 */
1645 		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1646 	else
1647 		/* Max of 1 Rx ring in this q_vector so give it the budget */
1648 		budget_per_ring = budget;
1649 
1650 	ice_for_each_ring(ring, q_vector->rx) {
1651 		int cleaned;
1652 
1653 		/* A dedicated path for zero-copy allows making a single
1654 		 * comparison in the irq context instead of many inside the
1655 		 * ice_clean_rx_irq function and makes the codebase cleaner.
1656 		 */
1657 		cleaned = ring->xsk_pool ?
1658 			  ice_clean_rx_irq_zc(ring, budget_per_ring) :
1659 			  ice_clean_rx_irq(ring, budget_per_ring);
1660 		work_done += cleaned;
1661 		/* if we clean as many as budgeted, we must not be done */
1662 		if (cleaned >= budget_per_ring)
1663 			clean_complete = false;
1664 	}
1665 
1666 	/* If work not completed, return budget and polling will return */
1667 	if (!clean_complete)
1668 		return budget;
1669 
1670 	/* Exit the polling mode, but don't re-enable interrupts if stack might
1671 	 * poll us due to busy-polling
1672 	 */
1673 	if (likely(napi_complete_done(napi, work_done)))
1674 		ice_update_ena_itr(q_vector);
1675 	else
1676 		ice_set_wb_on_itr(q_vector);
1677 
1678 	return min_t(int, work_done, budget - 1);
1679 }
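
/* Illustrative sketch (not driver code): the Rx budget split in
 * ice_napi_poll() above can be modeled in a few lines of standalone C;
 * budget_per_ring() is a hypothetical helper. The clamp to 1 is what keeps
 * a q_vector with many Rx rings from passing a zero budget to the per-ring
 * cleanup, which would end polling prematurely.
 */
#include <assert.h>

static int budget_per_ring(int budget, int num_ring_rx)
{
	int share;

	if (num_ring_rx <= 1)
		return budget;		/* a single ring gets it all */

	share = budget / num_ring_rx;	/* fair share across rings */
	return share < 1 ? 1 : share;	/* never drop below 1 */
}

static void budget_split_demo(void)
{
	assert(budget_per_ring(64, 1) == 64);	/* common case: 1 Rx ring */
	assert(budget_per_ring(64, 4) == 16);	/* fair share */
	assert(budget_per_ring(2, 4) == 1);	/* clamped, never 0 */
}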
1680 
1681 /**
1682  * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1683  * @tx_ring: the ring to be checked
1684  * @size: the number of descriptors we want to assure are available
1685  *
1686  * Returns -EBUSY if a stop is needed, else 0
1687  */
1688 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1689 {
1690 	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1691 	/* Memory barrier before checking head and tail */
1692 	smp_mb();
1693 
1694 	/* Check again in case another CPU has just made room available. */
1695 	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1696 		return -EBUSY;
1697 
1698 	/* A reprieve! - use start_subqueue because it doesn't call schedule */
1699 	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1700 	++tx_ring->tx_stats.restart_q;
1701 	return 0;
1702 }
1703 
1704 /**
1705  * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1706  * @tx_ring: the ring to be checked
1707  * @size:    the number of descriptors we want to assure are available
1708  *
1709  * Returns 0 if stop is not needed
1710  */
1711 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1712 {
1713 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1714 		return 0;
1715 
1716 	return __ice_maybe_stop_tx(tx_ring, size);
1717 }
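
/* Illustrative sketch (not driver code): both stop checks above compare
 * ICE_DESC_UNUSED() (a macro from ice_txrx.h) against the requested
 * descriptor count. A standalone model of that free-space arithmetic,
 * assuming the usual one-slot-reserved ring convention so that
 * next_to_use == next_to_clean unambiguously means "empty":
 */
#include <assert.h>

static unsigned int desc_unused(unsigned int count, unsigned int ntu,
				unsigned int ntc)
{
	/* add a full ring's worth when the consumer is at or behind
	 * the producer, then subtract the reserved slot
	 */
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

static void desc_unused_demo(void)
{
	assert(desc_unused(512, 0, 0) == 511);	  /* empty ring */
	assert(desc_unused(512, 10, 500) == 489); /* consumer ahead (wrapped) */
	assert(desc_unused(512, 500, 10) == 21);  /* producer near the end */
}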
1718 
1719 /**
1720  * ice_tx_map - Build the Tx descriptor
1721  * @tx_ring: ring to send buffer on
1722  * @first: first buffer info buffer to use
1723  * @off: pointer to struct that holds offload parameters
1724  *
1725  * This function loops over the skb data pointed to by *first,
1726  * gets a DMA address for each memory location, and programs the
1727  * address and length into a transmit descriptor.
1728  */
1729 static void
1730 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1731 	   struct ice_tx_offload_params *off)
1732 {
1733 	u64 td_offset, td_tag, td_cmd;
1734 	u16 i = tx_ring->next_to_use;
1735 	unsigned int data_len, size;
1736 	struct ice_tx_desc *tx_desc;
1737 	struct ice_tx_buf *tx_buf;
1738 	struct sk_buff *skb;
1739 	skb_frag_t *frag;
1740 	dma_addr_t dma;
1741 
1742 	td_tag = off->td_l2tag1;
1743 	td_cmd = off->td_cmd;
1744 	td_offset = off->td_offset;
1745 	skb = first->skb;
1746 
1747 	data_len = skb->data_len;
1748 	size = skb_headlen(skb);
1749 
1750 	tx_desc = ICE_TX_DESC(tx_ring, i);
1751 
1752 	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1753 		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1754 		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1755 			  ICE_TX_FLAGS_VLAN_S;
1756 	}
1757 
1758 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1759 
1760 	tx_buf = first;
1761 
1762 	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1763 		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1764 
1765 		if (dma_mapping_error(tx_ring->dev, dma))
1766 			goto dma_error;
1767 
1768 		/* record length, and DMA address */
1769 		dma_unmap_len_set(tx_buf, len, size);
1770 		dma_unmap_addr_set(tx_buf, dma, dma);
1771 
1772 		/* grow first chunk so later chunks start 4K-aligned */
1773 		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1774 		tx_desc->buf_addr = cpu_to_le64(dma);
1775 
1776 		/* account for data chunks larger than the hardware
1777 		 * can handle
1778 		 */
1779 		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1780 			tx_desc->cmd_type_offset_bsz =
1781 				ice_build_ctob(td_cmd, td_offset, max_data,
1782 					       td_tag);
1783 
1784 			tx_desc++;
1785 			i++;
1786 
1787 			if (i == tx_ring->count) {
1788 				tx_desc = ICE_TX_DESC(tx_ring, 0);
1789 				i = 0;
1790 			}
1791 
1792 			dma += max_data;
1793 			size -= max_data;
1794 
1795 			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1796 			tx_desc->buf_addr = cpu_to_le64(dma);
1797 		}
1798 
1799 		if (likely(!data_len))
1800 			break;
1801 
1802 		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1803 							      size, td_tag);
1804 
1805 		tx_desc++;
1806 		i++;
1807 
1808 		if (i == tx_ring->count) {
1809 			tx_desc = ICE_TX_DESC(tx_ring, 0);
1810 			i = 0;
1811 		}
1812 
1813 		size = skb_frag_size(frag);
1814 		data_len -= size;
1815 
1816 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1817 				       DMA_TO_DEVICE);
1818 
1819 		tx_buf = &tx_ring->tx_buf[i];
1820 	}
1821 
1822 	/* record bytecount for BQL */
1823 	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1824 
1825 	/* record SW timestamp if HW timestamp is not available */
1826 	skb_tx_timestamp(first->skb);
1827 
1828 	i++;
1829 	if (i == tx_ring->count)
1830 		i = 0;
1831 
1832 	/* write last descriptor with RS and EOP bits */
1833 	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1834 	tx_desc->cmd_type_offset_bsz =
1835 			ice_build_ctob(td_cmd, td_offset, size, td_tag);
1836 
1837 	/* Force memory writes to complete before letting h/w know there
1838 	 * are new descriptors to fetch.
1839 	 *
1840 	 * We also use this memory barrier to make certain all of the
1841 	 * status bits have been updated before next_to_watch is written.
1842 	 */
1843 	wmb();
1844 
1845 	/* set next_to_watch value indicating a packet is present */
1846 	first->next_to_watch = tx_desc;
1847 
1848 	tx_ring->next_to_use = i;
1849 
1850 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1851 
1852 	/* notify HW of packet */
1853 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1854 		writel(i, tx_ring->tail);
1855 
1856 	return;
1857 
1858 dma_error:
1859 	/* clear DMA mappings for failed tx_buf map */
1860 	for (;;) {
1861 		tx_buf = &tx_ring->tx_buf[i];
1862 		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1863 		if (tx_buf == first)
1864 			break;
1865 		if (i == 0)
1866 			i = tx_ring->count;
1867 		i--;
1868 	}
1869 
1870 	tx_ring->next_to_use = i;
1871 }
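
/* Illustrative sketch (not driver code): the chunking loop in ice_tx_map()
 * splits any mapping larger than ICE_MAX_DATA_PER_TXD (16K - 1, per the
 * ice_txd_use_count() comment below) into ICE_MAX_DATA_PER_TXD_ALIGNED
 * (12K) chunks, stretching the first chunk so that later chunks begin on
 * ICE_MAX_READ_REQ_SIZE (4K) boundaries. Constants are assumed from those
 * definitions:
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_PER_TXD		(16 * 1024 - 1)	/* ICE_MAX_DATA_PER_TXD */
#define MAX_PER_TXD_ALIGNED	(12 * 1024)	/* ..._PER_TXD_ALIGNED */
#define READ_REQ_SIZE		4096		/* ICE_MAX_READ_REQ_SIZE */

static void split_demo(uint64_t dma, unsigned int size)
{
	/* first chunk absorbs the offset up to the next 4K boundary */
	unsigned int max_data = MAX_PER_TXD_ALIGNED +
				(unsigned int)(-dma & (READ_REQ_SIZE - 1));

	while (size > MAX_PER_TXD) {
		printf("desc: addr=0x%llx len=%u\n",
		       (unsigned long long)dma, max_data);
		dma += max_data;
		size -= max_data;
		max_data = MAX_PER_TXD_ALIGNED;	/* later chunks: plain 12K */
	}
	printf("desc: addr=0x%llx len=%u (last)\n",
	       (unsigned long long)dma, size);
}

/* split_demo(0x1234, 40000) emits chunks ending at 0x5000 and 0x8000, so
 * every descriptor after the first starts 4K-aligned.
 */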
1872 
1873 /**
1874  * ice_tx_csum - Enable Tx checksum offloads
1875  * @first: pointer to the first Tx buffer of the packet
1876  * @off: pointer to struct that holds offload parameters
1877  *
1878  * Returns 1 when offload was set up, 0 when no offload is needed, negative on error.
1879  */
1880 static
1881 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1882 {
1883 	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1884 	struct sk_buff *skb = first->skb;
1885 	union {
1886 		struct iphdr *v4;
1887 		struct ipv6hdr *v6;
1888 		unsigned char *hdr;
1889 	} ip;
1890 	union {
1891 		struct tcphdr *tcp;
1892 		unsigned char *hdr;
1893 	} l4;
1894 	__be16 frag_off, protocol;
1895 	unsigned char *exthdr;
1896 	u32 offset, cmd = 0;
1897 	u8 l4_proto = 0;
1898 
1899 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1900 		return 0;
1901 
1902 	ip.hdr = skb_network_header(skb);
1903 	l4.hdr = skb_transport_header(skb);
1904 
1905 	/* compute outer L2 header size */
1906 	l2_len = ip.hdr - skb->data;
1907 	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1908 
1909 	protocol = vlan_get_protocol(skb);
1910 
1911 	if (protocol == htons(ETH_P_IP))
1912 		first->tx_flags |= ICE_TX_FLAGS_IPV4;
1913 	else if (protocol == htons(ETH_P_IPV6))
1914 		first->tx_flags |= ICE_TX_FLAGS_IPV6;
1915 
1916 	if (skb->encapsulation) {
1917 		bool gso_ena = false;
1918 		u32 tunnel = 0;
1919 
1920 		/* define outer network header type */
1921 		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1922 			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1923 				  ICE_TX_CTX_EIPT_IPV4 :
1924 				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1925 			l4_proto = ip.v4->protocol;
1926 		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1927 			int ret;
1928 
1929 			tunnel |= ICE_TX_CTX_EIPT_IPV6;
1930 			exthdr = ip.hdr + sizeof(*ip.v6);
1931 			l4_proto = ip.v6->nexthdr;
1932 			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1933 					       &l4_proto, &frag_off);
1934 			if (ret < 0)
1935 				return -1;
1936 		}
1937 
1938 		/* define outer transport */
1939 		switch (l4_proto) {
1940 		case IPPROTO_UDP:
1941 			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1942 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1943 			break;
1944 		case IPPROTO_GRE:
1945 			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1946 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1947 			break;
1948 		case IPPROTO_IPIP:
1949 		case IPPROTO_IPV6:
1950 			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1951 			l4.hdr = skb_inner_network_header(skb);
1952 			break;
1953 		default:
1954 			if (first->tx_flags & ICE_TX_FLAGS_TSO)
1955 				return -1;
1956 
1957 			skb_checksum_help(skb);
1958 			return 0;
1959 		}
1960 
1961 		/* compute outer L3 header size */
1962 		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1963 			  ICE_TXD_CTX_QW0_EIPLEN_S;
1964 
1965 		/* switch IP header pointer from outer to inner header */
1966 		ip.hdr = skb_inner_network_header(skb);
1967 
1968 		/* compute tunnel header size */
1969 		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1970 			   ICE_TXD_CTX_QW0_NATLEN_S;
1971 
1972 		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1973 		/* indicate if we need to offload outer UDP header */
1974 		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1975 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1976 			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1977 
1978 		/* record tunnel offload values */
1979 		off->cd_tunnel_params |= tunnel;
1980 
1981 	/* set DTYP=1 to indicate that it's a Tx context descriptor
1982 		 * in IPsec tunnel mode with Tx offloads in Quad word 1
1983 		 */
1984 		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1985 
1986 		/* switch L4 header pointer from outer to inner */
1987 		l4.hdr = skb_inner_transport_header(skb);
1988 		l4_proto = 0;
1989 
1990 		/* reset type as we transition from outer to inner headers */
1991 		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1992 		if (ip.v4->version == 4)
1993 			first->tx_flags |= ICE_TX_FLAGS_IPV4;
1994 		if (ip.v6->version == 6)
1995 			first->tx_flags |= ICE_TX_FLAGS_IPV6;
1996 	}
1997 
1998 	/* Enable IP checksum offloads */
1999 	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
2000 		l4_proto = ip.v4->protocol;
2001 		/* the stack computes the IP header already; the only time we
2002 		 * need the hardware to recompute it is in the case of TSO.
2003 		 */
2004 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
2005 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
2006 		else
2007 			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
2008 
2009 	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
2010 		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2011 		exthdr = ip.hdr + sizeof(*ip.v6);
2012 		l4_proto = ip.v6->nexthdr;
2013 		if (l4.hdr != exthdr)
2014 			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
2015 					 &frag_off);
2016 	} else {
2017 		return -1;
2018 	}
2019 
2020 	/* compute inner L3 header size */
2021 	l3_len = l4.hdr - ip.hdr;
2022 	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
2023 
2024 	/* Enable L4 checksum offloads */
2025 	switch (l4_proto) {
2026 	case IPPROTO_TCP:
2027 		/* enable checksum offloads */
2028 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2029 		l4_len = l4.tcp->doff;
2030 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2031 		break;
2032 	case IPPROTO_UDP:
2033 		/* enable UDP checksum offload */
2034 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2035 		l4_len = (sizeof(struct udphdr) >> 2);
2036 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2037 		break;
2038 	case IPPROTO_SCTP:
2039 		/* enable SCTP checksum offload */
2040 		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2041 		l4_len = sizeof(struct sctphdr) >> 2;
2042 		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2043 		break;
2044 
2045 	default:
2046 		if (first->tx_flags & ICE_TX_FLAGS_TSO)
2047 			return -1;
2048 		skb_checksum_help(skb);
2049 		return 0;
2050 	}
2051 
2052 	off->td_cmd |= cmd;
2053 	off->td_offset |= offset;
2054 	return 1;
2055 }
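
/* Illustrative sketch (not driver code): the td_offset word assembled
 * above packs header lengths in hardware units: MACLEN in 2-byte words,
 * IPLEN and L4LEN in 4-byte words. The shift values below are assumptions
 * standing in for the ICE_TX_DESC_LEN_*_S macros (MACLEN at bit 0, IPLEN
 * at bit 7, L4LEN at bit 14):
 */
#include <stdio.h>
#include <stdint.h>

#define LEN_MACLEN_S	0	/* assumed ICE_TX_DESC_LEN_MACLEN_S */
#define LEN_IPLEN_S	7	/* assumed ICE_TX_DESC_LEN_IPLEN_S */
#define LEN_L4_LEN_S	14	/* assumed ICE_TX_DESC_LEN_L4_LEN_S */

static void csum_offset_demo(void)
{
	uint32_t offset = 0;

	offset |= (14 / 2) << LEN_MACLEN_S;	/* Ethernet hdr: 7 half-words */
	offset |= (20 / 4) << LEN_IPLEN_S;	/* IPv4, no options: 5 words */
	offset |= 5 << LEN_L4_LEN_S;		/* TCP doff = 5 (20 bytes) */

	printf("td_offset = 0x%x\n", offset);	/* prints 0x14287 */
}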
2056 
2057 /**
2058  * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2059  * @tx_ring: ring to send buffer on
2060  * @first: pointer to struct ice_tx_buf
2061  *
2062  * Checks the skb and sets up the generic transmit flags related to VLAN
2063  * tagging for the HW accordingly (VLAN insertion, DCB priority, etc.)
2064  */
2065 static void
2066 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
2067 {
2068 	struct sk_buff *skb = first->skb;
2069 
2070 	/* nothing left to do, software offloaded VLAN */
2071 	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
2072 		return;
2073 
2074 	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
2075 	 * insertion for 802.1AD is not supported
2076 	 */
2077 	if (skb_vlan_tag_present(skb)) {
2078 		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
2079 		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
2080 	}
2081 
2082 	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2083 }
2084 
2085 /**
2086  * ice_tso - computes mss and TSO length to prepare for TSO
2087  * @first: pointer to struct ice_tx_buf
2088  * @off: pointer to struct that holds offload parameters
2089  *
2090  * Returns 1 when TSO was set up, 0 when TSO is not needed, negative on error.
2091  */
2092 static
2093 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2094 {
2095 	struct sk_buff *skb = first->skb;
2096 	union {
2097 		struct iphdr *v4;
2098 		struct ipv6hdr *v6;
2099 		unsigned char *hdr;
2100 	} ip;
2101 	union {
2102 		struct tcphdr *tcp;
2103 		struct udphdr *udp;
2104 		unsigned char *hdr;
2105 	} l4;
2106 	u64 cd_mss, cd_tso_len;
2107 	u32 paylen;
2108 	u8 l4_start;
2109 	int err;
2110 
2111 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2112 		return 0;
2113 
2114 	if (!skb_is_gso(skb))
2115 		return 0;
2116 
2117 	err = skb_cow_head(skb, 0);
2118 	if (err < 0)
2119 		return err;
2120 
2121 	/* cppcheck-suppress unreadVariable */
2122 	ip.hdr = skb_network_header(skb);
2123 	l4.hdr = skb_transport_header(skb);
2124 
2125 	/* initialize outer IP header fields */
2126 	if (ip.v4->version == 4) {
2127 		ip.v4->tot_len = 0;
2128 		ip.v4->check = 0;
2129 	} else {
2130 		ip.v6->payload_len = 0;
2131 	}
2132 
2133 	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2134 					 SKB_GSO_GRE_CSUM |
2135 					 SKB_GSO_IPXIP4 |
2136 					 SKB_GSO_IPXIP6 |
2137 					 SKB_GSO_UDP_TUNNEL |
2138 					 SKB_GSO_UDP_TUNNEL_CSUM)) {
2139 		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2140 		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2141 			l4.udp->len = 0;
2142 
2143 			/* determine offset of outer transport header */
2144 			l4_start = (u8)(l4.hdr - skb->data);
2145 
2146 			/* remove payload length from outer checksum */
2147 			paylen = skb->len - l4_start;
2148 			csum_replace_by_diff(&l4.udp->check,
2149 					     (__force __wsum)htonl(paylen));
2150 		}
2151 
2152 		/* reset pointers to inner headers */
2153 
2154 		/* cppcheck-suppress unreadVariable */
2155 		ip.hdr = skb_inner_network_header(skb);
2156 		l4.hdr = skb_inner_transport_header(skb);
2157 
2158 		/* initialize inner IP header fields */
2159 		if (ip.v4->version == 4) {
2160 			ip.v4->tot_len = 0;
2161 			ip.v4->check = 0;
2162 		} else {
2163 			ip.v6->payload_len = 0;
2164 		}
2165 	}
2166 
2167 	/* determine offset of transport header */
2168 	l4_start = (u8)(l4.hdr - skb->data);
2169 
2170 	/* remove payload length from checksum */
2171 	paylen = skb->len - l4_start;
2172 
2173 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2174 		csum_replace_by_diff(&l4.udp->check,
2175 				     (__force __wsum)htonl(paylen));
2176 		/* compute length of UDP segmentation header */
2177 		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2178 	} else {
2179 		csum_replace_by_diff(&l4.tcp->check,
2180 				     (__force __wsum)htonl(paylen));
2181 		/* compute length of TCP segmentation header */
2182 		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2183 	}
2184 
2185 	/* update gso_segs and bytecount */
2186 	first->gso_segs = skb_shinfo(skb)->gso_segs;
2187 	first->bytecount += (first->gso_segs - 1) * off->header_len;
2188 
2189 	cd_tso_len = skb->len - off->header_len;
2190 	cd_mss = skb_shinfo(skb)->gso_size;
2191 
2192 	/* record cdesc_qw1 with TSO parameters */
2193 	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2194 			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2195 			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2196 			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2197 	first->tx_flags |= ICE_TX_FLAGS_TSO;
2198 	return 1;
2199 }
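
/* Illustrative sketch (not driver code): ice_tso() reports only the MSS
 * and the TSO payload length (skb->len minus one copy of the headers) to
 * hardware. A worked example with assumed sizes -- 14-byte Ethernet,
 * 20-byte IPv4, and a 32-byte TCP header (doff = 8, e.g. with timestamps):
 */
#include <assert.h>

static void tso_len_demo(void)
{
	unsigned int l4_start = 14 + 20;		/* transport hdr offset */
	unsigned int header_len = 8 * 4 + l4_start;	/* doff * 4 + l4_start */
	unsigned int skb_len = header_len + 65486;	/* headers + payload */
	unsigned int cd_tso_len = skb_len - header_len;

	assert(header_len == 66);	/* replicated in every segment */
	assert(cd_tso_len == 65486);	/* goes in the context descriptor */
}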
2200 
2201 /**
2202  * ice_txd_use_count  - estimate the number of descriptors needed for Tx
2203  * @size: transmit request size in bytes
2204  *
2205  * Due to hardware alignment restrictions (4K alignment), we need to
2206  * assume that we can have no more than 12K of data per descriptor, even
2207  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2208  * Thus, we need to divide by 12K. But division is slow! Instead,
2209  * we decompose the operation into shifts and one relatively cheap
2210  * multiply operation.
2211  *
2212  * To divide by 12K, we first divide by 4K, then divide by 3:
2213  *     To divide by 4K, shift right by 12 bits
2214  *     To divide by 3, multiply by 85, then divide by 256
2215  *     (Divide by 256 is done by shifting right by 8 bits)
2216  * Finally, we add one to round up. Because 256 isn't an exact multiple of
2217  * 3, we'll underestimate near each multiple of 12K. This is actually more
2218  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2219  * segment. For our purposes this is accurate out to 1M which is orders of
2220  * magnitude greater than our largest possible GSO size.
2221  *
2222  * This would then be implemented as:
2223  *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2224  *
2225  * Since multiplication and division are commutative, we can reorder
2226  * operations into:
2227  *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2228  */
2229 static unsigned int ice_txd_use_count(unsigned int size)
2230 {
2231 	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2232 }
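
/* Illustrative sketch (not driver code): the shift-and-multiply above can
 * be sanity-checked against plain division by 12K. Standalone; the "+ 1"
 * stands in for ICE_DESCS_FOR_SKB_DATA_PTR:
 */
#include <assert.h>

static unsigned int txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}

static void txd_use_count_demo(void)
{
	assert(txd_use_count(4096) == 1);	/* one page, one descriptor */
	assert(txd_use_count(12288) == 1);	/* 12K still fits one desc
						 * (the 4K - 1 wiggle room)
						 */
	assert(txd_use_count(16000) == 2);	/* > 12K estimated as two */
	assert(txd_use_count(65536) == 6);	/* typical 64K GSO chunk */
}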
2233 
2234 /**
2235  * ice_xmit_desc_count - calculate number of Tx descriptors needed
2236  * @skb: send buffer
2237  *
2238  * Returns number of data descriptors needed for this skb.
2239  */
2240 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2241 {
2242 	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2243 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2244 	unsigned int count = 0, size = skb_headlen(skb);
2245 
2246 	for (;;) {
2247 		count += ice_txd_use_count(size);
2248 
2249 		if (!nr_frags--)
2250 			break;
2251 
2252 		size = skb_frag_size(frag++);
2253 	}
2254 
2255 	return count;
2256 }
2257 
2258 /**
2259  * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2260  * @skb: send buffer
2261  *
2262  * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2263  * and so we need to figure out the cases where we need to linearize the skb.
2264  *
2265  * For TSO we need to count the TSO header and segment payload separately.
2266  * As such we need to check cases where we have 7 fragments or more as we
2267  * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2268  * the segment payload in the first descriptor, and another 7 for the
2269  * fragments.
2270  */
2271 static bool __ice_chk_linearize(struct sk_buff *skb)
2272 {
2273 	const skb_frag_t *frag, *stale;
2274 	int nr_frags, sum;
2275 
2276 	/* no need to check if number of frags is less than 7 */
2277 	nr_frags = skb_shinfo(skb)->nr_frags;
2278 	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2279 		return false;
2280 
2281 	/* We need to walk through the list and validate that each group
2282 	 * of 6 fragments totals at least gso_size.
2283 	 */
2284 	nr_frags -= ICE_MAX_BUF_TXD - 2;
2285 	frag = &skb_shinfo(skb)->frags[0];
2286 
2287 	/* Initialize sum to 1 minus gso_size, i.e. -(gso_size - 1). We
2288 	 * use this as the worst case scenario in which the fragment just
2289 	 * after the window provides only one byte, which is why we are
2290 	 * limited to 6 descriptors for a single transmit: the header and
2291 	 * that trailing fragment already consume 2 of the 8 descriptors.
2292 	 */
2293 	sum = 1 - skb_shinfo(skb)->gso_size;
2294 
2295 	/* Add size of frags 0 through 4 to create our initial sum */
2296 	sum += skb_frag_size(frag++);
2297 	sum += skb_frag_size(frag++);
2298 	sum += skb_frag_size(frag++);
2299 	sum += skb_frag_size(frag++);
2300 	sum += skb_frag_size(frag++);
2301 
2302 	/* Walk through fragments adding latest fragment, testing it, and
2303 	 * then removing stale fragments from the sum.
2304 	 */
2305 	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2306 		int stale_size = skb_frag_size(stale);
2307 
2308 		sum += skb_frag_size(frag++);
2309 
2310 		/* The stale fragment may present us with a smaller
2311 		 * descriptor than the actual fragment size. To account
2312 		 * for that we need to remove all the data on the front and
2313 		 * figure out what the remainder would be in the last
2314 		 * descriptor associated with the fragment.
2315 		 */
2316 		if (stale_size > ICE_MAX_DATA_PER_TXD) {
2317 			int align_pad = -(skb_frag_off(stale)) &
2318 					(ICE_MAX_READ_REQ_SIZE - 1);
2319 
2320 			sum -= align_pad;
2321 			stale_size -= align_pad;
2322 
2323 			do {
2324 				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2325 				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2326 			} while (stale_size > ICE_MAX_DATA_PER_TXD);
2327 		}
2328 
2329 		/* if sum is negative we failed to make sufficient progress */
2330 		if (sum < 0)
2331 			return true;
2332 
2333 		if (!nr_frags--)
2334 			break;
2335 
2336 		sum -= stale_size;
2337 	}
2338 
2339 	return false;
2340 }
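
/* Illustrative sketch (not driver code): the sliding-window check above
 * can be modeled in userspace. Every window of 6 consecutive fragments
 * must cover at least gso_size bytes, or a single segment could need more
 * than 8 buffers. This simplified model omits the stale_size alignment
 * adjustment; frag[] holds fragment sizes in bytes:
 */
#include <stdbool.h>

static bool needs_linearize(const int *frag, int nr_frags, int gso_size)
{
	int sum, i;

	if (nr_frags < 7)		/* ICE_MAX_BUF_TXD - 1 */
		return false;

	/* worst case: the frag after the window contributes one byte */
	sum = 1 - gso_size;
	for (i = 0; i < 5; i++)		/* frags 0..4 seed the window */
		sum += frag[i];

	for (i = 5; i < nr_frags; i++) {
		sum += frag[i];		/* admit the newest fragment */
		if (sum < 0)		/* window fell short of gso_size */
			return true;
		sum -= frag[i - 5];	/* retire the stale fragment */
	}
	return false;
}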
2341 
2342 /**
2343  * ice_chk_linearize - Check if there are more than 8 fragments per packet
2344  * @skb:      send buffer
2345  * @count:    number of buffers used
2346  *
2347  * Note: Our HW can't scatter-gather more than 8 fragments to build
2348  * a packet on the wire and so we need to figure out the cases where we
2349  * need to linearize the skb.
2350  */
2351 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2352 {
2353 	/* Both TSO and single send will work if count is less than 8 */
2354 	if (likely(count < ICE_MAX_BUF_TXD))
2355 		return false;
2356 
2357 	if (skb_is_gso(skb))
2358 		return __ice_chk_linearize(skb);
2359 
2360 	/* we can support up to 8 data buffers for a single send */
2361 	return count != ICE_MAX_BUF_TXD;
2362 }
2363 
2364 /**
2365  * ice_xmit_frame_ring - Sends buffer on Tx ring
2366  * @skb: send buffer
2367  * @tx_ring: ring to send buffer on
2368  *
2369  * Returns NETDEV_TX_OK if sent, else an error code
2370  */
2371 static netdev_tx_t
2372 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2373 {
2374 	struct ice_tx_offload_params offload = { 0 };
2375 	struct ice_vsi *vsi = tx_ring->vsi;
2376 	struct ice_tx_buf *first;
2377 	struct ethhdr *eth;
2378 	unsigned int count;
2379 	int tso, csum;
2380 
2381 	count = ice_xmit_desc_count(skb);
2382 	if (ice_chk_linearize(skb, count)) {
2383 		if (__skb_linearize(skb))
2384 			goto out_drop;
2385 		count = ice_txd_use_count(skb->len);
2386 		tx_ring->tx_stats.tx_linearize++;
2387 	}
2388 
2389 	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2390 	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2391 	 *       + 4 desc gap to avoid the cache line where head is,
2392 	 *       + 1 desc for context descriptor,
2393 	 * otherwise try next time
2394 	 */
2395 	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2396 			      ICE_DESCS_FOR_CTX_DESC)) {
2397 		tx_ring->tx_stats.tx_busy++;
2398 		return NETDEV_TX_BUSY;
2399 	}
2400 
2401 	offload.tx_ring = tx_ring;
2402 
2403 	/* record the location of the first descriptor for this packet */
2404 	first = &tx_ring->tx_buf[tx_ring->next_to_use];
2405 	first->skb = skb;
2406 	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2407 	first->gso_segs = 1;
2408 	first->tx_flags = 0;
2409 
2410 	/* prepare the VLAN tagging flags for Tx */
2411 	ice_tx_prepare_vlan_flags(tx_ring, first);
2412 
2413 	/* set up TSO offload */
2414 	tso = ice_tso(first, &offload);
2415 	if (tso < 0)
2416 		goto out_drop;
2417 
2418 	/* always set up Tx checksum offload */
2419 	csum = ice_tx_csum(first, &offload);
2420 	if (csum < 0)
2421 		goto out_drop;
2422 
2423 	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2424 	eth = (struct ethhdr *)skb_mac_header(skb);
2425 	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2426 		      eth->h_proto == htons(ETH_P_LLDP)) &&
2427 		     vsi->type == ICE_VSI_PF &&
2428 		     vsi->port_info->qos_cfg.is_sw_lldp))
2429 		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2430 					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2431 					ICE_TXD_CTX_QW1_CMD_S);
2432 
2433 	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2434 		struct ice_tx_ctx_desc *cdesc;
2435 		u16 i = tx_ring->next_to_use;
2436 
2437 		/* grab the next descriptor */
2438 		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2439 		i++;
2440 		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2441 
2442 		/* setup context descriptor */
2443 		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2444 		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2445 		cdesc->rsvd = cpu_to_le16(0);
2446 		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2447 	}
2448 
2449 	ice_tx_map(tx_ring, first, &offload);
2450 	return NETDEV_TX_OK;
2451 
2452 out_drop:
2453 	dev_kfree_skb_any(skb);
2454 	return NETDEV_TX_OK;
2455 }
2456 
2457 /**
2458  * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2459  * @skb: send buffer
2460  * @netdev: network interface device structure
2461  *
2462  * Returns NETDEV_TX_OK if sent, else an error code
2463  */
2464 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2465 {
2466 	struct ice_netdev_priv *np = netdev_priv(netdev);
2467 	struct ice_vsi *vsi = np->vsi;
2468 	struct ice_ring *tx_ring;
2469 
2470 	tx_ring = vsi->tx_rings[skb->queue_mapping];
2471 
2472 	/* hardware can't handle really short frames; pad in software up to
2473 	 * ICE_MIN_TX_LEN, hardware padding takes over beyond that point
2474 	 */
2475 	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2476 		return NETDEV_TX_OK;
2477 
2478 	return ice_xmit_frame_ring(skb, tx_ring);
2479 }
2480 
2481 /**
2482  * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2483  * @tx_ring: tx_ring to clean
2484  */
2485 void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
2486 {
2487 	struct ice_vsi *vsi = tx_ring->vsi;
2488 	s16 i = tx_ring->next_to_clean;
2489 	int budget = ICE_DFLT_IRQ_WORK;
2490 	struct ice_tx_desc *tx_desc;
2491 	struct ice_tx_buf *tx_buf;
2492 
2493 	tx_buf = &tx_ring->tx_buf[i];
2494 	tx_desc = ICE_TX_DESC(tx_ring, i);
2495 	i -= tx_ring->count;
2496 
2497 	do {
2498 		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2499 
2500 		/* if next_to_watch is not set then there is no pending work */
2501 		if (!eop_desc)
2502 			break;
2503 
2504 		/* prevent any other reads prior to eop_desc */
2505 		smp_rmb();
2506 
2507 		/* if the descriptor isn't done, no work to do */
2508 		if (!(eop_desc->cmd_type_offset_bsz &
2509 		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2510 			break;
2511 
2512 		/* clear next_to_watch to prevent false hangs */
2513 		tx_buf->next_to_watch = NULL;
2514 		tx_desc->buf_addr = 0;
2515 		tx_desc->cmd_type_offset_bsz = 0;
2516 
2517 		/* move past filter desc */
2518 		tx_buf++;
2519 		tx_desc++;
2520 		i++;
2521 		if (unlikely(!i)) {
2522 			i -= tx_ring->count;
2523 			tx_buf = tx_ring->tx_buf;
2524 			tx_desc = ICE_TX_DESC(tx_ring, 0);
2525 		}
2526 
2527 		/* unmap the data header */
2528 		if (dma_unmap_len(tx_buf, len))
2529 			dma_unmap_single(tx_ring->dev,
2530 					 dma_unmap_addr(tx_buf, dma),
2531 					 dma_unmap_len(tx_buf, len),
2532 					 DMA_TO_DEVICE);
2533 		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2534 			devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2535 
2536 		/* reset buffer and descriptor state for reuse */
2537 		tx_buf->raw_buf = NULL;
2538 		tx_buf->tx_flags = 0;
2539 		tx_buf->next_to_watch = NULL;
2540 		dma_unmap_len_set(tx_buf, len, 0);
2541 		tx_desc->buf_addr = 0;
2542 		tx_desc->cmd_type_offset_bsz = 0;
2543 
2544 		/* move past eop_desc for start of next FD desc */
2545 		tx_buf++;
2546 		tx_desc++;
2547 		i++;
2548 		if (unlikely(!i)) {
2549 			i -= tx_ring->count;
2550 			tx_buf = tx_ring->tx_buf;
2551 			tx_desc = ICE_TX_DESC(tx_ring, 0);
2552 		}
2553 
2554 		budget--;
2555 	} while (likely(budget));
2556 
2557 	i += tx_ring->count;
2558 	tx_ring->next_to_clean = i;
2559 
2560 	/* re-enable interrupt if needed */
2561 	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2562 }
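
/* Illustrative sketch (not driver code): the loop above biases the ring
 * index negative (i -= tx_ring->count) so each step only needs a cheap
 * "did we hit zero" test instead of a compare against count. A standalone
 * model of that idiom:
 */
#include <assert.h>

static unsigned short advance_index(unsigned short start,
				    unsigned short count, int steps)
{
	short i = (short)start - (short)count;	/* bias into [-count, 0) */

	while (steps--) {
		i++;
		if (!i)			/* hit zero: wrapped past the end */
			i -= count;	/* rebias to the ring start */
	}
	return (unsigned short)(i + count);	/* back to [0, count) */
}

static void advance_index_demo(void)
{
	assert(advance_index(510, 512, 1) == 511);
	assert(advance_index(511, 512, 1) == 0);	/* wrap-around */
	assert(advance_index(511, 512, 3) == 2);
}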
2563