// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

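/* Ring a Tx queue's doorbell by writing the given value (typically the
 * descriptor producer index, tx->req) big-endian to the queue's register in
 * doorbell BAR2, located via the queue resources' db_index.
 */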
static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{
	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}

/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */

static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}

	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}

static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");

	vunmap(fifo->base);
}

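/* Returns 0 if @bytes fits between the current head and the end of the FIFO,
 * otherwise the number of padding bytes needed to skip past the end so the
 * allocation can start over at offset 0.
 */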
static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo,
					  size_t bytes)
{
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}

static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (atomic_read(&fifo->available) <= bytes) ? false : true;
}

/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but concurrent
 * allocation and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* This check happens before we know how much padding is needed to
	 * align to a cacheline boundary for the payload, but that is fine,
	 * because the FIFO head always starts aligned, and the FIFO's boundaries
	 * are aligned, so if there is space for the data, there is space for
	 * the padding to the next alignment.
	 */
	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;

	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* If the allocation did not fit in the tail fragment of the
		 * FIFO, also use the head fragment.
		 */
		nfrags++;
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[1].iov_offset = 0;	/* Start of fifo */
		iov[1].iov_len = overflow;

		fifo->head = overflow;
	}

	/* Re-align to a cacheline boundary */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}
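
/* Illustrative example of the wrap-around case above (hypothetical numbers,
 * assuming a 4096-byte FIFO and 64-byte cachelines): with fifo->head == 4000,
 * allocating 200 bytes yields two fragments,
 *   iov[0] = { .iov_offset = 4000, .iov_len = 96 }  (tail of the FIFO)
 *   iov[1] = { .iov_offset = 0,    .iov_len = 104 } (wraps to the start)
 * after which fifo->head is re-aligned from 104 to 128 and 200 + 24 padding
 * bytes are subtracted from fifo->available.
 */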

/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}

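/* Detach a Tx ring from its notify block; the inverse of
 * gve_tx_add_to_block() below.
 */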
static void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);

static void gve_tx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t bytes;
	u32 slots;

	gve_tx_remove_from_block(priv, idx);
	slots = tx->mask + 1;
	gve_clean_tx_done(priv, tx, tx->req, false);
	netdev_tx_reset_queue(tx->netdev_txq);

	dma_free_coherent(hdev, sizeof(*tx->q_resources),
			  tx->q_resources, tx->q_resources_bus);
	tx->q_resources = NULL;

	gve_tx_fifo_release(priv, &tx->tx_fifo);
	gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
	tx->tx_fifo.qpl = NULL;

	bytes = sizeof(*tx->desc) * slots;
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;

	vfree(tx->info);
	tx->info = NULL;

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}

static void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
}

static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots = priv->tx_desc_cnt;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	tx->q_num = idx;

	tx->mask = slots - 1;

	/* alloc metadata */
	tx->info = vzalloc(sizeof(*tx->info) * slots);
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * slots;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
	if (!tx->tx_fifo.qpl)
		goto abort_with_desc;

	/* map Tx FIFO */
	if (gve_tx_fifo_init(priv, &tx->tx_fifo))
		goto abort_with_qpl;

	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
		  (unsigned long)tx->bus);
	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	gve_tx_add_to_block(priv, idx);

	return 0;

abort_with_fifo:
	gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}

int gve_tx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		err = gve_tx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}
	/* Free any rings that were allocated before the error */
	if (err) {
		int j;

		for (j = 0; j < i; j++)
			gve_tx_free_ring(priv, j);
	}
	return err;
}

void gve_tx_free_rings(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++)
		gve_tx_free_ring(priv, i);
}

/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 **/
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
	return tx->mask + 1 - (tx->req - tx->done);
}

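/* Estimate the FIFO space an skb will consume: the full skb length, plus any
 * padding needed so the header does not wrap past the end of the FIFO, plus
 * the cacheline-alignment padding added after the header.
 */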
static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{
	int pad_bytes, align_hdr_pad;
	int bytes;
	int hlen;

	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
				 tcp_hdrlen(skb) : skb_headlen(skb);

	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
						   hlen);
	/* We need to take into account the header alignment padding. */
	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
	bytes = align_hdr_pad + pad_bytes + skb->len;

	return bytes;
}

/* The most descriptors we could need are 3 - 1 for the headers, 1 for
 * the beginning of the payload at the end of the FIFO, and 1 if the
 * payload wraps to the beginning of the FIFO.
 */
#define MAX_TX_DESC_NEEDED	3

/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED &&
		gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required));
}

/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int bytes_required;

	bytes_required = gve_skb_fifo_bytes_required(tx, skb);
	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;

	/* No space, so stop the queue */
	tx->stop_queue++;
	netif_tx_stop_queue(tx->netdev_txq);
	smp_mb();	/* sync with restarting queue in gve_clean_tx_done() */

	/* Now check for resources again, in case gve_clean_tx_done() freed
	 * resources after we checked and we stopped the queue after
	 * gve_clean_tx_done() checked.
	 *
	 * gve_maybe_stop_tx()			gve_clean_tx_done()
	 *   nsegs/can_alloc test failed
	 *					  gve_tx_free_fifo()
	 *					  if (tx queue stopped)
	 *					    netif_tx_queue_wake()
	 *   netif_tx_stop_queue()
	 *   Need to check again for space here!
	 */
	if (likely(!gve_can_tx(tx, bytes_required)))
		return -EBUSY;

	netif_tx_start_queue(tx->netdev_txq);
	tx->wake_queue++;
	return 0;
}

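/* Fill the first (packet) descriptor for an skb: checksum/TSO flags, total
 * packet length, the number of descriptors that follow, and the length and
 * FIFO address of the header segment.
 */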
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 struct sk_buff *skb, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr)
{
	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
	if (is_gso) {
		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else {
		pkt_desc->pkt.type_flags = GVE_TXD_STD;
		pkt_desc->pkt.l4_csum_offset = 0;
		pkt_desc->pkt.l4_hdr_offset = 0;
	}
	pkt_desc->pkt.desc_cnt = desc_cnt;
	pkt_desc->pkt.len = cpu_to_be16(skb->len);
	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}

static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 struct sk_buff *skb, bool is_gso,
				 u16 len, u64 addr)
{
	seg_desc->seg.type_flags = GVE_TXD_SEG;
	if (is_gso) {
		if (skb_is_gso_v6(skb))
			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
		seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
		seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
	}
	seg_desc->seg.seg_len = cpu_to_be16(len);
	seg_desc->seg.seg_addr = cpu_to_be64(addr);
}

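/* Sync every qpl page spanned by a FIFO fragment to the device so the NIC
 * sees the bytes just copied in before the doorbell is rung.
 */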
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 first_page = iov_offset / PAGE_SIZE;
	dma_addr_t dma;
	u64 page;

	for (page = first_page; page <= last_page; page++) {
		dma = page_buses[page];
		dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
	}
}

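/* Copy an skb into the Tx FIFO and write its descriptors: one packet
 * descriptor covering the headers plus one segment descriptor per payload
 * fragment. Returns the total number of descriptors written.
 */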
static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
			  struct device *dev)
{
	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	int payload_iov = 2;
	int copy_offset;
	u32 next_idx;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want the tcp header in the first segment
	 * otherwise we want the linear portion of the skb (which will contain
	 * the checksum because skb->csum_start and skb->csum_offset are given
	 * relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
			skb_headlen(skb);

	info->skb = skb;
	/* We don't want to split the header, so if necessary, pad to the end
	 * of the fifo and then put the header at the beginning of the fifo.
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
				       &info->iov[0]);
	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
					   &info->iov[payload_iov]);

	gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
			     1 + payload_nfrags, hlen,
			     info->iov[hdr_nfrags - 1].iov_offset);

	skb_copy_bits(skb, 0,
		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
		      hlen);
	gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
				info->iov[hdr_nfrags - 1].iov_offset,
				info->iov[hdr_nfrags - 1].iov_len);
	copy_offset = hlen;

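	/* Copy each payload fragment into the FIFO and write a segment
	 * descriptor in the slots following the packet descriptor.
	 */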
	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
		next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
		seg_desc = &tx->desc[next_idx];

		gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
				     info->iov[i].iov_len,
				     info->iov[i].iov_offset);

		skb_copy_bits(skb, copy_offset,
			      tx->tx_fifo.base + info->iov[i].iov_offset,
			      info->iov[i].iov_len);
		gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
					info->iov[i].iov_offset,
					info->iov[i].iov_len);
		copy_offset += info->iov[i].iov_len;
	}

	return 1 + payload_nfrags;
}

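/* Transmit entry point: pick the Tx ring from the skb's queue mapping, copy
 * the skb into that ring's FIFO, and ring the doorbell unless the stack has
 * indicated more packets are coming (xmit_more) and the queue is still
 * running.
 */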
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int nsegs;

	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
	     "skb queue index out of range");
	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_maybe_stop_tx(tx, skb))) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */

		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		return NETDEV_TX_BUSY;
	}
	nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);

	netdev_tx_sent_queue(tx->netdev_txq, skb->len);
	skb_tx_timestamp(skb);

	/* give packets to NIC */
	tx->req += nsegs;

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;
}

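/* Minimum FIFO space that must be free before a stopped queue is woken in
 * gve_clean_tx_done().
 */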
#define GVE_TX_START_THRESH	PAGE_SIZE

static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	struct sk_buff *skb;
	int i, j;
	u32 idx;

	for (j = 0; j < to_do; j++) {
		idx = tx->done & tx->mask;
		netif_info(priv, tx_done, priv->dev,
			   "[%d] %s: idx=%d (req=%u done=%u)\n",
			   tx->q_num, __func__, idx, tx->req, tx->done);
		info = &tx->info[idx];
		skb = info->skb;

		/* Mark as free */
		if (skb) {
			info->skb = NULL;
			bytes += skb->len;
			pkts++;
			dev_consume_skb_any(skb);
			/* FIFO free */
			for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
				space_freed += info->iov[i].iov_len +
					       info->iov[i].iov_padding;
				info->iov[i].iov_len = 0;
				info->iov[i].iov_padding = 0;
			}
		}
		tx->done++;
	}

	gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

	/* start the queue if we've stopped it */
#ifndef CONFIG_BQL
	/* Make sure that the doorbells are synced */
	smp_mb();
#endif
	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
		tx->wake_queue++;
		netif_tx_wake_queue(tx->netdev_txq);
	}

	return pkts;
}

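/* Read the completion counter the device writes for this Tx queue; the value
 * is kept big-endian and converted by the caller.
 */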
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));

	return READ_ONCE(priv->counter_array[counter_index]);
}

bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	bool repoll = false;
	u32 nic_done;
	u32 to_do;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	/* Find out how much work there is to be done */
	tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
	nic_done = be32_to_cpu(tx->last_nic_done);
	if (budget > 0) {
		/* Do as much work as we have that the budget will
		 * allow
		 */
		to_do = min_t(u32, (nic_done - tx->done), budget);
		gve_clean_tx_done(priv, tx, to_do, true);
	}
	/* If we still have work we want to repoll */
	repoll |= (nic_done != tx->done);
	return repoll;
}
604