1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* XDP user-space ring structure
58 * Documentation/core-api/circular-buffers.rst. For the Rx and
65 * if (LOAD ->consumer) { (A) LOAD.acq ->producer (C)
67 * STORE.rel ->producer (B) STORE.rel ->consumer (D)
82 * between ->producer and data.
84 * (A) is a control dependency that separates the load of ->consumer
85 * from the stores of $data. In case ->consumer indicates there is no
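/* A minimal sketch (hypothetical, not from this header) of the same
 * (A)-(D) pairing expressed with C11 atomics, roughly what a single
 * user-space producer and a single consumer would do against one of
 * these rings. spsc_ring, spsc_produce() and spsc_consume() are
 * made-up names; only the ordering is the point. (A) is shown as an
 * acquire load here because portable C11 code has no control-dependency
 * ordering to rely on.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct spsc_ring {
	_Atomic uint32_t producer;
	_Atomic uint32_t consumer;
	uint32_t mask;		/* nentries - 1, nentries is a power of two */
	uint64_t desc[];	/* flexible array, as in the real rings */
};

static bool spsc_produce(struct spsc_ring *r, uint64_t val)
{
	uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
	uint32_t cons = atomic_load_explicit(&r->consumer,
					     memory_order_acquire);	/* (A) */

	if (prod - cons > r->mask)	/* no room: $data is not stored */
		return false;
	r->desc[prod & r->mask] = val;	/* STORE $data */
	atomic_store_explicit(&r->producer, prod + 1,
			      memory_order_release);	/* (B) */
	return true;
}

static bool spsc_consume(struct spsc_ring *r, uint64_t *val)
{
	uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
	uint32_t prod = atomic_load_explicit(&r->producer,
					     memory_order_acquire);	/* (C) */

	if (cons == prod)
		return false;
	*val = r->desc[cons & r->mask];	/* LOAD $data */
	atomic_store_explicit(&r->consumer, cons + 1,
			      memory_order_release);	/* (D) */
	return true;
}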
118 /* Functions that read and validate content from consumer rings. */
122 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in __xskq_cons_read_addr_unchecked()
123 u32 idx = cached_cons & q->ring_mask; in __xskq_cons_read_addr_unchecked()
125 *addr = ring->desc[idx]; in __xskq_cons_read_addr_unchecked()
130 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_addr_unchecked()
131 __xskq_cons_read_addr_unchecked(q, q->cached_cons, addr); in xskq_cons_read_addr_unchecked()
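/* A small illustrative helper (hypothetical, not in this header): the
 * cached indices above are free-running u32 counters and only the low
 * bits select a slot, so a power-of-two ring never needs an explicit
 * wrap-around.
 */
static inline u32 ring_slot(u32 cached_idx, u32 nentries)
{
	/* e.g. nentries == 8: indices 7, 8, 9 map to slots 7, 0, 1 */
	return cached_idx & (nentries - 1);
}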
146 u64 offset = desc->addr & (pool->chunk_size - 1); in xp_aligned_validate_desc()
148 if (!desc->len) in xp_aligned_validate_desc()
151 if (offset + desc->len > pool->chunk_size) in xp_aligned_validate_desc()
154 if (desc->addr >= pool->addrs_cnt) in xp_aligned_validate_desc()
157 if (xp_unused_options_set(desc->options)) in xp_aligned_validate_desc()
165 u64 addr = xp_unaligned_add_offset_to_addr(desc->addr); in xp_unaligned_validate_desc()
167 if (!desc->len) in xp_unaligned_validate_desc()
170 if (desc->len > pool->chunk_size) in xp_unaligned_validate_desc()
173 if (addr >= pool->addrs_cnt || addr + desc->len > pool->addrs_cnt || in xp_unaligned_validate_desc()
174 xp_desc_crosses_non_contig_pg(pool, addr, desc->len)) in xp_unaligned_validate_desc()
177 if (xp_unused_options_set(desc->options)) in xp_unaligned_validate_desc()
185 return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) : in xp_validate_desc()
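/* A worked example for the aligned path above, assuming a hypothetical
 * pool with chunk_size == 2048 and addrs_cnt == 0x10000:
 *
 *   addr 0x1000,  len 512   -> offset 0x000, 0x000 + 512  <= 2048: valid
 *   addr 0x1700,  len 1024  -> offset 0x700, 0x700 + 1024 >  2048: rejected,
 *                              the frame would cross a chunk boundary
 *   addr 0x10000, len 64    -> addr >= addrs_cnt: rejected, outside the umem
 *
 * The unaligned path only requires len <= chunk_size and that the buffer
 * stays inside the umem without crossing a non-contiguous page.
 */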
191 return q->cached_cons != q->cached_prod; in xskq_has_descs()
199 q->invalid_descs++; in xskq_cons_is_valid_desc()
209 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_desc()
210 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_cons_read_desc()
211 u32 idx = q->cached_cons & q->ring_mask; in xskq_cons_read_desc()
213 *desc = ring->desc[idx]; in xskq_cons_read_desc()
217 q->queue_empty_descs++; in xskq_cons_read_desc()
223 q->cached_cons += cnt; in xskq_cons_release_n()
229 parsed->valid = xskq_cons_is_valid_desc(q, desc, pool); in parse_desc()
230 parsed->mb = xp_mb_desc(desc); in parse_desc()
235 u32 max) in xskq_cons_read_desc_batch() argument
237 u32 cached_cons = q->cached_cons, nb_entries = 0; in xskq_cons_read_desc_batch()
238 struct xdp_desc *descs = pool->tx_descs; in xskq_cons_read_desc_batch()
244 while (cached_cons != q->cached_prod && nb_entries < max) { in xskq_cons_read_desc_batch()
245 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_cons_read_desc_batch()
246 u32 idx = cached_cons & q->ring_mask; in xskq_cons_read_desc_batch()
249 descs[nb_entries] = ring->desc[idx]; in xskq_cons_read_desc_batch()
260 if (nr_frags == pool->netdev->xdp_zc_max_segs) { in xskq_cons_read_desc_batch()
268 cached_cons -= nr_frags; in xskq_cons_read_desc_batch()
270 xskq_cons_release_n(q, cached_cons - q->cached_cons); in xskq_cons_read_desc_batch()
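/* A minimal sketch (hypothetical driver code, not from this header) of how
 * a zero-copy driver consumes the batch read above through the driver-facing
 * xsk_tx_peek_release_desc_batch() wrapper, assuming its current
 * (pool, budget) form. my_txq, my_xmit_desc() and my_kick_tx() are made-up
 * driver helpers; only the shape of the loop is the point.
 */
struct my_txq;
void my_xmit_desc(struct my_txq *txq, const struct xdp_desc *desc);
void my_kick_tx(struct my_txq *txq);

static void my_xmit_zc(struct my_txq *txq, struct xsk_buff_pool *pool, u32 budget)
{
	u32 nb_pkts, i;

	/* Fills pool->tx_descs[] with up to budget Tx descriptors and
	 * advances the consumer side of the Tx ring.
	 */
	nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
	for (i = 0; i < nb_pkts; i++)
		my_xmit_desc(txq, &pool->tx_descs[i]);
	if (nb_pkts)
		my_kick_tx(txq);
}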
274 /* Functions for consumers */
278 smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */ in __xskq_cons_release()
284 q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */ in __xskq_cons_peek()
293 static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max) in xskq_cons_nb_entries() argument
295 u32 entries = q->cached_prod - q->cached_cons; in xskq_cons_nb_entries()
297 if (entries >= max) in xskq_cons_nb_entries()
298 return max; in xskq_cons_nb_entries()
301 entries = q->cached_prod - q->cached_cons; in xskq_cons_nb_entries()
303 return entries >= max ? max : entries; in xskq_cons_nb_entries()
313 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_addr_unchecked()
322 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_desc()
327 /* To improve performance in the xskq_cons_release functions, only update local state here.
333 q->cached_cons++; in xskq_cons_release()
338 q->cached_cons -= cnt; in xskq_cons_cancel_n()
344 return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer); in xskq_cons_present_entries()
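/* A minimal sketch (hypothetical caller, not in this header) of the
 * peek/release pattern the consumer helpers above are built for.
 * xskq_cons_release() only advances the local cached_cons; the shared
 * ->consumer pointer is published by __xskq_cons_release(), either here
 * or the next time entries are fetched.
 */
static u32 drain_tx_descs(struct xsk_queue *q, struct xsk_buff_pool *pool,
			  u32 budget)
{
	struct xdp_desc desc;
	u32 done = 0;

	while (done < budget && xskq_cons_peek_desc(q, &desc, pool)) {
		/* hand desc.addr / desc.len to the driver here */
		xskq_cons_release(q);
		done++;
	}
	__xskq_cons_release(q);	/* D: make the progress visible to user space */
	return done;
}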
347 /* Functions for producers */
349 static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max) in xskq_prod_nb_free() argument
351 u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
353 if (free_entries >= max) in xskq_prod_nb_free()
354 return max; in xskq_prod_nb_free()
357 q->cached_cons = READ_ONCE(q->ring->consumer); in xskq_prod_nb_free()
358 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
360 return free_entries >= max ? max : free_entries; in xskq_prod_nb_free()
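/* A worked example of the free-entry arithmetic above, assuming a
 * hypothetical ring with nentries == 8: with cached_cons == 0xfffffffe
 * and cached_prod == 0x00000005 (the producer counter has wrapped),
 * cached_prod - cached_cons == 7 outstanding entries, so
 * free_entries == 8 - 7 == 1. Unsigned wrap-around keeps the result
 * correct even when the u32 counters overflow.
 */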
370 q->cached_prod -= cnt; in xskq_prod_cancel_n()
376 return -ENOSPC; in xskq_prod_reserve()
379 q->cached_prod++; in xskq_prod_reserve()
385 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_prod_reserve_addr()
388 return -ENOSPC; in xskq_prod_reserve_addr()
391 ring->desc[q->cached_prod++ & q->ring_mask] = addr; in xskq_prod_reserve_addr()
398 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; in xskq_prod_write_addr_batch()
402 cached_prod = q->cached_prod; in xskq_prod_write_addr_batch()
404 ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr; in xskq_prod_write_addr_batch()
405 q->cached_prod = cached_prod; in xskq_prod_write_addr_batch()
411 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; in xskq_prod_reserve_desc()
415 return -ENOBUFS; in xskq_prod_reserve_desc()
418 idx = q->cached_prod++ & q->ring_mask; in xskq_prod_reserve_desc()
419 ring->desc[idx].addr = addr; in xskq_prod_reserve_desc()
420 ring->desc[idx].len = len; in xskq_prod_reserve_desc()
421 ring->desc[idx].options = flags; in xskq_prod_reserve_desc()
428 smp_store_release(&q->ring->producer, idx); /* B, matches C */ in __xskq_prod_submit()
433 __xskq_prod_submit(q, q->cached_prod); in xskq_prod_submit()
438 __xskq_prod_submit(q, q->ring->producer + nb_entries); in xskq_prod_submit_n()
444 return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer); in xskq_prod_is_empty()
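/* A minimal sketch (hypothetical caller, not in this header) of the
 * producer-side reserve/submit pattern, here for a completion ring.
 * The entry is written while only cached_prod moves; the
 * smp_store_release() in __xskq_prod_submit() then publishes it to the
 * user-space consumer.
 */
static int complete_tx_addr(struct xsk_queue *cq, u64 addr)
{
	int err;

	err = xskq_prod_reserve_addr(cq, addr);	/* -ENOSPC if the ring is full */
	if (err)
		return err;
	xskq_prod_submit(cq);	/* B: advance ->producer */
	return 0;
}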
451 return q ? q->invalid_descs : 0; in xskq_nb_invalid_descs()
456 return q ? q->queue_empty_descs : 0; in xskq_nb_queue_empty_descs()