
Ring queue helpers from net/xdp/xsk_queue.h (AF_XDP, Linux kernel)

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (q->cached_cons != q->cached_prod) {
                u32 idx = q->cached_cons & q->ring_mask;

                *addr = ring->desc[idx];
                return true;
        }
        return false;
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
                                           struct xdp_desc *d,
                                           struct xsk_buff_pool *pool)
{
        if (!xp_validate_desc(pool, d)) {
                q->invalid_descs++;
                return false;
        }
        return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
                                       struct xdp_desc *desc,
                                       struct xsk_buff_pool *pool)
{
        while (q->cached_cons != q->cached_prod) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                u32 idx = q->cached_cons & q->ring_mask;

                *desc = ring->desc[idx];
                if (xskq_cons_is_valid_desc(q, desc, pool))
                        return true;

                /* Skip the invalid descriptor and try the next one */
                q->cached_cons++;
        }
        return false;
}
static inline void __xskq_cons_release(struct xsk_queue *q)
{
        smp_mb(); /* D, matches A */
        WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
        /* Refresh the local pointer */
        q->cached_prod = READ_ONCE(q->ring->producer);
        smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
        __xskq_cons_release(q);
        __xskq_cons_peek(q);
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
        u32 entries = q->cached_prod - q->cached_cons;

        if (entries >= cnt)
                return true;

        __xskq_cons_peek(q);
        entries = q->cached_prod - q->cached_cons;
        return entries >= cnt;
}
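
One detail every helper above relies on: producer and consumer are free-running u32 counters that are only reduced modulo the ring size at access time (idx & ring_mask), so the fill level falls out of plain unsigned subtraction even across 32-bit wraparound, provided nentries is a power of two. A minimal userspace sketch of that arithmetic, with made-up values (not from the source):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Free-running 32-bit indices, as in xsk_queue: slots are
         * addressed with idx & ring_mask, and the fill level is
         * computed by unsigned subtraction, which is wrap-safe.
         */
        uint32_t nentries = 8, ring_mask = nentries - 1;
        uint32_t cons = 0xfffffffe;     /* about to wrap */
        uint32_t prod = cons + 5;       /* wraps around to 3 */

        printf("entries = %u\n", prod - cons);      /* 5, despite the wrap */
        printf("slot    = %u\n", prod & ring_mask); /* 3 & 7 = 3 */
        return 0;
}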
static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
        if (q->cached_prod == q->cached_cons)
                xskq_cons_get_entries(q);
        return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
                                       struct xdp_desc *desc,
                                       struct xsk_buff_pool *pool)
{
        if (q->cached_prod == q->cached_cons)
                xskq_cons_get_entries(q);
        return xskq_cons_read_desc(q, desc, pool);
}

static inline void xskq_cons_release(struct xsk_queue *q)
{
        /* To improve performance, only update local state here.
         * Reflect this to the global state when we get new entries
         * from the ring.
         */
        q->cached_cons++;
}
static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
                q->nentries;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}
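
The consumer helpers combine into a peek/release pattern: peek refreshes the cached producer pointer only when the cache looks empty, release bumps the local consumer count, and __xskq_cons_release() publishes it to the shared ring later. A minimal sketch of a hypothetical caller wired that way (drain_tx_ring() and the process() callback are illustrative names, not from the source):

/* Hypothetical caller: drain all currently visible Tx descriptors. */
static u32 drain_tx_ring(struct xsk_queue *q, struct xsk_buff_pool *pool,
                         bool (*process)(const struct xdp_desc *desc))
{
        struct xdp_desc desc;
        u32 handled = 0;

        while (xskq_cons_peek_desc(q, &desc, pool)) {
                if (!process(&desc))
                        break;                  /* retry this slot later */
                xskq_cons_release(q);           /* consume locally only */
                handled++;
        }
        __xskq_cons_release(q);                 /* publish consumer pointer */
        return handled;
}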
/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
        u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

        if (free_entries)
                return false;

        /* Refresh the local tail pointer */
        q->cached_cons = READ_ONCE(q->ring->consumer);
        free_entries = q->nentries - (q->cached_prod - q->cached_cons);
        return !free_entries;
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
        q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        q->cached_prod++;
        return 0;
}
static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        ring->desc[q->cached_prod++ & q->ring_mask] = addr;
        return 0;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
                                         u64 addr, u32 len)
{
        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
        u32 idx;

        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        idx = q->cached_prod++ & q->ring_mask;
        ring->desc[idx].addr = addr;
        ring->desc[idx].len = len;
        return 0;
}
static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
        smp_wmb(); /* B, matches C */
        WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
        __xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
        u32 idx = q->ring->producer;

        ring->desc[idx++ & q->ring_mask] = addr;
        __xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
        __xskq_prod_submit(q, q->ring->producer + nb_entries);
}
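
On the producer side the flow is reserve-then-submit: a reservation only advances the cached producer index and fills the slot, and nothing becomes visible to the consumer until a submit publishes the ring's producer pointer. A minimal sketch of one such caller (rx_push() is an illustrative name; the real callers live in net/xdp/xsk.c):

/* Hypothetical caller: enqueue one Rx descriptor and publish it. */
static int rx_push(struct xsk_queue *q, u64 addr, u32 len)
{
        int err;

        err = xskq_prod_reserve_desc(q, addr, len);
        if (err)
                return err;             /* -ENOSPC: ring full */

        /* Batch-friendly: several reservations may be made before a
         * single xskq_prod_submit() makes them all visible at once.
         */
        xskq_prod_submit(q);
        return 0;
}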
static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
        return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
        return q ? q->queue_empty_descs : 0;
}
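
The protocol implemented by these helpers is the classic single-producer/single-consumer ring: the producer writes the entry and then publishes its index (smp_wmb() orders the index store after the data store), while the consumer reads the index before the data (smp_rmb()) and orders its own index update after those reads (smp_mb()). Below is a self-contained userspace model of the same discipline using C11 acquire/release atomics in place of the explicit kernel barriers; all names here are illustrative, not from the source:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define NENTRIES  64                    /* must be a power of two */
#define RING_MASK (NENTRIES - 1)

struct spsc_ring {
        _Atomic uint32_t producer;      /* free-running */
        _Atomic uint32_t consumer;      /* free-running */
        uint64_t desc[NENTRIES];
};

/* Producer: store the entry, then publish the index with release
 * semantics (stands in for smp_wmb() + WRITE_ONCE() above).
 */
static bool spsc_produce(struct spsc_ring *r, uint64_t val)
{
        uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
        uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_acquire);

        if (prod - cons == NENTRIES)
                return false;           /* full */
        r->desc[prod & RING_MASK] = val;
        atomic_store_explicit(&r->producer, prod + 1, memory_order_release);
        return true;
}

/* Consumer: acquire the producer index, then read the entry
 * (stands in for READ_ONCE() + smp_rmb() above).
 */
static bool spsc_consume(struct spsc_ring *r, uint64_t *val)
{
        uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
        uint32_t prod = atomic_load_explicit(&r->producer, memory_order_acquire);

        if (prod == cons)
                return false;           /* empty */
        *val = r->desc[cons & RING_MASK];
        atomic_store_explicit(&r->consumer, cons + 1, memory_order_release);
        return true;
}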