Searched refs:nentries (Results 1 – 4 of 4) sorted by relevance
/net/xdp/
xsk_queue.c
   23  return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64);           in xskq_umem_get_ring_size()
   28  return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc);    in xskq_rxtx_get_ring_size()
   31  struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)               in xskq_create() argument
   41  q->nentries = nentries;                                                    in xskq_create()
   42  q->ring_mask = nentries - 1;                                               in xskq_create()
   68  struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)                 in xsk_reuseq_prepare() argument
   73  if (nentries > (u32)roundup_pow_of_two(nentries))                          in xsk_reuseq_prepare()
   75  nentries = roundup_pow_of_two(nentries);                                   in xsk_reuseq_prepare()
   77  newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);         in xsk_reuseq_prepare()
   82  newq->nentries = nentries;                                                 in xsk_reuseq_prepare()
  [all …]
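The xskq_create() and xsk_reuseq_prepare() hits above both depend on nentries being a power of two: the queue stores ring_mask = nentries - 1 so a free-running index can be wrapped with a single AND, and xsk_reuseq_prepare() rounds the requested count up (rejecting a request whose round-up would overflow a u32, the line-73 check). A standalone userspace sketch of that masking pattern, with a hypothetical helper standing in for the kernel's roundup_pow_of_two():

/* Standalone sketch, not kernel code: power-of-two ring sizing and masking. */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's roundup_pow_of_two();
 * ignores the u32-overflow case that line 73 above guards against. */
static uint32_t roundup_pow_of_two_u32(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t nentries  = roundup_pow_of_two_u32(1000); /* 1000 -> 1024 */
	uint32_t ring_mask = nentries - 1;                 /* 1023 == 0x3ff */
	uint32_t producer  = 5000;                         /* free-running counter */

	/* One AND replaces a modulo because nentries is a power of two. */
	printf("slot = %u\n", producer & ring_mask);       /* 5000 % 1024 == 904 */
	return 0;
}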
xsk_queue.h
   38  u32 nentries;                                                  member
  111  u32 free_entries = q->nentries - (producer - q->cons_tail);   in xskq_nb_free()
  118  return q->nentries - (producer - q->cons_tail);               in xskq_nb_free()
  368  q->nentries;                                                  in xskq_full_desc()
  378  struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
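In xskq_nb_free() the producer and cons_tail fields are free-running u32 counters, so the unsigned difference producer - q->cons_tail is the number of occupied slots even after the counters wrap, and subtracting that from nentries yields the free space. A small self-contained sketch of that arithmetic (toy struct and field names, not the kernel's):

/* Sketch of the xskq_nb_free() arithmetic with toy types. */
#include <assert.h>
#include <stdint.h>

struct toy_queue {
	uint32_t nentries;   /* ring capacity */
	uint32_t producer;   /* free-running producer counter */
	uint32_t cons_tail;  /* free-running consumer counter */
};

static uint32_t toy_nb_free(const struct toy_queue *q)
{
	/* Unsigned subtraction stays correct across counter wrap-around. */
	return q->nentries - (q->producer - q->cons_tail);
}

int main(void)
{
	/* Producer has wrapped past UINT32_MAX; consumer is 100 entries behind. */
	struct toy_queue q = {
		.nentries  = 512,
		.producer  = 10,
		.cons_tail = UINT32_MAX - 89,
	};

	assert(q.producer - q.cons_tail == 100); /* occupied slots */
	assert(toy_nb_free(&q) == 412);          /* 512 - 100 free */
	return 0;
}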
xsk_diag.c
   31  dr.entries = queue->nentries;  in xsk_diag_put_ring()
/net/bridge/netfilter/
ebtables.c
  184  int i, nentries;                                                      in ebt_do_table() local
  200  cb_base = COUNTER_BASE(private->counters, private->nentries,         in ebt_do_table()
  207  nentries = private->hook_entry[hook]->nentries;                       in ebt_do_table()
  213  while (i < nentries) {                                                in ebt_do_table()
  259  nentries = chaininfo->nentries;                                       in ebt_do_table()
  285  nentries = chaininfo->nentries;                                       in ebt_do_table()
  439  newinfo->nentries = repl->nentries;                                   in ebt_verify_pointers()
  526  *n = ((struct ebt_entries *)e)->nentries;                             in ebt_check_entry_size_and_hooks()
  764  int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;   in check_chainloops() local
  768  while (pos < nentries || chain_nr != -1) {                            in check_chainloops()
  [all …]
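In ebt_do_table() and check_chainloops(), nentries is the per-chain rule count: the walker evaluates entries with `while (i < nentries)` and either falls through to the next rule or stops on a terminal verdict. A much-simplified sketch of that control flow, using toy structures and a toy verdict convention rather than the variable-length ebt_entry layout:

/* Toy sketch of bounding a chain walk by nentries; not the ebtables types. */
#include <stdio.h>

struct toy_rule {
	const char *name;
	int verdict;             /* toy convention: -1 = continue, >= 0 = final verdict */
};

struct toy_chain {
	int nentries;            /* number of rules in this chain */
	struct toy_rule *rules;  /* contiguous array of rules */
};

static int toy_do_chain(const struct toy_chain *chain)
{
	int i = 0;

	while (i < chain->nentries) {
		if (chain->rules[i].verdict >= 0)
			return chain->rules[i].verdict; /* terminal verdict ends the walk */
		i++;                                    /* otherwise fall through to next rule */
	}
	return 0; /* all nentries rules evaluated; fall back to chain policy */
}

int main(void)
{
	struct toy_rule rules[] = {
		{ "log",  -1 },
		{ "drop",  1 },
	};
	struct toy_chain chain = { .nentries = 2, .rules = rules };

	printf("verdict = %d\n", toy_do_chain(&chain)); /* -> 1 */
	return 0;
}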