Searched refs:max_entries (Results 1 – 21 of 21) sorted by relevance
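Most of the hits below (everything under /kernel/bpf/, plus bpf_trace.c) concern one user-visible field: max_entries in the BPF_MAP_CREATE attributes, which bpf_map_init_from_attr() (syscall.c:357 below) copies into struct bpf_map. Paraphrased from the uapi header (include/uapi/linux/bpf.h), trailing fields elided:

    union bpf_attr {
    	struct {	/* anonymous struct used by BPF_MAP_CREATE command */
    		__u32	map_type;	/* one of enum bpf_map_type */
    		__u32	key_size;	/* size of key in bytes */
    		__u32	value_size;	/* size of value in bytes */
    		__u32	max_entries;	/* max number of entries in a map */
    		/* ... */
    	};
    	/* ... */
    };

The remaining hits (stacktrace.c, trace_events_hist.c, lockdep.c) are unrelated max_entries identifiers that bound stack-trace buffers. How each map type interprets and polices the BPF field is what the results below show.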

/kernel/bpf/
arraymap.c
25 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
36 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_alloc_percpu()
57 if (attr->max_entries == 0 || attr->key_size != 4 || in array_map_alloc_check()
85 u32 elem_size, index_mask, max_entries; in array_map_alloc() local
93 max_entries = attr->max_entries; in array_map_alloc()
99 mask64 = fls_long(max_entries - 1); in array_map_alloc()
108 max_entries = index_mask + 1; in array_map_alloc()
110 if (max_entries < attr->max_entries) in array_map_alloc()
116 array_size += (u64) max_entries * sizeof(void *); in array_map_alloc()
123 array_size += PAGE_ALIGN((u64) max_entries * elem_size); in array_map_alloc()
[all …]
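A note on the arithmetic at arraymap.c lines 99 to 110 above: array_map_alloc() rounds the requested capacity up to the next power of two and keeps index_mask one below it, so lookups can clamp any index with "index & index_mask" and stay inside the allocation even under speculative execution (the kernel's bounds-masking mitigation for speculative out-of-bounds loads); the check at line 110 catches the overflow when the request is near U32_MAX. A user-space sketch of the same rounding, with the kernel's fls_long() approximated by a GCC builtin:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror the fls_long() arithmetic in array_map_alloc() above.
     * Assumes max_entries >= 2 so the builtin's argument is nonzero. */
    static uint32_t index_mask_for(uint32_t max_entries)
    {
    	unsigned int bits = 64 - __builtin_clzll(max_entries - 1);
    	return (uint32_t)((1ULL << bits) - 1);
    }

    int main(void)
    {
    	uint32_t mask = index_mask_for(100);
    	printf("mask=%#x rounded max_entries=%u\n", mask, mask + 1);
    	/* prints: mask=0x7f rounded max_entries=128 */
    	return 0;
    }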
reuseport_array.c
57 if (unlikely(index >= array->map.max_entries)) in reuseport_array_lookup_elem()
71 if (index >= map->max_entries) in reuseport_array_delete_elem()
128 for (i = 0; i < map->max_entries; i++) { in reuseport_array_free()
162 array_size += (u64)attr->max_entries * sizeof(struct sock *); in reuseport_array_alloc()
263 if (index >= map->max_entries) in bpf_fd_reuseport_array_update_elem()
340 if (index >= array->map.max_entries) { in reuseport_array_get_next_key()
345 if (index == array->map.max_entries - 1) in reuseport_array_get_next_key()
cpumap.c
94 if (attr->max_entries == 0 || attr->key_size != 4 || in cpu_map_alloc()
107 if (cmap->map.max_entries > NR_CPUS) { in cpu_map_alloc()
113 cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *); in cpu_map_alloc()
123 cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * in cpu_map_alloc()
538 if (key_cpu >= map->max_entries) in cpu_map_delete_elem()
559 if (unlikely(key_cpu >= cmap->map.max_entries)) in cpu_map_update_elem()
605 for (i = 0; i < cmap->map.max_entries; i++) { in cpu_map_free()
624 if (key >= map->max_entries) in __cpu_map_lookup_elem()
645 if (index >= cmap->map.max_entries) { in cpu_map_get_next_key()
650 if (index == cmap->map.max_entries - 1) in cpu_map_get_next_key()
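cpu_map_alloc() above (cpumap.c:94, 107) treats the key as a CPU id, so max_entries is capped at NR_CPUS rather than being an arbitrary table size. A hedged libbpf-style declaration, not taken from these files, with an illustrative size:

    struct {
    	__uint(type, BPF_MAP_TYPE_CPUMAP);
    	__uint(key_size, sizeof(__u32));	/* key is a CPU id */
    	__uint(value_size, sizeof(__u32));	/* per-CPU queue size, on kernels of this vintage */
    	__uint(max_entries, 64);		/* must not exceed NR_CPUS */
    } cpu_map SEC(".maps");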
devmap.c
119 if (attr->max_entries == 0 || attr->key_size != 4 || in dev_map_init_map()
134 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); in dev_map_init_map()
140 cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); in dev_map_init_map()
156 dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries * in dev_map_init_map()
239 for (i = 0; i < dtab->map.max_entries; i++) { in dev_map_free()
264 if (index >= dtab->map.max_entries) { in dev_map_get_next_key()
269 if (index == dtab->map.max_entries - 1) in dev_map_get_next_key()
414 if (key >= map->max_entries) in __dev_map_lookup_elem()
559 if (k >= map->max_entries) in dev_map_delete_elem()
654 if (unlikely(i >= dtab->map.max_entries)) in __dev_map_update_elem()
[all …]
ringbuf.c
162 !is_power_of_2(attr->max_entries) || in ringbuf_map_alloc()
163 !PAGE_ALIGNED(attr->max_entries)) in ringbuf_map_alloc()
168 if (attr->max_entries > RINGBUF_MAX_DATA_SZ) in ringbuf_map_alloc()
180 attr->max_entries; in ringbuf_map_alloc()
185 rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node); in ringbuf_map_alloc()
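For BPF_MAP_TYPE_RINGBUF, max_entries is not an element count but the size of the data area in bytes, which is why ringbuf_map_alloc() above (lines 162 to 168) insists on a page-aligned power of two no larger than RINGBUF_MAX_DATA_SZ. A minimal libbpf-style declaration satisfying those checks:

    struct {
    	__uint(type, BPF_MAP_TYPE_RINGBUF);
    	__uint(max_entries, 256 * 4096);	/* 1 MiB: a page-aligned power of two */
    } rb SEC(".maps");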
map_in_map.c
51 inner_map_meta->max_entries = inner_map->max_entries; in bpf_map_meta_alloc()
hashtab.c
212 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
252 u32 num_entries = htab->map.max_entries; in prealloc_init()
389 if (attr->max_entries == 0 || attr->key_size == 0 || in htab_map_alloc_check()
439 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
441 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
442 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
447 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
463 (u64) htab->elem_size * htab->map.max_entries; in htab_map_alloc()
467 num_possible_cpus() * htab->map.max_entries; in htab_map_alloc()
884 if (atomic_inc_return(&htab->count) > htab->map.max_entries) in alloc_htab_elem()
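A hash table cannot bound its population by construction the way an array does, so hashtab.c:884 above enforces max_entries at element-allocation time with an atomic counter. A simplified sketch of that pattern; the real alloc_htab_elem() also handles the preallocated and LRU variants:

    /* Reject the insert if it would push the element count past the limit. */
    if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
    	atomic_dec(&htab->count);
    	return ERR_PTR(-E2BIG);
    }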
stackmap.c
71 smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries, in prealloc_elems_and_freelist()
81 smap->map.max_entries); in prealloc_elems_and_freelist()
105 if (attr->max_entries == 0 || attr->key_size != 4 || in stack_map_alloc()
119 n_buckets = roundup_pow_of_two(attr->max_entries); in stack_map_alloc()
124 err = bpf_map_charge_init(&mem, cost + attr->max_entries * in stack_map_alloc()
queue_stack_maps.c
52 if (attr->max_entries == 0 || attr->key_size != 0 || in queue_stack_map_alloc_check()
74 size = (u64) attr->max_entries + 1; in queue_stack_map_alloc()
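The "max_entries + 1" at queue_stack_maps.c:74 above is the classic circular-buffer trick: keeping one slot permanently empty lets head == tail mean only "empty", so full and empty stay distinguishable without a separate element count. A sketch of the resulting predicates, with field names following the kernel's struct bpf_queue_stack:

    /* qs->size == attr->max_entries + 1 */
    static bool queue_is_empty(struct bpf_queue_stack *qs)
    {
    	return qs->head == qs->tail;
    }

    static bool queue_is_full(struct bpf_queue_stack *qs)
    {
    	u32 head = qs->head + 1;

    	if (head >= qs->size)
    		head = 0;	/* wrap around */
    	return head == qs->tail;
    }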
lpm_trie.c
325 if (trie->n_entries == trie->map.max_entries) { in trie_update_elem()
553 if (attr->max_entries == 0 || in trie_alloc()
575 cost += (u64) attr->max_entries * cost_per_node; in trie_alloc()
bpf_local_storage.c
536 attr->max_entries || in bpf_local_storage_map_alloc_check()
bpf_struct_ops.c
550 if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 || in bpf_struct_ops_map_alloc_check()
local_storage.c
308 if (attr->max_entries) in cgroup_storage_map_alloc()
syscall.c
357 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
574 map->max_entries, in bpf_map_show_fdinfo()
3720 info.max_entries = map->max_entries; in bpf_map_get_info_by_fd()
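syscall.c:357 above is where the user-supplied attribute becomes map->max_entries; the same value is later echoed through fdinfo (line 574) and BPF_OBJ_GET_INFO_BY_FD (line 3720). A minimal runnable user-space sketch of setting it at creation time through the raw bpf(2) syscall (needs root or CAP_BPF to succeed):

    #include <linux/bpf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
    	union bpf_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.map_type    = BPF_MAP_TYPE_ARRAY;
    	attr.key_size    = 4;		/* array maps require 4-byte keys */
    	attr.value_size  = 8;
    	attr.max_entries = 256;		/* the field this search is about */

    	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    	if (fd < 0) {
    		perror("BPF_MAP_CREATE");
    		return 1;
    	}
    	printf("created array map fd=%d, max_entries=256\n", fd);
    	return 0;
    }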
core.c
1579 if (unlikely(index >= array->map.max_entries)) in ___bpf_prog_run()
verifier.c
5521 max = map->max_entries; in record_func_key()
11797 map_ptr->max_entries, 2); in fixup_bpf_calls()
/kernel/
stacktrace.c
276 .max_entries = size, in stack_trace_save()
300 .max_entries = size, in stack_trace_save_tsk()
323 .max_entries = size, in stack_trace_save_regs()
349 .max_entries = size, in stack_trace_save_tsk_reliable()
369 .max_entries = size, in stack_trace_save_user()
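The five hits in kernel/stacktrace.c above are one pattern repeated: each stack_trace_save*() wrapper records the caller's buffer capacity as .max_entries in the on-stack struct it hands to the architecture unwinder, which stops before overrunning the array. Caller-side sketch (kernel code):

    /* The unwinder writes at most ARRAY_SIZE(entries) frames. */
    unsigned long entries[16];
    unsigned int nr;

    nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);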
/kernel/bpf/preload/iterators/
iterators.bpf.c
19 __u32 max_entries; member
90 BPF_SEQ_PRINTF(seq, "%4u %-16s%6d\n", map->id, map->name, map->max_entries); in dump_bpf_map()
/kernel/trace/
bpf_trace.c
816 if (unlikely(index >= array->map.max_entries)) in get_map_perf_counter()
888 if (unlikely(index >= array->map.max_entries)) in __bpf_perf_event_output()
1029 if (unlikely(idx >= array->map.max_entries)) in BPF_CALL_2()
trace_events_hist.c
4618 unsigned int max_entries) in hist_trigger_stacktrace_print() argument
4624 for (i = 0; i < max_entries; i++) { in hist_trigger_stacktrace_print()
/kernel/locking/
lockdep.c
532 int max_entries; in save_trace() local
538 max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries - in save_trace()
541 if (max_entries <= 0) { in save_trace()
550 trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3); in save_trace()