
Searched refs:elem_size (Results 1 – 8 of 8) sorted by relevance

/kernel/bpf/
arraymap.c
    35  ptr = __alloc_percpu_gfp(array->elem_size, 8,   in bpf_array_alloc_percpu()
    75  u32 elem_size, index_mask, max_entries;   in array_map_alloc() [local]
    81  elem_size = round_up(attr->value_size, 8);   in array_map_alloc()
   108  array_size += (u64) max_entries * elem_size;   in array_map_alloc()
   113  cost += (u64)attr->max_entries * elem_size * num_possible_cpus();   in array_map_alloc()
   131  array->elem_size = elem_size;   in array_map_alloc()
   151  return array->value + array->elem_size * (index & array->index_mask);   in array_map_lookup_elem()
   173  u64 range = array->elem_size;   in array_map_direct_value_meta()
   189  u32 elem_size = round_up(map->value_size, 8);   in array_map_gen_lookup() [local]
   203  if (is_power_of_2(elem_size)) {   in array_map_gen_lookup()
   [all …]
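
The arraymap.c hits capture the whole array map indexing scheme: the value size is rounded up to 8 bytes (line 81), total size and cost are computed in u64 to avoid 32-bit overflow (lines 108 and 113), and a lookup is plain pointer arithmetic masked by index_mask (line 151), which also keeps a misspeculated out-of-range index inside the allocation. A minimal userspace model of that arithmetic follows; struct model_array and roundup_pow_of_two32() are simplified stand-ins for the kernel's struct bpf_array and roundup_pow_of_two(), not the real definitions.

    #include <stdint.h>
    #include <stdlib.h>

    #define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    /* simplified stand-in for struct bpf_array */
    struct model_array {
            uint32_t max_entries;
            uint32_t elem_size;    /* round_up(value_size, 8), as on line 81 */
            uint32_t index_mask;   /* roundup_pow_of_two(max_entries) - 1 */
            char value[];          /* max_entries * elem_size bytes of storage */
    };

    static uint32_t roundup_pow_of_two32(uint32_t n)
    {
            uint32_t p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    static struct model_array *model_alloc(uint32_t value_size, uint32_t max_entries)
    {
            uint32_t elem_size = ROUND_UP(value_size, 8);
            /* widen before multiplying, as the (u64) casts on lines 108/113 do */
            uint64_t array_size = (uint64_t)max_entries * elem_size;
            struct model_array *a = calloc(1, sizeof(*a) + array_size);

            if (!a)
                    return NULL;
            a->max_entries = max_entries;
            a->elem_size = elem_size;
            a->index_mask = roundup_pow_of_two32(max_entries) - 1;
            return a;
    }

    static void *model_lookup(struct model_array *a, uint32_t index)
    {
            if (index >= a->max_entries)
                    return NULL;
            /* the mask bounds the offset even if the branch above is misspeculated */
            return a->value + (uint64_t)a->elem_size * (index & a->index_mask);
    }

Line 203's is_power_of_2(elem_size) test exploits the same layout at JIT time: when elem_size is a power of two, array_map_gen_lookup() can emit a shift instead of a multiply for the element offset.
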
stackmap.c
    63  u64 elem_size = sizeof(struct stack_map_bucket) +   in prealloc_elems_and_freelist() [local]
    67  smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,   in prealloc_elems_and_freelist()
    76  pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,   in prealloc_elems_and_freelist()
   455  u32 init_nr, trace_nr, copy_len, elem_size, num_elem;   in BPF_CALL_4() [local]
   470  elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)   in BPF_CALL_4()
   472  if (unlikely(size % elem_size))   in BPF_CALL_4()
   475  num_elem = size / elem_size;   in BPF_CALL_4()
   491  copy_len = trace_nr * elem_size;   in BPF_CALL_4()
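
Two different elem_size computations appear in stackmap.c: prealloc_elems_and_freelist() sizes whole freelist slots (bucket header plus stored frames, line 63), while the BPF_CALL_4() helper picks a per-frame element size, struct bpf_stack_build_id or a bare u64 address (line 470), and rejects user buffers that are not a whole multiple of it (line 472). A sketch of that validation, assuming a hypothetical stack_build_id_model in place of the kernel's struct bpf_stack_build_id:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* hypothetical stand-in for struct bpf_stack_build_id */
    struct stack_build_id_model {
            int32_t       status;
            unsigned char build_id[20];
            uint64_t      offset_or_ip;
    };

    /* mirrors lines 470-475: choose the per-frame size, then require the
     * user buffer to hold a whole number of frames */
    static long stack_buf_elems(uint32_t size, bool user, bool user_build_id)
    {
            uint32_t elem_size = (user && user_build_id)
                               ? sizeof(struct stack_build_id_model)
                               : sizeof(uint64_t);

            if (size % elem_size)      /* partial trailing element: reject */
                    return -EINVAL;
            return size / elem_size;   /* num_elem, as on line 475 */
    }

copy_len on line 491 is then just the captured frame count times the same elem_size.
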
bpf_lru_list.c
   565  u32 node_offset, u32 elem_size,   in bpf_common_lru_populate() [argument]
   578  buf += elem_size;   in bpf_common_lru_populate()
   583  u32 node_offset, u32 elem_size,   in bpf_percpu_lru_populate() [argument]
   605  buf += elem_size;   in bpf_percpu_lru_populate()
   614  u32 elem_size, u32 nr_elems)   in bpf_lru_populate() [argument]
   617  bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,   in bpf_lru_populate()
   620  bpf_common_lru_populate(lru, buf, node_offset, elem_size,   in bpf_lru_populate()
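
All three populate helpers walk one flat allocation in elem_size strides; node_offset says where the intrusive list node lives inside each element, so one loop can thread elements of any layout onto a list. A generic userspace sketch of the pattern, with struct node and struct list as simplified stand-ins for the kernel's bpf_lru types:

    #include <stdint.h>

    struct node { struct node *next; };
    struct list { struct node *head; };

    static void populate(struct list *l, void *buf, uint32_t node_offset,
                         uint32_t elem_size, uint32_t nr_elems)
    {
            uint32_t i;

            for (i = 0; i < nr_elems; i++) {
                    /* the embedded node sits node_offset bytes into the element */
                    struct node *n = (struct node *)((char *)buf + node_offset);

                    n->next = l->head;
                    l->head = n;
                    buf = (char *)buf + elem_size;   /* the buf += elem_size stride */
            }
    }

bpf_lru_populate() on line 614 just dispatches this same walk to the per-CPU or common variant.
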
hashtab.c
    36  u32 elem_size; /* size of each element in bytes */   [member]
    97  return (struct htab_elem *) (htab->elems + i * htab->elem_size);   in get_htab_elem()
   142  htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,   in prealloc_init()
   179  htab->elem_size, num_entries);   in prealloc_init()
   183  htab->elem_size, num_entries);   in prealloc_init()
   339  htab->elem_size = sizeof(struct htab_elem) +   in htab_map_alloc()
   342  htab->elem_size += sizeof(void *);   in htab_map_alloc()
   344  htab->elem_size += round_up(htab->map.value_size, 8);   in htab_map_alloc()
   351  (u64) htab->elem_size * htab->map.max_entries;   in htab_map_alloc()
   357  cost += (u64) htab->elem_size * num_possible_cpus();   in htab_map_alloc()
   [all …]
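
Lines 339-344 spell out the hash table element layout in one computation: a struct htab_elem header, the key rounded up to 8 bytes, then either the value inline (also rounded up, line 344) or, for per-CPU maps, just a pointer to per-CPU storage (line 342); get_htab_elem() on line 97 then indexes the preallocated area by multiplying with that size. A sketch of the computation, with htab_elem_model as a simplified stand-in for struct htab_elem:

    #include <stdbool.h>
    #include <stdint.h>

    #define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

    /* simplified stand-in for struct htab_elem */
    struct htab_elem_model {
            void    *list_node[2];   /* placeholder for the embedded list/rcu heads */
            uint32_t hash;
            char     key[];          /* key, then inline value or per-CPU pointer */
    };

    static uint32_t htab_elem_size(uint32_t key_size, uint32_t value_size,
                                   bool percpu)
    {
            uint32_t elem_size = sizeof(struct htab_elem_model) +
                                 ROUND_UP(key_size, 8);

            if (percpu)
                    elem_size += sizeof(void *);           /* per-CPU value pointer */
            else
                    elem_size += ROUND_UP(value_size, 8);  /* value stored inline */
            return elem_size;
    }

    /* i-th preallocated element, mirroring get_htab_elem() on line 97 */
    static struct htab_elem_model *elem_at(char *elems, uint32_t elem_size,
                                           uint32_t i)
    {
            return (struct htab_elem_model *)(elems + (uint64_t)i * elem_size);
    }

The (u64) casts on lines 351 and 357 matter for the same reason as in arraymap.c: elem_size * max_entries can exceed 32 bits before the memory charge is accounted.
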
percpu_freelist.c
    55  void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,   in pcpu_freelist_populate() [argument]
    75  buf += elem_size;   in pcpu_freelist_populate()
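
pcpu_freelist_populate() strides a caller-provided buffer exactly like the LRU sketch above, and the stackmap.c hits show the calling convention: size one flat buffer as elem_size * max_entries, then hand it to the populate helper. A hedged sketch of that call pattern, reusing the struct list and populate() model from the bpf_lru_list.c sketch, with calloc standing in for bpf_map_area_alloc:

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    static int prealloc_and_populate(struct list *l, uint32_t elem_size,
                                     uint32_t max_entries, uint32_t node_offset)
    {
            /* calloc checks the count * size multiplication for overflow,
             * where a bare elem_size * max_entries could silently wrap */
            void *elems = calloc(max_entries, elem_size);

            if (!elems)
                    return -ENOMEM;
            populate(l, elems, node_offset, elem_size, max_entries);
            return 0;
    }
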
percpu_freelist.h
    28  void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
bpf_lru_list.h
    73  u32 elem_size, u32 nr_elems);
btf.c
  1931  u32 elem_size;   in btf_array_resolve() [local]
  1967  elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);   in btf_array_resolve()
  1978  if (array->nelems && elem_size > U32_MAX / array->nelems) {   in btf_array_resolve()
  1984  env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);   in btf_array_resolve()
  2005  u32 i, elem_size, elem_type_id;   in btf_array_seq_show() [local]
  2008  elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);   in btf_array_seq_show()
  2017  data += elem_size;   in btf_array_seq_show()
  3025  u32 elem_size;   in btf_resolve_valid() [local]
  3027  elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);   in btf_resolve_valid()
  3029  (array->nelems * elem_size ==   in btf_resolve_valid()
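
btf_array_resolve() computes an array type's total size as nelems * elem_size, and line 1978 guards that u32 multiplication by dividing first, so the check itself cannot wrap; btf_array_seq_show() then walks the elements with the usual data += elem_size stride (line 2017). The guard in isolation, as a standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* mirrors line 1978: refuse nelems * elem_size when the product
     * would not fit in a u32 */
    static bool array_total_size(uint32_t nelems, uint32_t elem_size,
                                 uint32_t *total)
    {
            if (nelems && elem_size > UINT32_MAX / nelems)
                    return false;
            *total = nelems * elem_size;   /* safe: bounded by the check above */
            return true;
    }

btf_resolve_valid() on line 3029 re-checks the same product against the size recorded during resolution.
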