
Searched refs: value_size (Results 1 – 13 of 13) sorted by relevance

/kernel/bpf/
queue_stack_maps.c
53 attr->value_size == 0 || in queue_stack_map_alloc_check()
58 if (attr->value_size > KMALLOC_MAX_SIZE) in queue_stack_map_alloc_check()
75 cost = queue_size = sizeof(*qs) + size * attr->value_size; in queue_stack_map_alloc()
129 memset(value, 0, qs->map.value_size); in __queue_map_get()
134 ptr = &qs->elements[qs->tail * qs->map.value_size]; in __queue_map_get()
135 memcpy(value, ptr, qs->map.value_size); in __queue_map_get()
164 memset(value, 0, qs->map.value_size); in __stack_map_get()
173 ptr = &qs->elements[index * qs->map.value_size]; in __stack_map_get()
174 memcpy(value, ptr, qs->map.value_size); in __stack_map_get()
243 dst = &qs->elements[qs->head * qs->map.value_size]; in queue_stack_map_push_elem()
[all …]
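
Taken together, the queue_stack_map_alloc_check() lines pin down the userspace contract for queue/stack maps: no key, and a value_size that is non-zero and at most KMALLOC_MAX_SIZE. A minimal sketch of a matching BPF_MAP_CREATE call (the helper name create_queue_map is ours, not a kernel or libbpf API):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_queue_map(unsigned int value_size, unsigned int max_entries)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_QUEUE;
        attr.key_size    = 0;            /* queue/stack maps are keyless */
        attr.value_size  = value_size;   /* 0 or > KMALLOC_MAX_SIZE is rejected */
        attr.max_entries = max_entries;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

The __queue_map_get() and __stack_map_get() hits show the runtime side: elements live in one flat buffer and are addressed as elements[index * value_size].
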
stackmap.c
64 (u64)smap->map.value_size; in prealloc_elems_and_freelist()
88 u32 value_size = attr->value_size; in stack_map_alloc() local
102 value_size < 8 || value_size % 8) in stack_map_alloc()
107 if (value_size % sizeof(struct bpf_stack_build_id) || in stack_map_alloc()
108 value_size / sizeof(struct bpf_stack_build_id) in stack_map_alloc()
111 } else if (value_size / 8 > sysctl_perf_event_max_stack) in stack_map_alloc()
124 (sizeof(struct stack_map_bucket) + (u64)value_size)); in stack_map_alloc()
135 smap->map.value_size = value_size; in stack_map_alloc()
358 u32 max_depth = map->value_size / stack_map_data_size(map); in BPF_CALL_3()
541 memset(value + trace_len, 0, map->value_size - trace_len); in bpf_stackmap_copy()
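
stack_map_alloc() constrains value_size more tightly: at least 8 and a multiple of 8 (one u64 instruction pointer per frame), capped at 8 * sysctl_perf_event_max_stack; with BPF_F_STACK_BUILD_ID the unit becomes sizeof(struct bpf_stack_build_id) instead. A hedged sketch, assuming the default kernel.perf_event_max_stack of 127 and the same headers as the create_queue_map() sketch above:

static int create_stack_trace_map(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_STACK_TRACE;
        attr.key_size    = sizeof(__u32);        /* stack id */
        attr.value_size  = 127 * sizeof(__u64);  /* one 8-byte IP per frame */
        attr.max_entries = 1024;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
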
map_in_map.c
55 inner_map_meta->value_size = inner_map->value_size; in bpf_map_meta_alloc()
83 meta0->value_size == meta1->value_size && in bpf_map_meta_equal()
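
bpf_map_meta_alloc() copies the inner map's value_size into the outer map's template at create time, and bpf_map_meta_equal() later rejects any inserted map whose value_size disagrees. The outer map's own value_size is sizeof(u32), since its values are map fds (see the fd_array_map_alloc_check() hit under arraymap.c below). A sketch with the headers from the first example, assuming inner_fd refers to an already-created map:

static int create_array_of_maps(int inner_fd)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type     = BPF_MAP_TYPE_ARRAY_OF_MAPS;
        attr.key_size     = sizeof(__u32);
        attr.value_size   = sizeof(__u32);  /* outer values are map fds */
        attr.max_entries  = 8;
        attr.inner_map_fd = inner_fd;       /* template for value_size checks */

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
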
local_storage.c
155 map->value_size, in cgroup_storage_update_elem()
161 memcpy(&new->data[0], value, map->value_size); in cgroup_storage_update_elem()
190 size = round_up(_map->value_size, 8); in bpf_percpu_cgroup_storage_copy()
225 size = round_up(_map->value_size, 8); in bpf_percpu_cgroup_storage_update()
281 if (attr->value_size == 0) in cgroup_storage_map_alloc()
284 if (attr->value_size > PAGE_SIZE) in cgroup_storage_map_alloc()
465 size = sizeof(struct bpf_storage_buffer) + map->value_size; in bpf_cgroup_storage_calculate_size()
469 size = map->value_size; in bpf_cgroup_storage_calculate_size()
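
cgroup_storage_map_alloc() bounds value_size to the interval (0, PAGE_SIZE]. A sketch of a matching create call, with the headers from the first example; the fixed key type comes from the UAPI, while the max_entries = 0 requirement is our recollection rather than something the hits above show (storage is sized per attached cgroup, not by max_entries):

static int create_cgroup_storage(unsigned int value_size)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_CGROUP_STORAGE;
        attr.key_size    = sizeof(struct bpf_cgroup_storage_key);
        attr.value_size  = value_size;  /* 0 and > PAGE_SIZE are rejected */
        attr.max_entries = 0;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
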
syscall.c
181 map->value_size = attr->value_size; in bpf_map_init_from_attr()
397 map->value_size, in bpf_map_show_fdinfo()
506 u32 key_size, value_size; in map_check_btf() local
520 value_type = btf_type_id_size(btf, &btf_value_id, &value_size); in map_check_btf()
521 if (!value_type || value_size != map->value_size) in map_check_btf()
535 map->value_size) { in map_check_btf()
538 map->spin_lock_off, map->value_size); in map_check_btf()
747 u32 value_size; in map_lookup_elem() local
782 value_size = round_up(map->value_size, 8) * num_possible_cpus(); in map_lookup_elem()
784 value_size = sizeof(u32); in map_lookup_elem()
[all …]
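
The map_lookup_elem() hit is worth reading from the userspace side: for per-CPU maps the kernel copies round_up(value_size, 8) bytes per possible CPU, so the caller's buffer must be sized accordingly. A sketch using libbpf's libbpf_num_possible_cpus() (an assumption; any reader of /sys/devices/system/cpu/possible works):

#include <bpf/libbpf.h>
#include <stdlib.h>

static void *alloc_percpu_value(size_t value_size)
{
        int ncpus = libbpf_num_possible_cpus();  /* possible, not online, CPUs */

        if (ncpus < 0)
                return NULL;
        return malloc(((value_size + 7) & ~(size_t)7) * (size_t)ncpus);
}
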
arraymap.c
56 attr->value_size == 0 || in array_map_alloc_check()
62 if (attr->value_size > KMALLOC_MAX_SIZE) in array_map_alloc_check()
81 elem_size = round_up(attr->value_size, 8); in array_map_alloc()
161 if (off >= map->value_size) in array_map_direct_value_addr()
189 u32 elem_size = round_up(map->value_size, 8); in array_map_gen_lookup()
241 size = round_up(map->value_size, 8); in bpf_percpu_array_copy()
297 value, map->value_size); in array_map_update_elem()
336 size = round_up(map->value_size, 8); in bpf_percpu_array_update()
477 if (attr->value_size != sizeof(u32)) in fd_array_map_alloc_check()
807 u32 elem_size = round_up(map->value_size, 8); in array_of_map_gen_lookup()
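
round_up(value_size, 8) recurs throughout arraymap.c because every element occupies an 8-byte-aligned slot. A worked example of the arithmetic:

#include <stdio.h>

#define ROUND_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned int value_size = 12;                      /* example */
        unsigned int elem_size  = ROUND_UP(value_size, 8); /* 16 */

        /* element i starts at value + i * elem_size; the trailing
         * 4 bytes of each slot are alignment padding */
        printf("elem_size = %u\n", elem_size);
        return 0;
}
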
reuseport_array.c
42 if (attr->value_size != sizeof(u32) && in reuseport_array_alloc_check()
43 attr->value_size != sizeof(u64)) in reuseport_array_alloc_check()
187 if (map->value_size != sizeof(u64)) in bpf_fd_reuseport_array_lookup_elem()
264 if (map->value_size == sizeof(u64)) { in bpf_fd_reuseport_array_update_elem()
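
reuseport_array_alloc_check() accepts exactly two value sizes: updates pass a socket fd (read as a u32 or u64 to match the map), while lookups return the u64 socket cookie and are only permitted for 8-byte maps, per bpf_fd_reuseport_array_lookup_elem(). A sketch with the headers from the first example:

static int create_reuseport_array(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
        attr.key_size    = sizeof(__u32);
        attr.value_size  = sizeof(__u64);  /* 8 so lookups can return the cookie */
        attr.max_entries = 64;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
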
hashtab.c
151 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
274 attr->value_size == 0) in htab_map_alloc_check()
283 if (attr->value_size >= KMALLOC_MAX_SIZE - in htab_map_alloc_check()
344 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
354 cost += (u64) round_up(htab->map.value_size, 8) * in htab_map_alloc()
703 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size); in pcpu_copy_value()
705 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
726 u32 size = round_up(htab->map.value_size, 8); in pcpu_init_value()
753 u32 size = htab->map.value_size; in alloc_htab_elem()
971 memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size); in htab_lru_map_update_elem()
[all …]
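
The hashtab hits sketch the element layout: the key is stored padded to 8 bytes and the value is sized as round_up(value_size, 8), which is why htab_lru_map_update_elem() copies the value to l_new->key + round_up(map->key_size, 8). The offset arithmetic, as a standalone sketch:

#include <stddef.h>

/* where the value starts inside an element, per the memcpy above */
static size_t htab_value_offset(size_t key_size)
{
        return (key_size + 7) & ~(size_t)7;  /* round_up(key_size, 8) */
}

/* e.g. key_size = 6, value_size = 12: the value begins 8 bytes into
 * the key area, and key + value together span 8 + 16 = 24 bytes past
 * the struct htab_elem header */
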
lpm_trie.c
286 size += trie->map.value_size; in lpm_trie_node_alloc()
297 trie->map.value_size); in lpm_trie_node_alloc()
559 attr->value_size < LPM_VAL_SIZE_MIN || in trie_alloc()
560 attr->value_size > LPM_VAL_SIZE_MAX) in trie_alloc()
574 attr->value_size + trie->data_size; in trie_alloc()
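
trie_alloc() clamps value_size between LPM_VAL_SIZE_MIN and LPM_VAL_SIZE_MAX, and each node carries data_size key bytes followed by value_size value bytes, per lpm_trie_node_alloc(). A sketch of an IPv4 trie with the headers from the first example; the ipv4_lpm_key layout mirrors struct bpf_lpm_trie_key (a __u32 prefixlen followed by key data), and BPF_F_NO_PREALLOC is required for LPM tries as far as we recall:

struct ipv4_lpm_key {
        __u32 prefixlen;  /* bpf_lpm_trie_key header */
        __u32 addr;       /* the trie's data_size bytes */
};

static int create_ipv4_lpm(unsigned int value_size)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_LPM_TRIE;
        attr.key_size    = sizeof(struct ipv4_lpm_key);
        attr.value_size  = value_size;  /* checked against LPM_VAL_SIZE_{MIN,MAX} */
        attr.max_entries = 128;
        attr.map_flags   = BPF_F_NO_PREALLOC;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
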
xskmap.c
91 attr->value_size != 4 || in xsk_map_alloc()
cpumap.c
92 attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) in cpu_map_alloc()
devmap.c
118 attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK) in dev_map_init_map()
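
xskmap, cpumap and devmap all hard-code value_size to 4: an AF_XDP socket fd for xskmap, an ifindex for devmap, and, if we recall this kernel era correctly, a per-CPU queue size for cpumap. One sketch stands in for all three (headers as in the first example):

static int create_devmap(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_DEVMAP;
        attr.key_size    = sizeof(__u32);
        attr.value_size  = sizeof(__u32);  /* an ifindex; anything else is rejected */
        attr.max_entries = 64;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
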
verifier.c
468 reg->map_ptr->value_size); in print_verifier_state()
2158 map->value_size, off, size); in check_map_access_type()
2164 map->value_size, off, size); in check_map_access_type()
2179 off + size > map->value_size) { in __check_map_access()
2181 map->value_size, off, size); in __check_map_access()
3461 meta->map_ptr->value_size, false, in check_func_arg()
4341 max = ptr_reg->map_ptr->value_size; in retrieve_ptr_limit()
8300 map->value_size, off); in replace_map_fd_with_map_ptr()
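
__check_map_access() is the verifier-side consumer of value_size: every load or store through a map value pointer must satisfy off + size <= value_size. A sketch of a program it accepts, written against libbpf's BTF-style map definition (our choice of style, not something the hits above prescribe):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, __u64);   /* value_size = 8 */
} counter SEC(".maps");

SEC("socket")
int count_packets(void *ctx)
{
        __u32 key = 0;
        __u64 *val = bpf_map_lookup_elem(&counter, &key);

        if (!val)
                return 0;
        /* off 0, size 8: within value_size, so the access is allowed;
         * an 8-byte read at (char *)val + 4 (off + size = 12 > 8)
         * would be rejected with "invalid access to map value" */
        __sync_fetch_and_add(val, 1);
        return 0;
}

char LICENSE[] SEC("license") = "GPL";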