/kernel/linux/linux-5.10/kernel/bpf/ |
D | queue_stack_maps.c |
     53  attr->value_size == 0 ||  in queue_stack_map_alloc_check()
     58  if (attr->value_size > KMALLOC_MAX_SIZE)  in queue_stack_map_alloc_check()
     75  cost = queue_size = sizeof(*qs) + size * attr->value_size;  in queue_stack_map_alloc()
    117  memset(value, 0, qs->map.value_size);  in __queue_map_get()
    122  ptr = &qs->elements[qs->tail * qs->map.value_size];  in __queue_map_get()
    123  memcpy(value, ptr, qs->map.value_size);  in __queue_map_get()
    147  memset(value, 0, qs->map.value_size);  in __stack_map_get()
    156  ptr = &qs->elements[index * qs->map.value_size];  in __stack_map_get()
    157  memcpy(value, ptr, qs->map.value_size);  in __stack_map_get()
    221  dst = &qs->elements[qs->head * qs->map.value_size];  in queue_stack_map_push_elem()
    [all …]
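
These hits show the queue/stack map treating its storage as one flat byte array: slot i starts at i * value_size and every push/pop copies exactly value_size bytes. A minimal user-space sketch of that contract, assuming libbpf's bpf_create_map(), bpf_map_update_elem() (push) and bpf_map_lookup_and_delete_elem() (pop) as shipped in this tree; the sizes are illustrative and the program needs CAP_SYS_ADMIN:

    #include <stdio.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    int main(void)
    {
        __u32 val = 42, out = 0;
        int fd;

        /* Queue/stack maps require key_size == 0; only value_size matters. */
        fd = bpf_create_map(BPF_MAP_TYPE_QUEUE, 0, sizeof(__u32), 16, 0);
        if (fd < 0) {
            perror("bpf_create_map");
            return 1;
        }

        /* Push: the kernel copies value_size bytes into the next free slot. */
        if (bpf_map_update_elem(fd, NULL, &val, BPF_ANY))
            perror("push");

        /* Pop: value_size bytes are copied back out of the oldest slot. */
        if (bpf_map_lookup_and_delete_elem(fd, NULL, &out))
            perror("pop");

        printf("popped %u\n", out);
        return 0;
    }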
|
D | map_iter.c |
    105  u32 key_acc_size, value_acc_size, key_size, value_size;  in bpf_iter_attach_map() local
    130  value_size = map->value_size;  in bpf_iter_attach_map()
    132  value_size = round_up(map->value_size, 8) * num_possible_cpus();  in bpf_iter_attach_map()
    134  if (key_acc_size > key_size || value_acc_size > value_size) {  in bpf_iter_attach_map()
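
The hit at line 132 of map_iter.c is the interesting one: for per-CPU maps the value size an iterator may access is round_up(value_size, 8) * num_possible_cpus(), not the raw value_size. A small sketch of the same arithmetic from user space, assuming libbpf's libbpf_num_possible_cpus(); the 12-byte value_size is a made-up example:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    /* User-space mirror of the kernel's round_up() for power-of-two alignment. */
    static unsigned int round_up_pow2(unsigned int n, unsigned int align)
    {
        return (n + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        unsigned int value_size = 12;   /* hypothetical map value_size */
        int ncpus = libbpf_num_possible_cpus();

        if (ncpus < 0) {
            fprintf(stderr, "cannot query possible CPUs\n");
            return 1;
        }

        /* Per-CPU maps expose one 8-byte-aligned slot per possible CPU. */
        printf("accessible value size: %u bytes\n",
               round_up_pow2(value_size, 8) * (unsigned int)ncpus);
        return 0;
    }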
|
D | map_in_map.c |
    49  inner_map_meta->value_size = inner_map->value_size;  in bpf_map_meta_alloc()
    77  meta0->value_size == meta1->value_size &&  in bpf_map_meta_equal()
|
D | stackmap.c |
     68  (u64)smap->map.value_size;  in prealloc_elems_and_freelist()
     92  u32 value_size = attr->value_size;  in stack_map_alloc() local
    106  value_size < 8 || value_size % 8)  in stack_map_alloc()
    111  if (value_size % sizeof(struct bpf_stack_build_id) ||  in stack_map_alloc()
    112  value_size / sizeof(struct bpf_stack_build_id)  in stack_map_alloc()
    115  } else if (value_size / 8 > sysctl_perf_event_max_stack)  in stack_map_alloc()
    125  (sizeof(struct stack_map_bucket) + (u64)value_size));  in stack_map_alloc()
    136  smap->map.value_size = value_size;  in stack_map_alloc()
    470  u32 max_depth = map->value_size / stack_map_data_size(map);  in BPF_CALL_3()
    770  memset(value + trace_len, 0, map->value_size - trace_len);  in bpf_stackmap_copy()
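
stack_map_alloc() ties value_size to the captured trace depth: it must be a nonzero multiple of 8 (or of sizeof(struct bpf_stack_build_id) in build-ID mode), and value_size / 8 may not exceed sysctl_perf_event_max_stack. A hedged user-space sketch that sizes a BPF_MAP_TYPE_STACK_TRACE map accordingly, assuming libbpf's bpf_create_map() and the usual perf_event_max_stack default of 127; needs CAP_SYS_ADMIN:

    #include <stdio.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    int main(void)
    {
        /* Capture up to 127 frames: value_size = frames * sizeof(__u64).
         * 127 is the usual kernel.perf_event_max_stack default; the kernel
         * rejects any value_size / 8 above that sysctl. */
        const unsigned int max_frames = 127;
        int fd = bpf_create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(__u32),
                                max_frames * sizeof(__u64), 1024, 0);

        if (fd < 0) {
            perror("bpf_create_map(BPF_MAP_TYPE_STACK_TRACE)");
            return 1;
        }
        printf("stack trace map fd %d, value_size %zu\n",
               fd, max_frames * sizeof(__u64));
        return 0;
    }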
|
D | local_storage.c |
    166  map->value_size,  in cgroup_storage_update_elem()
    172  memcpy(&new->data[0], value, map->value_size);  in cgroup_storage_update_elem()
    200  size = round_up(_map->value_size, 8);  in bpf_percpu_cgroup_storage_copy()
    234  size = round_up(_map->value_size, 8);  in bpf_percpu_cgroup_storage_update()
    295  if (attr->value_size == 0)  in cgroup_storage_map_alloc()
    298  if (attr->value_size > PAGE_SIZE)  in cgroup_storage_map_alloc()
    482  size = sizeof(struct bpf_storage_buffer) + map->value_size;  in bpf_cgroup_storage_calculate_size()
    486  size = map->value_size;  in bpf_cgroup_storage_calculate_size()
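
cgroup_storage_map_alloc() only accepts 0 < value_size <= PAGE_SIZE, and the per-CPU copy paths again round the value to 8 bytes. A tiny stand-alone mirror of that bounds check, assuming the running system's page size matches the kernel's PAGE_SIZE; the function name is hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Mirror of the creation-time check above: the kernel rejects
     * value_size == 0 and value_size > PAGE_SIZE for cgroup storage. */
    static bool cgroup_storage_value_size_ok(unsigned int value_size)
    {
        long page_size = sysconf(_SC_PAGESIZE);

        return value_size != 0 && value_size <= (unsigned long)page_size;
    }

    int main(void)
    {
        printf("64     -> %d\n", cgroup_storage_value_size_ok(64));
        printf("0      -> %d\n", cgroup_storage_value_size_ok(0));
        printf("65536  -> %d\n", cgroup_storage_value_size_ok(65536));
        return 0;
    }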
|
D | hashtab.c |
     267  u32 size = round_up(htab->map.value_size, 8);  in prealloc_init()
     390  attr->value_size == 0)  in htab_map_alloc_check()
     399  if (attr->value_size >= KMALLOC_MAX_SIZE -  in htab_map_alloc_check()
     454  htab->elem_size += round_up(htab->map.value_size, 8);  in htab_map_alloc()
     466  cost += (u64) round_up(htab->map.value_size, 8) *  in htab_map_alloc()
     811  memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);  in pcpu_copy_value()
     813  u32 size = round_up(htab->map.value_size, 8);  in pcpu_copy_value()
     834  u32 size = round_up(htab->map.value_size, 8);  in pcpu_init_value()
     861  u32 size = htab->map.value_size;  in alloc_htab_elem()
    1078  memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size);  in htab_lru_map_update_elem()
    [all …]
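
The hash table rounds both key and value to 8 bytes when sizing elements and charging memory (the hits at lines 454 and 466). A rough cost-model sketch of that arithmetic; HTAB_ELEM_OVERHEAD stands in for the kernel-internal sizeof(struct htab_elem) and is only an illustrative guess, not an ABI value:

    #include <stdio.h>

    #define HTAB_ELEM_OVERHEAD 48   /* illustrative guess, not an ABI value */

    static unsigned long round_up8(unsigned long n)
    {
        return (n + 7) & ~7UL;
    }

    /* Per-element cost of a preallocated BPF hash map, following the
     * round_up(..., 8) pattern visible in htab_map_alloc() above. */
    static unsigned long htab_elem_cost(unsigned int key_size,
                                        unsigned int value_size)
    {
        return HTAB_ELEM_OVERHEAD + round_up8(key_size) + round_up8(value_size);
    }

    int main(void)
    {
        printf("key 4 / value 12 -> ~%lu bytes per element\n",
               htab_elem_cost(4, 12));
        return 0;
    }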
|
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/prog_tests/ |
D | btf.c |
     72  __u32 value_size;  member
    140  .value_size = 180,
    195  .value_size = 68,
    220  .value_size = 16,
    261  .value_size = 48,
    306  .value_size = 48,
    328  .value_size = 4,
    350  .value_size = 4,
    372  .value_size = 4,
    397  .value_size = 4,
    [all …]
|
/kernel/linux/linux-5.10/tools/lib/bpf/ |
D | libbpf_probes.c |
    204  int key_size, value_size, max_entries, map_flags;  in bpf_probe_map_type() local
    210  value_size = sizeof(__u32);  in bpf_probe_map_type()
    216  value_size = sizeof(__u64);  in bpf_probe_map_type()
    220  value_size = sizeof(__u64);  in bpf_probe_map_type()
    226  value_size = sizeof(__u64);  in bpf_probe_map_type()
    237  value_size = 8;  in bpf_probe_map_type()
    246  value_size = 0;  in bpf_probe_map_type()
    292  attr.value_size = value_size;  in bpf_probe_map_type()
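
bpf_probe_map_type() picks the smallest legal key_size/value_size for each map type and simply tries to create the map. A short usage sketch, assuming libbpf's public bpf_probe_map_type() as found in this tree:

    #include <stdbool.h>
    #include <stdio.h>
    #include <bpf/libbpf.h>
    #include <linux/bpf.h>

    int main(void)
    {
        /* bpf_probe_map_type() creates a throwaway map with the minimal
         * key_size/value_size chosen above and reports whether the running
         * kernel accepted it. */
        bool ok = bpf_probe_map_type(BPF_MAP_TYPE_QUEUE, 0);

        printf("BPF_MAP_TYPE_QUEUE supported: %s\n", ok ? "yes" : "no");
        return 0;
    }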
|
/kernel/linux/linux-5.10/tools/bpf/bpftool/ |
D | map.c |
     91  return malloc(round_up(info->value_size, 8) *  in alloc_value()
     94  return malloc(info->value_size);  in alloc_value()
    127  step = round_up(map_info->value_size, 8);  in do_dump_btf()
    167  print_hex_data_json(value, info->value_size);  in print_entry_json()
    182  step = round_up(info->value_size, 8);  in print_entry_json()
    196  info->value_size);  in print_entry_json()
    276  break_names = info->key_size > 16 || info->value_size > 16;  in print_entry_plain()
    277  single_line = info->key_size + info->value_size <= 24 &&  in print_entry_plain()
    287  if (info->value_size) {  in print_entry_plain()
    289  fprint_hex(stdout, value, info->value_size, " ");  in print_entry_plain()
    [all …]
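
bpftool sizes its dump buffer the same way the kernel exports per-CPU values: round_up(value_size, 8) bytes per possible CPU, plain value_size otherwise. A sketch of that allocation rule, assuming libbpf's libbpf_num_possible_cpus(); alloc_value_buf() is a hypothetical helper, not bpftool code:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <bpf/libbpf.h>

    /* Allocate a buffer for one map value: per-CPU maps export one
     * 8-byte-aligned slot per possible CPU, every other map type needs
     * exactly value_size bytes. */
    static void *alloc_value_buf(unsigned int value_size, bool per_cpu)
    {
        int ncpus;

        if (!per_cpu)
            return malloc(value_size);

        ncpus = libbpf_num_possible_cpus();
        if (ncpus < 0)
            return NULL;
        return malloc((size_t)((value_size + 7) & ~7u) * (unsigned int)ncpus);
    }

    int main(void)
    {
        void *buf = alloc_value_buf(12, true);   /* hypothetical 12-byte value */

        printf("per-CPU buffer %sallocated\n", buf ? "" : "not ");
        free(buf);
        return 0;
    }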
|
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/map_tests/ |
D | htab_map_batch_ops.c |
     83  int err, step, value_size;  in __test_map_lookup_and_delete_batch() local
     91  .value_size = sizeof(int),  in __test_map_lookup_and_delete_batch()
    103  value_size = is_pcpu ? sizeof(value) : sizeof(int);  in __test_map_lookup_and_delete_batch()
    131  memset(values, 0, max_entries * value_size);  in __test_map_lookup_and_delete_batch()
    150  memset(values, 0, max_entries * value_size);  in __test_map_lookup_and_delete_batch()
    162  total * value_size,  in __test_map_lookup_and_delete_batch()
    216  memset(values, 0, max_entries * value_size);  in __test_map_lookup_and_delete_batch()
    225  total * value_size,  in __test_map_lookup_and_delete_batch()
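
The batch test sizes its values buffer as max_entries * value_size and then drains the map through the batch API. A hedged sketch of a single bpf_map_lookup_batch() call over a plain (non-per-CPU) hash map, using libbpf's batch API and DECLARE_LIBBPF_OPTS() from this tree; needs CAP_SYS_ADMIN:

    #include <errno.h>
    #include <stdio.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    int main(void)
    {
        DECLARE_LIBBPF_OPTS(bpf_map_batch_opts, opts,
            .elem_flags = 0,
            .flags = 0,
        );
        const __u32 max_entries = 8;
        int keys[8], values[8];            /* value_size == sizeof(int) here */
        __u32 batch = 0, count = max_entries, i;
        int fd;

        fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), sizeof(int),
                            max_entries, 0);
        if (fd < 0) {
            perror("bpf_create_map");
            return 1;
        }

        /* Populate the map so the batch lookup has something to return. */
        for (i = 0; i < max_entries; i++) {
            int k = i, v = i * 10;

            bpf_map_update_elem(fd, &k, &v, BPF_ANY);
        }

        /* One call can drain the whole map; the values buffer must hold
         * count * value_size bytes (count * round_up(value_size, 8) * nr_cpus
         * for per-CPU maps, as in the test above).  ENOENT signals that the
         * last bucket has been reached. */
        if (bpf_map_lookup_batch(fd, NULL, &batch, keys, values, &count,
                                 &opts) && errno != ENOENT) {
            perror("bpf_map_lookup_batch");
            return 1;
        }
        printf("fetched %u entries\n", count);
        return 0;
    }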
|
/kernel/linux/linux-5.10/drivers/md/persistent-data/ |
D | dm-btree-internal.h |
     35  __le32 value_size;  member
    120  uint32_t value_size = le32_to_cpu(n->header.value_size);  in value_ptr() local
    121  return value_base(n) + (value_size * index);  in value_ptr()
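
value_ptr() locates the i-th value at value_base(n) + value_size * index, with value_size read from the node header. A simplified, self-contained illustration of that addressing scheme; the toy_node layout is invented for the example and is not the on-disk dm-btree format:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-in for a packed btree node: a header carrying
     * value_size, an array of keys, then the values packed back-to-back. */
    struct toy_node_header {
        uint32_t nr_entries;
        uint32_t max_entries;
        uint32_t value_size;
    };

    struct toy_node {
        struct toy_node_header header;
        uint64_t keys[4];
        unsigned char values[4 * 16];   /* max_entries * value_size */
    };

    /* Same addressing scheme as value_ptr() above: the i-th value starts
     * value_size * index bytes into the value area. */
    static void *toy_value_ptr(struct toy_node *n, uint32_t index)
    {
        return n->values + (size_t)n->header.value_size * index;
    }

    int main(void)
    {
        struct toy_node n = {
            .header = { .nr_entries = 2, .max_entries = 4, .value_size = 16 },
        };
        const char *msg = "second value";

        memcpy(toy_value_ptr(&n, 1), msg, strlen(msg) + 1);
        printf("%s\n", (char *)toy_value_ptr(&n, 1));
        return 0;
    }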
|
D | dm-btree-remove.c |
     59  uint32_t value_size = le32_to_cpu(n->header.value_size);  in node_shift() local
     70  (nr_entries - shift) * value_size);  in node_shift()
     78  nr_entries * value_size);  in node_shift()
     85  uint32_t value_size = le32_to_cpu(left->header.value_size);  in node_copy() local
     86  BUG_ON(value_size != le32_to_cpu(right->header.value_size));  in node_copy()
     96  shift * value_size);  in node_copy()
    104  shift * value_size);  in node_copy()
    115  uint32_t value_size = le32_to_cpu(n->header.value_size);  in delete_at() local
    125  nr_to_copy * value_size);  in delete_at()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlxsw/ |
D | spectrum_dpipe.c |
    137  match_value->value_size = sizeof(u32);  in mlxsw_sp_erif_entry_prepare()
    138  match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);  in mlxsw_sp_erif_entry_prepare()
    143  action_value->value_size = sizeof(u32);  in mlxsw_sp_erif_entry_prepare()
    144  action_value->value = kmalloc(action_value->value_size, GFP_KERNEL);  in mlxsw_sp_erif_entry_prepare()
    421  match_value->value_size = sizeof(u32);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    422  match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    432  match_value->value_size = sizeof(u32);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    435  match_value->value_size = sizeof(struct in6_addr);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    442  match_value->value = kmalloc(match_value->value_size, GFP_KERNEL);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    447  action_value->value_size = sizeof(u64);  in mlxsw_sp_dpipe_table_host_entry_prepare()
    [all …]
|
/kernel/linux/linux-5.10/tools/bpf/bpftool/skeleton/ |
D | profiler.bpf.c |
    11  __uint(value_size, sizeof(int));
    18  __uint(value_size, sizeof(struct bpf_perf_event_value));
    25  __uint(value_size, sizeof(struct bpf_perf_event_value));
    32  __uint(value_size, sizeof(u64));
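
These fragments come from BTF-defined maps, where __uint(value_size, ...) fixes the value size when no value type is attached with __type(). For context, a complete definition of that shape, compiled as a BPF object with clang and assuming <bpf/bpf_helpers.h>; the map name and sizes are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* A BTF-defined map of the kind the fragments above belong to:
     * perf-event arrays take key_size/value_size rather than typed
     * key/value declarations. */
    struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
        __uint(max_entries, 1);
    } events SEC(".maps");

    char _license[] SEC("license") = "GPL";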
|
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/progs/ |
D | sockmap_verdict_prog.c |
    11  __uint(value_size, sizeof(int));
    18  __uint(value_size, sizeof(int));
    25  __uint(value_size, sizeof(int));
|
D | sample_map_ret0.c |
     8  .value_size = sizeof(long),
    15  .value_size = sizeof(long),
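
Here value_size appears in the older struct bpf_map_def style, where the whole map description is a designated initializer placed in the "maps" section. A complete example of that legacy form, assuming the struct bpf_map_def shipped in this tree's bpf_helpers.h; the map name and sizes are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Legacy map definition: type, key_size, value_size and max_entries
     * are all plain fields read by the libbpf loader. */
    struct bpf_map_def SEC("maps") counters = {
        .type        = BPF_MAP_TYPE_HASH,
        .key_size    = sizeof(__u32),
        .value_size  = sizeof(long),
        .max_entries = 256,
    };

    char _license[] SEC("license") = "GPL";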
|
D | test_pe_preserve_elems.c |
    11  __uint(value_size, sizeof(int));
    18  __uint(value_size, sizeof(int));
|
D | test_btf_map_in_map.c |
     25  __uint(value_size, sizeof(int));
     65  __uint(value_size, sizeof(int));
    115  __uint(value_size, sizeof(int));
|
D | test_map_in_map.c |
    14  __uint(value_size, sizeof(__u32));
    23  __uint(value_size, sizeof(__u32));
|
D | test_queue_stack_map.h |
    18  __uint(value_size, sizeof(__u32));
    26  __uint(value_size, sizeof(__u32));
|
D | map_ptr_kern.c |
    36  __u32 value_size;  member
    43  __u32 value_size, __u32 max_entries)  in check_bpf_map_fields() argument
    47  VERIFY(map->value_size == value_size);  in check_bpf_map_fields()
    60  VERIFY(indirect->value_size == direct->value_size);  in check_bpf_map_ptr()
    69  __u32 key_size, __u32 value_size, __u32 max_entries)  in check() argument
    72  VERIFY(check_bpf_map_fields(indirect, key_size, value_size,  in check()
|
/kernel/linux/linux-5.10/scripts/dtc/ |
D | fdtput.c |
    60  int value_size = 0; /* size of holding area */  in encode_value() local
    83  if (upto + len > value_size) {  in encode_value()
    84  value_size = (upto + len) + 500;  in encode_value()
    85  value = realloc(value, value_size);  in encode_value()
    88  "%d bytes\n", value_size);  in encode_value()
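
encode_value() grows its holding buffer on demand: whenever upto + len would exceed value_size, it reallocates with 500 bytes of slack. A stand-alone sketch of that pattern (with the realloc() failure path made leak-safe); append_bytes() is a hypothetical helper, not fdtput API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Grow-on-demand append: value_size tracks the capacity of the holding
     * area and is enlarged with some slack when the next chunk would not
     * fit.  The 500-byte slack mirrors the fdtput code above; it is a
     * tuning choice, not a requirement. */
    static int append_bytes(char **value, int *value_size, int *upto,
                            const void *chunk, int len)
    {
        if (*upto + len > *value_size) {
            char *new_buf;

            *value_size = (*upto + len) + 500;
            new_buf = realloc(*value, *value_size);
            if (!new_buf) {
                fprintf(stderr, "out of memory (%d bytes)\n", *value_size);
                return -1;
            }
            *value = new_buf;
        }
        memcpy(*value + *upto, chunk, len);
        *upto += len;
        return 0;
    }

    int main(void)
    {
        char *value = NULL;
        int value_size = 0, upto = 0;

        append_bytes(&value, &value_size, &upto, "hello ", 6);
        append_bytes(&value, &value_size, &upto, "world", 6);  /* includes NUL */
        printf("%s (capacity %d)\n", value, value_size);
        free(value);
        return 0;
    }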
|
/kernel/linux/linux-5.10/tools/perf/tests/ |
D | bpf-script-test-relocation.c |
    23  unsigned int value_size;  member
    31  .value_size = sizeof(int),
|
D | bpf-script-example.c |
    23  unsigned int value_size;  member
    31  .value_size = sizeof(int),
|
/kernel/linux/linux-5.10/tools/perf/util/ |
D | bpf_map.c |
    23  return malloc(round_up(def->value_size, 8) * sysconf(_SC_NPROCESSORS_CONF));  in bpf_map_def__alloc_value()
    25  return malloc(def->value_size);  in bpf_map_def__alloc_value()
|