| /tools/testing/selftests/bpf/prog_tests/ |
| D | lookup_and_delete.c |
    13   static int fill_values(int map_fd)   in fill_values() argument
    19   err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);   in fill_values()
    27   static int fill_values_percpu(int map_fd)   in fill_values_percpu() argument
    36   err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);   in fill_values_percpu()
    45   int *map_fd)   in setup_prog() argument
    66   *map_fd = bpf_map__fd(skel->maps.hash_map);   in setup_prog()
    67   if (!ASSERT_GE(*map_fd, 0, "bpf_map__fd"))   in setup_prog()
    102  int map_fd, err;   in test_lookup_and_delete_hash() local
    105  skel = setup_prog(BPF_MAP_TYPE_HASH, &map_fd);   in test_lookup_and_delete_hash()
    109  err = fill_values(map_fd);   in test_lookup_and_delete_hash()
    [all …]
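The fill_values()/fill_values_percpu() pair above differs only in that a per-CPU hash takes one value slot per possible CPU. A minimal sketch of that pattern, plus draining a key with bpf_map_lookup_and_delete_elem(); fill_and_drain(), the u32/u64 key/value layout and the key numbering are illustrative assumptions, not the selftest's code.

#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Hypothetical helper: fill one key of a per-CPU hash, then drain it. */
static int fill_and_drain(int map_fd)
{
	int nr_cpus = libbpf_num_possible_cpus();
	__u64 *values;
	__u32 key = 1;
	int i, err;

	if (nr_cpus < 0)
		return nr_cpus;

	/* A per-CPU map takes one value slot per possible CPU. */
	values = calloc(nr_cpus, sizeof(*values));
	if (!values)
		return -ENOMEM;

	for (i = 0; i < nr_cpus; i++)
		values[i] = key * 100 + i;

	err = bpf_map_update_elem(map_fd, &key, values, BPF_NOEXIST);
	if (!err)
		/* Returns the per-CPU values and removes the element atomically. */
		err = bpf_map_lookup_and_delete_elem(map_fd, &key, values);

	free(values);
	return err;
}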
|
| D | l4lb_all.c |
    26   int err, i, prog_fd, map_fd;   in test_l4lb() local
    41   map_fd = bpf_find_map(__func__, obj, "vip_map");   in test_l4lb()
    42   if (map_fd < 0)   in test_l4lb()
    44   bpf_map_update_elem(map_fd, &key, &value, 0);   in test_l4lb()
    46   map_fd = bpf_find_map(__func__, obj, "ch_rings");   in test_l4lb()
    47   if (map_fd < 0)   in test_l4lb()
    49   bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);   in test_l4lb()
    51   map_fd = bpf_find_map(__func__, obj, "reals");   in test_l4lb()
    52   if (map_fd < 0)   in test_l4lb()
    54   bpf_map_update_elem(map_fd, &real_num, &real_def, 0);   in test_l4lb()
    [all …]
|
| D | global_data.c |
    7    int i, err, map_fd;   in test_global_data_number() local
    10   map_fd = bpf_find_map(__func__, obj, "result_number");   in test_global_data_number()
    11   if (CHECK_FAIL(map_fd < 0))   in test_global_data_number()
    33   err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);   in test_global_data_number()
    42   int i, err, map_fd;   in test_global_data_string() local
    45   map_fd = bpf_find_map(__func__, obj, "result_string");   in test_global_data_string()
    46   if (CHECK_FAIL(map_fd < 0))   in test_global_data_string()
    62   err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);   in test_global_data_string()
    77   int i, err, map_fd;   in test_global_data_struct() local
    80   map_fd = bpf_find_map(__func__, obj, "result_struct");   in test_global_data_struct()
    [all …]
|
| D | map_init.c |
    20   static int map_populate(int map_fd, int num)   in map_populate() argument
    30   err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);   in map_populate()
    39   int *map_fd, int populate)   in setup() argument
    60   *map_fd = bpf_map__fd(skel->maps.hashmap1);   in setup()
    61   if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))   in setup()
    64   err = map_populate(*map_fd, populate);   in setup()
    71   close(*map_fd);   in setup()
    133  int map_fd, err;   in test_pcpu_map_init() local
    137  skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);   in test_pcpu_map_init()
    143  err = bpf_map_delete_elem(map_fd, &key);   in test_pcpu_map_init()
    [all …]
|
| D | bpf_obj_pinning.c |
    41   int map_fd = -1, map_fd2 = -1;   in bpf_obj_pinning_detached() local
    75   map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL);   in bpf_obj_pinning_detached()
    76   if (!ASSERT_GE(map_fd, 0, "map_fd"))   in bpf_obj_pinning_detached()
    82   err = bpf_obj_pin_opts(map_fd, map_name, &pin_opts);   in bpf_obj_pinning_detached()
    95   err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);   in bpf_obj_pinning_detached()
    106  if (map_fd >= 0)   in bpf_obj_pinning_detached()
    107  ASSERT_OK(close(map_fd), "close_map_fd");   in bpf_obj_pinning_detached()
    123  static void validate_pin(int map_fd, const char *map_name, int src_value,   in validate_pin() argument
    155  err = bpf_obj_pin_opts(map_fd, pin_path, &pin_opts);   in validate_pin()
    169  err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);   in validate_pin()
    [all …]
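The pinning test above uses bpf_obj_pin_opts() with extra pin options; a simpler sketch of the same create/pin/re-open/update flow with the plain bpf_obj_pin()/bpf_obj_get() calls is below. The map name, value, and /sys/fs/bpf path are made up for the example and assume a mounted bpffs.

#include <unistd.h>
#include <bpf/bpf.h>

static int pin_and_reopen(void)
{
	const char *path = "/sys/fs/bpf/demo_array";	/* assumed bpffs mount */
	int zero = 0, src_value = 42, dst_value = 0;
	int map_fd, map_fd2, err;

	/* 4-byte key, 4-byte value, single entry, same shape as the test map. */
	map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_array", 4, 4, 1, NULL);
	if (map_fd < 0)
		return map_fd;

	err = bpf_obj_pin(map_fd, path);	/* expose the fd as a bpffs file */
	if (err)
		goto out;

	err = bpf_map_update_elem(map_fd, &zero, &src_value, 0);
	if (err)
		goto out_unpin;

	map_fd2 = bpf_obj_get(path);		/* second fd to the same map */
	if (map_fd2 < 0) {
		err = map_fd2;
		goto out_unpin;
	}

	err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value);	/* sees 42 */
	close(map_fd2);
out_unpin:
	unlink(path);
out:
	close(map_fd);
	return err;
}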
|
| D | map_lock.c |
    23   int err, map_fd = *(u32 *) arg;   in parallel_map_access() local
    27   err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);   in parallel_map_access()
    53   int prog_fd, map_fd[2], vars[17] = {};   in test_map_lock() local
    64   map_fd[0] = bpf_find_map(__func__, obj, "hash_map");   in test_map_lock()
    65   if (CHECK_FAIL(map_fd[0] < 0))   in test_map_lock()
    67   map_fd[1] = bpf_find_map(__func__, obj, "array_map");   in test_map_lock()
    68   if (CHECK_FAIL(map_fd[1] < 0))   in test_map_lock()
    71   bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);   in test_map_lock()
    80   &map_fd[i - 4])))   in test_map_lock()
    88   ret != (void *)&map_fd[i - 4]))   in test_map_lock()
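The BPF_F_LOCK usages above copy a spin-locked map value consistently from user space. A minimal sketch, assuming the map value starts with a struct bpf_spin_lock word followed by payload, as the selftest's vars[17] layout does; locked_copy() is an illustrative helper.

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int locked_copy(int map_fd)
{
	int key = 0;
	int vars[17] = {};	/* vars[0] overlays the struct bpf_spin_lock */
	int err;

	/* Acquire the element's spin lock, copy the value, release. */
	err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
	if (err)
		return err;

	/* Write it back atomically with respect to the same lock. */
	return bpf_map_update_elem(map_fd, &key, vars, BPF_F_LOCK);
}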
|
| D | tailcalls.c |
    17   int err, map_fd, prog_fd, main_fd, i, j;   in test_tailcall_1() local
    46   map_fd = bpf_map__fd(prog_array);   in test_tailcall_1()
    47   if (CHECK_FAIL(map_fd < 0))   in test_tailcall_1()
    61   err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);   in test_tailcall_1()
    71   err = bpf_map_delete_elem(map_fd, &i);   in test_tailcall_1()
    91   err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);   in test_tailcall_1()
    112  err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);   in test_tailcall_1()
    124  err = bpf_map_delete_elem(map_fd, &i);   in test_tailcall_1()
    134  err = bpf_map_delete_elem(map_fd, &i);   in test_tailcall_1()
    153  int err, map_fd, prog_fd, main_fd, i;   in test_tailcall_2() local
    [all …]
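The updates and deletes above drive tail calls: a BPF_MAP_TYPE_PROG_ARRAY slot simply holds the fd of the program that bpf_tail_call() should jump to. A minimal sketch with placeholder libbpf objects; install_tail_call() is hypothetical, not the selftest's helper.

#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Hypothetical helper: point one prog_array slot at 'target'. */
static int install_tail_call(struct bpf_map *prog_array,
			     struct bpf_program *target, int slot)
{
	int map_fd = bpf_map__fd(prog_array);
	int prog_fd = bpf_program__fd(target);

	if (map_fd < 0 || prog_fd < 0)
		return -EINVAL;

	/* bpf_tail_call(ctx, &prog_array, slot) on the BPF side now lands in
	 * 'target'; bpf_map_delete_elem(map_fd, &slot) makes it fall through. */
	return bpf_map_update_elem(map_fd, &slot, &prog_fd, BPF_ANY);
}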
|
| D | xdp_cpumap_attach.c |
    21   int err, prog_fd, prog_redir_fd, map_fd;   in test_xdp_with_cpumap_helpers() local
    41   map_fd = bpf_map__fd(skel->maps.cpu_map);   in test_xdp_with_cpumap_helpers()
    47   err = bpf_map_update_elem(map_fd, &idx, &val, 0);   in test_xdp_with_cpumap_helpers()
    50   err = bpf_map_lookup_elem(map_fd, &idx, &val);   in test_xdp_with_cpumap_helpers()
    81   err = bpf_map_update_elem(map_fd, &idx, &val, 0);   in test_xdp_with_cpumap_helpers()
    90   err = bpf_map_update_elem(map_fd, &idx, &val, 0);   in test_xdp_with_cpumap_helpers()
    107  int err, frags_prog_fd, map_fd;   in test_xdp_with_cpumap_frags_helpers() local
    115  map_fd = bpf_map__fd(skel->maps.cpu_map);   in test_xdp_with_cpumap_frags_helpers()
    121  err = bpf_map_update_elem(map_fd, &idx, &val, 0);   in test_xdp_with_cpumap_frags_helpers()
    124  err = bpf_map_lookup_elem(map_fd, &idx, &val);   in test_xdp_with_cpumap_frags_helpers()
    [all …]
|
| D | legacy_printk.c |
    9    int err, zero = 0, my_pid = getpid(), res, map_fd;   in execute_one_variant() local
    24   map_fd = bpf_map__fd(skel->maps.my_pid_map);   in execute_one_variant()
    25   err = bpf_map_update_elem(map_fd, &zero, &my_pid, BPF_ANY);   in execute_one_variant()
    28   err = bpf_map_lookup_elem(map_fd, &zero, &res);   in execute_one_variant()
    40   map_fd = bpf_map__fd(skel->maps.res_map);   in execute_one_variant()
    41   err = bpf_map_lookup_elem(map_fd, &zero, &res);   in execute_one_variant()
|
| D | xdp_devmap_attach.c |
    24   int err, dm_fd, dm_fd_redir, map_fd;   in test_xdp_with_devmap_helpers() local
    45   map_fd = bpf_map__fd(skel->maps.dm_ports);   in test_xdp_with_devmap_helpers()
    51   err = bpf_map_update_elem(map_fd, &idx, &val, 0);   in test_xdp_with_devmap_helpers()
    54   err = bpf_map_lookup_elem(map_fd, &idx, &val);   in test_xdp_with_devmap_helpers()
    81   err = bpf_map_update_elem(map_fd, &idx, &val, 0);   in test_xdp_with_devmap_helpers()
    90   err = bpf_map_update_elem(map_fd, &idx, &val, 0);   in test_xdp_with_devmap_helpers()
    118  int err, dm_fd_frags, map_fd;   in test_xdp_with_devmap_frags_helpers() local
    126  map_fd = bpf_map__fd(skel->maps.dm_ports);   in test_xdp_with_devmap_frags_helpers()
    132  err = bpf_map_update_elem(map_fd, &idx, &val, 0);   in test_xdp_with_devmap_frags_helpers()
    135  err = bpf_map_lookup_elem(map_fd, &idx, &val);   in test_xdp_with_devmap_frags_helpers()
    [all …]
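The val passed to the devmap updates above is a struct bpf_devmap_val. A minimal sketch of filling one entry; add_devmap_port() and its parameters are illustrative, not part of the test.

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Illustrative helper: one devmap slot = egress ifindex + optional XDP prog. */
static int add_devmap_port(int map_fd, __u32 idx, __u32 ifindex, int prog_fd)
{
	struct bpf_devmap_val val = {
		.ifindex = ifindex,	/* device the frame is redirected to */
	};

	/* Optional second XDP program to run on the redirected frame. */
	val.bpf_prog.fd = prog_fd;

	return bpf_map_update_elem(map_fd, &idx, &val, 0);
}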
|
| D | cgroup_attach_multi.c |
    11   static int map_fd = -1;   variable
    17   if (map_fd < 0)   in prog_load_cnt()
    18   map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);   in prog_load_cnt()
    19   if (map_fd < 0) {   in prog_load_cnt()
    44   BPF_LD_MAP_FD(BPF_REG_1, map_fd),   in prog_load_cnt()
    151  CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));   in serial_test_cgroup_attach_multi()
    188  CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));   in serial_test_cgroup_attach_multi()
    190  CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));   in serial_test_cgroup_attach_multi()
    241  CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));   in serial_test_cgroup_attach_multi()
    243  CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));   in serial_test_cgroup_attach_multi()
    [all …]
|
| D | xdp.c |
    16   int err, prog_fd, map_fd;   in test_xdp() local
    29   map_fd = bpf_find_map(__func__, obj, "vip2tnl");   in test_xdp()
    30   if (map_fd < 0)   in test_xdp()
    32   bpf_map_update_elem(map_fd, &key4, &value4, 0);   in test_xdp()
    33   bpf_map_update_elem(map_fd, &key6, &value6, 0);   in test_xdp()
|
| D | syscall.c |
    10   int map_fd;   member
    39   ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");   in test_syscall_load_prog()
    44   err = bpf_map_lookup_elem(ctx.map_fd, &key, &value);   in test_syscall_load_prog()
    51   if (ctx.map_fd > 0)   in test_syscall_load_prog()
    52   close(ctx.map_fd);   in test_syscall_load_prog()
|
| D | sk_storage_omem_uncharge.c |
    12   int sk_fd = -1, map_fd, err, value;   in test_sk_storage_omem_uncharge() local
    18   map_fd = bpf_map__fd(skel->maps.sk_storage);   in test_sk_storage_omem_uncharge()
    33   err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);   in test_sk_storage_omem_uncharge()
    38   err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);   in test_sk_storage_omem_uncharge()
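sk_storage maps are keyed from user space by a socket fd, which is why the updates above pass &sk_fd as the key. A minimal sketch; tag_socket() and the int-sized value are assumptions, not the selftest's code.

#include <errno.h>
#include <sys/socket.h>
#include <unistd.h>
#include <bpf/bpf.h>

/* Hypothetical helper: attach a value to one socket's sk_storage slot. */
static int tag_socket(int map_fd)
{
	int sk_fd, value = 0xdead, err;

	sk_fd = socket(AF_INET, SOCK_STREAM, 0);
	if (sk_fd < 0)
		return -errno;

	/* Key is the socket fd; this creates the per-socket storage. */
	err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);

	/* Closing the socket releases (and uncharges) its storage. */
	close(sk_fd);
	return err;
}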
|
| D | global_func_args.c |
    9    int err, i, map_fd, actual_value;   in test_global_func_args0() local
    12   map_fd = bpf_find_map(__func__, obj, map_name);   in test_global_func_args0()
    13   if (CHECK(map_fd < 0, "bpf_find_map", "cannot find BPF map %s: %s\n",   in test_global_func_args0()
    33   err = bpf_map_lookup_elem(map_fd, &i, &actual_value);   in test_global_func_args0()
|
| D | dmabuf_iter.c |
    222  static void subtest_dmabuf_iter_check_open_coded(struct dmabuf_iter *skel, int map_fd)   in subtest_dmabuf_iter_check_open_coded() argument
    238  if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, key), "get next key"))   in subtest_dmabuf_iter_check_open_coded()
    242  ASSERT_OK(bpf_map_lookup_elem(map_fd, key, &found), "lookup");   in subtest_dmabuf_iter_check_open_coded()
    244  } while (bpf_map_get_next_key(map_fd, key, key));   in subtest_dmabuf_iter_check_open_coded()
    250  int map_fd;   in test_dmabuf_iter() local
    257  map_fd = bpf_map__fd(skel->maps.testbuf_hash);   in test_dmabuf_iter()
    258  if (!ASSERT_OK_FD(map_fd, "map_fd"))   in test_dmabuf_iter()
    261  if (!ASSERT_OK(bpf_map_update_elem(map_fd, udmabuf_test_buffer_name, &f, BPF_ANY),   in test_dmabuf_iter()
    264  if (!ASSERT_OK(bpf_map_update_elem(map_fd, sysheap_test_buffer_name, &f, BPF_ANY),   in test_dmabuf_iter()
    279  subtest_dmabuf_iter_check_open_coded(skel, map_fd);   in test_dmabuf_iter()
|
| /tools/testing/selftests/bpf/map_tests/ |
| D | map_percpu_stats.c |
    22   static void map_info(int map_fd, struct bpf_map_info *info)   in map_info() argument
    29   ret = bpf_obj_get_info_by_fd(map_fd, info, &len);   in map_info()
    51   static __u32 map_count_elements(__u32 type, int map_fd)   in map_count_elements() argument
    56   while (!bpf_map_get_next_key(map_fd, &key, &key))   in map_count_elements()
    63   static void delete_and_lookup_batch(int map_fd, void *keys, __u32 count)   in delete_and_lookup_batch() argument
    70   ret = bpf_map_lookup_and_delete_batch(map_fd,   in delete_and_lookup_batch()
    89   static void delete_all_elements(__u32 type, int map_fd, bool batch)   in delete_all_elements() argument
    100  for (n = 0; !bpf_map_get_next_key(map_fd, &key, &key); n++)   in delete_all_elements()
    108  delete_and_lookup_batch(map_fd, keys, n);   in delete_all_elements()
    115  ret = bpf_map_delete_elem(map_fd, keyp);   in delete_all_elements()
    [all …]
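map_count_elements() above walks the map purely with bpf_map_get_next_key(), reusing one buffer as both cursor and output. A minimal standalone sketch of that loop, assuming __u32 keys and a start key that is not present in the map.

#include <bpf/bpf.h>

/* Illustrative: count elements of a map with __u32 keys. */
static __u32 count_elements(int map_fd)
{
	__u32 key = -1;		/* assumed not to be a live key */
	__u32 n = 0;

	/* Starting from a non-existent key returns the first real key; each
	 * further call advances, reusing 'key' as both cursor and output. */
	while (!bpf_map_get_next_key(map_fd, &key, &key))
		n++;

	return n;
}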
|
| D | sk_storage_map.c |
    144  int btf_fd, map_fd;   in create_sk_storage_map() local
    151  map_fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_storage_map", 4, 8, 0, &map_opts);   in create_sk_storage_map()
    154  CHECK(map_fd == -1,   in create_sk_storage_map()
    157  return map_fd;   in create_sk_storage_map()
    166  int i, map_fd, err, *sk_fds;   in insert_close_thread() local
    181  map_fd = READ_ONCE(sk_storage_map);   in insert_close_thread()
    189  err = bpf_map_update_elem(map_fd, &sk_fds[i], &value,   in insert_close_thread()
    225  int i, map_fd = -1, err = 0, nr_threads_created = 0;   in do_sk_storage_map_stress_free() local
    246  map_fd = create_sk_storage_map();   in do_sk_storage_map_stress_free()
    247  WRITE_ONCE(sk_storage_map, map_fd);   in do_sk_storage_map_stress_free()
    [all …]
|
| D | lpm_trie_map_get_next_key.c |
    27   int map_fd;   member
    41   bpf_map_get_next_key(ctx->map_fd, &ctx->key, &next_key);   in get_next_key_fn()
    67   int map_fd;   in test_lpm_trie_map_get_next_key() local
    75   map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map",   in test_lpm_trie_map_get_next_key()
    78   CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n",   in test_lpm_trie_map_get_next_key()
    83   err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);   in test_lpm_trie_map_get_next_key()
    90   ctx.map_fd = map_fd;   in test_lpm_trie_map_get_next_key()
    108  close(map_fd);   in test_lpm_trie_map_get_next_key()
|
| D | htab_map_batch_ops.c |
    14   static void map_batch_update(int map_fd, __u32 max_entries, int *keys,   in map_batch_update() argument
    37   err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);   in map_batch_update()
    81   int map_fd, *keys, *visited, key;   in __test_map_lookup_and_delete_batch() local
    92   map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_HASH : BPF_MAP_TYPE_HASH,   in __test_map_lookup_and_delete_batch()
    94   CHECK(map_fd == -1,   in __test_map_lookup_and_delete_batch()
    109  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,   in __test_map_lookup_and_delete_batch()
    115  map_batch_update(map_fd, max_entries, keys, values, is_pcpu);   in __test_map_lookup_and_delete_batch()
    119  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,   in __test_map_lookup_and_delete_batch()
    127  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,   in __test_map_lookup_and_delete_batch()
    136  err = bpf_map_get_next_key(map_fd, NULL, &key);   in __test_map_lookup_and_delete_batch()
    [all …]
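The batch calls above populate and drain the whole hash map in a handful of syscalls. A minimal sketch for the non-per-CPU case with int keys and values; batch_fill_and_drain() and its key/value numbering are assumptions, not the selftest's helpers.

#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>

/* Hypothetical helper: insert max_entries int/int pairs, then read-and-delete
 * them all back in one batch pass. */
static int batch_fill_and_drain(int map_fd, __u32 max_entries)
{
	LIBBPF_OPTS(bpf_map_batch_opts, opts);
	__u32 count = max_entries, batch, i;
	int *keys, *values, err = -ENOMEM;

	keys = calloc(max_entries, sizeof(*keys));
	values = calloc(max_entries, sizeof(*values));
	if (!keys || !values)
		goto out;

	for (i = 0; i < max_entries; i++) {
		keys[i] = i + 1;
		values[i] = i + 2;
	}

	/* One syscall inserts every element. */
	err = bpf_map_update_batch(map_fd, keys, values, &count, &opts);
	if (err)
		goto out;

	/* NULL in_batch means "start from the beginning"; 'batch' receives the
	 * kernel's resume cookie for a follow-up call. */
	count = max_entries;
	err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
					      values, &count, &opts);
	if (err == -ENOENT)	/* whole map consumed in this pass */
		err = 0;
out:
	free(keys);
	free(values);
	return err;
}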
|
| D | lpm_trie_map_batch_ops.c |
    22   static void map_batch_update(int map_fd, __u32 max_entries,   in map_batch_update() argument
    40   err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);   in map_batch_update()
    70   int map_fd, *values, *visited;   in test_lpm_trie_map_batch_ops() local
    80   map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map",   in test_lpm_trie_map_batch_ops()
    83   CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n",   in test_lpm_trie_map_batch_ops()
    94   map_batch_update(map_fd, max_entries, keys, values);   in test_lpm_trie_map_batch_ops()
    105  err = bpf_map_lookup_batch(map_fd,   in test_lpm_trie_map_batch_ops()
    127  err = bpf_map_delete_batch(map_fd, keys + total, &count,   in test_lpm_trie_map_batch_ops()
    139  err = bpf_map_get_next_key(map_fd, NULL, &key);   in test_lpm_trie_map_batch_ops()
    154  close(map_fd);   in test_lpm_trie_map_batch_ops()
|
| D | array_map_batch_ops.c |
    15   static void map_batch_update(int map_fd, __u32 max_entries, int *keys,   in map_batch_update() argument
    36   err = bpf_map_update_batch(map_fd, keys, values, &max_entries, &opts);   in map_batch_update()
    72   int map_fd, *keys, *visited;   in __test_map_lookup_and_update_batch() local
    83   map_fd = bpf_map_create(is_pcpu ? BPF_MAP_TYPE_PERCPU_ARRAY : BPF_MAP_TYPE_ARRAY,   in __test_map_lookup_and_update_batch()
    85   CHECK(map_fd == -1,   in __test_map_lookup_and_update_batch()
    101  map_batch_update(map_fd, max_entries, keys, values, is_pcpu);   in __test_map_lookup_and_update_batch()
    112  err = bpf_map_lookup_batch(map_fd,   in __test_map_lookup_and_update_batch()
    141  close(map_fd);   in __test_map_lookup_and_update_batch()
|
| /tools/testing/selftests/bpf/ |
| D | test_lpm_map.c |
    428  int map_fd;   in test_lpm_delete() local
    434  map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL,   in test_lpm_delete()
    437  assert(map_fd >= 0);   in test_lpm_delete()
    454  assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);   in test_lpm_delete()
    459  assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);   in test_lpm_delete()
    464  assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);   in test_lpm_delete()
    469  assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);   in test_lpm_delete()
    474  assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);   in test_lpm_delete()
    478  assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);   in test_lpm_delete()
    482  assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);   in test_lpm_delete()
    [all …]
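The LPM-trie asserts above exercise insert, lookup, and delete with prefix keys. A minimal sketch of the same user-space API, assuming an IPv4 key layout (a __u32 prefix length followed by the address bytes) and noting that LPM tries must be created with BPF_F_NO_PREALLOC; lpm_demo() and the addresses are made up for the example.

#include <arpa/inet.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Illustrative IPv4 LPM key: prefix length, then address bytes. */
struct lpm_key_v4 {
	__u32 prefixlen;
	__u32 addr;		/* network byte order */
};

static int lpm_demo(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	struct lpm_key_v4 key;
	__u64 value = 1;
	int map_fd, err;

	map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, sizeof(key),
				sizeof(value), 128, &opts);
	if (map_fd < 0)
		return map_fd;

	/* Insert 192.168.0.0/24. */
	key.prefixlen = 24;
	inet_pton(AF_INET, "192.168.0.0", &key.addr);
	err = bpf_map_update_elem(map_fd, &key, &value, 0);
	if (err)
		goto out;

	/* Look up a host address; the trie returns the longest matching prefix. */
	key.prefixlen = 32;
	inet_pton(AF_INET, "192.168.0.7", &key.addr);
	err = bpf_map_lookup_elem(map_fd, &key, &value);
out:
	close(map_fd);
	return err;
}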
|
| /tools/bpf/bpftool/ |
| D | iter.c |
    22   int err = -1, map_fd = -1;   in do_pin() local
    40   map_fd = map_parse_fd(&argc, &argv);   in do_pin()
    41   if (map_fd < 0)   in do_pin()
    45   linfo.map.map_fd = map_fd;   in do_pin()
    95   if (map_fd >= 0)   in do_pin()
    96   close(map_fd);   in do_pin()
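bpftool's do_pin() above feeds the parsed map fd into union bpf_iter_link_info before creating and pinning the iterator link. A libbpf-level sketch of that sequence; pin_map_iter() and its arguments are placeholders, and the prog fd is assumed to be a loaded iter/bpf_map_elem program.

#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int pin_map_iter(int prog_fd, int map_fd, const char *pin_path)
{
	union bpf_iter_link_info linfo = {};
	LIBBPF_OPTS(bpf_link_create_opts, opts);
	int link_fd, err;

	linfo.map.map_fd = map_fd;		/* iterate this map's elements */
	opts.iter_info = &linfo;
	opts.iter_info_len = sizeof(linfo);

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, &opts);
	if (link_fd < 0)
		return link_fd;

	/* Reading the pinned bpffs file runs the iterator program. */
	err = bpf_obj_pin(link_fd, pin_path);
	close(link_fd);
	return err;
}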
|
| /tools/lib/bpf/ |
| D | ringbuf.c |
    32   int map_fd;   member
    50   int map_fd;   member
    75   int ring_buffer__add(struct ring_buffer *rb, int map_fd,   in ring_buffer__add() argument
    88   err = bpf_map_get_info_by_fd(map_fd, &info, &len);   in ring_buffer__add()
    92   map_fd, err);   in ring_buffer__add()
    98   map_fd);   in ring_buffer__add()
    117  r->map_fd = map_fd;   in ring_buffer__add()
    123  tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);   in ring_buffer__add()
    127  map_fd, err);   in ring_buffer__add()
    142  tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);   in ring_buffer__add()
    [all …]
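ring_buffer__add() above is the registration path behind the usual consumer loop: it validates that map_fd is a BPF_MAP_TYPE_RINGBUF map and mmaps its consumer and producer pages. A minimal sketch of that consumer side; handle_event() is a placeholder callback and the timeout is arbitrary.

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Placeholder callback: called once per reserved-and-committed record. */
static int handle_event(void *ctx, void *data, size_t size)
{
	printf("sample of %zu bytes\n", size);
	return 0;	/* non-zero aborts the consume/poll loop */
}

static int consume(int map_fd)
{
	struct ring_buffer *rb;
	int err;

	/* ring_buffer__new() registers the first ring; ring_buffer__add() can
	 * attach further BPF_MAP_TYPE_RINGBUF fds to the same epoll set. */
	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -errno;

	err = ring_buffer__poll(rb, 100 /* timeout, ms */);

	ring_buffer__free(rb);
	return err;
}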
|