/external/linux-kselftest/tools/testing/selftests/bpf/map_tests/ |
D | sk_storage_map.c |
    148  int btf_fd, map_fd;  in create_sk_storage_map() local
    155  map_fd = bpf_create_map_xattr(&xattr);  in create_sk_storage_map()
    158  CHECK(map_fd == -1,  in create_sk_storage_map()
    161  return map_fd;  in create_sk_storage_map()
    170  int i, map_fd, err, *sk_fds;  in insert_close_thread() local
    185  map_fd = READ_ONCE(sk_storage_map);  in insert_close_thread()
    193  err = bpf_map_update_elem(map_fd, &sk_fds[i], &value,  in insert_close_thread()
    229  int i, map_fd = -1, err = 0, nr_threads_created = 0;  in do_sk_storage_map_stress_free() local
    250  map_fd = create_sk_storage_map();  in do_sk_storage_map_stress_free()
    251  WRITE_ONCE(sk_storage_map, map_fd);  in do_sk_storage_map_stress_free()
    [all …]
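A minimal sketch of the update path this stress test exercises: a BPF_MAP_TYPE_SK_STORAGE map is keyed by a socket's fd, so attaching per-socket data is a single bpf_map_update_elem() call. The helper name and value below are illustrative only; creating the map itself additionally needs BTF type ids, which the test sets up in create_sk_storage_map().

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    /* Illustrative helper (not from the test): attach a value to one socket.
     * map_fd must refer to a BPF_MAP_TYPE_SK_STORAGE map created with BTF. */
    static int sk_storage_attach(int map_fd, int sock_fd)
    {
        int value = 1;   /* arbitrary example payload, must match value_size */

        /* the key is the socket fd; the kernel resolves it to the socket */
        return bpf_map_update_elem(map_fd, &sock_fd, &value, BPF_ANY);
    }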
|
/external/linux-kselftest/tools/testing/selftests/bpf/ |
D | test_lpm_map.c |
    428  int map_fd;  in test_lpm_delete() local
    434  map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,  in test_lpm_delete()
    437  assert(map_fd >= 0);  in test_lpm_delete()
    454  assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);  in test_lpm_delete()
    459  assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);  in test_lpm_delete()
    464  assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);  in test_lpm_delete()
    469  assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0);  in test_lpm_delete()
    474  assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&  in test_lpm_delete()
    479  assert(bpf_map_delete_elem(map_fd, key) == -1 &&  in test_lpm_delete()
    484  assert(bpf_map_delete_elem(map_fd, key) == -1 &&  in test_lpm_delete()
    [all …]
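A self-contained sketch of the same LPM-trie flow (create, insert a prefix, longest-prefix lookup). It assumes the uapi key layout (a 32-bit prefixlen followed by the address bytes) and uses the legacy bpf_create_map() helper that the test itself calls (pre-1.0 libbpf); LPM tries must be created with BPF_F_NO_PREALLOC.

    #include <errno.h>
    #include <stdint.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    struct lpm_key_v4 {              /* mirrors struct bpf_lpm_trie_key for IPv4 */
        uint32_t prefixlen;
        uint8_t  data[4];
    };

    static int lpm_example(void)
    {
        struct lpm_key_v4 key = { .prefixlen = 24, .data = { 192, 168, 0, 0 } };
        struct lpm_key_v4 probe = { .prefixlen = 32, .data = { 192, 168, 0, 7 } };
        uint64_t value = 1, out;
        int map_fd;

        map_fd = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, sizeof(key),
                                sizeof(value), 128, BPF_F_NO_PREALLOC);
        if (map_fd < 0)
            return -errno;

        if (bpf_map_update_elem(map_fd, &key, &value, 0))  /* add 192.168.0.0/24 */
            return -errno;

        /* longest-prefix match for a host inside that /24; returns 0 on a hit */
        return bpf_map_lookup_elem(map_fd, &probe, &out);
    }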
|
D | test_skb_cgroup_id_user.c |
    86  int map_fd = -1;  in get_map_fd_by_prog_id() local
    107  map_fd = bpf_map_get_fd_by_id(map_ids[0]);  in get_map_fd_by_prog_id()
    108  if (map_fd < 0)  in get_map_fd_by_prog_id()
    113  return map_fd;  in get_map_fd_by_prog_id()
    121  int map_fd;  in check_ancestor_cgroup_ids() local
    128  map_fd = get_map_fd_by_prog_id(prog_id);  in check_ancestor_cgroup_ids()
    129  if (map_fd < 0)  in check_ancestor_cgroup_ids()
    133  if (bpf_map_lookup_elem(map_fd, &level, &actual_ids[level])) {  in check_ancestor_cgroup_ids()
    148  if (map_fd >= 0)  in check_ancestor_cgroup_ids()
    149  close(map_fd);  in check_ancestor_cgroup_ids()
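A sketch of the lookup-by-id pattern behind get_map_fd_by_prog_id(): ask the kernel which map ids a loaded program references, then reopen the first one as a fresh fd. Error handling is trimmed and the helper name is ours, not the test's.

    #include <unistd.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int map_fd_from_prog_id(__u32 prog_id)
    {
        struct bpf_prog_info info = {};
        __u32 info_len = sizeof(info);
        __u32 map_ids[1];
        int prog_fd, map_fd = -1;

        prog_fd = bpf_prog_get_fd_by_id(prog_id);
        if (prog_fd < 0)
            return -1;

        info.nr_map_ids = 1;                                  /* room for one id */
        info.map_ids = (__u64)(unsigned long)map_ids;
        if (!bpf_obj_get_info_by_fd(prog_fd, &info, &info_len) && info.nr_map_ids)
            map_fd = bpf_map_get_fd_by_id(map_ids[0]);        /* caller closes it */

        close(prog_fd);
        return map_fd;
    }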
|
D | test_cgroup_storage.c |
    40  int map_fd, percpu_map_fd, prog_fd, cgroup_fd;  in main() local
    53  map_fd = bpf_create_map(BPF_MAP_TYPE_CGROUP_STORAGE, sizeof(key),  in main()
    55  if (map_fd < 0) {  in main()
    68  prog[7].imm = map_fd;  in main()
    100  if (bpf_map_get_next_key(map_fd, NULL, &key)) {  in main()
    105  if (bpf_map_lookup_elem(map_fd, &key, &value)) {  in main()
    124  if (bpf_map_lookup_elem(map_fd, &key, &value)) {  in main()
    136  if (bpf_map_update_elem(map_fd, &key, &value, 0)) {  in main()
    147  if (bpf_map_lookup_elem(map_fd, &key, &value)) {  in main()
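A sketch of the read-back step in the test's main(): cgroup storage keeps one value per (cgroup, attach_type), so iterating from a NULL key returns the single key the test expects, and the value can then be looked up. The helper name and printf are ours.

    #include <stdio.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int read_cgroup_counter(int map_fd)
    {
        struct bpf_cgroup_storage_key key;   /* { cgroup_inode_id, attach_type } */
        unsigned long long value;

        if (bpf_map_get_next_key(map_fd, NULL, &key))   /* first (and only) key */
            return -1;
        if (bpf_map_lookup_elem(map_fd, &key, &value))
            return -1;

        printf("cgroup storage counter: %llu\n", value);
        return 0;
    }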
|
D | xdping.c |
    96  int prog_fd = -1, map_fd = -1;  in main() local
    193  map_fd = bpf_map__fd(map);  in main()
    194  if (!map || map_fd < 0) {  in main()
    213  close(map_fd);  in main()
    228  if (bpf_map_update_elem(map_fd, &raddr, &pinginfo, BPF_ANY)) {  in main()
    247  ret = get_stats(map_fd, count, raddr);  in main()
    254  if (map_fd > 0)  in main()
    255  close(map_fd);  in main()
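A sketch of the libbpf flow xdping relies on: open and load an object file, resolve a map by name, and turn it into a plain fd for the bpf_map_*_elem() calls. The object path and map name here are placeholders, not the tool's real ones.

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    static int load_and_get_map_fd(void)
    {
        struct bpf_object *obj;
        struct bpf_map *map;
        int map_fd;

        obj = bpf_object__open_file("prog.bpf.o", NULL);    /* placeholder path */
        if (libbpf_get_error(obj))
            return -1;
        if (bpf_object__load(obj))
            goto err;

        map = bpf_object__find_map_by_name(obj, "my_map");  /* placeholder name */
        map_fd = map ? bpf_map__fd(map) : -1;
        if (map_fd < 0)
            goto err;

        return map_fd;        /* fd stays valid while obj is open */
    err:
        bpf_object__close(obj);
        return -1;
    }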
|
D | test_cgroup_attach.c |
    217  static int map_fd = -1;  variable
    223  if (map_fd < 0)  in prog_load_cnt()
    224  map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);  in prog_load_cnt()
    225  if (map_fd < 0) {  in prog_load_cnt()
    250  BPF_LD_MAP_FD(BPF_REG_1, map_fd),  in prog_load_cnt()
    363  assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);  in test_multiprog()
    398  assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);  in test_multiprog()
    400  assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);  in test_multiprog()
    414  assert(bpf_map_update_elem(map_fd, &key, &value, 0) == 0);  in test_multiprog()
    416  assert(bpf_map_lookup_elem(map_fd, &key, &value) == 0);  in test_multiprog()
    [all …]
|
D | test_netcnt.c |
    38  int map_fd, percpu_map_fd;  in main() local
    95  map_fd = bpf_find_map(__func__, obj, "netcnt");  in main()
    96  if (map_fd < 0) {  in main()
    107  if (bpf_map_get_next_key(map_fd, NULL, &key)) {  in main()
    112  if (bpf_map_lookup_elem(map_fd, &key, &netcnt)) {  in main()
|
D | test_sockmap.c |
    66  int map_fd[8];  variable
    807  err = bpf_prog_attach(prog_fd[0], map_fd[0],  in run_options()
    812  prog_fd[0], map_fd[0], err, strerror(errno));  in run_options()
    816  err = bpf_prog_attach(prog_fd[1], map_fd[0],  in run_options()
    862  map_fd[1], BPF_SK_MSG_VERDICT, 0);  in run_options()
    870  err = bpf_map_update_elem(map_fd[1], &i, &c1, BPF_ANY);  in run_options()
    883  err = bpf_map_update_elem(map_fd[2], &i, &redir_fd, BPF_ANY);  in run_options()
    892  err = bpf_map_update_elem(map_fd[3],  in run_options()
    903  err = bpf_map_update_elem(map_fd[4],  in run_options()
    914  err = bpf_map_update_elem(map_fd[5],  in run_options()
    [all …]
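A sketch of the sockmap wiring run_options() performs: sk_skb verdict/parser programs are attached to the map fd itself, and established sockets are then inserted with the socket fd as the value. The helper and the fixed slot below are illustrative, not the test's exact code.

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int wire_sockmap(int verdict_prog_fd, int sock_map_fd, int sock_fd)
    {
        int slot = 0;

        /* attach target for sk_skb programs is the sockmap fd, not a cgroup */
        if (bpf_prog_attach(verdict_prog_fd, sock_map_fd,
                            BPF_SK_SKB_STREAM_VERDICT, 0))
            return -1;

        /* the value is a socket fd; the kernel stores the socket itself */
        return bpf_map_update_elem(sock_map_fd, &slot, &sock_fd, BPF_ANY);
    }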
|
/external/linux-kselftest/tools/testing/selftests/bpf/prog_tests/ |
D | global_data.c |
    6  int i, err, map_fd;  in test_global_data_number() local
    9  map_fd = bpf_find_map(__func__, obj, "result_number");  in test_global_data_number()
    10  if (CHECK_FAIL(map_fd < 0))  in test_global_data_number()
    32  err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num);  in test_global_data_number()
    41  int i, err, map_fd;  in test_global_data_string() local
    44  map_fd = bpf_find_map(__func__, obj, "result_string");  in test_global_data_string()
    45  if (CHECK_FAIL(map_fd < 0))  in test_global_data_string()
    61  err = bpf_map_lookup_elem(map_fd, &tests[i].key, str);  in test_global_data_string()
    76  int i, err, map_fd;  in test_global_data_struct() local
    79  map_fd = bpf_find_map(__func__, obj, "result_struct");  in test_global_data_struct()
    [all …]
|
D | xdp_noinline.c |
    27  int err, i, prog_fd, map_fd;  in test_xdp_noinline() local
    37  map_fd = bpf_find_map(__func__, obj, "vip_map");  in test_xdp_noinline()
    38  if (map_fd < 0)  in test_xdp_noinline()
    40  bpf_map_update_elem(map_fd, &key, &value, 0);  in test_xdp_noinline()
    42  map_fd = bpf_find_map(__func__, obj, "ch_rings");  in test_xdp_noinline()
    43  if (map_fd < 0)  in test_xdp_noinline()
    45  bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);  in test_xdp_noinline()
    47  map_fd = bpf_find_map(__func__, obj, "reals");  in test_xdp_noinline()
    48  if (map_fd < 0)  in test_xdp_noinline()
    50  bpf_map_update_elem(map_fd, &real_num, &real_def, 0);  in test_xdp_noinline()
    [all …]
|
D | l4lb_all.c |
    26  int err, i, prog_fd, map_fd;  in test_l4lb() local
    36  map_fd = bpf_find_map(__func__, obj, "vip_map");  in test_l4lb()
    37  if (map_fd < 0)  in test_l4lb()
    39  bpf_map_update_elem(map_fd, &key, &value, 0);  in test_l4lb()
    41  map_fd = bpf_find_map(__func__, obj, "ch_rings");  in test_l4lb()
    42  if (map_fd < 0)  in test_l4lb()
    44  bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);  in test_l4lb()
    46  map_fd = bpf_find_map(__func__, obj, "reals");  in test_l4lb()
    47  if (map_fd < 0)  in test_l4lb()
    49  bpf_map_update_elem(map_fd, &real_num, &real_def, 0);  in test_l4lb()
    [all …]
|
D | tailcalls.c |
    10  int err, map_fd, prog_fd, main_fd, i, j;  in test_tailcall_1() local
    35  map_fd = bpf_map__fd(prog_array);  in test_tailcall_1()
    36  if (CHECK_FAIL(map_fd < 0))  in test_tailcall_1()
    50  err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);  in test_tailcall_1()
    61  err = bpf_map_delete_elem(map_fd, &i);  in test_tailcall_1()
    82  err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);  in test_tailcall_1()
    104  err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);  in test_tailcall_1()
    117  err = bpf_map_delete_elem(map_fd, &i);  in test_tailcall_1()
    128  err = bpf_map_delete_elem(map_fd, &i);  in test_tailcall_1()
    148  int err, map_fd, prog_fd, main_fd, i;  in test_tailcall_2() local
    [all …]
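A sketch of what these tests exercise from user space: a tail-call target is installed by writing its prog fd into a BPF_MAP_TYPE_PROG_ARRAY slot, and disabled again by deleting the slot. The helper names are ours.

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int set_tail_call(int prog_array_fd, int slot, int target_prog_fd)
    {
        /* bpf_tail_call(ctx, &prog_array, slot) in the program jumps here */
        return bpf_map_update_elem(prog_array_fd, &slot, &target_prog_fd, BPF_ANY);
    }

    static int clear_tail_call(int prog_array_fd, int slot)
    {
        /* once deleted, the tail call falls through at run time */
        return bpf_map_delete_elem(prog_array_fd, &slot);
    }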
|
D | map_lock.c |
    6  int err, map_fd = *(u32 *) arg;  in parallel_map_access() local
    10  err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);  in parallel_map_access()
    36  int prog_fd, map_fd[2], vars[17] = {};  in test_map_lock() local
    47  map_fd[0] = bpf_find_map(__func__, obj, "hash_map");  in test_map_lock()
    48  if (CHECK_FAIL(map_fd[0] < 0))  in test_map_lock()
    50  map_fd[1] = bpf_find_map(__func__, obj, "array_map");  in test_map_lock()
    51  if (CHECK_FAIL(map_fd[1] < 0))  in test_map_lock()
    54  bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK);  in test_map_lock()
    63  &map_fd[i - 4])))  in test_map_lock()
    71  ret != (void *)&map_fd[i - 4]))  in test_map_lock()
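A sketch of the locked access parallel_map_access() performs: BPF_F_LOCK makes the kernel take the element's bpf_spin_lock around the copy, so the whole value (17 ints in this test) is read or written as one consistent snapshot. The wrappers below are illustrative and assume the map's value type embeds a struct bpf_spin_lock.

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int locked_read(int map_fd, int key, int vars[17])
    {
        /* copies the whole value while holding the element's spin lock */
        return bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK);
    }

    static int locked_write(int map_fd, int key, const int vars[17])
    {
        return bpf_map_update_elem(map_fd, &key, vars, BPF_F_LOCK);
    }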
|
D | xdp.c |
    16  int err, prog_fd, map_fd;  in test_xdp() local
    22  map_fd = bpf_find_map(__func__, obj, "vip2tnl");  in test_xdp()
    23  if (map_fd < 0)  in test_xdp()
    25  bpf_map_update_elem(map_fd, &key4, &value4, 0);  in test_xdp()
    26  bpf_map_update_elem(map_fd, &key6, &value6, 0);  in test_xdp()
|
/external/libbpf/src/ |
D | ringbuf.c |
    31  int map_fd;  member
    55  int ring_buffer__add(struct ring_buffer *rb, int map_fd,  in ring_buffer__add() argument
    67  err = bpf_obj_get_info_by_fd(map_fd, &info, &len);  in ring_buffer__add()
    71  map_fd, err);  in ring_buffer__add()
    77  map_fd);  in ring_buffer__add()
    94  r->map_fd = map_fd;  in ring_buffer__add()
    101  map_fd, 0);  in ring_buffer__add()
    105  map_fd, err);  in ring_buffer__add()
    115  MAP_SHARED, map_fd, rb->page_size);  in ring_buffer__add()
    120  map_fd, err);  in ring_buffer__add()
    [all …]
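A sketch of the consumer side this libbpf code implements: ring_buffer__new()/ring_buffer__add() take the fd of a BPF_MAP_TYPE_RINGBUF map, mmap its pages, and deliver records to a callback from ring_buffer__poll(). The callback body and the poll timeout below are placeholders.

    #include <bpf/libbpf.h>

    static int handle_record(void *ctx, void *data, size_t size)
    {
        /* called once per record submitted by the BPF program */
        return 0;
    }

    static int consume_ringbuf(int ringbuf_map_fd)
    {
        struct ring_buffer *rb;

        rb = ring_buffer__new(ringbuf_map_fd, handle_record, NULL, NULL);
        if (!rb)
            return -1;

        while (ring_buffer__poll(rb, 100 /* ms */) >= 0)
            ;                        /* handle_record() runs for each record */

        ring_buffer__free(rb);
        return 0;
    }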
|
D | skel_internal.h |
    36  int map_fd;  member
    105  attr.map_fd = fd;  in skel_map_update_elem()
    141  int map_fd = -1, prog_fd = -1, key = 0, err;  in bpf_load_and_run() local
    144  map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);  in bpf_load_and_run()
    145  if (map_fd < 0) {  in bpf_load_and_run()
    151  err = skel_map_update_elem(map_fd, &key, opts->data, 0);  in bpf_load_and_run()
    164  attr.fd_array = (long) &map_fd;  in bpf_load_and_run()
    193  if (map_fd >= 0)  in bpf_load_and_run()
    194  close(map_fd);  in bpf_load_and_run()
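A rough sketch of what bpf_load_and_run() does with its one-entry "__loader.map": the skeleton's data blob becomes the single value of an ARRAY map whose fd is then handed to the loader program through attr.fd_array. skel_map_create()/skel_map_update_elem() are thin wrappers over the bpf(2) syscall; the legacy libbpf calls used here are only an approximation of that.

    #include <unistd.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int make_loader_map(const void *data, __u32 data_sz)
    {
        int key = 0;
        int map_fd;

        /* one-element array; the whole blob is the value */
        map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), data_sz, 1, 0);
        if (map_fd < 0)
            return -1;

        if (bpf_map_update_elem(map_fd, &key, data, 0)) {
            close(map_fd);
            return -1;
        }
        return map_fd;    /* later referenced via attr.fd_array */
    }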
|
/external/bcc/src/lua/bcc/ |
D | table.lua |
    34  function BaseTable:initialize(t_type, bpf, map_id, map_fd, key_type, leaf_type)
    40  self.map_fd = map_fd
    89  if libbcc.bpf_lookup_elem(self.map_fd, pkey, pvalue) < 0 then
    99  assert(libbcc.bpf_update_elem(self.map_fd, pkey, pvalue, 0) == 0, "could not update table")
    108  if libbcc.bpf_lookup_elem(self.map_fd, pkey, pvalue) < 0 then
    122  if libbcc.bpf_get_next_key(self.map_fd, pkey, pkey_next) < 0 then
    138  if libbcc.bpf_get_next_key(self.map_fd, pkey, pkey_next) < 0 then
    143  assert(libbcc.bpf_lookup_elem(self.map_fd, pkey, pvalue) == 0)
    152  function HashTable:initialize(bpf, map_id, map_fd, key_type, leaf_type)
    153  BaseTable.initialize(self, BaseTable.BPF_MAP_TYPE_HASH, bpf, map_id, map_fd, key_type, leaf_type)
    [all …]
|
/external/ltp/testcases/kernel/syscalls/bpf/ |
D | bpf_prog05.c |
    60  static int map_fd;  variable
    81  BPF_MAP_ARRAY_STX(map_fd, 0, BPF_REG_6),  in load_prog()
    82  BPF_MAP_ARRAY_STX(map_fd, 1, BPF_REG_7),  in load_prog()
    96  BPF_MAP_ARRAY_STX(map_fd, 2, BPF_REG_6),  in load_prog()
    97  BPF_MAP_ARRAY_STX(map_fd, 3, BPF_REG_7),  in load_prog()
    112  bpf_map_array_get(map_fd, key, val);  in expect_reg_val()
    135  map_fd = bpf_map_array_create(4);  in run()
    150  SAFE_CLOSE(map_fd);  in run()
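The bpf_map_array_create()/bpf_map_array_get() calls in these LTP cases are LTP library helpers, not libbpf APIs. Below is a rough equivalent of the create step, done the way LTP does it with a raw bpf(2) syscall, assuming an array of 8-byte values keyed by a 4-byte index (sizes inferred from how the helpers are used):

    #include <stdint.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/bpf.h>

    static int map_array_create(uint32_t max_entries)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));          /* unused fields must be zero */
        attr.map_type = BPF_MAP_TYPE_ARRAY;
        attr.key_size = 4;                       /* u32 index */
        attr.value_size = 8;                     /* u64 value */
        attr.max_entries = max_entries;

        return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
    }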
|
D | bpf_prog03.c |
    117  int map_fd, prog_fd;  in run() local
    119  map_fd = bpf_map_array_create(32);  in run()
    122  attr->map_fd = map_fd;  in run()
    131  prog_fd = load_prog(map_fd);  in run()
    141  bpf_map_array_get(map_fd, key, val);  in run()
    144  SAFE_CLOSE(map_fd);  in run()
|
D | bpf_prog01.c |
    79  int map_fd, prog_fd;  in run() local
    83  map_fd = bpf_map_array_create(1);  in run()
    84  prog_fd = load_prog(map_fd);  in run()
    89  bpf_map_array_get(map_fd, &key, &val);  in run()
    98  SAFE_CLOSE(map_fd);  in run()
|
D | bpf_prog02.c |
    79  int map_fd, prog_fd;  in run() local
    81  map_fd = bpf_map_array_create(2);  in run()
    82  prog_fd = load_prog(map_fd);  in run()
    87  bpf_map_array_get(map_fd, key, val);  in run()
    97  bpf_map_array_get(map_fd, key, val);  in run()
    106  SAFE_CLOSE(map_fd);  in run()
|
/external/bcc/libbpf-tools/ |
D | map_helpers.c |
    14  dump_hash_iter(int map_fd, void *keys, __u32 key_size,  in dump_hash_iter() argument
    25  err = bpf_map_get_next_key(map_fd, key, next_key);  in dump_hash_iter()
    38  err = bpf_map_lookup_elem(map_fd, keys + key_size * i,  in dump_hash_iter()
    49  dump_hash_batch(int map_fd, void *keys, __u32 key_size,  in dump_hash_batch() argument
    58  err = bpf_map_lookup_batch(map_fd, &in, &out,  in dump_hash_batch()
    73  int dump_hash(int map_fd,  in dump_hash() argument
    86  err = dump_hash_batch(map_fd, keys, key_size,  in dump_hash()
    104  return dump_hash_iter(map_fd, keys, key_size,  in dump_hash()
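A simplified sketch of the dump_hash_batch() idea: read a hash map in chunks, one bpf_map_lookup_batch() call per chunk, resuming from the opaque batch token until the kernel reports ENOENT for end-of-map. Fixed __u32 keys and __u64 values are assumed here for brevity; the real helper works on caller-supplied key/value sizes.

    #include <errno.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int dump_u32_u64_map(int map_fd, __u32 *keys, __u64 *vals, __u32 max)
    {
        __u64 in = 0, out = 0;           /* opaque batch tokens */
        __u32 done = 0, n;
        int err = 0;

        while (done < max && !err) {
            n = max - done;
            err = bpf_map_lookup_batch(map_fd, done ? &in : NULL, &out,
                                       keys + done, vals + done, &n, NULL);
            if (err && errno != ENOENT)  /* ENOENT just means end of map */
                return -1;
            done += n;
            in = out;                    /* resume where the last batch stopped */
        }
        return done;                     /* number of entries actually read */
    }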
|
/external/bpftool/src/ |
D | iter.c |
    19  int err = -1, map_fd = -1;  in do_pin() local
    37  map_fd = map_parse_fd(&argc, &argv);  in do_pin()
    38  if (map_fd < 0)  in do_pin()
    42  linfo.map.map_fd = map_fd;  in do_pin()
    91  if (map_fd >= 0)  in do_pin()
    92  close(map_fd);  in do_pin()
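A rough sketch of what `bpftool iter pin ... map MAP` sets up for a map-parameterized iterator: the target map's fd goes into bpf_iter_link_info when the iterator program is attached, and the resulting link is pinned. The prog pointer and pin path are placeholders, and bpftool's own error reporting is omitted.

    #include <bpf/libbpf.h>
    #include <linux/bpf.h>

    static int pin_map_iter(struct bpf_program *prog, int map_fd, const char *path)
    {
        DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
        union bpf_iter_link_info linfo = {};
        struct bpf_link *link;

        linfo.map.map_fd = map_fd;          /* iterate over this map's elements */
        opts.link_info = &linfo;
        opts.link_info_len = sizeof(linfo);

        link = bpf_program__attach_iter(prog, &opts);
        if (libbpf_get_error(link))
            return -1;

        return bpf_link__pin(link, path);   /* e.g. under /sys/fs/bpf/ */
    }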
|
D | map_perf_ring.c |
    135  int err, map_fd;  in do_event_pipe() local
    138  map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);  in do_event_pipe()
    139  if (map_fd < 0)  in do_event_pipe()
    196  pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr,  in do_event_pipe()
    225  close(map_fd);  in do_event_pipe()
    232  close(map_fd);  in do_event_pipe()
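A sketch of the simpler variant of what `bpftool map event_pipe` does: consume a BPF_MAP_TYPE_PERF_EVENT_ARRAY map through libbpf's perf_buffer API. bpftool itself uses perf_buffer__new_raw() with a custom perf_event_attr; the signature below assumes the libbpf v1.0 API, and the callback body and page count are placeholders.

    #include <bpf/libbpf.h>

    static void on_sample(void *ctx, int cpu, void *data, __u32 size)
    {
        /* one event record read from the per-CPU ring for `cpu` */
    }

    static int pipe_events(int perf_map_fd)
    {
        struct perf_buffer *pb;

        pb = perf_buffer__new(perf_map_fd, 64 /* pages per CPU, power of two */,
                              on_sample, NULL /* lost_cb */, NULL, NULL);
        if (!pb)
            return -1;

        while (perf_buffer__poll(pb, 200 /* ms */) >= 0)
            ;                            /* on_sample() runs per record */

        perf_buffer__free(pb);
        return 0;
    }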
|
/external/bcc/src/python/bcc/ |
D | table.py |
    269  def Table(bpf, map_id, map_fd, keytype, leaftype, name, **kwargs):  argument
    277  t = HashTable(bpf, map_id, map_fd, keytype, leaftype)
    279  t = Array(bpf, map_id, map_fd, keytype, leaftype)
    281  t = ProgArray(bpf, map_id, map_fd, keytype, leaftype)
    283  t = PerfEventArray(bpf, map_id, map_fd, keytype, leaftype, name)
    285  t = PerCpuHash(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    287  t = PerCpuArray(bpf, map_id, map_fd, keytype, leaftype, **kwargs)
    289  t = LpmTrie(bpf, map_id, map_fd, keytype, leaftype)
    291  t = StackTrace(bpf, map_id, map_fd, keytype, leaftype)
    293  t = LruHash(bpf, map_id, map_fd, keytype, leaftype)
    [all …]
|