| /tools/perf/util/ |
| D | maps.h |
    13   struct maps;
    19   struct maps *kmaps;
    23   struct maps *maps__new(struct machine *machine);
    24   bool maps__empty(struct maps *maps);
    25   int maps__copy_from(struct maps *maps, struct maps *parent);
    27   struct maps *maps__get(struct maps *maps);
    28   void maps__put(struct maps *maps);
    30   static inline void __maps__zput(struct maps **map)   in __maps__zput()
    38   bool maps__equal(struct maps *a, struct maps *b);
    41   int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data);
    [all …]
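The maps.h declarations above form a small reference-counted container API: maps__get()/maps__put() manage the count, and maps__for_each_map() drives a callback over every map. A minimal sketch of a walk, assuming perf's util headers; count_map_cb() and count_maps() are hypothetical names:

    #include "maps.h"

    static int count_map_cb(struct map *map __maybe_unused, void *data)
    {
            unsigned int *count = data;

            (*count)++;
            return 0;       /* returning non-zero stops the walk early */
    }

    static unsigned int count_maps(struct maps *maps)
    {
            unsigned int count = 0;

            maps__for_each_map(maps, count_map_cb, &count);
            return count;
    }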
|
| D | maps.c |
    26   DECLARE_RC_STRUCT(maps) {   in DECLARE_RC_STRUCT() argument
    67   static void check_invariants(const struct maps *maps __maybe_unused)   in check_invariants()
    70   assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);   in check_invariants()
    71   for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {   in check_invariants()
    72   struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];   in check_invariants()
    80   assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));   in check_invariants()
    83   struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];   in check_invariants()
    86   if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {   in check_invariants()
    94   if (!RC_CHK_ACCESS(maps)->ends_broken) {   in check_invariants()
    102  if (RC_CHK_ACCESS(maps)->maps_by_name) {   in check_invariants()
    [all …]
|
| D | unwind.h |
    9    struct maps;
    21   int (*prepare_access)(struct maps *maps);
    22   void (*flush_access)(struct maps *maps);
    23   void (*finish_access)(struct maps *maps);
    46   int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized);
    47   void unwind__flush_access(struct maps *maps);
    48   void unwind__finish_access(struct maps *maps);
    50   static inline int unwind__prepare_access(struct maps *maps __maybe_unused,   in unwind__prepare_access()
    57   static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}   in unwind__flush_access()
    58   static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}   in unwind__finish_access()
    [all …]
|
| D | unwind-libunwind.c |
    15   int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)   in unwind__prepare_access() argument
    27   if (maps__addr_space(maps)) {   in unwind__prepare_access()
    34   machine = maps__machine(maps);   in unwind__prepare_access()
    58   maps__set_unwind_libunwind_ops(maps, ops);   in unwind__prepare_access()
    60   err = maps__unwind_libunwind_ops(maps)->prepare_access(maps);   in unwind__prepare_access()
    66   void unwind__flush_access(struct maps *maps)   in unwind__flush_access() argument
    68   const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);   in unwind__flush_access()
    71   ops->flush_access(maps);   in unwind__flush_access()
    74   void unwind__finish_access(struct maps *maps)   in unwind__finish_access() argument
    76   const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);   in unwind__finish_access()
    [all …]
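unwind.h declares a prepare/flush/finish trio, and unwind-libunwind.c dispatches each call through the per-maps unwind_libunwind_ops. A hedged sketch of the expected pairing, assuming perf's internal headers; with_unwind() is a hypothetical helper:

    #include <stdbool.h>
    #include "maps.h"
    #include "unwind.h"

    static int with_unwind(struct maps *maps, struct map *map)
    {
            bool initialized = false;
            int err = unwind__prepare_access(maps, map, &initialized);

            if (err)
                    return err;
            /* ... resolve callchains against these maps ... */
            unwind__flush_access(maps);     /* drop cached unwinder state */
            unwind__finish_access(maps);    /* before the maps are destroyed */
            return 0;
    }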
|
| D | find-map.c |
    4    FILE *maps;   in find_map() local
    8    maps = fopen("/proc/self/maps", "r");   in find_map()
    9    if (!maps) {   in find_map()
    14   while (!found && fgets(line, sizeof(line), maps)) {   in find_map()
    28   fclose(maps);   in find_map()
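find-map.c shows the usual way tools locate a mapping: open /proc/self/maps, fgets() each line, and match. A standalone sketch of the same pattern that looks for the vDSO; the "[vdso]" target and printed format are illustrative:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned long start, end;
            char line[512];
            int found = 0;
            FILE *maps = fopen("/proc/self/maps", "r");

            if (!maps) {
                    perror("fopen");
                    return 1;
            }
            while (!found && fgets(line, sizeof(line), maps)) {
                    /* each line starts with "start-end perms offset ..." */
                    if (strstr(line, "[vdso]") &&
                        sscanf(line, "%lx-%lx", &start, &end) == 2)
                            found = 1;
            }
            fclose(maps);
            if (found)
                    printf("[vdso] at %lx-%lx\n", start, end);
            return !found;
    }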
|
| D | addr_location.c |
    10   al->maps = NULL;   in addr_location__init()
    32   maps__zput(al->maps);   in addr_location__exit()
    38   maps__put(dst->maps);   in addr_location__copy()
    42   dst->maps = maps__get(src->maps);   in addr_location__copy()
|
| D | bpf_lock_contention.c |
    35   bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));   in lock_contention_prepare()
    36   bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);   in lock_contention_prepare()
    37   bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);   in lock_contention_prepare()
    40   bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);   in lock_contention_prepare()
    42   bpf_map__set_max_entries(skel->maps.task_data, 1);   in lock_contention_prepare()
    45   bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);   in lock_contention_prepare()
    47   bpf_map__set_max_entries(skel->maps.stacks, 1);   in lock_contention_prepare()
    96   bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);   in lock_contention_prepare()
    97   bpf_map__set_max_entries(skel->maps.task_filter, ntasks);   in lock_contention_prepare()
    98   bpf_map__set_max_entries(skel->maps.type_filter, ntypes);   in lock_contention_prepare()
    [all …]
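The calls above size BPF skeleton maps at runtime, which is only legal between skeleton open and load. A sketch of that sequence; the lock_contention_bpf skeleton name, header path, and open_and_size() helper are assumptions based on perf's generated-skeleton conventions:

    #include "bpf_skel/lock_contention.skel.h"

    static struct lock_contention_bpf *open_and_size(int max_stack, int nr_entries)
    {
            struct lock_contention_bpf *skel = lock_contention_bpf__open();

            if (!skel)
                    return NULL;
            /* map geometry is only adjustable between __open() and __load() */
            bpf_map__set_value_size(skel->maps.stacks, max_stack * sizeof(u64));
            bpf_map__set_max_entries(skel->maps.lock_stat, nr_entries);
            if (lock_contention_bpf__load(skel)) {
                    lock_contention_bpf__destroy(skel);
                    return NULL;
            }
            return skel;
    }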
|
| D | addr_location.h |
    8    struct maps;
    14   struct maps *maps;   member
|
| D | map_symbol.h |
    7    struct maps;
    12   struct maps *maps;   member
|
| /tools/perf/tests/ |
| D | maps.c |
    48   static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps)   in check_maps() argument
    52   if (maps__nr_maps(maps) != size) {   in check_maps()
    53   pr_debug("Expected %d maps, got %d", size, maps__nr_maps(maps));   in check_maps()
    60   failed = maps__for_each_map(maps, check_maps_cb, &args);   in check_maps()
    69   maps__for_each_map(maps, failed_cb, NULL);   in check_maps()
    103  struct maps *maps = maps__new(NULL);   in test__maps__merge_in() local
    105  TEST_ASSERT_VAL("failed to create maps", maps);   in test__maps__merge_in()
    115  TEST_ASSERT_VAL("failed to insert map", maps__insert(maps, map) == 0);   in test__maps__merge_in()
    140  ret = maps__merge_in(maps, map_kcore1);   in test__maps__merge_in()
    143  ret = check_maps(merged12, ARRAY_SIZE(merged12), maps);   in test__maps__merge_in()
    [all …]
|
| D | thread-maps-share.c |
    15   struct maps *maps;   in test__thread_maps_share() local
    19   struct maps *other_maps;   in test__thread_maps_share()
    45   maps = thread__maps(leader);   in test__thread_maps_share()
    46   TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4);   in test__thread_maps_share()
    49   TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t1)));   in test__thread_maps_share()
    50   TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t2)));   in test__thread_maps_share()
    51   TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t3)));   in test__thread_maps_share()
    80   TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 3);   in test__thread_maps_share()
    83   TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 2);   in test__thread_maps_share()
    86   TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 1);   in test__thread_maps_share()
|
| /tools/testing/selftests/bpf/prog_tests/ |
| D | btf_map_in_map.c |
    38   map1_fd = bpf_map__fd(skel->maps.inner_map1);   in test_lookup_update()
    39   map2_fd = bpf_map__fd(skel->maps.inner_map2);   in test_lookup_update()
    40   map3_fd = bpf_map__fd(skel->maps.inner_map3);   in test_lookup_update()
    41   map4_fd = bpf_map__fd(skel->maps.inner_map4);   in test_lookup_update()
    42   map5_fd = bpf_map__fd(skel->maps.inner_map5);   in test_lookup_update()
    43   outer_arr_dyn_fd = bpf_map__fd(skel->maps.outer_arr_dyn);   in test_lookup_update()
    44   outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);   in test_lookup_update()
    45   outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);   in test_lookup_update()
    100  map1_id = bpf_map_id(skel->maps.inner_map1);   in test_lookup_update()
    101  map2_id = bpf_map_id(skel->maps.inner_map2);   in test_lookup_update()
    [all …]
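An outer array- or hash-of-maps stores its inner maps by file descriptor, which is why the test collects fds with bpf_map__fd(). A sketch of inserting one inner map into an outer map; set_inner() is a hypothetical helper, called e.g. as set_inner(skel->maps.outer_arr, skel->maps.inner_map1, 0):

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    static int set_inner(struct bpf_map *outer, struct bpf_map *inner, int key)
    {
            int inner_fd = bpf_map__fd(inner);
            int outer_fd = bpf_map__fd(outer);

            /* the value of an outer map element is the inner map's fd */
            return bpf_map_update_elem(outer_fd, &key, &inner_fd, 0);
    }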
|
| D | struct_ops_autocreate.c |
    42   link = bpf_map__attach_struct_ops(skel->maps.testmod_1);   in check_test_1_link()
    61   err = bpf_map__set_autocreate(skel->maps.testmod_2, false);   in can_load_partial_object()
    75   check_test_1_link(skel, skel->maps.testmod_1);   in can_load_partial_object()
    90   ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_1), "testmod_1 autocreate");   in optional_maps()
    91   ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_2), "testmod_2 autocreate");   in optional_maps()
    92   ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map), "optional_map autocreate");   in optional_maps()
    93   ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map2), "optional_map2 autocreate");   in optional_maps()
    95   err = bpf_map__set_autocreate(skel->maps.testmod_1, false);   in optional_maps()
    96   err |= bpf_map__set_autocreate(skel->maps.testmod_2, false);   in optional_maps()
    97   err |= bpf_map__set_autocreate(skel->maps.optional_map2, true);   in optional_maps()
    [all …]
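bpf_map__set_autocreate() lets a loader skip creating maps it does not need; like resizing, it must be called before load. A sketch with a hypothetical my_skel skeleton and optional_map name:

    #include "my_skel.skel.h"   /* hypothetical generated skeleton */

    static int load_without_optional_map(void)
    {
            struct my_skel *skel = my_skel__open();
            int err;

            if (!skel)
                    return -1;
            /* skip creating this map; programs relying on it must be off too */
            bpf_map__set_autocreate(skel->maps.optional_map, false);
            err = my_skel__load(skel);
            my_skel__destroy(skel);
            return err;
    }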
|
| D | ringbuf_multi.c |
    56   ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_before");   in test_ringbuf_multi()
    57   ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize");   in test_ringbuf_multi()
    58   ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), 2 * page_size, "rb1_size_after");   in test_ringbuf_multi()
    59   ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset");   in test_ringbuf_multi()
    60   ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_final");   in test_ringbuf_multi()
    66   err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);   in test_ringbuf_multi()
    78   …if (!ASSERT_ERR(bpf_map__set_max_entries(skel->maps.ringbuf1, 3 * page_size), "rb1_resize_after_lo…   in test_ringbuf_multi()
    84   ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1),   in test_ringbuf_multi()
    97   err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),   in test_ringbuf_multi()
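ring_buffer__new() builds a consumer around one ring buffer map fd, and ring_buffer__add() multiplexes further rings onto the same object. A sketch of consuming two rings; handle_event() and drain_rings() are hypothetical names, and the fds would come from bpf_map__fd() as above:

    #include <bpf/libbpf.h>

    static int handle_event(void *ctx, void *data, size_t size)
    {
            return 0;       /* a negative return aborts the poll loop */
    }

    static void drain_rings(int rb1_fd, int rb2_fd)
    {
            struct ring_buffer *rb;

            rb = ring_buffer__new(rb1_fd, handle_event, NULL, NULL);
            if (!rb)
                    return;
            ring_buffer__add(rb, rb2_fd, handle_event, NULL);
            ring_buffer__poll(rb, 100 /* timeout, ms */);
            ring_buffer__free(rb);
    }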
|
| D | bpf_tcp_ca.c |
    109  link = bpf_map__attach_struct_ops(cubic_skel->maps.cubic);   in test_cubic()
    161  link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);   in test_dctcp()
    167  cb_opts.map_fd = bpf_map__fd(dctcp_skel->maps.sk_stg_map);   in test_dctcp()
    204  bpf_map__set_autoattach(dctcp_skel->maps.dctcp, true);   in test_dctcp_autoattach_map()
    205  bpf_map__set_autoattach(dctcp_skel->maps.dctcp_nouse, false);   in test_dctcp_autoattach_map()
    292  link = bpf_map__attach_struct_ops(dctcp_skel->maps.dctcp);   in test_dctcp_fallback()
    352  link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);   in test_write_sk_pacing()
    371  link = bpf_map__attach_struct_ops(skel->maps.incompl_cong_ops);   in test_incompl_cong_ops()
    413  link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);   in test_update_ca()
    421  err = bpf_link__update_map(link, skel->maps.ca_update_2);   in test_update_ca()
    [all …]
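bpf_map__attach_struct_ops() registers a struct_ops map (here, a congestion-control algorithm) with the kernel, and bpf_link__update_map() later swaps a different map onto the same link. A sketch; switch_ca() is a hypothetical wrapper around the calls the test makes:

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int switch_ca(struct bpf_map *ca_old, struct bpf_map *ca_new)
    {
            struct bpf_link *link = bpf_map__attach_struct_ops(ca_old);
            int err;

            if (!link)
                    return -errno;
            /* atomically replace the ops backing this link */
            err = bpf_link__update_map(link, ca_new);
            bpf_link__destroy(link);
            return err;
    }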
|
| D | stacktrace_build_id.c |
    27   control_map_fd = bpf_map__fd(skel->maps.control_map);   in test_stacktrace_build_id()
    28   stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);   in test_stacktrace_build_id()
    29   stackmap_fd = bpf_map__fd(skel->maps.stackmap);   in test_stacktrace_build_id()
    30   stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);   in test_stacktrace_build_id()
    61   err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));   in test_stacktrace_build_id()
    78   } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);   in test_stacktrace_build_id()
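The loop at lines 61 and 78 is the standard full-map scan: passing NULL as the current key yields the first key, and -ENOENT marks the end. A sketch of the same loop as a reusable helper; walk_keys() is a hypothetical name:

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int walk_keys(struct bpf_map *map)
    {
            __u32 key, prev_key;
            int err = bpf_map__get_next_key(map, NULL, &key, sizeof(key));

            while (!err) {
                    /* ... look the key up, print it, etc. ... */
                    prev_key = key;
                    err = bpf_map__get_next_key(map, &prev_key, &key,
                                                sizeof(key));
            }
            return err == -ENOENT ? 0 : err;    /* -ENOENT: clean end */
    }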
|
| D | for_each.c |
    30   max_entries = bpf_map__max_entries(skel->maps.hashmap);   in test_hash_map()
    34   err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),   in test_hash_map()
    49   err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),   in test_hash_map()
    64   err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);   in test_hash_map()
    97   max_entries = bpf_map__max_entries(skel->maps.arraymap);   in test_array_map()
    104  err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),   in test_array_map()
    119  err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),   in test_array_map()
    165  max_entries = bpf_map__max_entries(skel->maps.arraymap);   in test_multi_maps()
    170  err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),   in test_multi_maps()
    177  max_entries = bpf_map__max_entries(skel->maps.hashmap);   in test_multi_maps()
    [all …]
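bpf_map__update_elem() and bpf_map__lookup_elem() are the size-checked, skeleton-friendly wrappers; mismatched key or value sizes fail with -EINVAL rather than corrupting data. A sketch of a read-modify-write using them; bump() is a hypothetical helper and assumes a u32-keyed, u64-valued map:

    #include <errno.h>
    #include <bpf/libbpf.h>

    static int bump(struct bpf_map *map, __u32 key)
    {
            __u64 val = 0;
            int err = bpf_map__lookup_elem(map, &key, sizeof(key),
                                           &val, sizeof(val), 0);

            if (err && err != -ENOENT)
                    return err;     /* a missing key just starts from zero */
            val++;
            return bpf_map__update_elem(map, &key, sizeof(key),
                                        &val, sizeof(val), BPF_ANY);
    }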
|
| D | recursion.c |
    23   bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key);   in test_recursion()
    25   bpf_map_delete_elem(bpf_map__fd(skel->maps.hash1), &key);   in test_recursion()
    29   bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key);   in test_recursion()
    31   bpf_map_delete_elem(bpf_map__fd(skel->maps.hash2), &key);   in test_recursion()
|
| D | cg_storage_multi.c |
    119  if (assert_storage(obj->maps.cgroup_storage,   in test_egress_only()
    123  if (assert_storage_noexist(obj->maps.cgroup_storage, &key))   in test_egress_only()
    142  if (assert_storage(obj->maps.cgroup_storage,   in test_egress_only()
    147  if (assert_storage(obj->maps.cgroup_storage,   in test_egress_only()
    198  if (assert_storage(obj->maps.cgroup_storage,   in test_isolated()
    203  if (assert_storage(obj->maps.cgroup_storage,   in test_isolated()
    208  if (assert_storage_noexist(obj->maps.cgroup_storage, &key))   in test_isolated()
    211  if (assert_storage_noexist(obj->maps.cgroup_storage, &key))   in test_isolated()
    240  if (assert_storage(obj->maps.cgroup_storage,   in test_isolated()
    245  if (assert_storage(obj->maps.cgroup_storage,   in test_isolated()
    [all …]
|
| D | global_map_resize.c |
    38   map = skel->maps.bss;   in global_map_resize_bss_subtest()
    46   err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);   in global_map_resize_bss_subtest()
    54   skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);   in global_map_resize_bss_subtest()
    108  map = skel->maps.data_custom;   in global_map_resize_data_subtest()
    116  err = bpf_map__set_value_size(skel->maps.data_percpu_arr, new_sz);   in global_map_resize_data_subtest()
    124  skel->data_custom = bpf_map__initial_value(skel->maps.data_custom, &actual_sz);   in global_map_resize_data_subtest()
    173  map = skel->maps.data_custom;   in global_map_resize_invalid_subtest()
    190  map = skel->maps.data_non_array;   in global_map_resize_invalid_subtest()
    205  map = skel->maps.data_array_not_last;   in global_map_resize_invalid_subtest()
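bpf_map__set_value_size() resizes a global-data map before load, after which the skeleton's old data pointer is stale and must be refreshed with bpf_map__initial_value(). A sketch; the my_skel skeleton type and resize_bss() helper are hypothetical:

    #include <errno.h>
    #include "my_skel.skel.h"   /* hypothetical generated skeleton */

    static int resize_bss(struct my_skel *skel, size_t new_sz)
    {
            size_t actual_sz;
            int err = bpf_map__set_value_size(skel->maps.bss, new_sz);

            if (err)
                    return err;
            err = my_skel__load(skel);
            if (err)
                    return err;
            /* the pre-resize skel->bss pointer is stale; re-fetch it */
            skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz);
            return actual_sz >= new_sz ? 0 : -EINVAL;
    }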
|
| D | stacktrace_build_id_nmi.c |
    55   control_map_fd = bpf_map__fd(skel->maps.control_map);   in test_stacktrace_build_id_nmi()
    56   stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);   in test_stacktrace_build_id_nmi()
    57   stackmap_fd = bpf_map__fd(skel->maps.stackmap);   in test_stacktrace_build_id_nmi()
    88   err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));   in test_stacktrace_build_id_nmi()
    94   err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),   in test_stacktrace_build_id_nmi()
    106  } while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);   in test_stacktrace_build_id_nmi()
|
| D | map_kptr.c |
    51   ret = bpf_map__update_elem(skel->maps.array_map,   in test_map_kptr_success()
    59   ret = bpf_map__update_elem(skel->maps.pcpu_array_map,   in test_map_kptr_success()
    67   ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);   in test_map_kptr_success()
    74   ret = bpf_map__delete_elem(skel->maps.pcpu_hash_map, &key, sizeof(key), 0);   in test_map_kptr_success()
    81   ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);   in test_map_kptr_success()
    88   ret = bpf_map__delete_elem(skel->maps.pcpu_hash_malloc_map, &key, sizeof(key), 0);   in test_map_kptr_success()
    95   ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);   in test_map_kptr_success()
    102  ret = bpf_map__delete_elem(skel->maps.lru_pcpu_hash_map, &key, sizeof(key), 0);   in test_map_kptr_success()
|
| /tools/perf/arch/x86/util/ |
| D | machine.c |
    20   struct extra_kernel_map *maps;   member
    34   buf = realloc(mi->maps, sz);   in add_extra_kernel_map()
    37   mi->maps = buf;   in add_extra_kernel_map()
    40   mi->maps[mi->cnt].start = start;   in add_extra_kernel_map()
    41   mi->maps[mi->cnt].end = end;   in add_extra_kernel_map()
    42   mi->maps[mi->cnt].pgoff = pgoff;   in add_extra_kernel_map()
    43   strlcpy(mi->maps[mi->cnt].name, name, KMAP_NAME_LEN);   in add_extra_kernel_map()
    91   struct extra_kernel_map *xm = &mi.maps[i];   in machine__create_extra_kernel_maps()
    101  free(mi.maps);   in machine__create_extra_kernel_maps()
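add_extra_kernel_map() grows its array one element per call with realloc(), assigning through a temporary so the old block survives an allocation failure. A standalone sketch of the same pattern with simplified, illustrative field names:

    #include <stdlib.h>

    struct range { unsigned long start, end; };

    struct range_info {
            struct range *maps;
            int cnt;
    };

    static int add_range(struct range_info *mi, unsigned long start,
                         unsigned long end)
    {
            size_t sz = (mi->cnt + 1) * sizeof(*mi->maps);
            struct range *buf = realloc(mi->maps, sz);

            if (!buf)
                    return -1;      /* the old mi->maps block is still valid */
            mi->maps = buf;
            mi->maps[mi->cnt].start = start;
            mi->maps[mi->cnt].end = end;
            mi->cnt++;
            return 0;
    }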
|
| /tools/testing/selftests/exec/ |
| D | load_address.c |
    56   FILE *maps;   in main() local
    63   maps = fopen("/proc/self/maps", "r");   in main()
    64   if (!maps)   in main()
    66   while (fgets(buf, sizeof(buf), maps)) {   in main()
    69   fclose(maps);   in main()
|
| /tools/testing/selftests/mm/ |
| D | seal_elf.c |
    64   FILE *maps;   in test_seal_elf() local
    88   maps = fopen("/proc/self/maps", "r");   in test_seal_elf()
    89   FAIL_TEST_IF_FALSE(maps);   in test_seal_elf()
    94   while (fgets(line, sizeof(line), maps)) {   in test_seal_elf()
    113  fclose(maps);   in test_seal_elf()
|