// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

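/* Read the kernel's current perf sampling-frequency limit so the perf event
 * opened below never asks for more than perf_event_max_sample_rate allows.
 */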
static __u64 read_perf_max_sample_freq(void)
{
	__u64 sample_freq = 5000; /* fallback to 5000 on error */
	FILE *f;

	f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
	if (f == NULL)
		return sample_freq;
	if (fscanf(f, "%llu", &sample_freq) != 1)
		/* keep the fallback value if the file cannot be parsed */
		sample_freq = 5000;
	fclose(f);
	return sample_freq;
}

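/* Attach test_stacktrace_build_id.o to a hardware cycles perf event, run
 * ./urandom_read while samples are being taken, and verify that the stack
 * map resolves user frames to the binary's build ID.
 */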
void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *prog_name = "tracepoint/random/urandom_read";
	const char *file = "./test_stacktrace_build_id.o";
	int err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_link *link;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;
	int retry = 1;

	attr.sample_freq = read_perf_max_sample_freq();

retry:
	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (CHECK(!prog, "find_prog", "prog '%s' not found\n", prog_name))
		goto close_prog;

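	/* Open a hardware cycles event; its samples are typically delivered
	 * in NMI context, which is the code path exercised here.
	 */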
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open",
		  "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
		  pmu_fd, errno))
		goto close_prog;

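	/* Attach the BPF program to the perf event so it runs on every
	 * sampled cycle event.
	 */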
	link = bpf_program__attach_perf_event(prog, pmu_fd);
	if (CHECK(IS_ERR(link), "attach_perf_event",
		  "err %ld\n", PTR_ERR(link))) {
		close(pmu_fd);
		goto close_prog;
	}

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

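	/* Generate load: read urandom via dd and run ./urandom_read pinned to
	 * CPU 0 (the CPU the perf event was opened on) so its frames show up
	 * in the sampled stack traces.
	 */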
	if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")))
		goto disable_pmu;
	if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000")))
		goto disable_pmu;
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

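	/* Read the expected build ID of ./urandom_read (extracted with
	 * readelf) into buf for comparison below.
	 */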
	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

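	/* Walk every stackmap entry; for each frame that was resolved to a
	 * valid build ID, hex-encode it and compare it against the build ID
	 * read from the binary.
	 */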
	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	/* stack_map_get_build_id_offset() is racy and sometimes can return
	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */
	if (build_id_matches < 1 && retry--) {
		bpf_link__destroy(link);
		bpf_object__close(obj);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;

	/*
	 * We intentionally skip compare_stack_ips(). This is because we
	 * only support one in_nmi() ips-to-build_id translation per cpu
	 * at any time, thus stack_amap here will always fall back to
	 * BPF_STACK_BUILD_ID_IP.
	 */

disable_pmu:
	bpf_link__destroy(link);
close_prog:
	bpf_object__close(obj);
}