// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <util/record.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <api/fs/fs.h>
#include <bpf/bpf.h>
#include <perf/mmap.h>
#include "tests.h"
#include "llvm.h"
#include "debug.h"
#include "parse-events.h"
#include "util/mmap.h"
#define NR_ITERS       111
#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"

#ifdef HAVE_LIBBPF_SUPPORT
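
/*
 * Workload for the basic filtering tests: every epoll_pwait() call is
 * made with an invalid (negative) fd, so each of the NR_ITERS calls
 * fails with EBADF.  The BPF program built by 'perf test LLVM' is
 * expected to let every other call through, matching the table's
 * expect_result of (NR_ITERS + 1) / 2.
 */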
static int epoll_pwait_loop(void)
{
	int i;

	/* Should fail NR_ITERS times */
	for (i = 0; i < NR_ITERS; i++)
		epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
	return 0;
}

#ifdef HAVE_BPF_PROLOGUE
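
/*
 * Workload for the prologue test: alternate between two file
 * descriptors and between SEEK_CUR and SEEK_SET, so that a BPF
 * prologue fetching the lseek() arguments sees varying values to
 * filter on.
 */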
static int llseek_loop(void)
{
	int fds[2], i;

	fds[0] = open("/dev/null", O_RDONLY);
	fds[1] = open("/dev/null", O_RDWR);

	if (fds[0] < 0 || fds[1] < 0)
		return -1;

	for (i = 0; i < NR_ITERS; i++) {
		lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
		lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
	}
	close(fds[0]);
	close(fds[1]);
	return 0;
}

#endif
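
/*
 * One entry per subtest: which LLVM-built test program to use, the
 * workload that triggers it, and how many samples the filter is
 * expected to let through.  An entry without a target_func (the
 * relocation test) is expected to fail at load time instead.
 */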
static struct {
	enum test_llvm__testcase prog_id;
	const char *desc;
	const char *name;
	const char *msg_compile_fail;
	const char *msg_load_fail;
	int (*target_func)(void);
	int expect_result;
	bool	pin;
} bpf_testcase_table[] = {
	{
		.prog_id	  = LLVM_TESTCASE_BASE,
		.desc		  = "Basic BPF filtering",
		.name		  = "[basic_bpf_test]",
		.msg_compile_fail = "fix 'perf test LLVM' first",
		.msg_load_fail	  = "load bpf object failed",
		.target_func	  = &epoll_pwait_loop,
		.expect_result	  = (NR_ITERS + 1) / 2,
	},
	{
		.prog_id	  = LLVM_TESTCASE_BASE,
		.desc		  = "BPF pinning",
		.name		  = "[bpf_pinning]",
		.msg_compile_fail = "fix kbuild first",
		.msg_load_fail	  = "check your vmlinux setting?",
		.target_func	  = &epoll_pwait_loop,
		.expect_result	  = (NR_ITERS + 1) / 2,
		.pin		  = true,
	},
#ifdef HAVE_BPF_PROLOGUE
	{
		.prog_id	  = LLVM_TESTCASE_BPF_PROLOGUE,
		.desc		  = "BPF prologue generation",
		.name		  = "[bpf_prologue_test]",
		.msg_compile_fail = "fix kbuild first",
		.msg_load_fail	  = "check your vmlinux setting?",
		.target_func	  = &llseek_loop,
		.expect_result	  = (NR_ITERS + 1) / 4,
	},
#endif
	{
		.prog_id	  = LLVM_TESTCASE_BPF_RELOCATION,
		.desc		  = "BPF relocation checker",
		.name		  = "[bpf_relocation_test]",
		.msg_compile_fail = "fix 'perf test LLVM' first",
		.msg_load_fail	  = "libbpf error when dealing with relocation",
	},
};
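
/*
 * Record the events defined in @obj while running @func in this
 * process, then count the PERF_RECORD_SAMPLE records and compare the
 * total against @expect samples per event.
 */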
static int do_test(struct bpf_object *obj, int (*func)(void),
		   int expect)
{
	struct record_opts opts = {
		.target = {
			.uid = UINT_MAX,
			.uses_mmap = true,
		},
		.freq	      = 0,
		.mmap_pages   = 256,
		.default_interval = 1,
	};

	char pid[16];
	char sbuf[STRERR_BUFSIZE];
	struct evlist *evlist;
	int i, ret = TEST_FAIL, err = 0, count = 0;

	struct parse_events_state parse_state;
	struct parse_events_error parse_error;
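
	/* Build the event list from the programs in the BPF object. */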
	bzero(&parse_error, sizeof(parse_error));
	bzero(&parse_state, sizeof(parse_state));
	parse_state.error = &parse_error;
	INIT_LIST_HEAD(&parse_state.list);

	err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
	if (err || list_empty(&parse_state.list)) {
		pr_debug("Failed to add events selected by BPF\n");
		return TEST_FAIL;
	}
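
	/* Trace only this process; the workload runs in the test itself. */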
	snprintf(pid, sizeof(pid), "%d", getpid());
	pid[sizeof(pid) - 1] = '\0';
	opts.target.tid = opts.target.pid = pid;

	/* Instead of perf_evlist__new_default, don't add default events */
	evlist = evlist__new();
	if (!evlist) {
		pr_debug("Not enough memory to create evlist\n");
		return TEST_FAIL;
	}

	err = perf_evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	perf_evlist__splice_list_tail(evlist, &parse_state.list);
	evlist->nr_groups = parse_state.nr_groups;

	perf_evlist__config(evlist, &opts, NULL);

	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	err = evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}
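
	/* Count events only while the workload is running. */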
	evlist__enable(evlist);
	(*func)();
	evlist__disable(evlist);
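
	/* Walk each mmap ring buffer and count the samples that got through. */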
	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		union perf_event *event;
		struct mmap *md;

		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			const u32 type = event->header.type;

			if (type == PERF_RECORD_SAMPLE)
				count++;
		}
		perf_mmap__read_done(&md->core);
	}

	if (count != expect * evlist->core.nr_entries) {
		pr_debug("BPF filter result incorrect, expected %d, got %d samples\n",
			 expect * evlist->core.nr_entries, count);
		goto out_delete_evlist;
	}

	ret = TEST_OK;

out_delete_evlist:
	evlist__delete(evlist);
	return ret;
}
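
/*
 * Turn a compiled BPF object buffer into a struct bpf_object, mapping
 * the ERR_PTR convention of bpf__prepare_load_buffer() to a plain
 * NULL on failure.
 */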
static struct bpf_object *
prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR(obj)) {
		pr_debug("Compile BPF program failed.\n");
		return NULL;
	}
	return obj;
}
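
/*
 * Run one entry of bpf_testcase_table: fetch the object compiled by
 * 'perf test LLVM', load it, then either run the workload and verify
 * the sample count, or, for entries without a target_func, treat a
 * failed load as the expected outcome.  The pinning subtest also pins
 * the object under /sys/fs/bpf and removes it again.
 */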
static int __test__bpf(int idx)
{
	int ret;
	void *obj_buf;
	size_t obj_buf_sz;
	struct bpf_object *obj;

	ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
				       bpf_testcase_table[idx].prog_id,
				       true, NULL);
	if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
		pr_debug("Unable to get BPF object, %s\n",
			 bpf_testcase_table[idx].msg_compile_fail);
		if (idx == 0)
			return TEST_SKIP;
		else
			return TEST_FAIL;
	}

	obj = prepare_bpf(obj_buf, obj_buf_sz,
			  bpf_testcase_table[idx].name);
	if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
		if (!obj)
			pr_debug("Failed to load BPF object: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		else
			pr_debug("Unexpected success: %s\n",
				 bpf_testcase_table[idx].msg_load_fail);
		ret = TEST_FAIL;
		goto out;
	}

	if (obj) {
		ret = do_test(obj,
			      bpf_testcase_table[idx].target_func,
			      bpf_testcase_table[idx].expect_result);
		if (ret != TEST_OK)
			goto out;
		if (bpf_testcase_table[idx].pin) {
			int err;

			if (!bpf_fs__mount()) {
				pr_debug("BPF filesystem not mounted\n");
				ret = TEST_FAIL;
				goto out;
			}
			err = mkdir(PERF_TEST_BPF_PATH, 0777);
			if (err && errno != EEXIST) {
				pr_debug("Failed to make perf_test dir: %s\n",
					 strerror(errno));
				ret = TEST_FAIL;
				goto out;
			}
			if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
			if (rm_rf(PERF_TEST_BPF_PATH))
				ret = TEST_FAIL;
		}
	}

out:
	free(obj_buf);
	bpf__clear();
	return ret;
}
int test__bpf_subtest_get_nr(void)
{
	return (int)ARRAY_SIZE(bpf_testcase_table);
}
const char *test__bpf_subtest_get_desc(int i)
{
	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return NULL;
	return bpf_testcase_table[i].desc;
}
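
/*
 * Probe for basic BPF support by loading a trivial "return 1" kprobe
 * program; callers skip the whole test if the kernel rejects it.
 */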
static int check_env(void)
{
	int err;
	unsigned int kver_int;
	char license[] = "GPL";

	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	};

	err = fetch_kernel_version(&kver_int, NULL, 0);
	if (err) {
		pr_debug("Unable to get kernel version\n");
		return err;
	}

	err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
			       ARRAY_SIZE(insns),
			       license, kver_int, NULL, 0);
	if (err < 0) {
		pr_err("Missing basic BPF support, skip this test: %s\n",
		       strerror(errno));
		return err;
	}
	/* bpf_load_program() returns a prog fd on success; just close it. */
	close(err);

	return 0;
}
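
/* Subtest entry point: needs root and working kernel BPF support. */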
int test__bpf(struct test *test __maybe_unused, int i)
{
	int err;

	if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
		return TEST_FAIL;

	if (geteuid() != 0) {
		pr_debug("Only root can run BPF test\n");
		return TEST_SKIP;
	}

	if (check_env())
		return TEST_SKIP;

	err = __test__bpf(i);
	return err;
}

#else
int test__bpf_subtest_get_nr(void)
{
	return 0;
}

const char *test__bpf_subtest_get_desc(int i __maybe_unused)
{
	return NULL;
}

int test__bpf(struct test *test __maybe_unused, int i __maybe_unused)
{
	pr_debug("Skip BPF test because BPF support is not compiled in\n");
	return TEST_SKIP;
}
#endif