// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "test_attach_probe.skel.h"

/* this is how a USDT semaphore is actually defined, except for the volatile modifier */
volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes")));

void test_attach_probe(void)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts);
	int duration = 0;
	struct bpf_link *kprobe_link, *kretprobe_link;
	struct bpf_link *uprobe_link, *uretprobe_link;
	struct test_attach_probe *skel;
	size_t uprobe_offset;
	ssize_t base_addr, ref_ctr_offset;

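	/*
	 * Find this binary's runtime base address and the file offset of
	 * get_base_addr(), which is used as the uprobe attach target below.
	 */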
	base_addr = get_base_addr();
	if (CHECK(base_addr < 0, "get_base_addr",
		  "failed to find base addr: %zd", base_addr))
		return;
	uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr);

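	/* file offset of the USDT semaphore, passed as ref_ctr_offset below */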
	ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr);
	if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset"))
		return;

	skel = test_attach_probe__open_and_load();
	if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
		return;
	if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n"))
		goto cleanup;

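	/*
	 * attach kprobe/kretprobe to the nanosleep syscall; store the links
	 * in the skeleton so test_attach_probe__destroy() detaches them
	 */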
	kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe,
						 false /* retprobe */,
						 SYS_NANOSLEEP_KPROBE_NAME);
	if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe"))
		goto cleanup;
	skel->links.handle_kprobe = kprobe_link;

	kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe,
						    true /* retprobe */,
						    SYS_NANOSLEEP_KPROBE_NAME);
	if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe"))
		goto cleanup;
	skel->links.handle_kretprobe = kretprobe_link;

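	/* USDT semaphore stays zero until a uprobe with ref_ctr_offset attaches */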
	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before");

	uprobe_opts.retprobe = false;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
						      0 /* self pid */,
						      "/proc/self/exe",
						      uprobe_offset,
						      &uprobe_opts);
	if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe"))
		goto cleanup;
	skel->links.handle_uprobe = uprobe_link;

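	/* attaching the uprobe with ref_ctr_offset bumps the USDT semaphore */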
	ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after");

	/* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */
	uprobe_opts.retprobe = true;
	uprobe_opts.ref_ctr_offset = ref_ctr_offset;
	uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe,
							 -1 /* any pid */,
							 "/proc/self/exe",
							 uprobe_offset, &uprobe_opts);
	if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe"))
		goto cleanup;
	skel->links.handle_uretprobe = uretprobe_link;

	/* trigger & validate kprobe && kretprobe */
	usleep(1);

	if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res",
		  "wrong kprobe res: %d\n", skel->bss->kprobe_res))
		goto cleanup;
	if (CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res",
		  "wrong kretprobe res: %d\n", skel->bss->kretprobe_res))
		goto cleanup;

	/* trigger & validate uprobe & uretprobe */
	get_base_addr();

	if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res",
		  "wrong uprobe res: %d\n", skel->bss->uprobe_res))
		goto cleanup;
	if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res",
		  "wrong uretprobe res: %d\n", skel->bss->uretprobe_res))
		goto cleanup;

cleanup:
	test_attach_probe__destroy(skel);
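	/* detaching the uprobes should drop the USDT semaphore back to zero */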
	ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup");
}