// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>

#define MAX_CNT_RAWTP 10ull
#define MAX_STACK_RAWTP 100

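/* referenced by the CHECK() macro from test_progs.h */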
static int duration = 0;

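/* Userspace mirror of the event struct filled in by the BPF side
 * (progs/test_get_stack_rawtp.c); the two layouts must match.
 */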
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

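/* perf_buffer sample callback, invoked once per submitted sample */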
static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	/* perfbuf-submitted data is 4-byte aligned, but we need 8-byte
	 * alignment, so copy data into a local variable, for simplicity
	 */
	struct get_stack_trace_t e;
	int i, num_stack;
	static __u64 cnt;
	struct ksym *ks;

	cnt++;

	memset(&e, 0, sizeof(e));
	memcpy(&e, data, size <= sizeof(e) ? size : sizeof(e));

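	/* Samples smaller than the struct are treated as a raw stack dump
	 * (user and kernel frames back to back); full-sized samples carry
	 * a struct get_stack_trace_t.
	 */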
	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (env.jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
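		/* full struct: check the kernel stack the same way, then
		 * accept the user stack if both size fields are positive
		 */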
		num_stack = e.kern_stack_size / sizeof(__u64);
		if (env.jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e.kern_stack[i]);
				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e.user_stack_size > 0 && e.user_stack_buildid_size > 0)
			good_user_stack = true;
	}

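	/* only report failures; an unconditional CHECK() would print a
	 * PASS line for every sample
	 */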
	if (!good_kern_stack)
		CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
	if (!good_user_stack)
		CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
}

void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	const char *file_err = "./test_get_stack_rawtp_err.o";
	const char *prog_name = "bpf_prog1";
	int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
	struct perf_buffer *pb = NULL;
	struct bpf_link *link = NULL;
	struct timespec tv = {0, 10};
	struct bpf_program *prog;
	struct bpf_object *obj;
	struct bpf_map *map;
	cpu_set_t cpu_set;

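	/* negative test: the _err object is expected to be rejected at
	 * load time, so a successful load (err >= 0) is a failure
	 */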
	err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	prog = bpf_object__find_program_by_name(obj, prog_name);
	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
		goto close_prog;

	map = bpf_object__find_map_by_name(obj, "perfmap");
	if (CHECK(!map, "bpf_find_map", "not found\n"))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;

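	/* pin to CPU 0 so the triggering syscalls, and hence their
	 * samples, stay on a single per-CPU perf buffer
	 */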
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
		goto close_prog;

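	/* attach to the sys_enter raw tracepoint; every syscall from now
	 * on triggers bpf_prog1
	 */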
	link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
	if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
		goto close_prog;

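	/* 8 ring-buffer pages per CPU; samples are delivered to
	 * get_stack_print_output()
	 */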
	pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
			      NULL, NULL, NULL);
	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
		goto close_prog;

	/* trigger some syscall action */
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

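	/* keep polling until the expected event count has been consumed;
	 * the err < 0 && CHECK() pattern avoids a PASS line per poll
	 */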
	while (exp_cnt > 0) {
		err = perf_buffer__poll(pb, 100);
		if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err))
			goto close_prog;
		exp_cnt -= err;
	}

close_prog:
	bpf_link__destroy(link);
	perf_buffer__free(pb);
	bpf_object__close(obj);
}