// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/nsproxy.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

#define CONNECT_TIMEOUT_SEC 1

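/* These nested function-pointer typedefs are referenced only via
 * BTF_TYPE_EMIT() in bpf_testmod_test_btf_type_tag_user_1() below, so
 * that they are emitted into the module's BTF for the selftests to find.
 */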
typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;
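
/* Module-global kernel socket shared by the bpf_kfunc_init_sock(),
 * bpf_kfunc_close_sock() and bpf_kfunc_call_kernel_*() kfuncs below;
 * sock_lock serializes its creation, use and release.
 */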
static DEFINE_MUTEX(sock_lock);
static struct socket *sock;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

struct bpf_testmod_struct_arg_5 {
	char a;
	short b;
	int c;
	long d;
};

__bpf_hook_start();

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f,
			      short g, struct bpf_testmod_struct_arg_5 h, long i)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e +
		f + g + h.a + h.b + h.c + h.d + i;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	it->cnt = cnt;

	if (cnt < 0)
		return -EINVAL;

	it->value = value;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc s64 bpf_iter_testmod_seq_value(int val, struct bpf_iter_testmod_seq *it__iter)
{
	if (it__iter->cnt < 0)
		return 0;

	return val + it__iter->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}
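
/* The new/next/destroy kfuncs above form an open-coded iterator,
 * registered with KF_ITER_NEW/KF_ITER_NEXT/KF_ITER_DESTROY below. A
 * minimal BPF-side usage sketch (assuming the declarations from
 * bpf_testmod_kfunc.h):
 *
 *	struct bpf_iter_testmod_seq it;
 *	s64 *v, sum = 0;
 *
 *	if (!bpf_iter_testmod_seq_new(&it, 100, 3))
 *		while ((v = bpf_iter_testmod_seq_next(&it)))
 *			sum += *v;
 *	bpf_iter_testmod_seq_destroy(&it);
 *
 * which leaves sum == 300: three iterations, each yielding the value 100.
 */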

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr,
				       struct bpf_dynptr *ptr__nullable)
{
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head *ptr)
{
	return NULL;
}

__bpf_kfunc struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr)
{
	return NULL;
}

__bpf_kfunc void bpf_kfunc_nested_release_test(struct sk_buff *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_vma_test(struct vm_area_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc void bpf_kfunc_trusted_num_test(int *ptr)
{
}

__bpf_kfunc void bpf_kfunc_rcu_task_test(struct task_struct *ptr)
{
}

__bpf_kfunc struct bpf_testmod_ctx *
bpf_testmod_ctx_create(int *err)
{
	struct bpf_testmod_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx) {
		*err = -ENOMEM;
		return NULL;
	}
	refcount_set(&ctx->usage, 1);

	return ctx;
}

static void testmod_free_cb(struct rcu_head *head)
{
	struct bpf_testmod_ctx *ctx;

	ctx = container_of(head, struct bpf_testmod_ctx, rcu);
	kfree(ctx);
}

__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx)
{
	if (!ctx)
		return;
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, testmod_free_cb);
}
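
/* bpf_testmod_ctx_create() and bpf_testmod_ctx_release() model a
 * standard acquire/release pair (KF_ACQUIRE/KF_RELEASE below): objects
 * start with a refcount of one, and the final put defers the kfree()
 * through call_rcu() so that concurrent RCU readers never observe a
 * freed object.
 */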

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so smart compilers, such as clang, will not
	 * optimize the code by removing the loop.
	 */
	volatile int sum = 0;
	int i;

	/* The primary goal of this test is to exercise LBR. Create a lot
	 * of branches in the function, so we can catch it easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}
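
/* Consumed by bpf_testmod_test_read() below, which calls this in a loop
 * until it returns NULL, handing attached tracing programs a spread of
 * user, invalid, non-canonical and valid kernel pointers to dereference.
 */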

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;
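
/* Set to 1 only once all bpf_testmod_fentry_test*() calls in
 * bpf_testmod_test_read() have returned the expected sums; presumably
 * checked by the fentry selftests to confirm the calls really ran.
 */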

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);
	(void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20,
					    21, 22, struct_arg5, 27);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	(void)trace_bpf_testmod_test_raw_tp_null(NULL);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
			       sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	trace_bpf_testmod_test_nullable_bare(NULL);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
				     21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
				      21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};
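
/* The attribute above is created on kernel_kobj, i.e. as
 * /sys/kernel/bpf_testmod; reading or writing that file drives the test
 * functions and tracepoints above.
 */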

/* bpf_testmod_uprobe sysfs attribute is so far enabled for x86_64 only,
 * please see test_uretprobe_regs_change test
 */
#ifdef __x86_64__

static int
uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func,
		   struct pt_regs *regs)
{
	regs->ax = 0x12345678deadbeef;
	regs->cx = 0x87654321feebdaed;
	regs->r11 = (u64) -1;
	return true;
}

struct testmod_uprobe {
	struct path path;
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static DEFINE_MUTEX(testmod_uprobe_mutex);

static struct testmod_uprobe uprobe = {
	.consumer.ret_handler = uprobe_ret_handler,
};

static int testmod_register_uprobe(loff_t offset)
{
	int err = -EBUSY;

	if (uprobe.uprobe)
		return -EBUSY;

	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe)
		goto out;

	err = kern_path("/proc/self/exe", LOOKUP_FOLLOW, &uprobe.path);
	if (err)
		goto out;

	uprobe.uprobe = uprobe_register(d_real_inode(uprobe.path.dentry),
					offset, 0, &uprobe.consumer);
	if (IS_ERR(uprobe.uprobe)) {
		err = PTR_ERR(uprobe.uprobe);
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}
out:
	mutex_unlock(&testmod_uprobe_mutex);
	return err;
}

static void testmod_unregister_uprobe(void)
{
	mutex_lock(&testmod_uprobe_mutex);

	if (uprobe.uprobe) {
		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
		uprobe_unregister_sync();
		path_put(&uprobe.path);
		uprobe.uprobe = NULL;
	}

	mutex_unlock(&testmod_uprobe_mutex);
}

static ssize_t
bpf_testmod_uprobe_write(struct file *file, struct kobject *kobj,
			 struct bin_attribute *bin_attr,
			 char *buf, loff_t off, size_t len)
{
	unsigned long offset = 0;
	int err = 0;

	if (kstrtoul(buf, 0, &offset))
		return -EINVAL;

	if (offset)
		err = testmod_register_uprobe(offset);
	else
		testmod_unregister_uprobe();

	return err ?: strlen(buf);
}

static struct bin_attribute bin_attr_bpf_testmod_uprobe_file __ro_after_init = {
	.attr = { .name = "bpf_testmod_uprobe", .mode = 0666, },
	.write = bpf_testmod_uprobe_write,
};

static int register_bpf_testmod_uprobe(void)
{
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

static void unregister_bpf_testmod_uprobe(void)
{
	testmod_unregister_uprobe();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_uprobe_file);
}

#else
static int register_bpf_testmod_uprobe(void)
{
	return 0;
}

static void unregister_bpf_testmod_uprobe(void) { }
#endif

BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_value)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_nonzero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_acquire_zero_offset_test, KF_ACQUIRE)
BTF_ID_FLAGS(func, bpf_kfunc_nested_release_test, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_vma_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_task_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_trusted_num_test, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_rcu_task_test, KF_RCU)
BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

BTF_ID_LIST(bpf_testmod_dtor_ids)
BTF_ID(struct, bpf_testmod_ctx)
BTF_ID(func, bpf_testmod_ctx_release)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* the next 2 ones can't really be used for testing except to ensure
 * that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
{
}
__bpf_kfunc int bpf_kfunc_init_sock(struct init_sock_args *args)
{
	int proto;
	int err;

	mutex_lock(&sock_lock);

	if (sock) {
		pr_err("%s called without releasing old sock", __func__);
		err = -EPERM;
		goto out;
	}

	switch (args->af) {
	case AF_INET:
	case AF_INET6:
		proto = args->type == SOCK_STREAM ? IPPROTO_TCP : IPPROTO_UDP;
		break;
	case AF_UNIX:
		proto = PF_UNIX;
		break;
	default:
		pr_err("invalid address family %d\n", args->af);
		err = -EINVAL;
		goto out;
	}

	err = sock_create_kern(current->nsproxy->net_ns, args->af, args->type,
			       proto, &sock);

	if (!err)
		/* Set timeout for call to kernel_connect() to prevent it from hanging,
		 * and consider the connection attempt failed if it returns
		 * -EINPROGRESS.
		 */
		sock->sk->sk_sndtimeo = CONNECT_TIMEOUT_SEC * HZ;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc void bpf_kfunc_close_sock(void)
{
	mutex_lock(&sock_lock);

	if (sock) {
		sock_release(sock);
		sock = NULL;
	}

	mutex_unlock(&sock_lock);
}
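
/* The socket kfuncs below operate on the module-global socket created by
 * bpf_kfunc_init_sock() and torn down by bpf_kfunc_close_sock(). All of
 * them are registered as KF_SLEEPABLE (see bpf_testmod_check_kfunc_ids)
 * since they may block.
 */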

__bpf_kfunc int bpf_kfunc_call_kernel_connect(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_connect(sock, (struct sockaddr *)&args->addr,
			     args->addrlen, 0);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_bind(struct addr_args *args)
{
	int err;

	if (args->addrlen > sizeof(args->addr))
		return -EINVAL;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_bind(sock, (struct sockaddr *)&args->addr, args->addrlen);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_listen(void)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_listen(sock, 128);
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name = &args->addr.addr,
		.msg_namelen = args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len = args->msglen;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_sendmsg(sock, &msg, &iov, 1, args->msglen);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args)
{
	struct msghdr msg = {
		.msg_name = &args->addr.addr,
		.msg_namelen = args->addr.addrlen,
	};
	struct kvec iov;
	int err;

	if (args->addr.addrlen > sizeof(args->addr.addr) ||
	    args->msglen > sizeof(args->msg))
		return -EINVAL;

	iov.iov_base = args->msg;
	iov.iov_len = args->msglen;

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, args->msglen);
	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = sock_sendmsg(sock, &msg);
	args->addr.addrlen = msg.msg_namelen;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getsockname(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

__bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
{
	int err;

	mutex_lock(&sock_lock);

	if (!sock) {
		pr_err("%s called without initializing sock", __func__);
		err = -EPERM;
		goto out;
	}

	err = kernel_getpeername(sock, (struct sockaddr *)&args->addr);
	if (err < 0)
		goto out;

	args->addrlen = err;
	err = 0;
out:
	mutex_unlock(&sock_lock);

	return err;
}

static DEFINE_MUTEX(st_ops_mutex);
static struct bpf_testmod_st_ops *st_ops;

__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_prologue)
		ret = st_ops->test_prologue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_epilogue)
		ret = st_ops->test_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
{
	int ret = -1;

	mutex_lock(&st_ops_mutex);
	if (st_ops && st_ops->test_pro_epilogue)
		ret = st_ops->test_pro_epilogue(args);
	mutex_unlock(&st_ops_mutex);

	return ret;
}

__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}

BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_init_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_close_sock, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_connect, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_bind, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_listen, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For data fields, this function has to copy it and return
		 * 1 to indicate that the data has been handled by the
		 * struct_ops type, or the verifier will reject the map if
		 * the value of the data field is not zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}

static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_tramp(int value)
{
	return 0;
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}
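
/* The "__nullable" suffix on task__nullable marks the argument as one
 * the verifier allows to be NULL; the struct_ops_maybe_null selftest
 * mentioned in bpf_dummy_reg() above exercises exactly that.
 */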

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata, struct bpf_link *link)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
	return 0;
}

static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
	 * r7 = r6->a;
	 * r7 += 1000;
	 * r6->a = r7;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}
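
/* As with any gen_prologue() callback, the return value above is the
 * number of instructions written to insn_buf; returning 0 leaves the
 * program unmodified.
 */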

static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
			       s16 ctx_stack_off)
{
	struct bpf_insn *insn = insn_buf;

	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
	 * r1 = r1[0]; // r1 will be "struct st_ops *args"
	 * r6 = r1->a;
	 * r6 += 10000;
	 * r1->a = r6;
	 * r0 = r6;
	 * r0 *= 2;
	 * BPF_EXIT;
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
	*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
	*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size)
{
	if (off < 0 || off + size > sizeof(struct st_ops_args))
		return -EACCES;
	return 0;
}

static const struct bpf_verifier_ops st_ops_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
	.btf_struct_access = st_ops_btf_struct_access,
	.gen_prologue = st_ops_gen_prologue,
	.gen_epilogue = st_ops_gen_epilogue,
	.get_func_proto = bpf_base_func_proto,
};

static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
	.test_prologue = bpf_test_mod_st_ops__test_prologue,
	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};

static int st_ops_reg(void *kdata, struct bpf_link *link)
{
	int err = 0;

	mutex_lock(&st_ops_mutex);
	if (st_ops) {
		pr_err("st_ops has already been registered\n");
		err = -EEXIST;
		goto unlock;
	}
	st_ops = kdata;

unlock:
	mutex_unlock(&st_ops_mutex);
	return err;
}

static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
	mutex_lock(&st_ops_mutex);
	st_ops = NULL;
	mutex_unlock(&st_ops_mutex);
}

static int st_ops_init(struct btf *btf)
{
	return 0;
}

static int st_ops_init_member(const struct btf_type *t,
			      const struct btf_member *member,
			      void *kdata, const void *udata)
{
	return 0;
}

static struct bpf_struct_ops testmod_st_ops = {
	.verifier_ops = &st_ops_verifier_ops,
	.init = st_ops_init,
	.init_member = st_ops_init_member,
	.reg = st_ops_reg,
	.unreg = st_ops_unreg,
	.cfi_stubs = &st_ops_cfi_stubs,
	.name = "bpf_testmod_st_ops",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = {
		{
			.btf_id = bpf_testmod_dtor_ids[0],
			.kfunc_btf_id = bpf_testmod_dtor_ids[1]
		},
	};
	void **tramp;
	int ret;

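	/* Chain the registrations with "?:" so the first failing call
	 * short-circuits the rest and its error code is the one returned
	 * below.
	 */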
	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
						 ARRAY_SIZE(bpf_testmod_dtors),
						 THIS_MODULE);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	sock = NULL;
	mutex_init(&sock_lock);
	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	if (ret < 0)
		return ret;
	ret = register_bpf_testmod_uprobe();
	if (ret < 0)
		return ret;

	/* Ensure nothing is between tramp_1..tramp_40 */
	BUILD_BUG_ON(offsetof(struct bpf_testmod_ops, tramp_1) + 40 * sizeof(long) !=
		     offsetofend(struct bpf_testmod_ops, tramp_40));
	tramp = (void **)&__bpf_testmod_ops.tramp_1;
	while (tramp <= (void **)&__bpf_testmod_ops.tramp_40)
		*tramp++ = bpf_testmod_tramp;

	return 0;
}

static void bpf_testmod_exit(void)
{
	/* Need to wait for all references to be dropped because
	 * bpf_kfunc_call_test_release(), which currently resides in the
	 * kernel, can be called after bpf_testmod is unloaded. Once the
	 * release function is moved into the module, this wait can be
	 * removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");