// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "tailcall_freplace.skel.h"
#include "tc_bpf2bpf.skel.h"

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall1.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

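	/* nop -> jmp: populate every jmp_table slot so each poke location
	 * in the program is patched from a nop into a direct jump.
	 */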
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

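	/* jmp -> nop: run the program, then delete each slot in turn so the
	 * corresponding poke location is patched back to a nop.
	 */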
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

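	/* nop -> jmp: repopulate all slots. */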
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

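	/* jmp -> jmp: retarget the occupied slots to different programs in
	 * reverse order, patching live jumps to new destinations.
	 */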
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		j = bpf_map__max_entries(prog_array) - 1 - i;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, j, "tailcall retval");

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");

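	/* nop -> nop: deleting from already-empty slots must fail with
	 * ENOENT and leave the program's behavior unchanged.
	 */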
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall2.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 2, "tailcall retval");

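	/* Delete entries one by one; each run should now stop earlier in the
	 * chain and return a different value.
	 */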
	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 3, "tailcall retval");
out:
	bpf_object__close(obj);
}

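/* test_tailcall_count is a common helper for the tail call limit tests: it
 * loads the given object, installs classifier_0 into the first jmp_table
 * slot and verifies that the program's self-maintained counter stops at 33,
 * which is assumed here to match the kernel's MAX_TAIL_CALL_CNT limit.
 */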
static void test_tailcall_count(const char *which)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses a direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.bpf.o");
}

/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches expectations. The JIT uses an indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.bpf.o");
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known at verification time. The key is
 * passed via global data to select different targets whose return values
 * we can compare.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall4.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

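	/* Select each target by writing its key into global data; since the
	 * key is not known to the verifier, the JIT must emit an indirect
	 * jump for the tail call.
	 */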
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes, similarly to test_tailcall_4, that the kernel
 * generates an indirect jump when the keys are constant but differ across
 * branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall5.bpf.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
				 &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

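	/* Same pattern as test_tailcall_4, except the value written is the
	 * per-branch constant from key[] rather than the slot index itself.
	 */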
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, i, "tailcall retval");
	}

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run_opts(main_fd, &topts);
		ASSERT_OK(err, "tailcall");
		ASSERT_EQ(topts.retval, 3, "tailcall retval");
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf1.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");

	/* make sure that the subprog can access ctx and that the entry prog
	 * that called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
 * enforcement matches expectations when the tail call is preceded by a
 * bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char buff[128] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf2.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "classifier_0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, 1, "tailcall retval");

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val, 33, "tailcall count");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_OK(topts.retval, "tailcall retval");
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain the tail calls.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf3.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval");

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval");
out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf4.skel.h"

/* test_tailcall_bpf2bpf_4 checks that the tail call counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure the
 * tail call counter behaves correctly, the BPF program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test, the global counter must
 * equal 31, because the tail call counter includes the first two tail calls
 * whereas the global counter is incremented only in the loop shown above.
 *
 * The noise parameter is used to insert bpf_map_update calls into the logic
 * to force the verifier to patch instructions. This allows us to ensure that
 * the jump logic remains correct as instructions move.
 */
static void test_tailcall_bpf2bpf_4(bool noise)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct tailcall_bpf2bpf4__bss val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char prog_name[32];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	err = bpf_prog_test_load("tailcall_bpf2bpf4.bpf.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_name(obj, "entry");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

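	/* Seed the global state: noise toggles the extra bpf_map_update
	 * calls that force instruction patching, and the loop counter
	 * starts at zero.
	 */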
	i = 0;
	val.noise = noise;
	val.count = 0;
	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "tailcall");
	ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval");

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "tailcall count");
	ASSERT_EQ(val.count, 31, "tailcall count");

out:
	bpf_object__close(obj);
}

#include "tailcall_bpf2bpf6.skel.h"

/* Tail call counting works even when there is data on the stack that is
 * not aligned to 8 bytes.
 */
static void test_tailcall_bpf2bpf_6(void)
{
	struct tailcall_bpf2bpf6 *obj;
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	obj = tailcall_bpf2bpf6__open_and_load();
	if (!ASSERT_OK_PTR(obj, "open and load"))
		return;

	main_fd = bpf_program__fd(obj->progs.entry);
	if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
		goto out;

	map_fd = bpf_map__fd(obj->maps.jmp_table);
	if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
		goto out;

	prog_fd = bpf_program__fd(obj->progs.classifier_0);
	if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "jmp_table map update"))
		goto out;

	err = bpf_prog_test_run_opts(main_fd, &topts);
	ASSERT_OK(err, "entry prog test run");
	ASSERT_EQ(topts.retval, 0, "tailcall retval");

	data_fd = bpf_map__fd(obj->maps.bss);
	if (!ASSERT_GE(data_fd, 0, "bss map fd"))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	ASSERT_OK(err, "bss map lookup");
	ASSERT_EQ(val, 1, "done flag is set");

out:
	tailcall_bpf2bpf6__destroy(obj);
}

/* test_tailcall_freplace checks that an freplace prog cannot be inserted
 * into a prog_array map, whether or not the freplace prog is attached to
 * its target.
 */
static void test_tailcall_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct bpf_program *freplace_prog;
	struct tc_bpf2bpf *tc_skel = NULL;
	int prog_fd, tc_prog_fd, map_fd;
	char buff[128] = {};
	int err, key;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		return;

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	tc_prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_prog = freplace_skel->progs.entry_freplace;
	err = bpf_program__set_attach_target(freplace_prog, tc_prog_fd,
					     "subprog_tc");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	prog_fd = bpf_program__fd(freplace_prog);
	key = 0;
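	/* Not yet attached: the freplace prog must still be rejected by the
	 * prog_array update.
	 */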
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	ASSERT_ERR(err, "update jmp_table failure");

	freplace_link = bpf_program__attach_freplace(freplace_prog, tc_prog_fd,
						     "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

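	/* Attached to its target: must be rejected as well. */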
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	ASSERT_ERR(err, "update jmp_table failure");

out:
	bpf_link__destroy(freplace_link);
	tailcall_freplace__destroy(freplace_skel);
	tc_bpf2bpf__destroy(tc_skel);
}

/* test_tailcall_bpf2bpf_freplace checks that attaching an freplace prog to
 * a prog that is a tail callee fails, and that updating a prog_array map
 * with an extended prog fails.
 */
static void test_tailcall_bpf2bpf_freplace(void)
{
	struct tailcall_freplace *freplace_skel = NULL;
	struct bpf_link *freplace_link = NULL;
	struct tc_bpf2bpf *tc_skel = NULL;
	char buff[128] = {};
	int prog_fd, map_fd;
	int err, key;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = buff,
		    .data_size_in = sizeof(buff),
		    .repeat = 1,
	);

	tc_skel = tc_bpf2bpf__open_and_load();
	if (!ASSERT_OK_PTR(tc_skel, "tc_bpf2bpf__open_and_load"))
		goto out;

	prog_fd = bpf_program__fd(tc_skel->progs.entry_tc);
	freplace_skel = tailcall_freplace__open();
	if (!ASSERT_OK_PTR(freplace_skel, "tailcall_freplace__open"))
		goto out;

	err = bpf_program__set_attach_target(freplace_skel->progs.entry_freplace,
					     prog_fd, "subprog_tc");
	if (!ASSERT_OK(err, "set_attach_target"))
		goto out;

	err = tailcall_freplace__load(freplace_skel);
	if (!ASSERT_OK(err, "tailcall_freplace__load"))
		goto out;

	/* OK to attach then detach freplace prog. */

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_link__destroy(freplace_link);
	if (!ASSERT_OK(err, "destroy link"))
		goto out;

	/* OK to update prog_array map then delete element from the map. */

	key = 0;
	map_fd = bpf_map__fd(freplace_skel->maps.jmp_table);
	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	/* Fail to attach a tail callee prog with freplace prog. */

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_OK(err, "update jmp_table"))
		goto out;

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_ERR_PTR(freplace_link, "attach_freplace failure"))
		goto out;

	err = bpf_map_delete_elem(map_fd, &key);
	if (!ASSERT_OK(err, "delete_elem from jmp_table"))
		goto out;

	/* Fail to update an extended prog to prog_array map. */

	freplace_link = bpf_program__attach_freplace(freplace_skel->progs.entry_freplace,
						     prog_fd, "subprog_tc");
	if (!ASSERT_OK_PTR(freplace_link, "attach_freplace"))
		goto out;

	err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	if (!ASSERT_ERR(err, "update jmp_table failure"))
		goto out;

out:
	bpf_link__destroy(freplace_link);
	tailcall_freplace__destroy(freplace_skel);
	tc_bpf2bpf__destroy(tc_skel);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
	if (test__start_subtest("tailcall_bpf2bpf_6"))
		test_tailcall_bpf2bpf_6();
	if (test__start_subtest("tailcall_freplace"))
		test_tailcall_freplace();
	if (test__start_subtest("tailcall_bpf2bpf_freplace"))
		test_tailcall_bpf2bpf_freplace();
}