• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include <network_helpers.h>
4 
5 /* test_tailcall_1 checks basic functionality by patching multiple locations
6  * in a single program for a single tail call slot with nop->jmp, jmp->nop
7  * and jmp->jmp rewrites. Also checks for nop->nop.
8  */
test_tailcall_1(void)9 static void test_tailcall_1(void)
10 {
11 	int err, map_fd, prog_fd, main_fd, i, j;
12 	struct bpf_map *prog_array;
13 	struct bpf_program *prog;
14 	struct bpf_object *obj;
15 	__u32 retval, duration;
16 	char prog_name[32];
17 	char buff[128] = {};
18 
19 	err = bpf_prog_test_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
20 			    &prog_fd);
21 	if (CHECK_FAIL(err))
22 		return;
23 
24 	prog = bpf_object__find_program_by_name(obj, "entry");
25 	if (CHECK_FAIL(!prog))
26 		goto out;
27 
28 	main_fd = bpf_program__fd(prog);
29 	if (CHECK_FAIL(main_fd < 0))
30 		goto out;
31 
32 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
33 	if (CHECK_FAIL(!prog_array))
34 		goto out;
35 
36 	map_fd = bpf_map__fd(prog_array);
37 	if (CHECK_FAIL(map_fd < 0))
38 		goto out;
39 
40 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
41 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
42 
43 		prog = bpf_object__find_program_by_name(obj, prog_name);
44 		if (CHECK_FAIL(!prog))
45 			goto out;
46 
47 		prog_fd = bpf_program__fd(prog);
48 		if (CHECK_FAIL(prog_fd < 0))
49 			goto out;
50 
51 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
52 		if (CHECK_FAIL(err))
53 			goto out;
54 	}
55 
56 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
57 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
58 					&duration, &retval, NULL);
59 		CHECK(err || retval != i, "tailcall",
60 		      "err %d errno %d retval %d\n", err, errno, retval);
61 
62 		err = bpf_map_delete_elem(map_fd, &i);
63 		if (CHECK_FAIL(err))
64 			goto out;
65 	}
66 
67 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
68 				&duration, &retval, NULL);
69 	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
70 	      err, errno, retval);
71 
72 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
73 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
74 
75 		prog = bpf_object__find_program_by_name(obj, prog_name);
76 		if (CHECK_FAIL(!prog))
77 			goto out;
78 
79 		prog_fd = bpf_program__fd(prog);
80 		if (CHECK_FAIL(prog_fd < 0))
81 			goto out;
82 
83 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
84 		if (CHECK_FAIL(err))
85 			goto out;
86 	}
87 
88 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
89 				&duration, &retval, NULL);
90 	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
91 	      err, errno, retval);
92 
93 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
94 		j = bpf_map__def(prog_array)->max_entries - 1 - i;
95 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
96 
97 		prog = bpf_object__find_program_by_name(obj, prog_name);
98 		if (CHECK_FAIL(!prog))
99 			goto out;
100 
101 		prog_fd = bpf_program__fd(prog);
102 		if (CHECK_FAIL(prog_fd < 0))
103 			goto out;
104 
105 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
106 		if (CHECK_FAIL(err))
107 			goto out;
108 	}
109 
110 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
111 		j = bpf_map__def(prog_array)->max_entries - 1 - i;
112 
113 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
114 					&duration, &retval, NULL);
115 		CHECK(err || retval != j, "tailcall",
116 		      "err %d errno %d retval %d\n", err, errno, retval);
117 
118 		err = bpf_map_delete_elem(map_fd, &i);
119 		if (CHECK_FAIL(err))
120 			goto out;
121 	}
122 
123 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
124 				&duration, &retval, NULL);
125 	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
126 	      err, errno, retval);
127 
128 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
129 		err = bpf_map_delete_elem(map_fd, &i);
130 		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
131 			goto out;
132 
133 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
134 					&duration, &retval, NULL);
135 		CHECK(err || retval != 3, "tailcall",
136 		      "err %d errno %d retval %d\n", err, errno, retval);
137 	}
138 
139 out:
140 	bpf_object__close(obj);
141 }
142 
143 /* test_tailcall_2 checks that patching multiple programs for a single
144  * tail call slot works. It also jumps through several programs and tests
145  * the tail call limit counter.
146  */
test_tailcall_2(void)147 static void test_tailcall_2(void)
148 {
149 	int err, map_fd, prog_fd, main_fd, i;
150 	struct bpf_map *prog_array;
151 	struct bpf_program *prog;
152 	struct bpf_object *obj;
153 	__u32 retval, duration;
154 	char prog_name[32];
155 	char buff[128] = {};
156 
157 	err = bpf_prog_test_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
158 			    &prog_fd);
159 	if (CHECK_FAIL(err))
160 		return;
161 
162 	prog = bpf_object__find_program_by_name(obj, "entry");
163 	if (CHECK_FAIL(!prog))
164 		goto out;
165 
166 	main_fd = bpf_program__fd(prog);
167 	if (CHECK_FAIL(main_fd < 0))
168 		goto out;
169 
170 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
171 	if (CHECK_FAIL(!prog_array))
172 		goto out;
173 
174 	map_fd = bpf_map__fd(prog_array);
175 	if (CHECK_FAIL(map_fd < 0))
176 		goto out;
177 
178 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
179 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
180 
181 		prog = bpf_object__find_program_by_name(obj, prog_name);
182 		if (CHECK_FAIL(!prog))
183 			goto out;
184 
185 		prog_fd = bpf_program__fd(prog);
186 		if (CHECK_FAIL(prog_fd < 0))
187 			goto out;
188 
189 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
190 		if (CHECK_FAIL(err))
191 			goto out;
192 	}
193 
194 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
195 				&duration, &retval, NULL);
196 	CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n",
197 	      err, errno, retval);
198 
199 	i = 2;
200 	err = bpf_map_delete_elem(map_fd, &i);
201 	if (CHECK_FAIL(err))
202 		goto out;
203 
204 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
205 				&duration, &retval, NULL);
206 	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
207 	      err, errno, retval);
208 
209 	i = 0;
210 	err = bpf_map_delete_elem(map_fd, &i);
211 	if (CHECK_FAIL(err))
212 		goto out;
213 
214 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
215 				&duration, &retval, NULL);
216 	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
217 	      err, errno, retval);
218 out:
219 	bpf_object__close(obj);
220 }
221 
test_tailcall_count(const char * which)222 static void test_tailcall_count(const char *which)
223 {
224 	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
225 	struct bpf_map *prog_array, *data_map;
226 	struct bpf_program *prog;
227 	struct bpf_object *obj;
228 	__u32 retval, duration;
229 	char buff[128] = {};
230 
231 	err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj,
232 			    &prog_fd);
233 	if (CHECK_FAIL(err))
234 		return;
235 
236 	prog = bpf_object__find_program_by_name(obj, "entry");
237 	if (CHECK_FAIL(!prog))
238 		goto out;
239 
240 	main_fd = bpf_program__fd(prog);
241 	if (CHECK_FAIL(main_fd < 0))
242 		goto out;
243 
244 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
245 	if (CHECK_FAIL(!prog_array))
246 		goto out;
247 
248 	map_fd = bpf_map__fd(prog_array);
249 	if (CHECK_FAIL(map_fd < 0))
250 		goto out;
251 
252 	prog = bpf_object__find_program_by_name(obj, "classifier_0");
253 	if (CHECK_FAIL(!prog))
254 		goto out;
255 
256 	prog_fd = bpf_program__fd(prog);
257 	if (CHECK_FAIL(prog_fd < 0))
258 		goto out;
259 
260 	i = 0;
261 	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
262 	if (CHECK_FAIL(err))
263 		goto out;
264 
265 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
266 				&duration, &retval, NULL);
267 	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
268 	      err, errno, retval);
269 
270 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
271 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
272 		return;
273 
274 	data_fd = bpf_map__fd(data_map);
275 	if (CHECK_FAIL(map_fd < 0))
276 		return;
277 
278 	i = 0;
279 	err = bpf_map_lookup_elem(data_fd, &i, &val);
280 	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
281 	      err, errno, val);
282 
283 	i = 0;
284 	err = bpf_map_delete_elem(map_fd, &i);
285 	if (CHECK_FAIL(err))
286 		goto out;
287 
288 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
289 				&duration, &retval, NULL);
290 	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
291 	      err, errno, retval);
292 out:
293 	bpf_object__close(obj);
294 }
295 
/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses direct jump.
 */
static void test_tailcall_3(void)
{
	test_tailcall_count("tailcall3.o");
}
303 
/* test_tailcall_6 checks that the count value of the tail call limit
 * enforcement matches with expectations. JIT uses indirect jump.
 */
static void test_tailcall_6(void)
{
	test_tailcall_count("tailcall6.o");
}
311 
312 /* test_tailcall_4 checks that the kernel properly selects indirect jump
313  * for the case where the key is not known. Latter is passed via global
314  * data to select different targets we can compare return value of.
315  */
test_tailcall_4(void)316 static void test_tailcall_4(void)
317 {
318 	int err, map_fd, prog_fd, main_fd, data_fd, i;
319 	struct bpf_map *prog_array, *data_map;
320 	struct bpf_program *prog;
321 	struct bpf_object *obj;
322 	__u32 retval, duration;
323 	static const int zero = 0;
324 	char buff[128] = {};
325 	char prog_name[32];
326 
327 	err = bpf_prog_test_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
328 			    &prog_fd);
329 	if (CHECK_FAIL(err))
330 		return;
331 
332 	prog = bpf_object__find_program_by_name(obj, "entry");
333 	if (CHECK_FAIL(!prog))
334 		goto out;
335 
336 	main_fd = bpf_program__fd(prog);
337 	if (CHECK_FAIL(main_fd < 0))
338 		goto out;
339 
340 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
341 	if (CHECK_FAIL(!prog_array))
342 		goto out;
343 
344 	map_fd = bpf_map__fd(prog_array);
345 	if (CHECK_FAIL(map_fd < 0))
346 		goto out;
347 
348 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
349 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
350 		return;
351 
352 	data_fd = bpf_map__fd(data_map);
353 	if (CHECK_FAIL(map_fd < 0))
354 		return;
355 
356 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
357 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
358 
359 		prog = bpf_object__find_program_by_name(obj, prog_name);
360 		if (CHECK_FAIL(!prog))
361 			goto out;
362 
363 		prog_fd = bpf_program__fd(prog);
364 		if (CHECK_FAIL(prog_fd < 0))
365 			goto out;
366 
367 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
368 		if (CHECK_FAIL(err))
369 			goto out;
370 	}
371 
372 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
373 		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
374 		if (CHECK_FAIL(err))
375 			goto out;
376 
377 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
378 					&duration, &retval, NULL);
379 		CHECK(err || retval != i, "tailcall",
380 		      "err %d errno %d retval %d\n", err, errno, retval);
381 	}
382 
383 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
384 		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
385 		if (CHECK_FAIL(err))
386 			goto out;
387 
388 		err = bpf_map_delete_elem(map_fd, &i);
389 		if (CHECK_FAIL(err))
390 			goto out;
391 
392 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
393 					&duration, &retval, NULL);
394 		CHECK(err || retval != 3, "tailcall",
395 		      "err %d errno %d retval %d\n", err, errno, retval);
396 	}
397 out:
398 	bpf_object__close(obj);
399 }
400 
401 /* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel generates
402  * an indirect jump when the keys are const but different from different branches.
403  */
test_tailcall_5(void)404 static void test_tailcall_5(void)
405 {
406 	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
407 	struct bpf_map *prog_array, *data_map;
408 	struct bpf_program *prog;
409 	struct bpf_object *obj;
410 	__u32 retval, duration;
411 	static const int zero = 0;
412 	char buff[128] = {};
413 	char prog_name[32];
414 
415 	err = bpf_prog_test_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
416 			    &prog_fd);
417 	if (CHECK_FAIL(err))
418 		return;
419 
420 	prog = bpf_object__find_program_by_name(obj, "entry");
421 	if (CHECK_FAIL(!prog))
422 		goto out;
423 
424 	main_fd = bpf_program__fd(prog);
425 	if (CHECK_FAIL(main_fd < 0))
426 		goto out;
427 
428 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
429 	if (CHECK_FAIL(!prog_array))
430 		goto out;
431 
432 	map_fd = bpf_map__fd(prog_array);
433 	if (CHECK_FAIL(map_fd < 0))
434 		goto out;
435 
436 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
437 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
438 		return;
439 
440 	data_fd = bpf_map__fd(data_map);
441 	if (CHECK_FAIL(map_fd < 0))
442 		return;
443 
444 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
445 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
446 
447 		prog = bpf_object__find_program_by_name(obj, prog_name);
448 		if (CHECK_FAIL(!prog))
449 			goto out;
450 
451 		prog_fd = bpf_program__fd(prog);
452 		if (CHECK_FAIL(prog_fd < 0))
453 			goto out;
454 
455 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
456 		if (CHECK_FAIL(err))
457 			goto out;
458 	}
459 
460 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
461 		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
462 		if (CHECK_FAIL(err))
463 			goto out;
464 
465 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
466 					&duration, &retval, NULL);
467 		CHECK(err || retval != i, "tailcall",
468 		      "err %d errno %d retval %d\n", err, errno, retval);
469 	}
470 
471 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
472 		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
473 		if (CHECK_FAIL(err))
474 			goto out;
475 
476 		err = bpf_map_delete_elem(map_fd, &i);
477 		if (CHECK_FAIL(err))
478 			goto out;
479 
480 		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
481 					&duration, &retval, NULL);
482 		CHECK(err || retval != 3, "tailcall",
483 		      "err %d errno %d retval %d\n", err, errno, retval);
484 	}
485 out:
486 	bpf_object__close(obj);
487 }
488 
489 /* test_tailcall_bpf2bpf_1 purpose is to make sure that tailcalls are working
490  * correctly in correlation with BPF subprograms
491  */
test_tailcall_bpf2bpf_1(void)492 static void test_tailcall_bpf2bpf_1(void)
493 {
494 	int err, map_fd, prog_fd, main_fd, i;
495 	struct bpf_map *prog_array;
496 	struct bpf_program *prog;
497 	struct bpf_object *obj;
498 	__u32 retval, duration;
499 	char prog_name[32];
500 
501 	err = bpf_prog_test_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
502 			    &obj, &prog_fd);
503 	if (CHECK_FAIL(err))
504 		return;
505 
506 	prog = bpf_object__find_program_by_name(obj, "entry");
507 	if (CHECK_FAIL(!prog))
508 		goto out;
509 
510 	main_fd = bpf_program__fd(prog);
511 	if (CHECK_FAIL(main_fd < 0))
512 		goto out;
513 
514 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
515 	if (CHECK_FAIL(!prog_array))
516 		goto out;
517 
518 	map_fd = bpf_map__fd(prog_array);
519 	if (CHECK_FAIL(map_fd < 0))
520 		goto out;
521 
522 	/* nop -> jmp */
523 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
524 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
525 
526 		prog = bpf_object__find_program_by_name(obj, prog_name);
527 		if (CHECK_FAIL(!prog))
528 			goto out;
529 
530 		prog_fd = bpf_program__fd(prog);
531 		if (CHECK_FAIL(prog_fd < 0))
532 			goto out;
533 
534 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
535 		if (CHECK_FAIL(err))
536 			goto out;
537 	}
538 
539 	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
540 				0, &retval, &duration);
541 	CHECK(err || retval != 1, "tailcall",
542 	      "err %d errno %d retval %d\n", err, errno, retval);
543 
544 	/* jmp -> nop, call subprog that will do tailcall */
545 	i = 1;
546 	err = bpf_map_delete_elem(map_fd, &i);
547 	if (CHECK_FAIL(err))
548 		goto out;
549 
550 	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
551 				0, &retval, &duration);
552 	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
553 	      err, errno, retval);
554 
555 	/* make sure that subprog can access ctx and entry prog that
556 	 * called this subprog can properly return
557 	 */
558 	i = 0;
559 	err = bpf_map_delete_elem(map_fd, &i);
560 	if (CHECK_FAIL(err))
561 		goto out;
562 
563 	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
564 				0, &retval, &duration);
565 	CHECK(err || retval != sizeof(pkt_v4) * 2,
566 	      "tailcall", "err %d errno %d retval %d\n",
567 	      err, errno, retval);
568 out:
569 	bpf_object__close(obj);
570 }
571 
572 /* test_tailcall_bpf2bpf_2 checks that the count value of the tail call limit
573  * enforcement matches with expectations when tailcall is preceded with
574  * bpf2bpf call.
575  */
test_tailcall_bpf2bpf_2(void)576 static void test_tailcall_bpf2bpf_2(void)
577 {
578 	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
579 	struct bpf_map *prog_array, *data_map;
580 	struct bpf_program *prog;
581 	struct bpf_object *obj;
582 	__u32 retval, duration;
583 	char buff[128] = {};
584 
585 	err = bpf_prog_test_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
586 			    &obj, &prog_fd);
587 	if (CHECK_FAIL(err))
588 		return;
589 
590 	prog = bpf_object__find_program_by_name(obj, "entry");
591 	if (CHECK_FAIL(!prog))
592 		goto out;
593 
594 	main_fd = bpf_program__fd(prog);
595 	if (CHECK_FAIL(main_fd < 0))
596 		goto out;
597 
598 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
599 	if (CHECK_FAIL(!prog_array))
600 		goto out;
601 
602 	map_fd = bpf_map__fd(prog_array);
603 	if (CHECK_FAIL(map_fd < 0))
604 		goto out;
605 
606 	prog = bpf_object__find_program_by_name(obj, "classifier_0");
607 	if (CHECK_FAIL(!prog))
608 		goto out;
609 
610 	prog_fd = bpf_program__fd(prog);
611 	if (CHECK_FAIL(prog_fd < 0))
612 		goto out;
613 
614 	i = 0;
615 	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
616 	if (CHECK_FAIL(err))
617 		goto out;
618 
619 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
620 				&duration, &retval, NULL);
621 	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
622 	      err, errno, retval);
623 
624 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
625 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
626 		return;
627 
628 	data_fd = bpf_map__fd(data_map);
629 	if (CHECK_FAIL(map_fd < 0))
630 		return;
631 
632 	i = 0;
633 	err = bpf_map_lookup_elem(data_fd, &i, &val);
634 	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
635 	      err, errno, val);
636 
637 	i = 0;
638 	err = bpf_map_delete_elem(map_fd, &i);
639 	if (CHECK_FAIL(err))
640 		goto out;
641 
642 	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
643 				&duration, &retval, NULL);
644 	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
645 	      err, errno, retval);
646 out:
647 	bpf_object__close(obj);
648 }
649 
650 /* test_tailcall_bpf2bpf_3 checks that non-trivial amount of stack (up to
651  * 256 bytes) can be used within bpf subprograms that have the tailcalls
652  * in them
653  */
test_tailcall_bpf2bpf_3(void)654 static void test_tailcall_bpf2bpf_3(void)
655 {
656 	int err, map_fd, prog_fd, main_fd, i;
657 	struct bpf_map *prog_array;
658 	struct bpf_program *prog;
659 	struct bpf_object *obj;
660 	__u32 retval, duration;
661 	char prog_name[32];
662 
663 	err = bpf_prog_test_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
664 			    &obj, &prog_fd);
665 	if (CHECK_FAIL(err))
666 		return;
667 
668 	prog = bpf_object__find_program_by_name(obj, "entry");
669 	if (CHECK_FAIL(!prog))
670 		goto out;
671 
672 	main_fd = bpf_program__fd(prog);
673 	if (CHECK_FAIL(main_fd < 0))
674 		goto out;
675 
676 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
677 	if (CHECK_FAIL(!prog_array))
678 		goto out;
679 
680 	map_fd = bpf_map__fd(prog_array);
681 	if (CHECK_FAIL(map_fd < 0))
682 		goto out;
683 
684 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
685 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
686 
687 		prog = bpf_object__find_program_by_name(obj, prog_name);
688 		if (CHECK_FAIL(!prog))
689 			goto out;
690 
691 		prog_fd = bpf_program__fd(prog);
692 		if (CHECK_FAIL(prog_fd < 0))
693 			goto out;
694 
695 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
696 		if (CHECK_FAIL(err))
697 			goto out;
698 	}
699 
700 	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
701 				&duration, &retval, NULL);
702 	CHECK(err || retval != sizeof(pkt_v4) * 3,
703 	      "tailcall", "err %d errno %d retval %d\n",
704 	      err, errno, retval);
705 
706 	i = 1;
707 	err = bpf_map_delete_elem(map_fd, &i);
708 	if (CHECK_FAIL(err))
709 		goto out;
710 
711 	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
712 				&duration, &retval, NULL);
713 	CHECK(err || retval != sizeof(pkt_v4),
714 	      "tailcall", "err %d errno %d retval %d\n",
715 	      err, errno, retval);
716 
717 	i = 0;
718 	err = bpf_map_delete_elem(map_fd, &i);
719 	if (CHECK_FAIL(err))
720 		goto out;
721 
722 	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
723 				&duration, &retval, NULL);
724 	CHECK(err || retval != sizeof(pkt_v4) * 2,
725 	      "tailcall", "err %d errno %d retval %d\n",
726 	      err, errno, retval);
727 out:
728 	bpf_object__close(obj);
729 }
730 
731 #include "tailcall_bpf2bpf4.skel.h"
732 
733 /* test_tailcall_bpf2bpf_4 checks that tailcall counter is correctly preserved
734  * across tailcalls combined with bpf2bpf calls. for making sure that tailcall
735  * counter behaves correctly, bpf program will go through following flow:
736  *
737  * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
738  * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
739  * subprog2 [here bump global counter] --------^
740  *
741  * We go through first two tailcalls and start counting from the subprog2 where
742  * the loop begins. At the end of the test make sure that the global counter is
743  * equal to 31, because tailcall counter includes the first two tailcalls
744  * whereas global counter is incremented only on loop presented on flow above.
745  *
746  * The noise parameter is used to insert bpf_map_update calls into the logic
747  * to force verifier to patch instructions. This allows us to ensure jump
748  * logic remains correct with instruction movement.
749  */
test_tailcall_bpf2bpf_4(bool noise)750 static void test_tailcall_bpf2bpf_4(bool noise)
751 {
752 	int err, map_fd, prog_fd, main_fd, data_fd, i;
753 	struct tailcall_bpf2bpf4__bss val;
754 	struct bpf_map *prog_array, *data_map;
755 	struct bpf_program *prog;
756 	struct bpf_object *obj;
757 	__u32 retval, duration;
758 	char prog_name[32];
759 
760 	err = bpf_prog_test_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
761 			    &obj, &prog_fd);
762 	if (CHECK_FAIL(err))
763 		return;
764 
765 	prog = bpf_object__find_program_by_name(obj, "entry");
766 	if (CHECK_FAIL(!prog))
767 		goto out;
768 
769 	main_fd = bpf_program__fd(prog);
770 	if (CHECK_FAIL(main_fd < 0))
771 		goto out;
772 
773 	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
774 	if (CHECK_FAIL(!prog_array))
775 		goto out;
776 
777 	map_fd = bpf_map__fd(prog_array);
778 	if (CHECK_FAIL(map_fd < 0))
779 		goto out;
780 
781 	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
782 		snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
783 
784 		prog = bpf_object__find_program_by_name(obj, prog_name);
785 		if (CHECK_FAIL(!prog))
786 			goto out;
787 
788 		prog_fd = bpf_program__fd(prog);
789 		if (CHECK_FAIL(prog_fd < 0))
790 			goto out;
791 
792 		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
793 		if (CHECK_FAIL(err))
794 			goto out;
795 	}
796 
797 	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
798 	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
799 		return;
800 
801 	data_fd = bpf_map__fd(data_map);
802 	if (CHECK_FAIL(map_fd < 0))
803 		return;
804 
805 	i = 0;
806 	val.noise = noise;
807 	val.count = 0;
808 	err = bpf_map_update_elem(data_fd, &i, &val, BPF_ANY);
809 	if (CHECK_FAIL(err))
810 		goto out;
811 
812 	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
813 				&duration, &retval, NULL);
814 	CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
815 	      err, errno, retval);
816 
817 	i = 0;
818 	err = bpf_map_lookup_elem(data_fd, &i, &val);
819 	CHECK(err || val.count != 31, "tailcall count", "err %d errno %d count %d\n",
820 	      err, errno, val.count);
821 
822 out:
823 	bpf_object__close(obj);
824 }
825 
/* Test-suite entry point: registers each tailcall scenario as its own
 * subtest so they can be run and filtered individually.
 */
void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_6"))
		test_tailcall_6();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	/* bpf2bpf_4 runs twice: without and with verifier-patching noise. */
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4(false);
	if (test__start_subtest("tailcall_bpf2bpf_5"))
		test_tailcall_bpf2bpf_4(true);
}
851