// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

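/* The CHECK() macro from test_progs.h references a local "duration"
 * variable, so define one here.
 */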
static int duration;

/* Loading bpf_iter_test_kern3 is expected to fail; clean up only if it
 * unexpectedly succeeds.
 */
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

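/* Attach the iterator program, create an iterator fd from the link, and
 * read that fd to completion so the program runs over every object.
 */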
static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* Don't check the contents; just ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}

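/* Create a map iterator, then close the link and destroy the skeleton (and
 * with it the map fd) before reading: the iterator fd must keep the
 * underlying objects alive, so the read should still succeed.
 */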
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close the link and map fd prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Give any pending map-free work a chance to run first */
	usleep(100);
	/* Memory used by both the sock map and the sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for it
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both the map fd and the link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}

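/* Read from fd until EOF. Returns the number of bytes placed in buf, or a
 * negative value on read() error.
 */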
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

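/* glibc has not always shipped a pidfd_open() wrapper, so invoke the
 * syscall directly.
 */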
static int pidfd_open(pid_t pid, unsigned int flags)
{
	return syscall(SYS_pidfd_open, pid, flags);
}

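/* Attach a task iterator filtered to the current tid, then verify that
 * bpf_obj_get_info_by_fd() reports that same tid in the link info.
 */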
static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_obj_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

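/* A helper thread parks on do_nothing_mutex so the task iterator always
 * sees one extra, known thread alongside the main one.
 */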
static pthread_mutex_t do_nothing_mutex;

static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}

static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_task *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = getpid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_task__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}

static void test_task_tid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	test_task_common(&opts, 1, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);

	close(pidfd);
}

static void test_task_sleepable(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");

	bpf_iter_task_stack__destroy(skel);
}

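/* Iterate task files twice: first filtered to this tid, which must see
 * exactly one unique tgid, then unfiltered, which should see several.
 */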
static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ 32768

static char taskbuf[TASKBUFSZ];

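/* Dump the BTF representation of task_struct into taskbuf. Returns 1 when
 * the test must be skipped (no __builtin_btf_type_id support), 0 otherwise.
 */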
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
			  "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator works. Now update the underlying link's
	 * program and `cat` the iterator again; its content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

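/* Exercise seq_file buffer handling in three modes: print_len just over the
 * buffer size, so the read must fail with E2BIG; print_len just under it,
 * so the overflow forces a restart but the read succeeds; and ret1, where
 * the iterator program returns 1 and only minimal output is produced.
 */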
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* Create two maps: the bpf program will only bpf_seq_write() for
	 * these two maps. The goal is for one map's output to almost fill
	 * the seq_file buffer, and for the other to then trigger the
	 * overflow and need a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map1_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (CHECK(map2_fd < 0, "bpf_map_create",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
	 * bpf_seq_write will mostly fill it, and the other map
	 * will partially fill it, trigger the overflow and require
	 * a bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		       "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
			/* read() reports EAGAIN via errno, not its return value */
		} while (len > 0 || (len == -1 && errno == EAGAIN));

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

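/* dump_bpf_hash_map only accepts hashmap1: attach attempts with hashmap2
 * and hashmap3 must fail. The program sums keys and values, which are
 * checked against totals computed here while updating the map.
 */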
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable program is prohibited for hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* drain the iterator; the program accumulates the sums */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

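/* Userspace supplies per-CPU values as a flat buffer with an 8-byte slot
 * per possible CPU; num_cpus tells the BPF program how many slots to
 * expect.
 */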
static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* drain the iterator; the program accumulates the sums */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 expected_key = 0, res_first_key;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	int err, i, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* read the full output; the first key/value pair is checked below */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  "first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup"))
			goto out;
		if (!ASSERT_EQ(i, val, "invalid_val"))
			goto out;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* drain the iterator; the program accumulates the sums */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* drain the iterator so the program visits (and deletes) every entry */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket, this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage; the test verifies that the resulting
 * value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
		  "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
		  getpid(), val, err))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

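/* Attaching the out-of-bounds-write program must be rejected with -EACCES;
 * the read-write program then adds to_add_val to every entry, which is
 * verified with map lookups afterwards.
 */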
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* drain the iterator; the program counts sockets and updates values */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

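/* The object loads, but attaching dump_bpf_hash_map is expected to fail:
 * the program's access to the read-only value buffer is out of bounds for
 * this map.
 */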
static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

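/* Compare bpf_iter-produced task_vma output against /proc/<pid>/maps for
 * the same process: after stripping whitespace, the first lines must match.
 */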
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vma *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vma__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vma__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

static void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}

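/* Derive page_shift as log2(page size), then check that the vma offset the
 * program reports for trigger_func matches get_uprobe_offset().
 */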
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
		goto exit;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_iter_vma_offset__destroy(skel);
}

static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}