
Searched refs:pb (Results 1 – 16 of 16) sorted by relevance

/tools/testing/selftests/bpf/benchs/
run_bench_ringbufs.sh 10 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
15 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
20 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
43 for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
bench_ringbufs.c 451 struct perf_buffer *pb; member
478 struct perf_buffer *pb = ctx->perfbuf; in perfbuf_custom_consumer() local
481 size_t mmap_mask = pb->mmap_size - 1; in perfbuf_custom_consumer()
491 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, -1); in perfbuf_custom_consumer()
498 cpu_buf = pb->events[i].data.ptr; in perfbuf_custom_consumer()
500 base = ((void *)header) + pb->page_size; in perfbuf_custom_consumer()
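The custom consumer in bench_ringbufs.c bypasses perf_buffer__poll() and walks the mmap'ed rings through libbpf-internal fields (epoll_fd, events, mmap_size, page_size). A simpler variant that stays on the public API is to register the buffer's epoll fd with a caller-owned event loop and drain on readiness; the sketch below assumes a pb already built with perf_buffer__new(), and drain_in_own_loop() is an illustrative helper, not code from the benchmark.

#include <errno.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <bpf/libbpf.h>

static int drain_in_own_loop(struct perf_buffer *pb)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int err, epfd = epoll_create1(0);

	if (epfd < 0)
		return -errno;

	/* register libbpf's epoll fd with the caller's own loop;
	 * an epoll fd is itself pollable, so nesting works */
	err = epoll_ctl(epfd, EPOLL_CTL_ADD, perf_buffer__epoll_fd(pb), &ev);
	if (err)
		err = -errno;

	while (!err) {
		if (epoll_wait(epfd, &ev, 1, -1) < 0 && errno != EINTR) {
			err = -errno;
			break;
		}
		/* readiness on any CPU ring: drain everything pending */
		err = perf_buffer__consume(pb);
	}
	close(epfd);
	return err;
}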
/tools/testing/selftests/bpf/prog_tests/
perf_buffer.c 52 struct perf_buffer *pb; in serial_test_perf_buffer() local
84 pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, in serial_test_perf_buffer()
86 if (!ASSERT_OK_PTR(pb, "perf_buf__new")) in serial_test_perf_buffer()
89 CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd", in serial_test_perf_buffer()
90 "bad fd: %d\n", perf_buffer__epoll_fd(pb)); in serial_test_perf_buffer()
105 err = perf_buffer__poll(pb, 100); in serial_test_perf_buffer()
113 if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt", in serial_test_perf_buffer()
114 "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus)) in serial_test_perf_buffer()
121 fd = perf_buffer__buffer_fd(pb, j); in serial_test_perf_buffer()
125 err = perf_buffer__consume_buffer(pb, j); in serial_test_perf_buffer()
[all …]
xdp_bpf2bpf.c 44 static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb, in run_xdp_bpf2bpf_pkt_size() argument
90 err = perf_buffer__poll(pb, 100); in run_xdp_bpf2bpf_pkt_size()
112 struct perf_buffer *pb = NULL; in test_xdp_bpf2bpf() local
149 pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8, in test_xdp_bpf2bpf()
151 if (!ASSERT_OK_PTR(pb, "perf_buf__new")) in test_xdp_bpf2bpf()
155 run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel, in test_xdp_bpf2bpf()
158 perf_buffer__free(pb); in test_xdp_bpf2bpf()
get_stack_raw_tp.c 91 struct perf_buffer *pb = NULL; in test_get_stack_raw_tp() local
129 pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output, in test_get_stack_raw_tp()
131 if (!ASSERT_OK_PTR(pb, "perf_buf__new")) in test_get_stack_raw_tp()
139 err = perf_buffer__poll(pb, 100); in test_get_stack_raw_tp()
147 perf_buffer__free(pb); in test_get_stack_raw_tp()
kfree_skb.c 65 struct perf_buffer *pb = NULL; in serial_test_kfree_skb() local
97 pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, in serial_test_kfree_skb()
99 if (!ASSERT_OK_PTR(pb, "perf_buf__new")) in serial_test_kfree_skb()
108 err = perf_buffer__poll(pb, 100); in serial_test_kfree_skb()
124 perf_buffer__free(pb); in serial_test_kfree_skb()
xdp_attach.c 108 struct perf_buffer *pb = NULL; in test_xdp_attach_fail() local
123 pb = perf_buffer__new(bpf_map__fd(skel->maps.xdp_errmsg_pb), 1, in test_xdp_attach_fail()
125 if (!ASSERT_OK_PTR(pb, "perf_buffer__new")) in test_xdp_attach_fail()
138 err = perf_buffer__poll(pb, 100); in test_xdp_attach_fail()
146 perf_buffer__free(pb); in test_xdp_attach_fail()
/tools/testing/selftests/bpf/
test_tcpnotify_user.c 44 void tcp_notifier_poller(struct perf_buffer *pb) in tcp_notifier_poller() argument
49 err = perf_buffer__poll(pb, 100); in tcp_notifier_poller()
59 struct perf_buffer *pb = arg; in poller_thread() local
61 tcp_notifier_poller(pb); in poller_thread()
75 struct perf_buffer *pb = NULL; in main() local
118 pb = perf_buffer__new(bpf_map__fd(perf_map), 8, dummyfn, NULL, NULL, NULL); in main()
119 if (!pb) in main()
122 pthread_create(&tid, NULL, poller_thread, pb); in main()
166 perf_buffer__free(pb); in main()
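test_tcpnotify_user.c runs the consumer off the main thread: it creates the buffer with perf_buffer__new() and hands it to a poller thread (pthread_create(&tid, NULL, poller_thread, pb), line 122 above), which loops on perf_buffer__poll(). A minimal sketch of that thread body, assuming a hypothetical 'exiting' stop flag that the real test does not have:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <bpf/libbpf.h>

static volatile bool exiting;	/* hypothetical stop flag */

static void *poller_thread(void *arg)
{
	struct perf_buffer *pb = arg;

	/* keep draining; a timeout or -EINTR is not fatal */
	while (!exiting) {
		int err = perf_buffer__poll(pb, 100 /* ms */);

		if (err < 0 && err != -EINTR)
			break;
	}
	return NULL;
}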
/tools/bpf/runqslower/
runqslower.c 115 struct perf_buffer *pb = NULL; in main() local
153 pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64, in main()
155 err = libbpf_get_error(pb); in main()
157 pb = NULL; in main()
162 while ((err = perf_buffer__poll(pb, 100)) >= 0) in main()
167 perf_buffer__free(pb); in main()
/tools/bpf/bpftool/
map_perf_ring.c 126 struct perf_buffer *pb; in do_event_pipe() local
189 pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr, in do_event_pipe()
191 if (!pb) { in do_event_pipe()
205 err = perf_buffer__poll(pb, 200); in do_event_pipe()
216 perf_buffer__free(pb); in do_event_pipe()
222 perf_buffer__free(pb); in do_event_pipe()
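bpftool's event pipe builds the buffer from a caller-supplied struct perf_event_attr via perf_buffer__new_raw(), whose callback receives whole struct perf_event_header records rather than decoded samples. A hedged sketch following the call shape visible above (map fd, page count, attr, raw callback, ctx, opts); the attr values and the open_raw()/print_event() names are illustrative, not taken from bpftool:

#include <stdio.h>
#include <linux/perf_event.h>
#include <bpf/libbpf.h>

/* Raw callback: sees PERF_RECORD_SAMPLE as well as PERF_RECORD_LOST. */
static enum bpf_perf_event_ret
print_event(void *ctx, int cpu, struct perf_event_header *event)
{
	printf("cpu %d: record type %u, %u bytes\n",
	       cpu, event->type, (unsigned int)event->size);
	return LIBBPF_PERF_EVENT_CONT;
}

static struct perf_buffer *open_raw(int map_fd)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.sample_type = PERF_SAMPLE_RAW,
	};

	/* 8 pages per CPU ring; ctx and opts left NULL */
	return perf_buffer__new_raw(map_fd, 8, &attr, print_event, NULL, NULL);
}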
/tools/testing/ktest/
config-bisect.pl 320 my ($pa, $pb) = @_;
324 my %b = %{$pb};
339 my ($pa, $pb) = @_;
345 my %b = %{$pb};
361 my ($pa, $pb) = @_;
367 my %b = %{$pb};
/tools/memory-model/
linux-kernel.cat 107 let pb = prop ; strong-fence ; hb* ; [Marked]
108 acyclic pb as propagation
134 let rcu-link = po? ; hb* ; pb* ; prop ; po
158 (* rb orders instructions just as pb does *)
159 let rb = prop ; rcu-fence ; hb* ; pb* ; [Marked]
168 * let xb = hb | pb | rb
182 let xbstar = (hb | pb | rb)*
/tools/lib/bpf/
libbpf.h 1430 LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
1431 LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
1432 LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
1433 LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
1434 LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
1435 LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
1436 LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);
1451 LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
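Taken together, these declarations cover the whole user-side lifecycle: create a buffer over a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, wait for data, consume (all rings, or one ring by index), and free. A minimal sketch; handle_sample() and run() are hypothetical names, and the error handling follows the NULL-plus-errno convention used by the callers above:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Per-sample callback invoked from perf_buffer__poll()/__consume(). */
static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	printf("cpu %d: %u byte sample\n", cpu, size);
}

static int run(int perf_map_fd)
{
	struct perf_buffer *pb;
	int err;

	/* 8 pages per CPU ring; lost-sample callback, ctx and opts NULL */
	pb = perf_buffer__new(perf_map_fd, 8, handle_sample, NULL, NULL, NULL);
	if (!pb)
		return -errno;

	fprintf(stderr, "epoll fd %d, %zu per-CPU rings\n",
		perf_buffer__epoll_fd(pb), perf_buffer__buffer_cnt(pb));

	/* Wait up to 100 ms per iteration; perf_buffer__consume() or
	 * __consume_buffer(pb, idx) would drain without waiting instead. */
	while ((err = perf_buffer__poll(pb, 100)) >= 0)
		;

	perf_buffer__free(pb);
	return err;
}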
libbpf.c 12312 struct perf_buffer *pb; member
12336 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb, in perf_buffer__free_cpu_buf() argument
12342 munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) in perf_buffer__free_cpu_buf()
12352 void perf_buffer__free(struct perf_buffer *pb) in perf_buffer__free() argument
12356 if (IS_ERR_OR_NULL(pb)) in perf_buffer__free()
12358 if (pb->cpu_bufs) { in perf_buffer__free()
12359 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__free()
12360 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__free()
12365 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); in perf_buffer__free()
12366 perf_buffer__free_cpu_buf(pb, cpu_buf); in perf_buffer__free()
[all …]
/tools/bpf/resolve_btfids/
main.c 660 static int cmp_id(const void *pa, const void *pb) in cmp_id() argument
662 const int *a = pa, *b = pb; in cmp_id()
/tools/memory-model/Documentation/
explanation.txt 29 21. THE PROPAGATES-BEFORE RELATION: pb
1433 THE PROPAGATES-BEFORE RELATION: pb
1436 The propagates-before (pb) relation capitalizes on the special
1463 The existence of a pb link from E to F implies that E must execute
1472 A good example illustrating how pb works is the SB pattern with strong
1495 If r0 = 0 at the end then there is a pb link from P0's load to P1's
1499 Note that this pb link is not included in hb as an instance of prop,
1502 Similarly, if r1 = 0 at the end then there is a pb link from P1's load
1504 cycle in pb, which is not possible since an instruction cannot execute
1508 In summary, the fact that the pb relation links events in the order
[all …]
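The SB (store buffering) pattern with strong fences that explanation.txt uses to illustrate pb conventionally has the following shape; treat this listing as a reconstruction from the surrounding text rather than a quotation of the file:

int x = 0, y = 0;

P0()
{
	int r0;

	WRITE_ONCE(x, 1);
	smp_mb();
	r0 = READ_ONCE(y);
}

P1()
{
	int r1;

	WRITE_ONCE(y, 1);
	smp_mb();
	r1 = READ_ONCE(x);
}

As the text argues, r0 = 0 yields a pb link from P0's load to P1's load, r1 = 0 yields the reverse link, and having both would form a cycle in pb, which is not possible since an instruction cannot execute before itself; hence the outcome r0 = 0 && r1 = 0 is forbidden.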