/tools/lib/bpf/

skel_internal.h
    214  #ifndef offsetofend
    215  #define offsetofend(TYPE, MEMBER) \   (macro)
    225  const size_t attr_sz = offsetofend(union bpf_attr, map_extra);   in skel_map_create()
    242  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in skel_map_update_elem()
    256  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in skel_map_delete_elem()
    268  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in skel_map_get_fd_by_id()
    279  const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);   in skel_raw_tracepoint_open()
    292  const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len);   in skel_link_create()
    311  const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array);   in bpf_load_and_run()
    312  const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);   in bpf_load_and_run()

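skel_internal.h (like the other tool headers below) only defines offsetofend() as a fallback when no earlier header has provided it; the listing shows just the first line of the #define. A minimal sketch of the usual expansion, i.e. the offset of the first byte past MEMBER:

```c
#include <stddef.h>

/* Fallback guarded by #ifndef, as in these headers. The body below is a
 * sketch of the conventional form: offsetof() plus the member's size,
 * giving the offset of the first byte *after* MEMBER. */
#ifndef offsetofend
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
#endif
```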
bpf.c
    108  const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);   in probe_memcg_account()
    175  const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);   in bpf_map_create()
    241  const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);   in bpf_prog_load()
    390  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in bpf_map_update_elem()
    406  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in bpf_map_lookup_elem()
    421  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in bpf_map_lookup_elem_flags()
    437  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in bpf_map_lookup_and_delete_elem()
    452  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in bpf_map_lookup_and_delete_elem_flags()
    468  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in bpf_map_delete_elem()
    482  const size_t attr_sz = offsetofend(union bpf_attr, flags);   in bpf_map_delete_elem_flags()
    [all …]

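The bpf.c wrappers all use the same idiom: size union bpf_attr only up to the last field the command actually needs, zero just that prefix, and pass the same size to the syscall so older kernels never see (and never have to reject) fields they do not know about. A minimal sketch of the pattern for a map-update style command; the real wrappers in bpf.c are more involved and go through libbpf's internal sys_bpf() helper rather than calling syscall() directly:

```c
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Sketch of the attr-sizing idiom: only the prefix of union bpf_attr up to
 * and including 'flags' is zeroed, filled, and handed to the kernel. */
static int map_update_sketch(int map_fd, const void *key, const void *value,
			     __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = map_fd;
	attr.key = (__u64)(unsigned long)key;
	attr.value = (__u64)(unsigned long)value;
	attr.flags = flags;

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
}
```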
libbpf_internal.h
    106  #ifndef offsetofend
    107  # define offsetofend(TYPE, FIELD) \   (macro)
    317  offsetofend(struct type, \
    321  ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
    332  ssize_t __off = offsetofend(typeof(*(opts)), last_nonzero_field); \

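Lines 317–332 belong to libbpf's extensible-options helpers: every *_opts struct records its own sizeof() in a leading sz field, and offsetofend() is how the library decides whether the caller's copy of the struct is recent enough to contain a given trailing field. A small sketch of the idea with hypothetical names (demo_opts, demo_opts_has(), demo_api() are stand-ins; the real macros are OPTS_HAS() and friends in libbpf_internal.h):

```c
#include <stddef.h>

#define offsetofend(TYPE, FIELD) \
	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))

/* Hypothetical extensible options struct: the caller sets .sz so the
 * library can tell which trailing fields actually exist. */
struct demo_opts {
	size_t sz;		/* caller sets this to sizeof(struct demo_opts) */
	unsigned int flags;
	int log_level;		/* field added in a later version */
};

/* True only if the caller's struct is large enough to contain 'field'. */
#define demo_opts_has(opts, field) \
	((opts) && (opts)->sz >= offsetofend(typeof(*(opts)), field))

static int demo_api(const struct demo_opts *opts)
{
	/* Fall back to a default when the caller predates 'log_level'. */
	return demo_opts_has(opts, log_level) ? opts->log_level : 0;
}
```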
gen_loader.c
    420  int attr_size = offsetofend(union bpf_attr, btf_log_level);   in bpf_gen__load_btf()
    454  int attr_size = offsetofend(union bpf_attr, map_extra);   in bpf_gen__map_create()
    746  emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));   in emit_ksym_relo_log()
    844  emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));   in emit_relo_ksym_btf()
    846  emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));   in emit_relo_ksym_btf()
    940  int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);   in bpf_gen__prog_load()
   1037  int attr_size = offsetofend(union bpf_attr, flags);   in bpf_gen__map_update_elem()
   1084  int attr_size = offsetofend(union bpf_attr, flags);   in bpf_gen__populate_outer_map()
   1110  int attr_size = offsetofend(union bpf_attr, map_fd);   in bpf_gen__map_freeze()

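Besides the usual bpf_attr sizing, gen_loader.c also uses offsetofend(struct bpf_insn, code) as a byte offset when it emits loads and stores that patch register fields of generated instructions: the byte immediately after the one-byte opcode carries the packed dst_reg/src_reg nibbles. A compile-time check of that assumption (a sketch, not taken from the file):

```c
#include <stddef.h>
#include <linux/bpf.h>		/* struct bpf_insn */

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* 'code' is the first __u8 of struct bpf_insn, so the first byte past it
 * (offset 1) is the byte holding dst_reg:4 and src_reg:4 -- exactly where
 * the generated loader reads and writes register numbers. */
_Static_assert(offsetofend(struct bpf_insn, code) == 1,
	       "register byte immediately follows the opcode byte");
```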
features.c
     25  const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);   in probe_kern_prog_name()

btf.c
   3031  if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||   in btf_ext_parse_hdr()
   3095  if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {   in btf_ext__new()
   3108  if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))   in btf_ext__new()

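Here the check runs in the other direction: the .BTF.ext section starts with a self-describing header whose hdr_len records how much header the producer wrote, and the parser only consumes an optional sub-section (line info, CO-RE relocations) if the corresponding length field falls within that recorded size. A minimal sketch of the check; the struct layout below is a sketch modelled on the header libbpf uses internally, not copied from it:

```c
#include <stdbool.h>
#include <stddef.h>
#include <linux/types.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Sketch of a .BTF.ext header; the real definition lives inside libbpf. */
struct btf_ext_header_sketch {
	__u16 magic;
	__u8  version;
	__u8  flags;
	__u32 hdr_len;		/* how much header the producer wrote */
	__u32 func_info_off;
	__u32 func_info_len;
	__u32 line_info_off;
	__u32 line_info_len;
	/* optional part, written only by newer producers */
	__u32 core_relo_off;
	__u32 core_relo_len;
};

/* An optional sub-section may only be consumed if the producer's header
 * was large enough to contain its length field at all. */
static bool has_core_relos(const struct btf_ext_header_sketch *hdr)
{
	return hdr->hdr_len >= offsetofend(struct btf_ext_header_sketch,
					   core_relo_len);
}
```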
libbpf.c
  13982  if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {   in bpf_object__attach_skeleton()

/tools/testing/selftests/bpf/

bpf_util.h
     58  #ifndef offsetofend
     59  #define offsetofend(TYPE, MEMBER) \   (macro)

/tools/testing/selftests/bpf/progs/

verifier_sock.c
      9  #define offsetofend(TYPE, MEMBER) \   (macro)
    363  __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))   in dst_port_half_load_invalid_2()
    460  __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))   in skb_sk_beyond_last_field_1()
    569  __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))   in skb_sk_beyond_last_field_2()

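These verifier tests feed offsetofend() values into their inline-asm templates as __imm_const operands: the end offset of dst_port, rx_queue_mapping, or bytes_acked is the first byte past that field (and, for the *_beyond_last_field tests, past the last field of the context struct), which is exactly the boundary the verifier must police. A tiny user-space sketch of the constants involved, assuming the field names from the uapi struct bpf_sock:

```c
#include <stdio.h>
#include <stddef.h>
#include <linux/bpf.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

int main(void)
{
	/* First byte past dst_port: a load starting here no longer touches
	 * dst_port, which is what the "invalid" test cases probe. */
	printf("bpf_sock dst_port ends at byte %zu\n",
	       offsetofend(struct bpf_sock, dst_port));
	/* Last field of struct bpf_sock at the time these tests were
	 * written; a load at this offset is "beyond the last field". */
	printf("bpf_sock rx_queue_mapping ends at byte %zu\n",
	       offsetofend(struct bpf_sock, rx_queue_mapping));
	return 0;
}
```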
test_cls_redirect.c
     34  #define offsetofend(TYPE, MEMBER) \   (macro)
     94  offsetofend(struct bpf_sock_tuple, ipv4.dport) -
     99  offsetofend(struct bpf_sock_tuple, ipv6.dport) -

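Lines 94 and 99 compute how many bytes of struct bpf_sock_tuple are meaningful for an IPv4 versus an IPv6 socket lookup; note the nested member designator, which offsetof()/offsetofend() accept. A sketch of that computation, assuming the subtraction in the file is against the start of the respective union member:

```c
#include <stddef.h>
#include <linux/bpf.h>		/* struct bpf_sock_tuple */

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Bytes of the tuple that matter for each address family: from the start
 * of the ipv4/ipv6 union member up to and including its dport field. */
static const size_t tuple_len_ipv4 =
	offsetofend(struct bpf_sock_tuple, ipv4.dport) -
	offsetof(struct bpf_sock_tuple, ipv4);
static const size_t tuple_len_ipv6 =
	offsetofend(struct bpf_sock_tuple, ipv6.dport) -
	offsetof(struct bpf_sock_tuple, ipv6);
```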
test_cls_redirect_dynptr.c
     28  #define offsetofend(TYPE, MEMBER) \   (macro)
     88  offsetofend(struct bpf_sock_tuple, ipv4.dport) -
     93  offsetofend(struct bpf_sock_tuple, ipv6.dport) -

/tools/testing/selftests/iommu/

iommufd_utils.h
     46  #define offsetofend(TYPE, MEMBER) \   (macro)
    680  if (data_len >= offsetofend(struct iommu_test_hw_info, test_reg))   in _test_cmd_get_hw_info()
    682  if (data_len >= offsetofend(struct iommu_test_hw_info, flags))   in _test_cmd_get_hw_info()

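The iommufd selftests apply the same size-gated idiom to kernel output: the hw-info query fills at most data_len bytes of the caller's buffer, so each field is only validated when data_len reaches past the end of that field. A sketch with a hypothetical output struct (struct hw_info_demo and its fields are stand-ins, not the real iommu_test_hw_info layout):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Hypothetical extensible "hardware info" blob returned by an ioctl. */
struct hw_info_demo {
	uint32_t flags;
	uint32_t test_reg;	/* appended by a newer kernel version */
};

/* Only check fields the kernel actually had room to fill in. */
static void check_hw_info(const struct hw_info_demo *info, size_t data_len)
{
	if (data_len >= offsetofend(struct hw_info_demo, flags))
		assert(info->flags == 0);
	if (data_len >= offsetofend(struct hw_info_demo, test_reg))
		assert(info->test_reg != 0);
}
```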
iommufd.c
     92  size_t min_size = offsetofend(struct _struct, _last); \   in TEST_F()
    272  offsetofend(struct iommu_hwpt_selftest, iotlb);   in TEST_F()

/tools/testing/selftests/bpf/bpf_testmod/

bpf_testmod.c
   1355  offsetofend(struct bpf_testmod_ops, tramp_40));   in bpf_testmod_init()

/tools/testing/selftests/bpf/verifier/

ctx_skb.c
   1064  offsetofend(struct __sk_buff, gso_size)),

/tools/testing/selftests/bpf/prog_tests/

tc_opts.c
   2463  const size_t attr_size = offsetofend(union bpf_attr, query);   in test_tc_opts_query_target()