Lines Matching +full:function +full:- +full:enumerator

1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
71 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
116 if (err != -EPERM || geteuid() != 0) in pr_perm_msg()
133 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", in pr_perm_msg()
149 fd = -1; \
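
The "fd = -1;" fragment above sits inside libbpf's zclose() macro, which together with zfree() implements the close-and-poison idiom used throughout the rest of this listing (see the zclose()/zfree() calls in bpf_program__unload() below). A minimal stand-alone sketch of that idiom, with the macro shapes reconstructed here (GNU statement expressions assumed, as in libbpf):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* free/close, then poison the handle so a repeated call is harmless */
    #define zfree(ptr) do { free(*(ptr)); *(ptr) = NULL; } while (0)

    #define zclose(fd) ({                     \
            int ___err = 0;                   \
            if ((fd) >= 0)                    \
                    ___err = close((fd));     \
            fd = -1;                          \
            ___err;                           \
    })

    int main(void)
    {
            char *buf = malloc(16);
            int fd = -1;

            zfree(&buf);   /* buf is NULL afterwards */
            zclose(fd);    /* no-op: fd was already -1 */
            printf("buf=%p fd=%d\n", (void *)buf, fd);
            return 0;
    }
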
235 * program. For the entry-point (main) BPF program, this is always
236 * zero. For a sub-program, this gets reset before each of main BPF
238 * whether sub-program was already appended to the main program, and
256 * entry-point BPF programs this includes the size of main program
257 * itself plus all the used sub-programs, appended at the end
304 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
476 #define obj_elf_valid(o) ((o)->efile.elf)
497 * it is possible that prog->instances.nr == -1. in bpf_program__unload()
499 if (prog->instances.nr > 0) { in bpf_program__unload()
500 for (i = 0; i < prog->instances.nr; i++) in bpf_program__unload()
501 zclose(prog->instances.fds[i]); in bpf_program__unload()
502 } else if (prog->instances.nr != -1) { in bpf_program__unload()
504 prog->instances.nr); in bpf_program__unload()
507 prog->instances.nr = -1; in bpf_program__unload()
508 zfree(&prog->instances.fds); in bpf_program__unload()
510 zfree(&prog->func_info); in bpf_program__unload()
511 zfree(&prog->line_info); in bpf_program__unload()
519 if (prog->clear_priv) in bpf_program__exit()
520 prog->clear_priv(prog, prog->priv); in bpf_program__exit()
522 prog->priv = NULL; in bpf_program__exit()
523 prog->clear_priv = NULL; in bpf_program__exit()
526 zfree(&prog->name); in bpf_program__exit()
527 zfree(&prog->sec_name); in bpf_program__exit()
528 zfree(&prog->pin_name); in bpf_program__exit()
529 zfree(&prog->insns); in bpf_program__exit()
530 zfree(&prog->reloc_desc); in bpf_program__exit()
532 prog->nr_reloc = 0; in bpf_program__exit()
533 prog->insns_cnt = 0; in bpf_program__exit()
534 prog->sec_idx = -1; in bpf_program__exit()
541 name = p = strdup(prog->sec_name); in __bpf_program__pin_name()
550 return BPF_CLASS(insn->code) == BPF_JMP && in insn_is_subprog_call()
551 BPF_OP(insn->code) == BPF_CALL && in insn_is_subprog_call()
552 BPF_SRC(insn->code) == BPF_K && in insn_is_subprog_call()
553 insn->src_reg == BPF_PSEUDO_CALL && in insn_is_subprog_call()
554 insn->dst_reg == 0 && in insn_is_subprog_call()
555 insn->off == 0; in insn_is_subprog_call()
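
The six matched lines above (550-555) form the complete predicate libbpf uses to recognize a BPF-to-BPF sub-program call: a BPF_JMP|BPF_CALL instruction carrying BPF_PSEUDO_CALL in src_reg. A stand-alone sketch; the BPF_* constants and the struct bpf_insn layout are restated from linux/bpf.h only so the example compiles without kernel headers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* restated from linux/bpf.h for a self-contained example */
    #define BPF_JMP         0x05
    #define BPF_CALL        0x80
    #define BPF_K           0x00
    #define BPF_PSEUDO_CALL 1
    #define BPF_CLASS(code) ((code) & 0x07)
    #define BPF_OP(code)    ((code) & 0xf0)
    #define BPF_SRC(code)   ((code) & 0x08)

    struct bpf_insn {
            uint8_t code;
            uint8_t dst_reg:4, src_reg:4;
            int16_t off;
            int32_t imm;
    };

    static bool insn_is_subprog_call(const struct bpf_insn *insn)
    {
            return BPF_CLASS(insn->code) == BPF_JMP &&
                   BPF_OP(insn->code) == BPF_CALL &&
                   BPF_SRC(insn->code) == BPF_K &&
                   insn->src_reg == BPF_PSEUDO_CALL &&
                   insn->dst_reg == 0 &&
                   insn->off == 0;
    }

    int main(void)
    {
            /* pseudo-call to a sub-program vs. a call to a BPF helper */
            struct bpf_insn sub = { .code = BPF_JMP | BPF_CALL,
                                    .src_reg = BPF_PSEUDO_CALL, .imm = 2 };
            struct bpf_insn helper = { .code = BPF_JMP | BPF_CALL, .imm = 6 };

            printf("sub=%d helper=%d\n",
                   insn_is_subprog_call(&sub), insn_is_subprog_call(&helper));
            return 0;
    }
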
566 return -EINVAL; in bpf_object__init_prog()
570 prog->obj = obj; in bpf_object__init_prog()
572 prog->sec_idx = sec_idx; in bpf_object__init_prog()
573 prog->sec_insn_off = sec_off / BPF_INSN_SZ; in bpf_object__init_prog()
574 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; in bpf_object__init_prog()
576 prog->insns_cnt = prog->sec_insn_cnt; in bpf_object__init_prog()
578 prog->type = BPF_PROG_TYPE_UNSPEC; in bpf_object__init_prog()
579 prog->load = true; in bpf_object__init_prog()
581 prog->instances.fds = NULL; in bpf_object__init_prog()
582 prog->instances.nr = -1; in bpf_object__init_prog()
584 prog->sec_name = strdup(sec_name); in bpf_object__init_prog()
585 if (!prog->sec_name) in bpf_object__init_prog()
588 prog->name = strdup(name); in bpf_object__init_prog()
589 if (!prog->name) in bpf_object__init_prog()
592 prog->pin_name = __bpf_program__pin_name(prog); in bpf_object__init_prog()
593 if (!prog->pin_name) in bpf_object__init_prog()
596 prog->insns = malloc(insn_data_sz); in bpf_object__init_prog()
597 if (!prog->insns) in bpf_object__init_prog()
599 memcpy(prog->insns, insn_data, insn_data_sz); in bpf_object__init_prog()
605 return -ENOMEM; in bpf_object__init_prog()
613 void *data = sec_data->d_buf; in bpf_object__add_programs()
614 size_t sec_sz = sec_data->d_size, sec_off, prog_sz; in bpf_object__add_programs()
619 progs = obj->programs; in bpf_object__add_programs()
620 nr_progs = obj->nr_programs; in bpf_object__add_programs()
627 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
636 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
642 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
651 * In this case the original obj->programs in bpf_object__add_programs()
657 return -ENOMEM; in bpf_object__add_programs()
659 obj->programs = progs; in bpf_object__add_programs()
669 obj->nr_programs = nr_progs; in bpf_object__add_programs()
710 if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) in find_member_by_name()
762 if (kern_data_member->type == kern_type_id) in find_struct_ops_kern_types()
768 return -EINVAL; in find_struct_ops_kern_types()
782 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; in bpf_map__is_struct_ops()
798 st_ops = map->st_ops; in bpf_map__init_kern_struct_ops()
799 type = st_ops->type; in bpf_map__init_kern_struct_ops()
800 tname = st_ops->tname; in bpf_map__init_kern_struct_ops()
809 map->name, st_ops->type_id, kern_type_id, kern_vtype_id); in bpf_map__init_kern_struct_ops()
811 map->def.value_size = kern_vtype->size; in bpf_map__init_kern_struct_ops()
812 map->btf_vmlinux_value_type_id = kern_vtype_id; in bpf_map__init_kern_struct_ops()
814 st_ops->kern_vdata = calloc(1, kern_vtype->size); in bpf_map__init_kern_struct_ops()
815 if (!st_ops->kern_vdata) in bpf_map__init_kern_struct_ops()
816 return -ENOMEM; in bpf_map__init_kern_struct_ops()
818 data = st_ops->data; in bpf_map__init_kern_struct_ops()
819 kern_data_off = kern_data_member->offset / 8; in bpf_map__init_kern_struct_ops()
820 kern_data = st_ops->kern_vdata + kern_data_off; in bpf_map__init_kern_struct_ops()
832 mname = btf__name_by_offset(btf, member->name_off); in bpf_map__init_kern_struct_ops()
836 map->name, mname); in bpf_map__init_kern_struct_ops()
837 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
840 kern_member_idx = kern_member - btf_members(kern_type); in bpf_map__init_kern_struct_ops()
844 map->name, mname); in bpf_map__init_kern_struct_ops()
845 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
848 moff = member->offset / 8; in bpf_map__init_kern_struct_ops()
849 kern_moff = kern_member->offset / 8; in bpf_map__init_kern_struct_ops()
854 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); in bpf_map__init_kern_struct_ops()
855 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, in bpf_map__init_kern_struct_ops()
857 if (BTF_INFO_KIND(mtype->info) != in bpf_map__init_kern_struct_ops()
858 BTF_INFO_KIND(kern_mtype->info)) { in bpf_map__init_kern_struct_ops()
860 map->name, mname, BTF_INFO_KIND(mtype->info), in bpf_map__init_kern_struct_ops()
861 BTF_INFO_KIND(kern_mtype->info)); in bpf_map__init_kern_struct_ops()
862 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
868 prog = st_ops->progs[i]; in bpf_map__init_kern_struct_ops()
873 kern_mtype->type, in bpf_map__init_kern_struct_ops()
876 /* mtype->type must be a func_proto which was in bpf_map__init_kern_struct_ops()
882 map->name, mname); in bpf_map__init_kern_struct_ops()
883 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
886 prog->attach_btf_id = kern_type_id; in bpf_map__init_kern_struct_ops()
887 prog->expected_attach_type = kern_member_idx; in bpf_map__init_kern_struct_ops()
889 st_ops->kern_func_off[i] = kern_data_off + kern_moff; in bpf_map__init_kern_struct_ops()
892 map->name, mname, prog->name, moff, in bpf_map__init_kern_struct_ops()
902 map->name, mname, (ssize_t)msize, in bpf_map__init_kern_struct_ops()
904 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
908 map->name, mname, (unsigned int)msize, in bpf_map__init_kern_struct_ops()
922 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__init_kern_struct_ops_maps()
923 map = &obj->maps[i]; in bpf_object__init_kern_struct_ops_maps()
928 err = bpf_map__init_kern_struct_ops(map, obj->btf, in bpf_object__init_kern_struct_ops_maps()
929 obj->btf_vmlinux); in bpf_object__init_kern_struct_ops_maps()
948 if (obj->efile.st_ops_shndx == -1) in bpf_object__init_struct_ops_maps()
951 btf = obj->btf; in bpf_object__init_struct_ops_maps()
957 return -EINVAL; in bpf_object__init_struct_ops_maps()
963 type = btf__type_by_id(obj->btf, vsi->type); in bpf_object__init_struct_ops_maps()
964 var_name = btf__name_by_offset(obj->btf, type->name_off); in bpf_object__init_struct_ops_maps()
966 type_id = btf__resolve_type(obj->btf, vsi->type); in bpf_object__init_struct_ops_maps()
969 vsi->type, STRUCT_OPS_SEC); in bpf_object__init_struct_ops_maps()
970 return -EINVAL; in bpf_object__init_struct_ops_maps()
973 type = btf__type_by_id(obj->btf, type_id); in bpf_object__init_struct_ops_maps()
974 tname = btf__name_by_offset(obj->btf, type->name_off); in bpf_object__init_struct_ops_maps()
977 return -ENOTSUP; in bpf_object__init_struct_ops_maps()
981 return -EINVAL; in bpf_object__init_struct_ops_maps()
988 map->sec_idx = obj->efile.st_ops_shndx; in bpf_object__init_struct_ops_maps()
989 map->sec_offset = vsi->offset; in bpf_object__init_struct_ops_maps()
990 map->name = strdup(var_name); in bpf_object__init_struct_ops_maps()
991 if (!map->name) in bpf_object__init_struct_ops_maps()
992 return -ENOMEM; in bpf_object__init_struct_ops_maps()
994 map->def.type = BPF_MAP_TYPE_STRUCT_OPS; in bpf_object__init_struct_ops_maps()
995 map->def.key_size = sizeof(int); in bpf_object__init_struct_ops_maps()
996 map->def.value_size = type->size; in bpf_object__init_struct_ops_maps()
997 map->def.max_entries = 1; in bpf_object__init_struct_ops_maps()
999 map->st_ops = calloc(1, sizeof(*map->st_ops)); in bpf_object__init_struct_ops_maps()
1000 if (!map->st_ops) in bpf_object__init_struct_ops_maps()
1001 return -ENOMEM; in bpf_object__init_struct_ops_maps()
1002 st_ops = map->st_ops; in bpf_object__init_struct_ops_maps()
1003 st_ops->data = malloc(type->size); in bpf_object__init_struct_ops_maps()
1004 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); in bpf_object__init_struct_ops_maps()
1005 st_ops->kern_func_off = malloc(btf_vlen(type) * in bpf_object__init_struct_ops_maps()
1006 sizeof(*st_ops->kern_func_off)); in bpf_object__init_struct_ops_maps()
1007 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) in bpf_object__init_struct_ops_maps()
1008 return -ENOMEM; in bpf_object__init_struct_ops_maps()
1010 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) { in bpf_object__init_struct_ops_maps()
1013 return -EINVAL; in bpf_object__init_struct_ops_maps()
1016 memcpy(st_ops->data, in bpf_object__init_struct_ops_maps()
1017 obj->efile.st_ops_data->d_buf + vsi->offset, in bpf_object__init_struct_ops_maps()
1018 type->size); in bpf_object__init_struct_ops_maps()
1019 st_ops->tname = tname; in bpf_object__init_struct_ops_maps()
1020 st_ops->type = type; in bpf_object__init_struct_ops_maps()
1021 st_ops->type_id = type_id; in bpf_object__init_struct_ops_maps()
1024 tname, type_id, var_name, vsi->offset); in bpf_object__init_struct_ops_maps()
1041 return ERR_PTR(-ENOMEM); in bpf_object__new()
1044 strcpy(obj->path, path); in bpf_object__new()
1046 strncpy(obj->name, obj_name, sizeof(obj->name) - 1); in bpf_object__new()
1047 obj->name[sizeof(obj->name) - 1] = 0; in bpf_object__new()
1050 strncpy(obj->name, basename((void *)path), in bpf_object__new()
1051 sizeof(obj->name) - 1); in bpf_object__new()
1052 end = strchr(obj->name, '.'); in bpf_object__new()
1057 obj->efile.fd = -1; in bpf_object__new()
1059 * Caller of this function should also call in bpf_object__new()
1064 obj->efile.obj_buf = obj_buf; in bpf_object__new()
1065 obj->efile.obj_buf_sz = obj_buf_sz; in bpf_object__new()
1066 obj->efile.maps_shndx = -1; in bpf_object__new()
1067 obj->efile.btf_maps_shndx = -1; in bpf_object__new()
1068 obj->efile.data_shndx = -1; in bpf_object__new()
1069 obj->efile.rodata_shndx = -1; in bpf_object__new()
1070 obj->efile.bss_shndx = -1; in bpf_object__new()
1071 obj->efile.st_ops_shndx = -1; in bpf_object__new()
1072 obj->kconfig_map_idx = -1; in bpf_object__new()
1073 obj->rodata_map_idx = -1; in bpf_object__new()
1075 obj->kern_version = get_kernel_version(); in bpf_object__new()
1076 obj->loaded = false; in bpf_object__new()
1078 INIT_LIST_HEAD(&obj->list); in bpf_object__new()
1079 list_add(&obj->list, &bpf_objects_list); in bpf_object__new()
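
Several of the bpf_object__new() lines above derive the default object name from the file path: the basename, truncated at the first '.'. A stand-alone sketch of just that derivation (POSIX basename(3) assumed):

    #include <libgen.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char path[] = "samples/bpf/prog.bpf.o"; /* mutable: basename may modify it */
            char name[64];
            char *end;

            snprintf(name, sizeof(name), "%s", basename(path));
            end = strchr(name, '.');
            if (end)
                    *end = '\0';
            printf("%s\n", name); /* -> "prog" */
            return 0;
    }
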
1088 if (obj->efile.elf) { in bpf_object__elf_finish()
1089 elf_end(obj->efile.elf); in bpf_object__elf_finish()
1090 obj->efile.elf = NULL; in bpf_object__elf_finish()
1092 obj->efile.symbols = NULL; in bpf_object__elf_finish()
1093 obj->efile.data = NULL; in bpf_object__elf_finish()
1094 obj->efile.rodata = NULL; in bpf_object__elf_finish()
1095 obj->efile.bss = NULL; in bpf_object__elf_finish()
1096 obj->efile.st_ops_data = NULL; in bpf_object__elf_finish()
1098 zfree(&obj->efile.reloc_sects); in bpf_object__elf_finish()
1099 obj->efile.nr_reloc_sects = 0; in bpf_object__elf_finish()
1100 zclose(obj->efile.fd); in bpf_object__elf_finish()
1101 obj->efile.obj_buf = NULL; in bpf_object__elf_finish()
1102 obj->efile.obj_buf_sz = 0; in bpf_object__elf_finish()
1117 return -LIBBPF_ERRNO__LIBELF; in bpf_object__elf_init()
1120 if (obj->efile.obj_buf_sz > 0) { in bpf_object__elf_init()
1125 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf, in bpf_object__elf_init()
1126 obj->efile.obj_buf_sz); in bpf_object__elf_init()
1128 obj->efile.fd = open(obj->path, O_RDONLY); in bpf_object__elf_init()
1129 if (obj->efile.fd < 0) { in bpf_object__elf_init()
1132 err = -errno; in bpf_object__elf_init()
1134 pr_warn("elf: failed to open %s: %s\n", obj->path, cp); in bpf_object__elf_init()
1138 obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); in bpf_object__elf_init()
1141 if (!obj->efile.elf) { in bpf_object__elf_init()
1142 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1143 err = -LIBBPF_ERRNO__LIBELF; in bpf_object__elf_init()
1147 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { in bpf_object__elf_init()
1148 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1149 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1152 ep = &obj->efile.ehdr; in bpf_object__elf_init()
1154 if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) { in bpf_object__elf_init()
1156 obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1157 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1162 if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { in bpf_object__elf_init()
1164 obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1165 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1170 if (ep->e_type != ET_REL || in bpf_object__elf_init()
1171 (ep->e_machine && ep->e_machine != EM_BPF)) { in bpf_object__elf_init()
1172 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); in bpf_object__elf_init()
1173 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1186 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB) in bpf_object__check_endianness()
1189 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB) in bpf_object__check_endianness()
1194 pr_warn("elf: endianness mismatch in %s.\n", obj->path); in bpf_object__check_endianness()
1195 return -LIBBPF_ERRNO__ENDIAN; in bpf_object__check_endianness()
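
The two EI_DATA comparisons above are the whole endianness policy: libbpf only checks that the object's byte order matches the host's and otherwise fails with -LIBBPF_ERRNO__ENDIAN; nothing is byte-swapped. A small sketch of the host-side expectation (glibc <endian.h> assumed):

    #include <elf.h>
    #include <endian.h>
    #include <stdio.h>

    int main(void)
    {
    #if __BYTE_ORDER == __LITTLE_ENDIAN
            unsigned char want = ELFDATA2LSB;
    #else
            unsigned char want = ELFDATA2MSB;
    #endif
            printf("accepting only e_ident[EI_DATA] == %u\n", want);
            return 0;
    }
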
1201 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1)); in bpf_object__init_license()
1202 pr_debug("license of %s is %s\n", obj->path, obj->license); in bpf_object__init_license()
1212 pr_warn("invalid kver section in %s\n", obj->path); in bpf_object__init_kversion()
1213 return -LIBBPF_ERRNO__FORMAT; in bpf_object__init_kversion()
1216 obj->kern_version = kver; in bpf_object__init_kversion()
1217 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); in bpf_object__init_kversion()
1232 int ret = -ENOENT; in bpf_object__section_size()
1236 return -EINVAL; in bpf_object__section_size()
1238 if (obj->efile.data) in bpf_object__section_size()
1239 *size = obj->efile.data->d_size; in bpf_object__section_size()
1241 if (obj->efile.bss) in bpf_object__section_size()
1242 *size = obj->efile.bss->d_size; in bpf_object__section_size()
1244 if (obj->efile.rodata) in bpf_object__section_size()
1245 *size = obj->efile.rodata->d_size; in bpf_object__section_size()
1247 if (obj->efile.st_ops_data) in bpf_object__section_size()
1248 *size = obj->efile.st_ops_data->d_size; in bpf_object__section_size()
1255 *size = data->d_size; in bpf_object__section_size()
1265 Elf_Data *symbols = obj->efile.symbols; in bpf_object__variable_offset()
1270 return -EINVAL; in bpf_object__variable_offset()
1272 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) { in bpf_object__variable_offset()
1285 return -EIO; in bpf_object__variable_offset()
1293 return -ENOENT; in bpf_object__variable_offset()
1302 if (obj->nr_maps < obj->maps_cap) in bpf_object__add_map()
1303 return &obj->maps[obj->nr_maps++]; in bpf_object__add_map()
1305 new_cap = max((size_t)4, obj->maps_cap * 3 / 2); in bpf_object__add_map()
1306 new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps)); in bpf_object__add_map()
1309 return ERR_PTR(-ENOMEM); in bpf_object__add_map()
1312 obj->maps_cap = new_cap; in bpf_object__add_map()
1313 obj->maps = new_maps; in bpf_object__add_map()
1316 memset(obj->maps + obj->nr_maps, 0, in bpf_object__add_map()
1317 (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps)); in bpf_object__add_map()
1319 * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin) in bpf_object__add_map()
1322 for (i = obj->nr_maps; i < obj->maps_cap; i++) { in bpf_object__add_map()
1323 obj->maps[i].fd = -1; in bpf_object__add_map()
1324 obj->maps[i].inner_map_fd = -1; in bpf_object__add_map()
1327 return &obj->maps[obj->nr_maps++]; in bpf_object__add_map()
1335 map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries; in bpf_map_mmap_sz()
1346 int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, in internal_map_name()
1347 strlen(obj->name)); in internal_map_name()
1349 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, in internal_map_name()
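
internal_map_name() above budgets the 16-byte BPF_OBJ_NAME_LEN between an object-name prefix and a section suffix such as ".rodata". A stand-alone sketch of that truncation, assuming (as the surrounding source does) that at least 7 bytes are reserved for the suffix:

    #include <stdio.h>
    #include <string.h>

    #define BPF_OBJ_NAME_LEN 16U /* from linux/bpf.h, NUL included */

    static void internal_map_name(char out[BPF_OBJ_NAME_LEN],
                                  const char *obj_name, const char *sfx)
    {
            size_t sfx_len = strlen(sfx) > 7 ? strlen(sfx) : 7;
            size_t pfx_len = strlen(obj_name);

            if (pfx_len > BPF_OBJ_NAME_LEN - sfx_len - 1)
                    pfx_len = BPF_OBJ_NAME_LEN - sfx_len - 1;
            snprintf(out, BPF_OBJ_NAME_LEN, "%.*s%.*s",
                     (int)pfx_len, obj_name, (int)sfx_len, sfx);
    }

    int main(void)
    {
            char name[BPF_OBJ_NAME_LEN];

            internal_map_name(name, "very_long_object_name", ".rodata");
            printf("%s\n", name); /* -> "very_lon.rodata" */
            return 0;
    }
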
1372 map->libbpf_type = type; in bpf_object__init_internal_map()
1373 map->sec_idx = sec_idx; in bpf_object__init_internal_map()
1374 map->sec_offset = 0; in bpf_object__init_internal_map()
1375 map->name = internal_map_name(obj, type); in bpf_object__init_internal_map()
1376 if (!map->name) { in bpf_object__init_internal_map()
1378 return -ENOMEM; in bpf_object__init_internal_map()
1381 def = &map->def; in bpf_object__init_internal_map()
1382 def->type = BPF_MAP_TYPE_ARRAY; in bpf_object__init_internal_map()
1383 def->key_size = sizeof(int); in bpf_object__init_internal_map()
1384 def->value_size = data_sz; in bpf_object__init_internal_map()
1385 def->max_entries = 1; in bpf_object__init_internal_map()
1386 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG in bpf_object__init_internal_map()
1388 def->map_flags |= BPF_F_MMAPABLE; in bpf_object__init_internal_map()
1391 map->name, map->sec_idx, map->sec_offset, def->map_flags); in bpf_object__init_internal_map()
1393 map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, in bpf_object__init_internal_map()
1394 MAP_SHARED | MAP_ANONYMOUS, -1, 0); in bpf_object__init_internal_map()
1395 if (map->mmaped == MAP_FAILED) { in bpf_object__init_internal_map()
1396 err = -errno; in bpf_object__init_internal_map()
1397 map->mmaped = NULL; in bpf_object__init_internal_map()
1399 map->name, err); in bpf_object__init_internal_map()
1400 zfree(&map->name); in bpf_object__init_internal_map()
1405 memcpy(map->mmaped, data, data_sz); in bpf_object__init_internal_map()
1407 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); in bpf_object__init_internal_map()
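
The mmap()/memcpy() lines above show how .data/.rodata/.bss get their user-space backing before the BPF map exists: an anonymous shared mapping sized from the map definition (bpf_map_mmap_sz() above rounds value_size up to 8 bytes, times max_entries), seeded with the ELF section contents. A minimal sketch of that pattern:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            const char init[] = "initial .data contents";
            size_t sz = 4096; /* stand-in for the rounded-up map size */
            void *mmaped;

            mmaped = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            if (mmaped == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            memcpy(mmaped, init, sizeof(init)); /* seed from the ELF section */
            printf("%s\n", (char *)mmaped);
            munmap(mmaped, sz);
            return 0;
    }
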
1416 * Populate obj->maps with libbpf internal maps. in bpf_object__init_global_data_maps()
1418 if (obj->efile.data_shndx >= 0) { in bpf_object__init_global_data_maps()
1420 obj->efile.data_shndx, in bpf_object__init_global_data_maps()
1421 obj->efile.data->d_buf, in bpf_object__init_global_data_maps()
1422 obj->efile.data->d_size); in bpf_object__init_global_data_maps()
1426 if (obj->efile.rodata_shndx >= 0) { in bpf_object__init_global_data_maps()
1428 obj->efile.rodata_shndx, in bpf_object__init_global_data_maps()
1429 obj->efile.rodata->d_buf, in bpf_object__init_global_data_maps()
1430 obj->efile.rodata->d_size); in bpf_object__init_global_data_maps()
1434 obj->rodata_map_idx = obj->nr_maps - 1; in bpf_object__init_global_data_maps()
1436 if (obj->efile.bss_shndx >= 0) { in bpf_object__init_global_data_maps()
1438 obj->efile.bss_shndx, in bpf_object__init_global_data_maps()
1440 obj->efile.bss->d_size); in bpf_object__init_global_data_maps()
1453 for (i = 0; i < obj->nr_extern; i++) { in find_extern_by_name()
1454 if (strcmp(obj->externs[i].name, name) == 0) in find_extern_by_name()
1455 return &obj->externs[i]; in find_extern_by_name()
1463 switch (ext->kcfg.type) { in set_kcfg_value_tri()
1467 ext->name, value); in set_kcfg_value_tri()
1468 return -EINVAL; in set_kcfg_value_tri()
1488 ext->name, value); in set_kcfg_value_tri()
1489 return -EINVAL; in set_kcfg_value_tri()
1491 ext->is_set = true; in set_kcfg_value_tri()
1500 if (ext->kcfg.type != KCFG_CHAR_ARR) { in set_kcfg_value_str()
1501 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); in set_kcfg_value_str()
1502 return -EINVAL; in set_kcfg_value_str()
1506 if (value[len - 1] != '"') { in set_kcfg_value_str()
1508 ext->name, value); in set_kcfg_value_str()
1509 return -EINVAL; in set_kcfg_value_str()
1513 len -= 2; in set_kcfg_value_str()
1514 if (len >= ext->kcfg.sz) { in set_kcfg_value_str()
1516 ext->name, value, len, ext->kcfg.sz - 1); in set_kcfg_value_str()
1517 len = ext->kcfg.sz - 1; in set_kcfg_value_str()
1521 ext->is_set = true; in set_kcfg_value_str()
1533 err = -errno; in parse_u64()
1539 return -EINVAL; in parse_u64()
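
The two matched lines above come from parse_u64(); reconstructed around them (the lines not shown here are an assumption, but the errno/end-pointer pattern is the standard strtoull idiom), the helper rejects both overflow and trailing junk:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int parse_u64(const char *value, unsigned long long *res)
    {
            char *value_end;

            errno = 0;
            *res = strtoull(value, &value_end, 0);
            if (errno)
                    return -errno;  /* e.g. -ERANGE on overflow */
            if (*value_end)
                    return -EINVAL; /* trailing junk, e.g. "123abc" */
            return 0;
    }

    int main(void)
    {
            unsigned long long v;

            printf("%d %d\n", parse_u64("0x10", &v), parse_u64("123abc", &v));
            return 0;
    }
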
1546 int bit_sz = ext->kcfg.sz * 8; in is_kcfg_value_in_range()
1548 if (ext->kcfg.sz == 8) in is_kcfg_value_in_range()
1551 /* Validate that value stored in u64 fits in integer of `ext->sz` in is_kcfg_value_in_range()
1556 * -2^(Y-1) <= X <= 2^(Y-1) - 1 in is_kcfg_value_in_range()
1557 * 0 <= X + 2^(Y-1) <= 2^Y - 1 in is_kcfg_value_in_range()
1558 * 0 <= X + 2^(Y-1) < 2^Y in is_kcfg_value_in_range()
1560 * For unsigned target integer, check that all the (64 - Y) bits are in is_kcfg_value_in_range()
1563 if (ext->kcfg.is_signed) in is_kcfg_value_in_range()
1564 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); in is_kcfg_value_in_range()
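
The comment and return statement above encode a neat branch-free range check: shifting a signed Y-bit range up by 2^(Y-1) turns it into [0, 2^Y), which one unsigned comparison covers. A stand-alone restatement with a couple of boundary cases:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* does v (read as a two's-complement value) fit a signed sz-byte int? */
    static bool fits_signed(uint64_t v, int sz_bytes)
    {
            int bit_sz = sz_bytes * 8;

            if (sz_bytes == 8) /* full u64: anything fits */
                    return true;
            return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
    }

    int main(void)
    {
            /* 127 and -128 fit int8_t; 128 and -129 do not */
            printf("%d %d %d %d\n",
                   fits_signed(127, 1), fits_signed((uint64_t)-128, 1),
                   fits_signed(128, 1), fits_signed((uint64_t)-129, 1));
            return 0;
    }
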
1572 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { in set_kcfg_value_num()
1574 ext->name, (unsigned long long)value); in set_kcfg_value_num()
1575 return -EINVAL; in set_kcfg_value_num()
1579 ext->name, (unsigned long long)value, ext->kcfg.sz); in set_kcfg_value_num()
1580 return -ERANGE; in set_kcfg_value_num()
1582 switch (ext->kcfg.sz) { in set_kcfg_value_num()
1588 return -EINVAL; in set_kcfg_value_num()
1590 ext->is_set = true; in set_kcfg_value_num()
1609 return -EINVAL; in bpf_object__process_kconfig_line()
1614 if (buf[len - 1] == '\n') in bpf_object__process_kconfig_line()
1615 buf[len - 1] = '\0'; in bpf_object__process_kconfig_line()
1621 return -EINVAL; in bpf_object__process_kconfig_line()
1625 if (!ext || ext->is_set) in bpf_object__process_kconfig_line()
1628 ext_val = data + ext->kcfg.data_off; in bpf_object__process_kconfig_line()
1643 ext->name, value); in bpf_object__process_kconfig_line()
1651 pr_debug("extern (kcfg) %s=%s\n", ext->name, value); in bpf_object__process_kconfig_line()
1663 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); in bpf_object__read_kconfig_file()
1665 return -EINVAL; in bpf_object__read_kconfig_file()
1667 return -ENAMETOOLONG; in bpf_object__read_kconfig_file()
1676 return -ENOENT; in bpf_object__read_kconfig_file()
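
bpf_object__read_kconfig_file() above builds the candidate path "/boot/config-<release>" from uname(2), treating a failed or truncated snprintf() as -EINVAL/-ENAMETOOLONG and a missing file as -ENOENT. The path construction in isolation:

    #include <stdio.h>
    #include <sys/utsname.h>

    int main(void)
    {
            struct utsname uts;
            char path[128];
            int len;

            if (uname(&uts))
                    return 1;
            len = snprintf(path, sizeof(path), "/boot/config-%s", uts.release);
            if (len < 0 || (size_t)len >= sizeof(path))
                    return 1; /* mirrors the -EINVAL / -ENAMETOOLONG checks */
            printf("probing %s\n", path);
            return 0;
    }
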
1702 err = -errno; in bpf_object__read_kconfig_mem()
1703 pr_warn("failed to open in-memory Kconfig: %d\n", err); in bpf_object__read_kconfig_mem()
1710 pr_warn("error parsing in-memory Kconfig line '%s': %d\n", in bpf_object__read_kconfig_mem()
1726 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__init_kconfig_map()
1727 ext = &obj->externs[i]; in bpf_object__init_kconfig_map()
1728 if (ext->type == EXT_KCFG) in bpf_object__init_kconfig_map()
1735 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; in bpf_object__init_kconfig_map()
1737 obj->efile.symbols_shndx, in bpf_object__init_kconfig_map()
1742 obj->kconfig_map_idx = obj->nr_maps - 1; in bpf_object__init_kconfig_map()
1749 Elf_Data *symbols = obj->efile.symbols; in bpf_object__init_user_maps()
1754 if (obj->efile.maps_shndx < 0) in bpf_object__init_user_maps()
1758 return -EINVAL; in bpf_object__init_user_maps()
1761 scn = elf_sec_by_idx(obj, obj->efile.maps_shndx); in bpf_object__init_user_maps()
1765 obj->path); in bpf_object__init_user_maps()
1766 return -EINVAL; in bpf_object__init_user_maps()
1776 nr_syms = symbols->d_size / sizeof(GElf_Sym); in bpf_object__init_user_maps()
1782 if (sym.st_shndx != obj->efile.maps_shndx) in bpf_object__init_user_maps()
1788 nr_maps, data->d_size, obj->path); in bpf_object__init_user_maps()
1790 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { in bpf_object__init_user_maps()
1792 obj->path); in bpf_object__init_user_maps()
1793 return -EINVAL; in bpf_object__init_user_maps()
1795 map_def_sz = data->d_size / nr_maps; in bpf_object__init_user_maps()
1797 /* Fill obj->maps using data in "maps" section. */ in bpf_object__init_user_maps()
1806 if (sym.st_shndx != obj->efile.maps_shndx) in bpf_object__init_user_maps()
1816 i, obj->path); in bpf_object__init_user_maps()
1817 return -LIBBPF_ERRNO__FORMAT; in bpf_object__init_user_maps()
1820 map->libbpf_type = LIBBPF_MAP_UNSPEC; in bpf_object__init_user_maps()
1821 map->sec_idx = sym.st_shndx; in bpf_object__init_user_maps()
1822 map->sec_offset = sym.st_value; in bpf_object__init_user_maps()
1824 map_name, map->sec_idx, map->sec_offset); in bpf_object__init_user_maps()
1825 if (sym.st_value + map_def_sz > data->d_size) { in bpf_object__init_user_maps()
1827 obj->path, map_name); in bpf_object__init_user_maps()
1828 return -EINVAL; in bpf_object__init_user_maps()
1831 map->name = strdup(map_name); in bpf_object__init_user_maps()
1832 if (!map->name) { in bpf_object__init_user_maps()
1834 return -ENOMEM; in bpf_object__init_user_maps()
1836 pr_debug("map %d is \"%s\"\n", i, map->name); in bpf_object__init_user_maps()
1837 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); in bpf_object__init_user_maps()
1845 memcpy(&map->def, def, map_def_sz); in bpf_object__init_user_maps()
1858 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n", in bpf_object__init_user_maps()
1859 obj->path, map_name); in bpf_object__init_user_maps()
1861 return -EINVAL; in bpf_object__init_user_maps()
1864 memcpy(&map->def, def, sizeof(struct bpf_map_def)); in bpf_object__init_user_maps()
1880 *res_id = t->type; in skip_mods_and_typedefs()
1881 t = btf__type_by_id(btf, t->type); in skip_mods_and_typedefs()
1896 t = skip_mods_and_typedefs(btf, t->type, res_id); in resolve_func_ptr()
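
skip_mods_and_typedefs() above walks t->type links until something other than a modifier or typedef remains; resolve_func_ptr() reuses it to peel down to a function pointer. A toy illustration of the same walk over a hand-rolled type table instead of real BTF (the table and ids here are invented for the example):

    #include <stdio.h>

    enum kind { KIND_INT, KIND_TYPEDEF, KIND_CONST };

    struct type { enum kind kind; int type; /* id of the referenced type */ };

    static const struct type types[] = {
            [1] = { KIND_INT, 0 },
            [2] = { KIND_TYPEDEF, 1 }, /* typedef int my_int; */
            [3] = { KIND_CONST, 2 },   /* const my_int */
    };

    static int skip_mods_and_typedefs(int id)
    {
            while (types[id].kind == KIND_TYPEDEF || types[id].kind == KIND_CONST)
                    id = types[id].type;
            return id;
    }

    int main(void)
    {
            printf("id 3 resolves to id %d\n", skip_mods_and_typedefs(3)); /* -> 1 */
            return 0;
    }
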
1934 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); in get_map_field_int()
1935 const char *name = btf__name_by_offset(btf, m->name_off); in get_map_field_int()
1945 arr_t = btf__type_by_id(btf, t->type); in get_map_field_int()
1948 map_name, name, t->type); in get_map_field_int()
1957 *res = arr_info->nelems; in get_map_field_int()
1971 return -EINVAL; in build_map_pin_path()
1973 return -ENAMETOOLONG; in build_map_pin_path()
1992 const char *name = btf__name_by_offset(obj->btf, m->name_off); in parse_btf_map_def()
1995 pr_warn("map '%s': invalid field #%d.\n", map->name, i); in parse_btf_map_def()
1996 return -EINVAL; in parse_btf_map_def()
1999 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2000 &map->def.type)) in parse_btf_map_def()
2001 return -EINVAL; in parse_btf_map_def()
2003 map->name, map->def.type); in parse_btf_map_def()
2005 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2006 &map->def.max_entries)) in parse_btf_map_def()
2007 return -EINVAL; in parse_btf_map_def()
2009 map->name, map->def.max_entries); in parse_btf_map_def()
2011 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2012 &map->def.map_flags)) in parse_btf_map_def()
2013 return -EINVAL; in parse_btf_map_def()
2015 map->name, map->def.map_flags); in parse_btf_map_def()
2017 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node)) in parse_btf_map_def()
2018 return -EINVAL; in parse_btf_map_def()
2019 pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node); in parse_btf_map_def()
2023 if (!get_map_field_int(map->name, obj->btf, m, &sz)) in parse_btf_map_def()
2024 return -EINVAL; in parse_btf_map_def()
2026 map->name, sz); in parse_btf_map_def()
2027 if (map->def.key_size && map->def.key_size != sz) { in parse_btf_map_def()
2029 map->name, map->def.key_size, sz); in parse_btf_map_def()
2030 return -EINVAL; in parse_btf_map_def()
2032 map->def.key_size = sz; in parse_btf_map_def()
2036 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2039 map->name, m->type); in parse_btf_map_def()
2040 return -EINVAL; in parse_btf_map_def()
2044 map->name, btf_kind_str(t)); in parse_btf_map_def()
2045 return -EINVAL; in parse_btf_map_def()
2047 sz = btf__resolve_size(obj->btf, t->type); in parse_btf_map_def()
2050 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2054 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2055 if (map->def.key_size && map->def.key_size != sz) { in parse_btf_map_def()
2057 map->name, map->def.key_size, (ssize_t)sz); in parse_btf_map_def()
2058 return -EINVAL; in parse_btf_map_def()
2060 map->def.key_size = sz; in parse_btf_map_def()
2061 map->btf_key_type_id = t->type; in parse_btf_map_def()
2065 if (!get_map_field_int(map->name, obj->btf, m, &sz)) in parse_btf_map_def()
2066 return -EINVAL; in parse_btf_map_def()
2068 map->name, sz); in parse_btf_map_def()
2069 if (map->def.value_size && map->def.value_size != sz) { in parse_btf_map_def()
2071 map->name, map->def.value_size, sz); in parse_btf_map_def()
2072 return -EINVAL; in parse_btf_map_def()
2074 map->def.value_size = sz; in parse_btf_map_def()
2078 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2081 map->name, m->type); in parse_btf_map_def()
2082 return -EINVAL; in parse_btf_map_def()
2086 map->name, btf_kind_str(t)); in parse_btf_map_def()
2087 return -EINVAL; in parse_btf_map_def()
2089 sz = btf__resolve_size(obj->btf, t->type); in parse_btf_map_def()
2092 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2096 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2097 if (map->def.value_size && map->def.value_size != sz) { in parse_btf_map_def()
2099 map->name, map->def.value_size, (ssize_t)sz); in parse_btf_map_def()
2100 return -EINVAL; in parse_btf_map_def()
2102 map->def.value_size = sz; in parse_btf_map_def()
2103 map->btf_value_type_id = t->type; in parse_btf_map_def()
2109 pr_warn("map '%s': multi-level inner maps not supported.\n", in parse_btf_map_def()
2110 map->name); in parse_btf_map_def()
2111 return -ENOTSUP; in parse_btf_map_def()
2113 if (i != vlen - 1) { in parse_btf_map_def()
2115 map->name, name); in parse_btf_map_def()
2116 return -EINVAL; in parse_btf_map_def()
2118 if (!bpf_map_type__is_map_in_map(map->def.type)) { in parse_btf_map_def()
2119 pr_warn("map '%s': should be map-in-map.\n", in parse_btf_map_def()
2120 map->name); in parse_btf_map_def()
2121 return -ENOTSUP; in parse_btf_map_def()
2123 if (map->def.value_size && map->def.value_size != 4) { in parse_btf_map_def()
2125 map->name, map->def.value_size); in parse_btf_map_def()
2126 return -EINVAL; in parse_btf_map_def()
2128 map->def.value_size = 4; in parse_btf_map_def()
2129 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2131 pr_warn("map '%s': map-in-map inner type [%d] not found.\n", in parse_btf_map_def()
2132 map->name, m->type); in parse_btf_map_def()
2133 return -EINVAL; in parse_btf_map_def()
2135 if (!btf_is_array(t) || btf_array(t)->nelems) { in parse_btf_map_def()
2136 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n", in parse_btf_map_def()
2137 map->name); in parse_btf_map_def()
2138 return -EINVAL; in parse_btf_map_def()
2140 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type, in parse_btf_map_def()
2143 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", in parse_btf_map_def()
2144 map->name, btf_kind_str(t)); in parse_btf_map_def()
2145 return -EINVAL; in parse_btf_map_def()
2147 t = skip_mods_and_typedefs(obj->btf, t->type, NULL); in parse_btf_map_def()
2149 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", in parse_btf_map_def()
2150 map->name, btf_kind_str(t)); in parse_btf_map_def()
2151 return -EINVAL; in parse_btf_map_def()
2154 map->inner_map = calloc(1, sizeof(*map->inner_map)); in parse_btf_map_def()
2155 if (!map->inner_map) in parse_btf_map_def()
2156 return -ENOMEM; in parse_btf_map_def()
2157 map->inner_map->sec_idx = obj->efile.btf_maps_shndx; in parse_btf_map_def()
2158 map->inner_map->name = malloc(strlen(map->name) + in parse_btf_map_def()
2160 if (!map->inner_map->name) in parse_btf_map_def()
2161 return -ENOMEM; in parse_btf_map_def()
2162 sprintf(map->inner_map->name, "%s.inner", map->name); in parse_btf_map_def()
2164 err = parse_btf_map_def(obj, map->inner_map, t, strict, in parse_btf_map_def()
2174 map->name); in parse_btf_map_def()
2175 return -EINVAL; in parse_btf_map_def()
2177 if (!get_map_field_int(map->name, obj->btf, m, &val)) in parse_btf_map_def()
2178 return -EINVAL; in parse_btf_map_def()
2180 map->name, val); in parse_btf_map_def()
2185 map->name, val); in parse_btf_map_def()
2186 return -EINVAL; in parse_btf_map_def()
2192 map->name); in parse_btf_map_def()
2199 map->name, name); in parse_btf_map_def()
2200 return -ENOTSUP; in parse_btf_map_def()
2203 map->name, name); in parse_btf_map_def()
2207 if (map->def.type == BPF_MAP_TYPE_UNSPEC) { in parse_btf_map_def()
2208 pr_warn("map '%s': map type isn't specified.\n", map->name); in parse_btf_map_def()
2209 return -EINVAL; in parse_btf_map_def()
2228 var = btf__type_by_id(obj->btf, vi->type); in bpf_object__init_user_btf_map()
2230 map_name = btf__name_by_offset(obj->btf, var->name_off); in bpf_object__init_user_btf_map()
2234 return -EINVAL; in bpf_object__init_user_btf_map()
2236 if ((__u64)vi->offset + vi->size > data->d_size) { in bpf_object__init_user_btf_map()
2238 return -EINVAL; in bpf_object__init_user_btf_map()
2243 return -EINVAL; in bpf_object__init_user_btf_map()
2245 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && in bpf_object__init_user_btf_map()
2246 var_extra->linkage != BTF_VAR_STATIC) { in bpf_object__init_user_btf_map()
2248 map_name, var_extra->linkage); in bpf_object__init_user_btf_map()
2249 return -EOPNOTSUPP; in bpf_object__init_user_btf_map()
2252 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); in bpf_object__init_user_btf_map()
2256 return -EINVAL; in bpf_object__init_user_btf_map()
2258 if (def->size > vi->size) { in bpf_object__init_user_btf_map()
2260 return -EINVAL; in bpf_object__init_user_btf_map()
2266 map->name = strdup(map_name); in bpf_object__init_user_btf_map()
2267 if (!map->name) { in bpf_object__init_user_btf_map()
2269 return -ENOMEM; in bpf_object__init_user_btf_map()
2271 map->libbpf_type = LIBBPF_MAP_UNSPEC; in bpf_object__init_user_btf_map()
2272 map->def.type = BPF_MAP_TYPE_UNSPEC; in bpf_object__init_user_btf_map()
2273 map->sec_idx = sec_idx; in bpf_object__init_user_btf_map()
2274 map->sec_offset = vi->offset; in bpf_object__init_user_btf_map()
2275 map->btf_var_idx = var_idx; in bpf_object__init_user_btf_map()
2277 map_name, map->sec_idx, map->sec_offset); in bpf_object__init_user_btf_map()
2292 if (obj->efile.btf_maps_shndx < 0) in bpf_object__init_user_btf_maps()
2295 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); in bpf_object__init_user_btf_maps()
2299 MAPS_ELF_SEC, obj->path); in bpf_object__init_user_btf_maps()
2300 return -EINVAL; in bpf_object__init_user_btf_maps()
2303 nr_types = btf__get_nr_types(obj->btf); in bpf_object__init_user_btf_maps()
2305 t = btf__type_by_id(obj->btf, i); in bpf_object__init_user_btf_maps()
2308 name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__init_user_btf_maps()
2311 obj->efile.btf_maps_sec_btf_id = i; in bpf_object__init_user_btf_maps()
2318 return -ENOENT; in bpf_object__init_user_btf_maps()
2324 obj->efile.btf_maps_shndx, in bpf_object__init_user_btf_maps()
2387 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); in bpf_object__sanitize_btf()
2393 t->size = 1; in bpf_object__sanitize_btf()
2402 name = (char *)btf__name_by_offset(btf, t->name_off); in bpf_object__sanitize_btf()
2410 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); in bpf_object__sanitize_btf()
2413 m->offset = v->offset * 8; in bpf_object__sanitize_btf()
2414 m->type = v->type; in bpf_object__sanitize_btf()
2416 vt = (void *)btf__type_by_id(btf, v->type); in bpf_object__sanitize_btf()
2417 m->name_off = vt->name_off; in bpf_object__sanitize_btf()
2422 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); in bpf_object__sanitize_btf()
2423 t->size = sizeof(__u32); /* kernel enforced */ in bpf_object__sanitize_btf()
2426 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); in bpf_object__sanitize_btf()
2429 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); in bpf_object__sanitize_btf()
2436 return obj->efile.btf_maps_shndx >= 0 || in libbpf_needs_btf()
2437 obj->efile.st_ops_shndx >= 0 || in libbpf_needs_btf()
2438 obj->nr_extern > 0; in libbpf_needs_btf()
2443 return obj->efile.st_ops_shndx >= 0; in kernel_needs_btf()
2450 int err = -ENOENT; in bpf_object__init_btf()
2453 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); in bpf_object__init_btf()
2454 if (IS_ERR(obj->btf)) { in bpf_object__init_btf()
2455 err = PTR_ERR(obj->btf); in bpf_object__init_btf()
2456 obj->btf = NULL; in bpf_object__init_btf()
2461 /* enforce 8-byte pointers for BPF-targeted BTFs */ in bpf_object__init_btf()
2462 btf__set_pointer_size(obj->btf, 8); in bpf_object__init_btf()
2466 if (!obj->btf) { in bpf_object__init_btf()
2471 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, in bpf_object__init_btf()
2472 btf_ext_data->d_size); in bpf_object__init_btf()
2473 if (IS_ERR(obj->btf_ext)) { in bpf_object__init_btf()
2475 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); in bpf_object__init_btf()
2476 obj->btf_ext = NULL; in bpf_object__init_btf()
2492 if (!obj->btf) in bpf_object__finalize_btf()
2495 err = btf__finalize_data(obj, obj->btf); in bpf_object__finalize_btf()
2506 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || in libbpf_prog_needs_vmlinux_btf()
2507 prog->type == BPF_PROG_TYPE_LSM) in libbpf_prog_needs_vmlinux_btf()
2513 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) in libbpf_prog_needs_vmlinux_btf()
2525 /* CO-RE relocations need kernel BTF */ in bpf_object__load_vmlinux_btf()
2526 if (obj->btf_ext && obj->btf_ext->core_relo_info.len) in bpf_object__load_vmlinux_btf()
2530 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__load_vmlinux_btf()
2533 ext = &obj->externs[i]; in bpf_object__load_vmlinux_btf()
2534 if (ext->type == EXT_KSYM && ext->ksym.type_id) { in bpf_object__load_vmlinux_btf()
2541 if (!prog->load) in bpf_object__load_vmlinux_btf()
2552 obj->btf_vmlinux = libbpf_find_kernel_btf(); in bpf_object__load_vmlinux_btf()
2553 if (IS_ERR(obj->btf_vmlinux)) { in bpf_object__load_vmlinux_btf()
2554 err = PTR_ERR(obj->btf_vmlinux); in bpf_object__load_vmlinux_btf()
2556 obj->btf_vmlinux = NULL; in bpf_object__load_vmlinux_btf()
2564 struct btf *kern_btf = obj->btf; in bpf_object__sanitize_and_load_btf()
2568 if (!obj->btf) in bpf_object__sanitize_and_load_btf()
2573 err = -EOPNOTSUPP; in bpf_object__sanitize_and_load_btf()
2586 raw_data = btf__get_raw_data(obj->btf, &sz); in bpf_object__sanitize_and_load_btf()
2591 /* enforce 8-byte pointers for BPF-targeted BTFs */ in bpf_object__sanitize_and_load_btf()
2592 btf__set_pointer_size(obj->btf, 8); in bpf_object__sanitize_and_load_btf()
2600 btf__set_fd(obj->btf, btf__fd(kern_btf)); in bpf_object__sanitize_and_load_btf()
2601 btf__set_fd(kern_btf, -1); in bpf_object__sanitize_and_load_btf()
2621 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); in elf_sym_str()
2624 off, obj->path, elf_errmsg(-1)); in elf_sym_str()
2635 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); in elf_sec_str()
2638 off, obj->path, elf_errmsg(-1)); in elf_sec_str()
2649 scn = elf_getscn(obj->efile.elf, idx); in elf_sec_by_idx()
2652 idx, obj->path, elf_errmsg(-1)); in elf_sec_by_idx()
2661 Elf *elf = obj->efile.elf; in elf_sec_by_name()
2680 return -EINVAL; in elf_sec_hdr()
2684 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); in elf_sec_hdr()
2685 return -EINVAL; in elf_sec_hdr()
2705 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); in elf_sec_name()
2723 obj->path, elf_errmsg(-1)); in elf_sec_data()
2733 Elf_Data *symbols = obj->efile.symbols; in elf_sym_by_sec_off()
2734 size_t n = symbols->d_size / sizeof(GElf_Sym); in elf_sym_by_sec_off()
2740 if (sym->st_shndx != sec_idx || sym->st_value != off) in elf_sym_by_sec_off()
2742 if (GELF_ST_TYPE(sym->st_info) != sym_type) in elf_sym_by_sec_off()
2747 return -ENOENT; in elf_sym_by_sec_off()
2753 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; in is_sec_name_dwarf()
2759 if (hdr->sh_type == SHT_STRTAB) in ignore_elf_section()
2763 if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */) in ignore_elf_section()
2767 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && in ignore_elf_section()
2775 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { in ignore_elf_section()
2776 name += sizeof(".rel") - 1; in ignore_elf_section()
2795 if (a->sec_idx != b->sec_idx) in cmp_progs()
2796 return a->sec_idx < b->sec_idx ? -1 : 1; in cmp_progs()
2799 return a->sec_insn_off < b->sec_insn_off ? -1 : 1; in cmp_progs()
2804 Elf *elf = obj->efile.elf; in bpf_object__elf_collect()
2819 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2822 if (obj->efile.symbols) { in bpf_object__elf_collect()
2823 pr_warn("elf: multiple symbol tables in %s\n", obj->path); in bpf_object__elf_collect()
2824 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2829 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2831 obj->efile.symbols = data; in bpf_object__elf_collect()
2832 obj->efile.symbols_shndx = elf_ndxscn(scn); in bpf_object__elf_collect()
2833 obj->efile.strtabidx = sh.sh_link; in bpf_object__elf_collect()
2842 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2846 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2853 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2856 idx, name, (unsigned long)data->d_size, in bpf_object__elf_collect()
2861 err = bpf_object__init_license(obj, data->d_buf, data->d_size); in bpf_object__elf_collect()
2865 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); in bpf_object__elf_collect()
2869 obj->efile.maps_shndx = idx; in bpf_object__elf_collect()
2871 obj->efile.btf_maps_shndx = idx; in bpf_object__elf_collect()
2878 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { in bpf_object__elf_collect()
2881 obj->efile.text_shndx = idx; in bpf_object__elf_collect()
2886 obj->efile.data = data; in bpf_object__elf_collect()
2887 obj->efile.data_shndx = idx; in bpf_object__elf_collect()
2889 obj->efile.rodata = data; in bpf_object__elf_collect()
2890 obj->efile.rodata_shndx = idx; in bpf_object__elf_collect()
2892 obj->efile.st_ops_data = data; in bpf_object__elf_collect()
2893 obj->efile.st_ops_shndx = idx; in bpf_object__elf_collect()
2899 int nr_sects = obj->efile.nr_reloc_sects; in bpf_object__elf_collect()
2900 void *sects = obj->efile.reloc_sects; in bpf_object__elf_collect()
2914 sizeof(*obj->efile.reloc_sects)); in bpf_object__elf_collect()
2916 return -ENOMEM; in bpf_object__elf_collect()
2918 obj->efile.reloc_sects = sects; in bpf_object__elf_collect()
2919 obj->efile.nr_reloc_sects++; in bpf_object__elf_collect()
2921 obj->efile.reloc_sects[nr_sects].shdr = sh; in bpf_object__elf_collect()
2922 obj->efile.reloc_sects[nr_sects].data = data; in bpf_object__elf_collect()
2924 obj->efile.bss = data; in bpf_object__elf_collect()
2925 obj->efile.bss_shndx = idx; in bpf_object__elf_collect()
2932 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { in bpf_object__elf_collect()
2933 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); in bpf_object__elf_collect()
2934 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2937 /* sort BPF programs by section name and in-section instruction offset in bpf_object__elf_collect()
2939 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); in bpf_object__elf_collect()
2946 int bind = GELF_ST_BIND(sym->st_info); in sym_is_extern()
2948 return sym->st_shndx == SHN_UNDEF && in sym_is_extern()
2950 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE; in sym_is_extern()
2960 return -ESRCH; in find_extern_btf_id()
2969 var_name = btf__name_by_offset(btf, t->name_off); in find_extern_btf_id()
2973 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) in find_extern_btf_id()
2974 return -EINVAL; in find_extern_btf_id()
2979 return -ENOENT; in find_extern_btf_id()
2988 return -ESRCH; in find_extern_sec_btf_id()
2999 if (vs->type == ext_btf_id) in find_extern_sec_btf_id()
3004 return -ENOENT; in find_extern_sec_btf_id()
3014 name = btf__name_by_offset(btf, t->name_off); in find_kcfg_type()
3023 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; in find_kcfg_type()
3026 if (t->size == 1) in find_kcfg_type()
3028 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) in find_kcfg_type()
3033 if (t->size != 4) in find_kcfg_type()
3039 if (btf_array(t)->nelems == 0) in find_kcfg_type()
3041 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) in find_kcfg_type()
3054 if (a->type != b->type) in cmp_externs()
3055 return a->type < b->type ? -1 : 1; in cmp_externs()
3057 if (a->type == EXT_KCFG) { in cmp_externs()
3059 if (a->kcfg.align != b->kcfg.align) in cmp_externs()
3060 return a->kcfg.align > b->kcfg.align ? -1 : 1; in cmp_externs()
3062 if (a->kcfg.sz != b->kcfg.sz) in cmp_externs()
3063 return a->kcfg.sz < b->kcfg.sz ? -1 : 1; in cmp_externs()
3067 return strcmp(a->name, b->name); in cmp_externs()
3096 if (!obj->efile.symbols) in bpf_object__collect_externs()
3099 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); in bpf_object__collect_externs()
3101 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_externs()
3109 if (!gelf_getsym(obj->efile.symbols, i, &sym)) in bpf_object__collect_externs()
3110 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_externs()
3117 ext = obj->externs; in bpf_object__collect_externs()
3118 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); in bpf_object__collect_externs()
3120 return -ENOMEM; in bpf_object__collect_externs()
3121 obj->externs = ext; in bpf_object__collect_externs()
3122 ext = &ext[obj->nr_extern]; in bpf_object__collect_externs()
3124 obj->nr_extern++; in bpf_object__collect_externs()
3126 ext->btf_id = find_extern_btf_id(obj->btf, ext_name); in bpf_object__collect_externs()
3127 if (ext->btf_id <= 0) { in bpf_object__collect_externs()
3129 ext_name, ext->btf_id); in bpf_object__collect_externs()
3130 return ext->btf_id; in bpf_object__collect_externs()
3132 t = btf__type_by_id(obj->btf, ext->btf_id); in bpf_object__collect_externs()
3133 ext->name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__collect_externs()
3134 ext->sym_idx = i; in bpf_object__collect_externs()
3135 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; in bpf_object__collect_externs()
3137 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); in bpf_object__collect_externs()
3138 if (ext->sec_btf_id <= 0) { in bpf_object__collect_externs()
3140 ext_name, ext->btf_id, ext->sec_btf_id); in bpf_object__collect_externs()
3141 return ext->sec_btf_id; in bpf_object__collect_externs()
3143 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); in bpf_object__collect_externs()
3144 sec_name = btf__name_by_offset(obj->btf, sec->name_off); in bpf_object__collect_externs()
3148 ext->type = EXT_KCFG; in bpf_object__collect_externs()
3149 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); in bpf_object__collect_externs()
3150 if (ext->kcfg.sz <= 0) { in bpf_object__collect_externs()
3152 ext_name, ext->kcfg.sz); in bpf_object__collect_externs()
3153 return ext->kcfg.sz; in bpf_object__collect_externs()
3155 ext->kcfg.align = btf__align_of(obj->btf, t->type); in bpf_object__collect_externs()
3156 if (ext->kcfg.align <= 0) { in bpf_object__collect_externs()
3158 ext_name, ext->kcfg.align); in bpf_object__collect_externs()
3159 return -EINVAL; in bpf_object__collect_externs()
3161 ext->kcfg.type = find_kcfg_type(obj->btf, t->type, in bpf_object__collect_externs()
3162 &ext->kcfg.is_signed); in bpf_object__collect_externs()
3163 if (ext->kcfg.type == KCFG_UNKNOWN) { in bpf_object__collect_externs()
3165 return -ENOTSUP; in bpf_object__collect_externs()
3169 ext->type = EXT_KSYM; in bpf_object__collect_externs()
3170 skip_mods_and_typedefs(obj->btf, t->type, in bpf_object__collect_externs()
3171 &ext->ksym.type_id); in bpf_object__collect_externs()
3174 return -ENOTSUP; in bpf_object__collect_externs()
3177 pr_debug("collected %d externs total\n", obj->nr_extern); in bpf_object__collect_externs()
3179 if (!obj->nr_extern) in bpf_object__collect_externs()
3183 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); in bpf_object__collect_externs()
3187 * pretending that each extern is a 8-byte variable in bpf_object__collect_externs()
3190 /* find existing 4-byte integer type in BTF to use for fake in bpf_object__collect_externs()
3193 int int_btf_id = find_int_btf_id(obj->btf); in bpf_object__collect_externs()
3195 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__collect_externs()
3196 ext = &obj->externs[i]; in bpf_object__collect_externs()
3197 if (ext->type != EXT_KSYM) in bpf_object__collect_externs()
3200 i, ext->sym_idx, ext->name); in bpf_object__collect_externs()
3209 vt = (void *)btf__type_by_id(obj->btf, vs->type); in bpf_object__collect_externs()
3210 ext_name = btf__name_by_offset(obj->btf, vt->name_off); in bpf_object__collect_externs()
3215 return -ESRCH; in bpf_object__collect_externs()
3217 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; in bpf_object__collect_externs()
3218 vt->type = int_btf_id; in bpf_object__collect_externs()
3219 vs->offset = off; in bpf_object__collect_externs()
3220 vs->size = sizeof(int); in bpf_object__collect_externs()
3222 sec->size = off; in bpf_object__collect_externs()
3229 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__collect_externs()
3230 ext = &obj->externs[i]; in bpf_object__collect_externs()
3231 if (ext->type != EXT_KCFG) in bpf_object__collect_externs()
3234 ext->kcfg.data_off = roundup(off, ext->kcfg.align); in bpf_object__collect_externs()
3235 off = ext->kcfg.data_off + ext->kcfg.sz; in bpf_object__collect_externs()
3237 i, ext->sym_idx, ext->kcfg.data_off, ext->name); in bpf_object__collect_externs()
3239 sec->size = off; in bpf_object__collect_externs()
3244 t = btf__type_by_id(obj->btf, vs->type); in bpf_object__collect_externs()
3245 ext_name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__collect_externs()
3250 return -ESRCH; in bpf_object__collect_externs()
3252 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; in bpf_object__collect_externs()
3253 vs->offset = ext->kcfg.data_off; in bpf_object__collect_externs()
3266 if (pos->sec_name && !strcmp(pos->sec_name, title)) in bpf_object__find_program_by_title()
3275 /* For legacy reasons, libbpf supports an entry-point BPF programs in prog_is_subprog()
3278 * must be subprograms called from entry-point BPF programs in in prog_is_subprog()
3281 * Similarly, if there is a function/program in .text and at least one in prog_is_subprog()
3285 * SEC()-designated BPF programs and .text entry-point BPF programs. in prog_is_subprog()
3287 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; in prog_is_subprog()
3299 if (!strcmp(prog->name, name)) in bpf_object__find_program_by_name()
3308 return shndx == obj->efile.data_shndx || in bpf_object__shndx_is_data()
3309 shndx == obj->efile.bss_shndx || in bpf_object__shndx_is_data()
3310 shndx == obj->efile.rodata_shndx; in bpf_object__shndx_is_data()
3316 return shndx == obj->efile.maps_shndx || in bpf_object__shndx_is_maps()
3317 shndx == obj->efile.btf_maps_shndx; in bpf_object__shndx_is_maps()
3323 if (shndx == obj->efile.data_shndx) in bpf_object__section_to_libbpf_map_type()
3325 else if (shndx == obj->efile.bss_shndx) in bpf_object__section_to_libbpf_map_type()
3327 else if (shndx == obj->efile.rodata_shndx) in bpf_object__section_to_libbpf_map_type()
3329 else if (shndx == obj->efile.symbols_shndx) in bpf_object__section_to_libbpf_map_type()
3340 struct bpf_insn *insn = &prog->insns[insn_idx]; in bpf_program__record_reloc()
3341 size_t map_idx, nr_maps = prog->obj->nr_maps; in bpf_program__record_reloc()
3342 struct bpf_object *obj = prog->obj; in bpf_program__record_reloc()
3343 __u32 shdr_idx = sym->st_shndx; in bpf_program__record_reloc()
3348 reloc_desc->processed = false; in bpf_program__record_reloc()
3350 /* sub-program call relocation */ in bpf_program__record_reloc()
3351 if (insn->code == (BPF_JMP | BPF_CALL)) { in bpf_program__record_reloc()
3352 if (insn->src_reg != BPF_PSEUDO_CALL) { in bpf_program__record_reloc()
3353 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); in bpf_program__record_reloc()
3354 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3357 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { in bpf_program__record_reloc()
3360 prog->name, sym_name, sym_sec_name); in bpf_program__record_reloc()
3361 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3363 if (sym->st_value % BPF_INSN_SZ) { in bpf_program__record_reloc()
3365 prog->name, sym_name, (size_t)sym->st_value); in bpf_program__record_reloc()
3366 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3368 reloc_desc->type = RELO_CALL; in bpf_program__record_reloc()
3369 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3370 reloc_desc->sym_off = sym->st_value; in bpf_program__record_reloc()
3374 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) { in bpf_program__record_reloc()
3376 prog->name, sym_name, insn_idx, insn->code); in bpf_program__record_reloc()
3377 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3381 int sym_idx = GELF_R_SYM(rel->r_info); in bpf_program__record_reloc()
3382 int i, n = obj->nr_extern; in bpf_program__record_reloc()
3386 ext = &obj->externs[i]; in bpf_program__record_reloc()
3387 if (ext->sym_idx == sym_idx) in bpf_program__record_reloc()
3392 prog->name, sym_name, sym_idx); in bpf_program__record_reloc()
3393 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3396 prog->name, i, ext->name, ext->sym_idx, insn_idx); in bpf_program__record_reloc()
3397 reloc_desc->type = RELO_EXTERN; in bpf_program__record_reloc()
3398 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3399 reloc_desc->sym_off = i; /* sym_off stores extern index */ in bpf_program__record_reloc()
3405 prog->name, sym_name, shdr_idx); in bpf_program__record_reloc()
3406 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3416 prog->name, sym_name, sym_sec_name); in bpf_program__record_reloc()
3417 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3420 map = &obj->maps[map_idx]; in bpf_program__record_reloc()
3421 if (map->libbpf_type != type || in bpf_program__record_reloc()
3422 map->sec_idx != sym->st_shndx || in bpf_program__record_reloc()
3423 map->sec_offset != sym->st_value) in bpf_program__record_reloc()
3426 prog->name, map_idx, map->name, map->sec_idx, in bpf_program__record_reloc()
3427 map->sec_offset, insn_idx); in bpf_program__record_reloc()
3432 prog->name, sym_sec_name, (size_t)sym->st_value); in bpf_program__record_reloc()
3433 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3435 reloc_desc->type = RELO_LD64; in bpf_program__record_reloc()
3436 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3437 reloc_desc->map_idx = map_idx; in bpf_program__record_reloc()
3438 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ in bpf_program__record_reloc()
3445 prog->name, sym_sec_name); in bpf_program__record_reloc()
3446 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3449 map = &obj->maps[map_idx]; in bpf_program__record_reloc()
3450 if (map->libbpf_type != type) in bpf_program__record_reloc()
3453 prog->name, map_idx, map->name, map->sec_idx, in bpf_program__record_reloc()
3454 map->sec_offset, insn_idx); in bpf_program__record_reloc()
3459 prog->name, sym_sec_name); in bpf_program__record_reloc()
3460 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3463 reloc_desc->type = RELO_DATA; in bpf_program__record_reloc()
3464 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3465 reloc_desc->map_idx = map_idx; in bpf_program__record_reloc()
3466 reloc_desc->sym_off = sym->st_value; in bpf_program__record_reloc()
3472 return insn_idx >= prog->sec_insn_off && in prog_contains_insn()
3473 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; in prog_contains_insn()
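/* The lookup below is an upper-bound binary search over obj->programs,
 * which libbpf keeps ordered by (sec_idx, sec_insn_off): it finds the
 * last program starting at or before insn_idx, then verifies that the
 * instruction actually falls inside it.
 */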
3479 int l = 0, r = obj->nr_programs - 1, m; in find_prog_by_sec_insn()
3482 if (!obj->nr_programs) in find_prog_by_sec_insn()
3486 m = l + (r - l + 1) / 2; in find_prog_by_sec_insn()
3487 prog = &obj->programs[m]; in find_prog_by_sec_insn()
3489 if (prog->sec_idx < sec_idx || in find_prog_by_sec_insn()
3490 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) in find_prog_by_sec_insn()
3493 r = m - 1; in find_prog_by_sec_insn()
3498 prog = &obj->programs[l]; in find_prog_by_sec_insn()
3499 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) in find_prog_by_sec_insn()
3507 Elf_Data *symbols = obj->efile.symbols; in bpf_object__collect_prog_relos()
3509 size_t sec_idx = shdr->sh_info; in bpf_object__collect_prog_relos()
3518 relo_sec_name = elf_sec_str(obj, shdr->sh_name); in bpf_object__collect_prog_relos()
3521 return -EINVAL; in bpf_object__collect_prog_relos()
3525 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_prog_relos()
3530 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3535 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3540 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3545 * relocations against the section that contains a function; in bpf_object__collect_prog_relos()
3563 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_prog_relos()
3566 relos = libbpf_reallocarray(prog->reloc_desc, in bpf_object__collect_prog_relos()
3567 prog->nr_reloc + 1, sizeof(*relos)); in bpf_object__collect_prog_relos()
3569 return -ENOMEM; in bpf_object__collect_prog_relos()
3570 prog->reloc_desc = relos; in bpf_object__collect_prog_relos()
3573 insn_idx -= prog->sec_insn_off; in bpf_object__collect_prog_relos()
3574 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], in bpf_object__collect_prog_relos()
3579 prog->nr_reloc++; in bpf_object__collect_prog_relos()
3586 struct bpf_map_def *def = &map->def; in bpf_map_find_btf_info()
3590 /* if it's BTF-defined map, we don't need to search for type IDs. in bpf_map_find_btf_info()
3594 if (map->sec_idx == obj->efile.btf_maps_shndx || in bpf_map_find_btf_info()
3599 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, in bpf_map_find_btf_info()
3600 def->value_size, &key_type_id, in bpf_map_find_btf_info()
3607 ret = btf__find_by_name(obj->btf, in bpf_map_find_btf_info()
3608 libbpf_type_to_btf_name[map->libbpf_type]); in bpf_map_find_btf_info()
3613 map->btf_key_type_id = key_type_id; in bpf_map_find_btf_info()
3614 map->btf_value_type_id = bpf_map__is_internal(map) ? in bpf_map_find_btf_info()
3631 err = -errno; in bpf_get_map_info_from_fdinfo()
3639 info->type = val; in bpf_get_map_info_from_fdinfo()
3641 info->key_size = val; in bpf_get_map_info_from_fdinfo()
3643 info->value_size = val; in bpf_get_map_info_from_fdinfo()
3645 info->max_entries = val; in bpf_get_map_info_from_fdinfo()
3647 info->map_flags = val; in bpf_get_map_info_from_fdinfo()
3670 return -errno; in bpf_map__reuse_fd()
3674 err = -errno; in bpf_map__reuse_fd()
3680 err = -errno; in bpf_map__reuse_fd()
3684 err = zclose(map->fd); in bpf_map__reuse_fd()
3686 err = -errno; in bpf_map__reuse_fd()
3689 free(map->name); in bpf_map__reuse_fd()
3691 map->fd = new_fd; in bpf_map__reuse_fd()
3692 map->name = new_name; in bpf_map__reuse_fd()
3693 map->def.type = info.type; in bpf_map__reuse_fd()
3694 map->def.key_size = info.key_size; in bpf_map__reuse_fd()
3695 map->def.value_size = info.value_size; in bpf_map__reuse_fd()
3696 map->def.max_entries = info.max_entries; in bpf_map__reuse_fd()
3697 map->def.map_flags = info.map_flags; in bpf_map__reuse_fd()
3698 map->btf_key_type_id = info.btf_key_type_id; in bpf_map__reuse_fd()
3699 map->btf_value_type_id = info.btf_value_type_id; in bpf_map__reuse_fd()
3700 map->reused = true; in bpf_map__reuse_fd()
3713 return map->def.max_entries; in bpf_map__max_entries()
3718 if (map->fd >= 0) in bpf_map__set_max_entries()
3719 return -EBUSY; in bpf_map__set_max_entries()
3720 map->def.max_entries = max_entries; in bpf_map__set_max_entries()
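/* Usage sketch (hypothetical map name): resizing must happen before the
 * map is created, otherwise -EBUSY is returned, e.g.:
 *
 *   struct bpf_map *m = bpf_object__find_map_by_name(obj, "events");
 *
 *   err = bpf_map__set_max_entries(m, 4096);   // pre-load only
 */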
3727 return -EINVAL; in bpf_map__resize()
3759 return -ret; in bpf_object__probe_loading()
3815 ret = -errno; in probe_kern_global_data()
3818 __func__, cp, -ret); in probe_kern_global_data()
3925 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) in probe_kern_exp_attach_type()
3943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ in probe_kern_probe_read_kernel()
3978 ret = -errno; in probe_prog_bind_map()
3981 __func__, cp, -ret); in probe_prog_bind_map()
4031 "BTF global function", probe_kern_btf_func_global,
4056 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { in kernel_supports()
4057 ret = feat->probe(); in kernel_supports()
4059 WRITE_ONCE(feat->res, FEAT_SUPPORTED); in kernel_supports()
4061 WRITE_ONCE(feat->res, FEAT_MISSING); in kernel_supports()
4063 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); in kernel_supports()
4064 WRITE_ONCE(feat->res, FEAT_MISSING); in kernel_supports()
4068 return READ_ONCE(feat->res) == FEAT_SUPPORTED; in kernel_supports()
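/* Probe results are cached in feat->res, so call sites can branch on a
 * feature cheaply and repeatedly, e.g.:
 *
 *   if (kernel_supports(FEAT_BTF_FUNC))
 *       ... emit .BTF.ext func info during load ...
 */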
4089 return (map_info.type == map->def.type && in map_is_reuse_compat()
4090 map_info.key_size == map->def.key_size && in map_is_reuse_compat()
4091 map_info.value_size == map->def.value_size && in map_is_reuse_compat()
4092 map_info.max_entries == map->def.max_entries && in map_is_reuse_compat()
4093 map_info.map_flags == map->def.map_flags); in map_is_reuse_compat()
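/* The compatibility check above gates automatic reuse of pinned maps in
 * bpf_object__reuse_map() below; user code can do the same manually
 * (hypothetical pin path):
 *
 *   int fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *
 *   if (fd >= 0 && bpf_map__reuse_fd(map, fd) == 0)
 *       ... map now wraps the already-pinned kernel object ...
 */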
4102 pin_fd = bpf_obj_get(map->pin_path); in bpf_object__reuse_map()
4104 err = -errno; in bpf_object__reuse_map()
4105 if (err == -ENOENT) { in bpf_object__reuse_map()
4107 map->pin_path); in bpf_object__reuse_map()
4111 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in bpf_object__reuse_map()
4113 map->pin_path, cp); in bpf_object__reuse_map()
4119 map->pin_path); in bpf_object__reuse_map()
4121 return -EINVAL; in bpf_object__reuse_map()
4129 map->pinned = true; in bpf_object__reuse_map()
4130 pr_debug("reused pinned map at '%s'\n", map->pin_path); in bpf_object__reuse_map()
4138 enum libbpf_map_type map_type = map->libbpf_type; in bpf_object__populate_internal_map()
4142 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); in bpf_object__populate_internal_map()
4144 err = -errno; in bpf_object__populate_internal_map()
4147 map->name, cp); in bpf_object__populate_internal_map()
4151 /* Freeze .rodata and .kconfig map as read-only from syscall side. */ in bpf_object__populate_internal_map()
4153 err = bpf_map_freeze(map->fd); in bpf_object__populate_internal_map()
4155 err = -errno; in bpf_object__populate_internal_map()
4157 pr_warn("Error freezing map(%s) as read-only: %s\n", in bpf_object__populate_internal_map()
4158 map->name, cp); in bpf_object__populate_internal_map()
4170 struct bpf_map_def *def = &map->def; in bpf_object__create_map()
4176 create_attr.name = map->name; in bpf_object__create_map()
4177 create_attr.map_ifindex = map->map_ifindex; in bpf_object__create_map()
4178 create_attr.map_type = def->type; in bpf_object__create_map()
4179 create_attr.map_flags = def->map_flags; in bpf_object__create_map()
4180 create_attr.key_size = def->key_size; in bpf_object__create_map()
4181 create_attr.value_size = def->value_size; in bpf_object__create_map()
4182 create_attr.numa_node = map->numa_node; in bpf_object__create_map()
4184 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { in bpf_object__create_map()
4190 map->name, nr_cpus); in bpf_object__create_map()
4193 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); in bpf_object__create_map()
4196 create_attr.max_entries = def->max_entries; in bpf_object__create_map()
4201 map->btf_vmlinux_value_type_id; in bpf_object__create_map()
4206 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { in bpf_object__create_map()
4207 create_attr.btf_fd = btf__fd(obj->btf); in bpf_object__create_map()
4208 create_attr.btf_key_type_id = map->btf_key_type_id; in bpf_object__create_map()
4209 create_attr.btf_value_type_id = map->btf_value_type_id; in bpf_object__create_map()
4212 if (bpf_map_type__is_map_in_map(def->type)) { in bpf_object__create_map()
4213 if (map->inner_map) { in bpf_object__create_map()
4214 err = bpf_object__create_map(obj, map->inner_map); in bpf_object__create_map()
4217 map->name, err); in bpf_object__create_map()
4220 map->inner_map_fd = bpf_map__fd(map->inner_map); in bpf_object__create_map()
4222 if (map->inner_map_fd >= 0) in bpf_object__create_map()
4223 create_attr.inner_map_fd = map->inner_map_fd; in bpf_object__create_map()
4226 map->fd = bpf_create_map_xattr(&create_attr); in bpf_object__create_map()
4227 if (map->fd < 0 && (create_attr.btf_key_type_id || in bpf_object__create_map()
4231 err = -errno; in bpf_object__create_map()
4234 map->name, cp, err); in bpf_object__create_map()
4238 map->btf_key_type_id = 0; in bpf_object__create_map()
4239 map->btf_value_type_id = 0; in bpf_object__create_map()
4240 map->fd = bpf_create_map_xattr(&create_attr); in bpf_object__create_map()
4243 err = map->fd < 0 ? -errno : 0; in bpf_object__create_map()
4245 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { in bpf_object__create_map()
4246 bpf_map__destroy(map->inner_map); in bpf_object__create_map()
4247 zfree(&map->inner_map); in bpf_object__create_map()
4259 for (i = 0; i < map->init_slots_sz; i++) { in init_map_slots()
4260 if (!map->init_slots[i]) in init_map_slots()
4263 targ_map = map->init_slots[i]; in init_map_slots()
4265 err = bpf_map_update_elem(map->fd, &i, &fd, 0); in init_map_slots()
4267 err = -errno; in init_map_slots()
4269 map->name, i, targ_map->name, in init_map_slots()
4274 map->name, i, targ_map->name, fd); in init_map_slots()
4277 zfree(&map->init_slots); in init_map_slots()
4278 map->init_slots_sz = 0; in init_map_slots()
4292 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__create_maps()
4293 map = &obj->maps[i]; in bpf_object__create_maps()
4297 if (map->pin_path) { in bpf_object__create_maps()
4301 map->name); in bpf_object__create_maps()
4304 if (retried && map->fd < 0) { in bpf_object__create_maps()
4306 map->name); in bpf_object__create_maps()
4307 err = -ENOENT; in bpf_object__create_maps()
4312 if (map->fd >= 0) { in bpf_object__create_maps()
4314 map->name, map->fd); in bpf_object__create_maps()
4321 map->name, map->fd); in bpf_object__create_maps()
4326 zclose(map->fd); in bpf_object__create_maps()
4331 if (map->init_slots_sz) { in bpf_object__create_maps()
4334 zclose(map->fd); in bpf_object__create_maps()
4340 if (map->pin_path && !map->pinned) { in bpf_object__create_maps()
4343 zclose(map->fd); in bpf_object__create_maps()
4344 if (!retried && err == -EEXIST) { in bpf_object__create_maps()
4348 pr_warn("map '%s': failed to auto-pin at '%s': %d\n", in bpf_object__create_maps()
4349 map->name, map->pin_path, err); in bpf_object__create_maps()
4359 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); in bpf_object__create_maps()
4362 zclose(obj->maps[j].fd); in bpf_object__create_maps()
4368 /* represents BPF CO-RE field or array element accessor */
4377 /* high-level spec: named fields and array indices only */
4381 /* CO-RE relocation kind */
4383 /* high-level spec length */
4385 /* raw, low-level spec: 1-to-1 with accessor spec string */
4404 /* not a flexible array if it's not a named struct member or has non-zero size */ in is_flex_arr()
4405 if (!acc->name || arr->nelems > 0) in is_flex_arr()
4409 t = btf__type_by_id(btf, acc->type_id); in is_flex_arr()
4410 return acc->idx == btf_vlen(t) - 1; in is_flex_arr()
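/* For illustration, a flexible array member is a trailing, unsized
 * array, e.g.:
 *
 *   struct sample {
 *       int cnt;
 *       int data[];    // nelems == 0 and last member => flexible
 *   };
 *
 * and index checks against nelems are skipped for such arrays (see the
 * !flex checks in bpf_core_parse_spec() and bpf_core_spec_match()).
 */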
4472 * Turn bpf_core_relo into a low- and high-level spec representation,
4474 * field bit offset, specified by accessor string. Low-level spec captures
4476 * struct/union members. High-level one only captures semantically meaningful
4491 * int x = &s->a[3]; // access string = '0:1:2:3'
4493 * Low-level spec has 1:1 mapping with each element of access string (it's
4496 * High-level spec will capture only 3 points:
4497 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4498 * - field 'a' access (corresponds to '2' in low-level spec);
4499 * - array element #3 access (corresponds to '3' in low-level spec).
4501 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4505 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
4506 * string to specify enumerator's value index that needs to be relocated.
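/* For example (illustrative), for an enum value relocation against
 *
 *   enum sample { VAL_A = 1, VAL_B = 5 };
 *
 * access string "1" selects enumerator #1 (VAL_B); its name is recorded
 * in the spec's first accessor so that a matching enumerator can later
 * be found in target BTF by (essential) name.
 */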
4522 return -EINVAL; in bpf_core_parse_spec()
4525 spec->btf = btf; in bpf_core_parse_spec()
4526 spec->root_type_id = type_id; in bpf_core_parse_spec()
4527 spec->relo_kind = relo_kind; in bpf_core_parse_spec()
4529 /* type-based relocations don't have a field access string */ in bpf_core_parse_spec()
4532 return -EINVAL; in bpf_core_parse_spec()
4541 return -EINVAL; in bpf_core_parse_spec()
4542 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_parse_spec()
4543 return -E2BIG; in bpf_core_parse_spec()
4545 spec->raw_spec[spec->raw_len++] = access_idx; in bpf_core_parse_spec()
4548 if (spec->raw_len == 0) in bpf_core_parse_spec()
4549 return -EINVAL; in bpf_core_parse_spec()
4553 return -EINVAL; in bpf_core_parse_spec()
4555 access_idx = spec->raw_spec[0]; in bpf_core_parse_spec()
4556 acc = &spec->spec[0]; in bpf_core_parse_spec()
4557 acc->type_id = id; in bpf_core_parse_spec()
4558 acc->idx = access_idx; in bpf_core_parse_spec()
4559 spec->len++; in bpf_core_parse_spec()
4562 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) in bpf_core_parse_spec()
4563 return -EINVAL; in bpf_core_parse_spec()
4565 /* record enumerator name in the first accessor */ in bpf_core_parse_spec()
4566 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off); in bpf_core_parse_spec()
4571 return -EINVAL; in bpf_core_parse_spec()
4576 spec->bit_offset = access_idx * sz * 8; in bpf_core_parse_spec()
4578 for (i = 1; i < spec->raw_len; i++) { in bpf_core_parse_spec()
4581 return -EINVAL; in bpf_core_parse_spec()
4583 access_idx = spec->raw_spec[i]; in bpf_core_parse_spec()
4584 acc = &spec->spec[spec->len]; in bpf_core_parse_spec()
4591 return -EINVAL; in bpf_core_parse_spec()
4594 spec->bit_offset += bit_offset; in bpf_core_parse_spec()
4597 if (m->name_off) { in bpf_core_parse_spec()
4598 name = btf__name_by_offset(btf, m->name_off); in bpf_core_parse_spec()
4600 return -EINVAL; in bpf_core_parse_spec()
4602 acc->type_id = id; in bpf_core_parse_spec()
4603 acc->idx = access_idx; in bpf_core_parse_spec()
4604 acc->name = name; in bpf_core_parse_spec()
4605 spec->len++; in bpf_core_parse_spec()
4608 id = m->type; in bpf_core_parse_spec()
4613 t = skip_mods_and_typedefs(btf, a->type, &id); in bpf_core_parse_spec()
4615 return -EINVAL; in bpf_core_parse_spec()
4617 flex = is_flex_arr(btf, acc - 1, a); in bpf_core_parse_spec()
4618 if (!flex && access_idx >= a->nelems) in bpf_core_parse_spec()
4619 return -EINVAL; in bpf_core_parse_spec()
4621 spec->spec[spec->len].type_id = id; in bpf_core_parse_spec()
4622 spec->spec[spec->len].idx = access_idx; in bpf_core_parse_spec()
4623 spec->len++; in bpf_core_parse_spec()
4628 spec->bit_offset += access_idx * sz * 8; in bpf_core_parse_spec()
4632 return -EINVAL; in bpf_core_parse_spec()
4649 * underscore is ignored by BPF CO-RE relocation during relocation matching.
4656 for (i = n - 5; i >= 0; i--) { in bpf_core_essential_name_len()
4671 free(cand_ids->data); in bpf_core_free_cands()
4688 return ERR_PTR(-EINVAL); in bpf_core_find_cands()
4690 local_name = btf__name_by_offset(local_btf, local_t->name_off); in bpf_core_find_cands()
4692 return ERR_PTR(-EINVAL); in bpf_core_find_cands()
4697 return ERR_PTR(-ENOMEM); in bpf_core_find_cands()
4705 targ_name = btf__name_by_offset(targ_btf, t->name_off); in bpf_core_find_cands()
4714 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n", in bpf_core_find_cands()
4717 new_ids = libbpf_reallocarray(cand_ids->data, in bpf_core_find_cands()
4718 cand_ids->len + 1, in bpf_core_find_cands()
4719 sizeof(*cand_ids->data)); in bpf_core_find_cands()
4721 err = -ENOMEM; in bpf_core_find_cands()
4724 cand_ids->data = new_ids; in bpf_core_find_cands()
4725 cand_ids->data[cand_ids->len++] = i; in bpf_core_find_cands()
4737 * - any two STRUCTs/UNIONs are compatible and can be mixed;
4738 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
4739 * - any two PTRs are always compatible;
4740 * - for ENUMs, names should be the same (ignoring flavor suffix) or at least one of the enums should be anonymous;
4742 * - for ENUMs, check sizes, names are ignored;
4743 * - for INT, size and signedness are ignored;
4744 * - for ARRAY, dimensionality is ignored, element types are checked for
4746 * - everything else shouldn't ever be a target of relocation.
4748 * These rules are not set in stone and probably will be adjusted as we gain more experience with using BPF CO-RE relocations.
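/* For instance, under these rules the following two fields are
 * compatible even though sizes and array dimensions differ
 * (hypothetical types):
 *
 *   struct s_local { short a[4]; };     // local BTF
 *   struct s_targ  { long  a[10]; };    // target BTF
 */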
4761 return -EINVAL; in bpf_core_fields_are_compat()
4777 local_type->name_off); in bpf_core_fields_are_compat()
4778 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); in bpf_core_fields_are_compat()
4781 /* one of them is anonymous or both w/ same flavor-less names */ in bpf_core_fields_are_compat()
4787 /* just reject deprecated bitfield-like integers; all other in bpf_core_fields_are_compat()
4793 local_id = btf_array(local_type)->type; in bpf_core_fields_are_compat()
4794 targ_id = btf_array(targ_type)->type; in bpf_core_fields_are_compat()
4804 * Given single high-level named field accessor in local type, find
4805 * corresponding high-level accessor for a target type. Along the way,
4806 * maintain low-level spec for target as well. Also keep updating target
4834 return -EINVAL; in bpf_core_match_member()
4838 local_id = local_acc->type_id; in bpf_core_match_member()
4840 local_member = btf_members(local_type) + local_acc->idx; in bpf_core_match_member()
4841 local_name = btf__name_by_offset(local_btf, local_member->name_off); in bpf_core_match_member()
4851 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_match_member()
4852 return -E2BIG; in bpf_core_match_member()
4855 spec->bit_offset += bit_offset; in bpf_core_match_member()
4856 spec->raw_spec[spec->raw_len++] = i; in bpf_core_match_member()
4858 targ_name = btf__name_by_offset(targ_btf, m->name_off); in bpf_core_match_member()
4862 targ_btf, m->type, in bpf_core_match_member()
4870 targ_acc = &spec->spec[spec->len++]; in bpf_core_match_member()
4871 targ_acc->type_id = targ_id; in bpf_core_match_member()
4872 targ_acc->idx = i; in bpf_core_match_member()
4873 targ_acc->name = targ_name; in bpf_core_match_member()
4875 *next_targ_id = m->type; in bpf_core_match_member()
4877 local_member->type, in bpf_core_match_member()
4878 targ_btf, m->type); in bpf_core_match_member()
4880 spec->len--; /* pop accessor */ in bpf_core_match_member()
4884 spec->bit_offset -= bit_offset; in bpf_core_match_member()
4885 spec->raw_len--; in bpf_core_match_member()
4892 * type-based CO-RE relocations and follows slightly different rules than
4893 * field-based relocations. This function assumes that root types were already
4894 * checked for name match. Beyond that initial root-level name check, names
4896 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
4899 * - for ENUMs, the size is ignored;
4900 * - for INT, size and signedness are ignored;
4901 * - for ARRAY, dimensionality is ignored, element types are checked for
4903 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
4904 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
4905 * - FUNC_PROTOs are compatible if they have compatible signature: same number of input args and compatible return and argument types.
4908 * These rules are not set in stone and probably will be adjusted as we gain more experience with using BPF CO-RE relocations.
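/* E.g., under these rules the following prototypes are compatible
 * (hypothetical types; INT size/signedness is ignored, PTRs are
 * followed into pointee types):
 *
 *   int  fn(struct a *p, unsigned int x);
 *   long fn(struct b *q, int y);
 */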
4923 depth--; in bpf_core_types_are_compat()
4925 return -EINVAL; in bpf_core_types_are_compat()
4930 return -EINVAL; in bpf_core_types_are_compat()
4943 /* just reject deprecated bitfield-like integers; all other in bpf_core_types_are_compat()
4948 local_id = local_type->type; in bpf_core_types_are_compat()
4949 targ_id = targ_type->type; in bpf_core_types_are_compat()
4952 local_id = btf_array(local_type)->type; in bpf_core_types_are_compat()
4953 targ_id = btf_array(targ_type)->type; in bpf_core_types_are_compat()
4966 skip_mods_and_typedefs(local_btf, local_p->type, &local_id); in bpf_core_types_are_compat()
4967 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); in bpf_core_types_are_compat()
4974 skip_mods_and_typedefs(local_btf, local_type->type, &local_id); in bpf_core_types_are_compat()
4975 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); in bpf_core_types_are_compat()
4987 * target spec (high-level, low-level + bit offset).
4999 targ_spec->btf = targ_btf; in bpf_core_spec_match()
5000 targ_spec->root_type_id = targ_id; in bpf_core_spec_match()
5001 targ_spec->relo_kind = local_spec->relo_kind; in bpf_core_spec_match()
5003 if (core_relo_is_type_based(local_spec->relo_kind)) { in bpf_core_spec_match()
5004 return bpf_core_types_are_compat(local_spec->btf, in bpf_core_spec_match()
5005 local_spec->root_type_id, in bpf_core_spec_match()
5009 local_acc = &local_spec->spec[0]; in bpf_core_spec_match()
5010 targ_acc = &targ_spec->spec[0]; in bpf_core_spec_match()
5012 if (core_relo_is_enumval_based(local_spec->relo_kind)) { in bpf_core_spec_match()
5018 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); in bpf_core_spec_match()
5022 local_essent_len = bpf_core_essential_name_len(local_acc->name); in bpf_core_spec_match()
5025 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); in bpf_core_spec_match()
5029 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { in bpf_core_spec_match()
5030 targ_acc->type_id = targ_id; in bpf_core_spec_match()
5031 targ_acc->idx = i; in bpf_core_spec_match()
5032 targ_acc->name = targ_name; in bpf_core_spec_match()
5033 targ_spec->len++; in bpf_core_spec_match()
5034 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; in bpf_core_spec_match()
5035 targ_spec->raw_len++; in bpf_core_spec_match()
5042 if (!core_relo_is_field_based(local_spec->relo_kind)) in bpf_core_spec_match()
5043 return -EINVAL; in bpf_core_spec_match()
5045 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { in bpf_core_spec_match()
5046 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, in bpf_core_spec_match()
5049 return -EINVAL; in bpf_core_spec_match()
5051 if (local_acc->name) { in bpf_core_spec_match()
5052 matched = bpf_core_match_member(local_spec->btf, in bpf_core_spec_match()
5071 flex = is_flex_arr(targ_btf, targ_acc - 1, a); in bpf_core_spec_match()
5072 if (!flex && local_acc->idx >= a->nelems) in bpf_core_spec_match()
5074 if (!skip_mods_and_typedefs(targ_btf, a->type, in bpf_core_spec_match()
5076 return -EINVAL; in bpf_core_spec_match()
5080 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_spec_match()
5081 return -E2BIG; in bpf_core_spec_match()
5083 targ_acc->type_id = targ_id; in bpf_core_spec_match()
5084 targ_acc->idx = local_acc->idx; in bpf_core_spec_match()
5085 targ_acc->name = NULL; in bpf_core_spec_match()
5086 targ_spec->len++; in bpf_core_spec_match()
5087 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; in bpf_core_spec_match()
5088 targ_spec->raw_len++; in bpf_core_spec_match()
5093 targ_spec->bit_offset += local_acc->idx * sz * 8; in bpf_core_spec_match()
5116 if (relo->kind == BPF_FIELD_EXISTS) { in bpf_core_calc_field_relo()
5122 return -EUCLEAN; /* request instruction poisoning */ in bpf_core_calc_field_relo()
5124 acc = &spec->spec[spec->len - 1]; in bpf_core_calc_field_relo()
5125 t = btf__type_by_id(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5128 if (!acc->name) { in bpf_core_calc_field_relo()
5129 if (relo->kind == BPF_FIELD_BYTE_OFFSET) { in bpf_core_calc_field_relo()
5130 *val = spec->bit_offset / 8; in bpf_core_calc_field_relo()
5132 sz = btf__resolve_size(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5134 return -EINVAL; in bpf_core_calc_field_relo()
5136 *type_id = acc->type_id; in bpf_core_calc_field_relo()
5137 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { in bpf_core_calc_field_relo()
5138 sz = btf__resolve_size(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5140 return -EINVAL; in bpf_core_calc_field_relo()
5144 prog->name, relo->kind, relo->insn_off / 8); in bpf_core_calc_field_relo()
5145 return -EINVAL; in bpf_core_calc_field_relo()
5152 m = btf_members(t) + acc->idx; in bpf_core_calc_field_relo()
5153 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id); in bpf_core_calc_field_relo()
5154 bit_off = spec->bit_offset; in bpf_core_calc_field_relo()
5155 bit_sz = btf_member_bitfield_size(t, acc->idx); in bpf_core_calc_field_relo()
5159 byte_sz = mt->size; in bpf_core_calc_field_relo()
5162 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { in bpf_core_calc_field_relo()
5164 /* bitfield can't be read with 64-bit read */ in bpf_core_calc_field_relo()
5166 prog->name, relo->kind, relo->insn_off / 8); in bpf_core_calc_field_relo()
5167 return -E2BIG; in bpf_core_calc_field_relo()
5173 sz = btf__resolve_size(spec->btf, field_type_id); in bpf_core_calc_field_relo()
5175 return -EINVAL; in bpf_core_calc_field_relo()
5177 byte_off = spec->bit_offset / 8; in bpf_core_calc_field_relo()
5188 switch (relo->kind) { in bpf_core_calc_field_relo()
5208 *val = 64 - (bit_off + bit_sz - byte_off * 8); in bpf_core_calc_field_relo()
5210 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); in bpf_core_calc_field_relo()
5214 *val = 64 - bit_sz; in bpf_core_calc_field_relo()
5220 return -EOPNOTSUPP; in bpf_core_calc_field_relo()
5232 /* type-based relos return zero when target type is not found */ in bpf_core_calc_type_relo()
5238 switch (relo->kind) { in bpf_core_calc_type_relo()
5240 *val = spec->root_type_id; in bpf_core_calc_type_relo()
5246 sz = btf__resolve_size(spec->btf, spec->root_type_id); in bpf_core_calc_type_relo()
5248 return -EINVAL; in bpf_core_calc_type_relo()
5254 return -EOPNOTSUPP; in bpf_core_calc_type_relo()
5267 switch (relo->kind) { in bpf_core_calc_enumval_relo()
5273 return -EUCLEAN; /* request instruction poisoning */ in bpf_core_calc_enumval_relo()
5274 t = btf__type_by_id(spec->btf, spec->spec[0].type_id); in bpf_core_calc_enumval_relo()
5275 e = btf_enum(t) + spec->spec[0].idx; in bpf_core_calc_enumval_relo()
5276 *val = e->val; in bpf_core_calc_enumval_relo()
5279 return -EOPNOTSUPP; in bpf_core_calc_enumval_relo()
5299 * memory loads of pointers and integers; this is necessary for 32-bit
5323 int err = -EOPNOTSUPP; in bpf_core_calc_relo()
5325 res->orig_val = 0; in bpf_core_calc_relo()
5326 res->new_val = 0; in bpf_core_calc_relo()
5327 res->poison = false; in bpf_core_calc_relo()
5328 res->validate = true; in bpf_core_calc_relo()
5329 res->fail_memsz_adjust = false; in bpf_core_calc_relo()
5330 res->orig_sz = res->new_sz = 0; in bpf_core_calc_relo()
5331 res->orig_type_id = res->new_type_id = 0; in bpf_core_calc_relo()
5333 if (core_relo_is_field_based(relo->kind)) { in bpf_core_calc_relo()
5335 &res->orig_val, &res->orig_sz, in bpf_core_calc_relo()
5336 &res->orig_type_id, &res->validate); in bpf_core_calc_relo()
5338 &res->new_val, &res->new_sz, in bpf_core_calc_relo()
5339 &res->new_type_id, NULL); in bpf_core_calc_relo()
5346 res->fail_memsz_adjust = false; in bpf_core_calc_relo()
5347 if (res->orig_sz != res->new_sz) { in bpf_core_calc_relo()
5350 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id); in bpf_core_calc_relo()
5351 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id); in bpf_core_calc_relo()
5355 * - reading a 32-bit kernel pointer, while on BPF in bpf_core_calc_relo()
5356 * size pointers are always 64-bit; in this case in bpf_core_calc_relo()
5359 * zero-extended upper 32-bits; in bpf_core_calc_relo()
5360 * - reading unsigned integers, again due to in bpf_core_calc_relo()
5361 * zero-extension is preserving the value correctly. in bpf_core_calc_relo()
5377 res->fail_memsz_adjust = true; in bpf_core_calc_relo()
5379 } else if (core_relo_is_type_based(relo->kind)) { in bpf_core_calc_relo()
5380 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val); in bpf_core_calc_relo()
5381 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val); in bpf_core_calc_relo()
5382 } else if (core_relo_is_enumval_based(relo->kind)) { in bpf_core_calc_relo()
5383 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val); in bpf_core_calc_relo()
5384 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val); in bpf_core_calc_relo()
5388 if (err == -EUCLEAN) { in bpf_core_calc_relo()
5390 res->poison = true; in bpf_core_calc_relo()
5392 } else if (err == -EOPNOTSUPP) { in bpf_core_calc_relo()
5394 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n", in bpf_core_calc_relo()
5395 prog->name, relo_idx, core_relo_kind_str(relo->kind), in bpf_core_calc_relo()
5396 relo->kind, relo->insn_off / 8); in bpf_core_calc_relo()
5410 prog->name, relo_idx, insn_idx); in bpf_core_poison_insn()
5411 insn->code = BPF_JMP | BPF_CALL; in bpf_core_poison_insn()
5412 insn->dst_reg = 0; in bpf_core_poison_insn()
5413 insn->src_reg = 0; in bpf_core_poison_insn()
5414 insn->off = 0; in bpf_core_poison_insn()
5419 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ in bpf_core_poison_insn()
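/* If the poisoned instruction is ever reached, the verifier rejects the
 * program with "invalid func unknown#195896080", surfacing the failed
 * relocation at load time instead of silently misbehaving.
 */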
5424 return insn->code == (BPF_LD | BPF_IMM | BPF_DW); in is_ldimm64()
5429 switch (BPF_SIZE(insn->code)) { in insn_bpf_size_to_bytes()
5434 default: return -1; in insn_bpf_size_to_bytes()
5445 default: return -1; in insn_bytes_to_bpf_size()
5454 * Expected insn->imm value is determined using relocation kind and local
5455 * spec, and is checked before patching instruction. If actual insn->imm value
5461 * 3. rX = <imm64> (load with 64-bit immediate value);
5476 if (relo->insn_off % BPF_INSN_SZ) in bpf_core_patch_insn()
5477 return -EINVAL; in bpf_core_patch_insn()
5478 insn_idx = relo->insn_off / BPF_INSN_SZ; in bpf_core_patch_insn()
5480 * program's frame of reference; (sub-)program code is not yet in bpf_core_patch_insn()
5481 * relocated, so it's enough to just subtract in-section offset in bpf_core_patch_insn()
5483 insn_idx = insn_idx - prog->sec_insn_off; in bpf_core_patch_insn()
5484 insn = &prog->insns[insn_idx]; in bpf_core_patch_insn()
5485 class = BPF_CLASS(insn->code); in bpf_core_patch_insn()
5487 if (res->poison) { in bpf_core_patch_insn()
5498 orig_val = res->orig_val; in bpf_core_patch_insn()
5499 new_val = res->new_val; in bpf_core_patch_insn()
5504 if (BPF_SRC(insn->code) != BPF_K) in bpf_core_patch_insn()
5505 return -EINVAL; in bpf_core_patch_insn()
5506 if (res->validate && insn->imm != orig_val) { in bpf_core_patch_insn()
5507 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n", in bpf_core_patch_insn()
5508 prog->name, relo_idx, in bpf_core_patch_insn()
5509 insn_idx, insn->imm, orig_val, new_val); in bpf_core_patch_insn()
5510 return -EINVAL; in bpf_core_patch_insn()
5512 orig_val = insn->imm; in bpf_core_patch_insn()
5513 insn->imm = new_val; in bpf_core_patch_insn()
5514 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n", in bpf_core_patch_insn()
5515 prog->name, relo_idx, insn_idx, in bpf_core_patch_insn()
5521 if (res->validate && insn->off != orig_val) { in bpf_core_patch_insn()
5522 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n", in bpf_core_patch_insn()
5523 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val); in bpf_core_patch_insn()
5524 return -EINVAL; in bpf_core_patch_insn()
5528 prog->name, relo_idx, insn_idx, new_val); in bpf_core_patch_insn()
5529 return -ERANGE; in bpf_core_patch_insn()
5531 if (res->fail_memsz_adjust) { in bpf_core_patch_insn()
5534 prog->name, relo_idx, insn_idx); in bpf_core_patch_insn()
5538 orig_val = insn->off; in bpf_core_patch_insn()
5539 insn->off = new_val; in bpf_core_patch_insn()
5540 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n", in bpf_core_patch_insn()
5541 prog->name, relo_idx, insn_idx, orig_val, new_val); in bpf_core_patch_insn()
5543 if (res->new_sz != res->orig_sz) { in bpf_core_patch_insn()
5547 if (insn_bytes_sz != res->orig_sz) { in bpf_core_patch_insn()
5549 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); in bpf_core_patch_insn()
5550 return -EINVAL; in bpf_core_patch_insn()
5553 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz); in bpf_core_patch_insn()
5556 prog->name, relo_idx, insn_idx, res->new_sz); in bpf_core_patch_insn()
5557 return -EINVAL; in bpf_core_patch_insn()
5560 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); in bpf_core_patch_insn()
5561 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n", in bpf_core_patch_insn()
5562 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz); in bpf_core_patch_insn()
5570 insn_idx + 1 >= prog->insns_cnt || in bpf_core_patch_insn()
5574 prog->name, relo_idx, insn_idx); in bpf_core_patch_insn()
5575 return -EINVAL; in bpf_core_patch_insn()
5579 if (res->validate && imm != orig_val) { in bpf_core_patch_insn()
5580 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n", in bpf_core_patch_insn()
5581 prog->name, relo_idx, in bpf_core_patch_insn()
5584 return -EINVAL; in bpf_core_patch_insn()
5588 insn[1].imm = 0; /* currently only 32-bit values are supported */ in bpf_core_patch_insn()
5589 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n", in bpf_core_patch_insn()
5590 prog->name, relo_idx, insn_idx, in bpf_core_patch_insn()
5596 prog->name, relo_idx, insn_idx, insn->code, in bpf_core_patch_insn()
5597 insn->src_reg, insn->dst_reg, insn->off, insn->imm); in bpf_core_patch_insn()
5598 return -EINVAL; in bpf_core_patch_insn()
5605 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
5606 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
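/* A dumped field-based spec might look like (illustrative values):
 *
 *   [132] struct sample.a[3] (0:1:2:3 @ offset 12)
 */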
5616 type_id = spec->root_type_id; in bpf_core_dump_spec()
5617 t = btf__type_by_id(spec->btf, type_id); in bpf_core_dump_spec()
5618 s = btf__name_by_offset(spec->btf, t->name_off); in bpf_core_dump_spec()
5622 if (core_relo_is_type_based(spec->relo_kind)) in bpf_core_dump_spec()
5625 if (core_relo_is_enumval_based(spec->relo_kind)) { in bpf_core_dump_spec()
5626 t = skip_mods_and_typedefs(spec->btf, type_id, NULL); in bpf_core_dump_spec()
5627 e = btf_enum(t) + spec->raw_spec[0]; in bpf_core_dump_spec()
5628 s = btf__name_by_offset(spec->btf, e->name_off); in bpf_core_dump_spec()
5630 libbpf_print(level, "::%s = %u", s, e->val); in bpf_core_dump_spec()
5634 if (core_relo_is_field_based(spec->relo_kind)) { in bpf_core_dump_spec()
5635 for (i = 0; i < spec->len; i++) { in bpf_core_dump_spec()
5636 if (spec->spec[i].name) in bpf_core_dump_spec()
5637 libbpf_print(level, ".%s", spec->spec[i].name); in bpf_core_dump_spec()
5638 else if (i > 0 || spec->spec[i].idx > 0) in bpf_core_dump_spec()
5639 libbpf_print(level, "[%u]", spec->spec[i].idx); in bpf_core_dump_spec()
5643 for (i = 0; i < spec->raw_len; i++) in bpf_core_dump_spec()
5644 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); in bpf_core_dump_spec()
5646 if (spec->bit_offset % 8) in bpf_core_dump_spec()
5648 spec->bit_offset / 8, spec->bit_offset % 8); in bpf_core_dump_spec()
5650 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8); in bpf_core_dump_spec()
5671 * CO-RE relocate single instruction.
5684 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
5696 * high-level spec accessors, meaning that all named fields should match,
5702 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
5712 * efficient memory-wise and not significantly worse (if not better)
5713 * CPU-wise compared to prebuilding a map from all local type names to
5728 const void *type_key = u32_as_hash_key(relo->type_id); in bpf_core_apply_relo()
5737 local_id = relo->type_id; in bpf_core_apply_relo()
5740 return -EINVAL; in bpf_core_apply_relo()
5742 local_name = btf__name_by_offset(local_btf, local_type->name_off); in bpf_core_apply_relo()
5744 return -EINVAL; in bpf_core_apply_relo()
5746 spec_str = btf__name_by_offset(local_btf, relo->access_str_off); in bpf_core_apply_relo()
5748 return -EINVAL; in bpf_core_apply_relo()
5750 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); in bpf_core_apply_relo()
5753 prog->name, relo_idx, local_id, btf_kind_str(local_type), in bpf_core_apply_relo()
5756 return -EINVAL; in bpf_core_apply_relo()
5759 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name, in bpf_core_apply_relo()
5760 relo_idx, core_relo_kind_str(relo->kind), relo->kind); in bpf_core_apply_relo()
5765 if (relo->kind == BPF_TYPE_ID_LOCAL) { in bpf_core_apply_relo()
5776 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); in bpf_core_apply_relo()
5777 return -EOPNOTSUPP; in bpf_core_apply_relo()
5784 prog->name, relo_idx, local_id, btf_kind_str(local_type), in bpf_core_apply_relo()
5795 for (i = 0, j = 0; i < cand_ids->len; i++) { in bpf_core_apply_relo()
5796 cand_id = cand_ids->data[i]; in bpf_core_apply_relo()
5800 prog->name, relo_idx, i); in bpf_core_apply_relo()
5806 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name, in bpf_core_apply_relo()
5807 relo_idx, err == 0 ? "non-matching" : "matching", i); in bpf_core_apply_relo()
5826 prog->name, relo_idx, cand_spec.bit_offset, in bpf_core_apply_relo()
5828 return -EINVAL; in bpf_core_apply_relo()
5835 prog->name, relo_idx, in bpf_core_apply_relo()
5838 return -EINVAL; in bpf_core_apply_relo()
5841 cand_ids->data[j++] = cand_spec.root_type_id; in bpf_core_apply_relo()
5853 cand_ids->len = j; in bpf_core_apply_relo()
5868 prog->name, relo_idx); in bpf_core_apply_relo()
5881 prog->name, relo_idx, relo->insn_off, err); in bpf_core_apply_relo()
5882 return -EINVAL; in bpf_core_apply_relo()
5901 if (obj->btf_ext->core_relo_info.len == 0) in bpf_object__relocate_core()
5907 targ_btf = obj->btf_vmlinux; in bpf_object__relocate_core()
5919 seg = &obj->btf_ext->core_relo_info; in bpf_object__relocate_core()
5921 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); in bpf_object__relocate_core()
5923 err = -EINVAL; in bpf_object__relocate_core()
5929 * prog->sec_idx to do a proper search by section index and in bpf_object__relocate_core()
5933 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate_core()
5934 prog = &obj->programs[i]; in bpf_object__relocate_core()
5935 if (strcmp(prog->sec_name, sec_name) == 0) in bpf_object__relocate_core()
5940 return -ENOENT; in bpf_object__relocate_core()
5942 sec_idx = prog->sec_idx; in bpf_object__relocate_core()
5944 pr_debug("sec '%s': found %d CO-RE relocations\n", in bpf_object__relocate_core()
5945 sec_name, sec->num_info); in bpf_object__relocate_core()
5948 insn_idx = rec->insn_off / BPF_INSN_SZ; in bpf_object__relocate_core()
5951 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n", in bpf_object__relocate_core()
5953 err = -EINVAL; in bpf_object__relocate_core()
5956 /* no need to apply CO-RE relocation if the program is in bpf_object__relocate_core()
5959 if (!prog->load) in bpf_object__relocate_core()
5962 err = bpf_core_apply_relo(prog, rec, i, obj->btf, in bpf_object__relocate_core()
5966 prog->name, i, err); in bpf_object__relocate_core()
5973 /* obj->btf_vmlinux is freed at the end of object load phase */ in bpf_object__relocate_core()
5974 if (targ_btf != obj->btf_vmlinux) in bpf_object__relocate_core()
5978 bpf_core_free_cands(entry->value); in bpf_object__relocate_core()
5986 * - map references;
5987 * - global variable references;
5988 * - extern references.
5995 for (i = 0; i < prog->nr_reloc; i++) { in bpf_object__relocate_data()
5996 struct reloc_desc *relo = &prog->reloc_desc[i]; in bpf_object__relocate_data()
5997 struct bpf_insn *insn = &prog->insns[relo->insn_idx]; in bpf_object__relocate_data()
6000 switch (relo->type) { in bpf_object__relocate_data()
6003 insn[0].imm = obj->maps[relo->map_idx].fd; in bpf_object__relocate_data()
6004 relo->processed = true; in bpf_object__relocate_data()
6008 insn[1].imm = insn[0].imm + relo->sym_off; in bpf_object__relocate_data()
6009 insn[0].imm = obj->maps[relo->map_idx].fd; in bpf_object__relocate_data()
6010 relo->processed = true; in bpf_object__relocate_data()
6013 ext = &obj->externs[relo->sym_off]; in bpf_object__relocate_data()
6014 if (ext->type == EXT_KCFG) { in bpf_object__relocate_data()
6016 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; in bpf_object__relocate_data()
6017 insn[1].imm = ext->kcfg.data_off; in bpf_object__relocate_data()
6019 if (ext->ksym.type_id) { /* typed ksyms */ in bpf_object__relocate_data()
6021 insn[0].imm = ext->ksym.vmlinux_btf_id; in bpf_object__relocate_data()
6023 insn[0].imm = (__u32)ext->ksym.addr; in bpf_object__relocate_data()
6024 insn[1].imm = ext->ksym.addr >> 32; in bpf_object__relocate_data()
6027 relo->processed = true; in bpf_object__relocate_data()
6034 prog->name, i, relo->type); in bpf_object__relocate_data()
6035 return -EINVAL; in bpf_object__relocate_data()
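/* All cases above rewrite a BPF_LD_IMM64 instruction pair in place:
 * insn[0].imm carries the map fd (or BTF id / low address bits) and
 * insn[1].imm the in-map offset (or high address bits), depending on
 * the relocation type.
 */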
6056 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); in adjust_prog_btf_ext_info()
6058 return -EINVAL; in adjust_prog_btf_ext_info()
6059 if (strcmp(sec_name, prog->sec_name) != 0) in adjust_prog_btf_ext_info()
6065 if (insn_off < prog->sec_insn_off) in adjust_prog_btf_ext_info()
6067 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) in adjust_prog_btf_ext_info()
6072 copy_end = rec + ext_info->rec_size; in adjust_prog_btf_ext_info()
6076 return -ENOENT; in adjust_prog_btf_ext_info()
6078 /* append func/line info of a given (sub-)program to the main in adjust_prog_btf_ext_info()
6081 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; in adjust_prog_btf_ext_info()
6082 new_sz = old_sz + (copy_end - copy_start); in adjust_prog_btf_ext_info()
6085 return -ENOMEM; in adjust_prog_btf_ext_info()
6087 *prog_rec_cnt = new_sz / ext_info->rec_size; in adjust_prog_btf_ext_info()
6088 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); in adjust_prog_btf_ext_info()
6090 /* Kernel instruction offsets are in units of 8-byte in adjust_prog_btf_ext_info()
6096 off_adj = prog->sub_insn_off - prog->sec_insn_off; in adjust_prog_btf_ext_info()
6099 for (; rec < rec_end; rec += ext_info->rec_size) { in adjust_prog_btf_ext_info()
6104 *prog_rec_sz = ext_info->rec_size; in adjust_prog_btf_ext_info()
6108 return -ENOENT; in adjust_prog_btf_ext_info()
6121 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC)) in reloc_prog_func_and_line_info()
6127 if (main_prog != prog && !main_prog->func_info) in reloc_prog_func_and_line_info()
6130 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, in reloc_prog_func_and_line_info()
6131 &main_prog->func_info, in reloc_prog_func_and_line_info()
6132 &main_prog->func_info_cnt, in reloc_prog_func_and_line_info()
6133 &main_prog->func_info_rec_size); in reloc_prog_func_and_line_info()
6135 if (err != -ENOENT) { in reloc_prog_func_and_line_info()
6136 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n", in reloc_prog_func_and_line_info()
6137 prog->name, err); in reloc_prog_func_and_line_info()
6140 if (main_prog->func_info) { in reloc_prog_func_and_line_info()
6145 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); in reloc_prog_func_and_line_info()
6149 …pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext … in reloc_prog_func_and_line_info()
6150 prog->name); in reloc_prog_func_and_line_info()
6155 if (main_prog != prog && !main_prog->line_info) in reloc_prog_func_and_line_info()
6158 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, in reloc_prog_func_and_line_info()
6159 &main_prog->line_info, in reloc_prog_func_and_line_info()
6160 &main_prog->line_info_cnt, in reloc_prog_func_and_line_info()
6161 &main_prog->line_info_rec_size); in reloc_prog_func_and_line_info()
6163 if (err != -ENOENT) { in reloc_prog_func_and_line_info()
6165 prog->name, err); in reloc_prog_func_and_line_info()
6168 if (main_prog->line_info) { in reloc_prog_func_and_line_info()
6173 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); in reloc_prog_func_and_line_info()
6178 prog->name); in reloc_prog_func_and_line_info()
6188 if (insn_idx == relo->insn_idx) in cmp_relo_by_insn_idx()
6190 return insn_idx < relo->insn_idx ? -1 : 1; in cmp_relo_by_insn_idx()
6195 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, in find_prog_insn_relo()
6196 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); in find_prog_insn_relo()
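/* This bsearch() relies on bpf_object__collect_relos() having qsort()'ed
 * each program's reloc_desc array by insn_idx (see cmp_relocs()), making
 * per-instruction lookups during subprog relocation O(log n).
 */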
6213 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { in bpf_object__reloc_code()
6214 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; in bpf_object__reloc_code()
6219 if (relo && relo->type != RELO_CALL) { in bpf_object__reloc_code()
6221 prog->name, insn_idx, relo->type); in bpf_object__reloc_code()
6222 return -LIBBPF_ERRNO__RELOC; in bpf_object__reloc_code()
6225 /* sub-program instruction index is a combination of in bpf_object__reloc_code()
6228 * call always has imm = -1, but for static functions in bpf_object__reloc_code()
6229 * relocation is against STT_SECTION and insn->imm in bpf_object__reloc_code()
6230 * points to a start of a static function in bpf_object__reloc_code()
6232 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; in bpf_object__reloc_code()
6234 /* if subprogram call is to a static function within in bpf_object__reloc_code()
6237 * offset necessary, insns->imm is relative to in bpf_object__reloc_code()
6240 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; in bpf_object__reloc_code()
6243 /* we enforce that sub-programs should be in .text section */ in bpf_object__reloc_code()
6244 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); in bpf_object__reloc_code()
6246 pr_warn("prog '%s': no .text section found yet sub-program call exists\n", in bpf_object__reloc_code()
6247 prog->name); in bpf_object__reloc_code()
6248 return -LIBBPF_ERRNO__RELOC; in bpf_object__reloc_code()
6254 * - append it at the end of main program's instructions block; in bpf_object__reloc_code()
6255 * - process it recursively, while current program is put on hold; in bpf_object__reloc_code()
6256 * - if that subprogram calls some other not yet processed in bpf_object__reloc_code()
6261 if (subprog->sub_insn_off == 0) { in bpf_object__reloc_code()
6262 subprog->sub_insn_off = main_prog->insns_cnt; in bpf_object__reloc_code()
6264 new_cnt = main_prog->insns_cnt + subprog->insns_cnt; in bpf_object__reloc_code()
6265 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); in bpf_object__reloc_code()
6267 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); in bpf_object__reloc_code()
6268 return -ENOMEM; in bpf_object__reloc_code()
6270 main_prog->insns = insns; in bpf_object__reloc_code()
6271 main_prog->insns_cnt = new_cnt; in bpf_object__reloc_code()
6273 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, in bpf_object__reloc_code()
6274 subprog->insns_cnt * sizeof(*insns)); in bpf_object__reloc_code()
6276 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", in bpf_object__reloc_code()
6277 main_prog->name, subprog->insns_cnt, subprog->name); in bpf_object__reloc_code()
6284 /* main_prog->insns memory could have been re-allocated, so in bpf_object__reloc_code()
6287 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; in bpf_object__reloc_code()
6293 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; in bpf_object__reloc_code()
6296 relo->processed = true; in bpf_object__reloc_code()
6299 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); in bpf_object__reloc_code()
6306 * Relocate sub-program calls.
6308 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6309 * main prog) is processed separately. For each subprog (non-entry functions,
6318 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6334 * subprog->sub_insn_off as zero at all times and won't be appended to current
6343 * (elided: ASCII diagrams illustrating main/subprogram instruction layouts before and after subprograms are appended)
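/* Worked example of the final call fixup in bpf_object__reloc_code():
 * with a subprog appended at sub_insn_off = 4 and a call at insn #2 of
 * the main program (whose sub_insn_off = 0):
 *
 *   insn->imm = 4 - (0 + 2) - 1 = 1;
 *
 * i.e., the branch offset is relative to the instruction following the
 * call.
 */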
6395 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate_calls()
6396 subprog = &obj->programs[i]; in bpf_object__relocate_calls()
6400 subprog->sub_insn_off = 0; in bpf_object__relocate_calls()
6401 for (j = 0; j < subprog->nr_reloc; j++) in bpf_object__relocate_calls()
6402 if (subprog->reloc_desc[j].type == RELO_CALL) in bpf_object__relocate_calls()
6403 subprog->reloc_desc[j].processed = false; in bpf_object__relocate_calls()
6421 if (obj->btf_ext) { in bpf_object__relocate()
6424 pr_warn("failed to perform CO-RE relocations: %d\n", in bpf_object__relocate()
6429 /* relocate data references first for all programs and sub-programs, in bpf_object__relocate()
6431 * subprogram processing won't need to re-calculate any of them in bpf_object__relocate()
6433 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6434 prog = &obj->programs[i]; in bpf_object__relocate()
6438 prog->name, err); in bpf_object__relocate()
6447 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6448 prog = &obj->programs[i]; in bpf_object__relocate()
6449 /* sub-program's sub-calls are relocated within the context of in bpf_object__relocate()
6458 prog->name, err); in bpf_object__relocate()
6463 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6464 prog = &obj->programs[i]; in bpf_object__relocate()
6465 zfree(&prog->reloc_desc); in bpf_object__relocate()
6466 prog->nr_reloc = 0; in bpf_object__relocate()
6490 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) in bpf_object__collect_map_relos()
6491 return -EINVAL; in bpf_object__collect_map_relos()
6492 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); in bpf_object__collect_map_relos()
6494 return -EINVAL; in bpf_object__collect_map_relos()
6496 symbols = obj->efile.symbols; in bpf_object__collect_map_relos()
6497 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_map_relos()
6501 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_map_relos()
6506 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_map_relos()
6509 if (sym.st_shndx != obj->efile.btf_maps_shndx) { in bpf_object__collect_map_relos()
6510 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", in bpf_object__collect_map_relos()
6512 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_map_relos()
6519 for (j = 0; j < obj->nr_maps; j++) { in bpf_object__collect_map_relos()
6520 map = &obj->maps[j]; in bpf_object__collect_map_relos()
6521 if (map->sec_idx != obj->efile.btf_maps_shndx) in bpf_object__collect_map_relos()
6524 vi = btf_var_secinfos(sec) + map->btf_var_idx; in bpf_object__collect_map_relos()
6525 if (vi->offset <= rel.r_offset && in bpf_object__collect_map_relos()
6526 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) in bpf_object__collect_map_relos()
6529 if (j == obj->nr_maps) { in bpf_object__collect_map_relos()
6532 return -EINVAL; in bpf_object__collect_map_relos()
6535 if (!bpf_map_type__is_map_in_map(map->def.type)) in bpf_object__collect_map_relos()
6536 return -EINVAL; in bpf_object__collect_map_relos()
6537 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && in bpf_object__collect_map_relos()
6538 map->def.key_size != sizeof(int)) { in bpf_object__collect_map_relos()
6539 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", in bpf_object__collect_map_relos()
6540 i, map->name, sizeof(int)); in bpf_object__collect_map_relos()
6541 return -EINVAL; in bpf_object__collect_map_relos()
6546 return -ESRCH; in bpf_object__collect_map_relos()
6548 var = btf__type_by_id(obj->btf, vi->type); in bpf_object__collect_map_relos()
6549 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); in bpf_object__collect_map_relos()
6551 return -EINVAL; in bpf_object__collect_map_relos()
6552 member = btf_members(def) + btf_vlen(def) - 1; in bpf_object__collect_map_relos()
6553 mname = btf__name_by_offset(obj->btf, member->name_off); in bpf_object__collect_map_relos()
6555 return -EINVAL; in bpf_object__collect_map_relos()
6557 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; in bpf_object__collect_map_relos()
6558 if (rel.r_offset - vi->offset < moff) in bpf_object__collect_map_relos()
6559 return -EINVAL; in bpf_object__collect_map_relos()
6561 moff = rel.r_offset - vi->offset - moff; in bpf_object__collect_map_relos()
6566 return -EINVAL; in bpf_object__collect_map_relos()
6568 if (moff >= map->init_slots_sz) { in bpf_object__collect_map_relos()
6570 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); in bpf_object__collect_map_relos()
6572 return -ENOMEM; in bpf_object__collect_map_relos()
6573 map->init_slots = tmp; in bpf_object__collect_map_relos()
6574 memset(map->init_slots + map->init_slots_sz, 0, in bpf_object__collect_map_relos()
6575 (new_sz - map->init_slots_sz) * host_ptr_sz); in bpf_object__collect_map_relos()
6576 map->init_slots_sz = new_sz; in bpf_object__collect_map_relos()
6578 map->init_slots[moff] = targ_map; in bpf_object__collect_map_relos()
6581 i, map->name, moff, name); in bpf_object__collect_map_relos()
6592 if (a->insn_idx != b->insn_idx) in cmp_relocs()
6593 return a->insn_idx < b->insn_idx ? -1 : 1; in cmp_relocs()
6596 if (a->type != b->type) in cmp_relocs()
6597 return a->type < b->type ? -1 : 1; in cmp_relocs()
6606 for (i = 0; i < obj->efile.nr_reloc_sects; i++) { in bpf_object__collect_relos()
6607 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr; in bpf_object__collect_relos()
6608 Elf_Data *data = obj->efile.reloc_sects[i].data; in bpf_object__collect_relos()
6609 int idx = shdr->sh_info; in bpf_object__collect_relos()
6611 if (shdr->sh_type != SHT_REL) { in bpf_object__collect_relos()
6613 return -LIBBPF_ERRNO__INTERNAL; in bpf_object__collect_relos()
6616 if (idx == obj->efile.st_ops_shndx) in bpf_object__collect_relos()
6618 else if (idx == obj->efile.btf_maps_shndx) in bpf_object__collect_relos()
6626 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__collect_relos()
6627 struct bpf_program *p = &obj->programs[i]; in bpf_object__collect_relos()
6629 if (!p->nr_reloc) in bpf_object__collect_relos()
6632 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); in bpf_object__collect_relos()
6639 if (BPF_CLASS(insn->code) == BPF_JMP && in insn_is_helper_call()
6640 BPF_OP(insn->code) == BPF_CALL && in insn_is_helper_call()
6641 BPF_SRC(insn->code) == BPF_K && in insn_is_helper_call()
6642 insn->src_reg == 0 && in insn_is_helper_call()
6643 insn->dst_reg == 0) { in insn_is_helper_call()
6644 *func_id = insn->imm; in insn_is_helper_call()
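/* src_reg == 0 distinguishes a helper call from a subprog call, which
 * uses src_reg == BPF_PSEUDO_CALL (see insn_is_subprog_call()).
 */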
6652 struct bpf_insn *insn = prog->insns; in bpf_object__sanitize_prog()
6656 for (i = 0; i < prog->insns_cnt; i++, insn++) { in bpf_object__sanitize_prog()
6668 insn->imm = BPF_FUNC_probe_read; in bpf_object__sanitize_prog()
6673 insn->imm = BPF_FUNC_probe_read_str; in bpf_object__sanitize_prog()
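/* The two rewrites above transparently fall back from
 * bpf_probe_read_{kernel,user}{,_str}() to the older
 * bpf_probe_read{,_str}() helpers on kernels lacking the newer variants.
 */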
6693 return -EINVAL; in load_program()
6696 load_attr.prog_type = prog->type; in load_program()
6698 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def && in load_program()
6699 prog->sec_def->is_exp_attach_type_optional) in load_program()
6702 load_attr.expected_attach_type = prog->expected_attach_type; in load_program()
6704 load_attr.name = prog->name; in load_program()
6708 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || in load_program()
6709 prog->type == BPF_PROG_TYPE_LSM) { in load_program()
6710 load_attr.attach_btf_id = prog->attach_btf_id; in load_program()
6711 } else if (prog->type == BPF_PROG_TYPE_TRACING || in load_program()
6712 prog->type == BPF_PROG_TYPE_EXT) { in load_program()
6713 load_attr.attach_prog_fd = prog->attach_prog_fd; in load_program()
6714 load_attr.attach_btf_id = prog->attach_btf_id; in load_program()
6717 load_attr.prog_ifindex = prog->prog_ifindex; in load_program()
6720 btf_fd = bpf_object__btf_fd(prog->obj); in load_program()
6723 load_attr.func_info = prog->func_info; in load_program()
6724 load_attr.func_info_rec_size = prog->func_info_rec_size; in load_program()
6725 load_attr.func_info_cnt = prog->func_info_cnt; in load_program()
6726 load_attr.line_info = prog->line_info; in load_program()
6727 load_attr.line_info_rec_size = prog->line_info_rec_size; in load_program()
6728 load_attr.line_info_cnt = prog->line_info_cnt; in load_program()
6730 load_attr.log_level = prog->log_level; in load_program()
6731 load_attr.prog_flags = prog->prog_flags; in load_program()
6737 return -ENOMEM; in load_program()
6748 if (prog->obj->rodata_map_idx >= 0 && in load_program()
6751 &prog->obj->maps[prog->obj->rodata_map_idx]; in load_program()
6756 prog->name, cp); in load_program()
6773 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; in load_program()
6779 ret = -LIBBPF_ERRNO__VERIFY; in load_program()
6780 pr_warn("-- BEGIN DUMP LOG ---\n"); in load_program()
6782 pr_warn("-- END LOG --\n"); in load_program()
6786 ret = -LIBBPF_ERRNO__PROG2BIG; in load_program()
6796 ret = -LIBBPF_ERRNO__PROGTYPE; in load_program()
6812 if (prog->obj->loaded) { in bpf_program__load()
6813 pr_warn("prog '%s': can't load after object was loaded\n", prog->name); in bpf_program__load()
6814 return -EINVAL; in bpf_program__load()
6817 if ((prog->type == BPF_PROG_TYPE_TRACING || in bpf_program__load()
6818 prog->type == BPF_PROG_TYPE_LSM || in bpf_program__load()
6819 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { in bpf_program__load()
6823 prog->attach_btf_id = btf_id; in bpf_program__load()
6826 if (prog->instances.nr < 0 || !prog->instances.fds) { in bpf_program__load()
6827 if (prog->preprocessor) { in bpf_program__load()
6829 prog->name); in bpf_program__load()
6830 return -LIBBPF_ERRNO__INTERNAL; in bpf_program__load()
6833 prog->instances.fds = malloc(sizeof(int)); in bpf_program__load()
6834 if (!prog->instances.fds) { in bpf_program__load()
6836 return -ENOMEM; in bpf_program__load()
6838 prog->instances.nr = 1; in bpf_program__load()
6839 prog->instances.fds[0] = -1; in bpf_program__load()
6842 if (!prog->preprocessor) { in bpf_program__load()
6843 if (prog->instances.nr != 1) { in bpf_program__load()
6845 prog->name, prog->instances.nr); in bpf_program__load()
6847 err = load_program(prog, prog->insns, prog->insns_cnt, in bpf_program__load()
6850 prog->instances.fds[0] = fd; in bpf_program__load()
6854 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__load()
6856 bpf_program_prep_t preprocessor = prog->preprocessor; in bpf_program__load()
6859 err = preprocessor(prog, i, prog->insns, in bpf_program__load()
6860 prog->insns_cnt, &result); in bpf_program__load()
6863 i, prog->name); in bpf_program__load()
6869 i, prog->name); in bpf_program__load()
6870 prog->instances.fds[i] = -1; in bpf_program__load()
6872 *result.pfd = -1; in bpf_program__load()
6880 i, prog->name); in bpf_program__load()
6886 prog->instances.fds[i] = fd; in bpf_program__load()
6890 pr_warn("failed to load program '%s'\n", prog->name); in bpf_program__load()
6891 zfree(&prog->insns); in bpf_program__load()
6892 prog->insns_cnt = 0; in bpf_program__load()
6903 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__load_progs()
6904 prog = &obj->programs[i]; in bpf_object__load_progs()
6910 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__load_progs()
6911 prog = &obj->programs[i]; in bpf_object__load_progs()
6914 if (!prog->load) { in bpf_object__load_progs()
6915 pr_debug("prog '%s': skipped loading\n", prog->name); in bpf_object__load_progs()
6918 prog->log_level |= log_level; in bpf_object__load_progs()
6919 err = bpf_program__load(prog, obj->license, obj->kern_version); in bpf_object__load_progs()
6941 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); in __bpf_object__open()
6945 return ERR_PTR(-EINVAL); in __bpf_object__open()
6950 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", in __bpf_object__open()
6965 obj->kconfig = strdup(kconfig); in __bpf_object__open()
6966 if (!obj->kconfig) { in __bpf_object__open()
6967 err = -ENOMEM; in __bpf_object__open()
6984 prog->sec_def = find_sec_def(prog->sec_name); in __bpf_object__open()
6985 if (!prog->sec_def) in __bpf_object__open()
6989 if (prog->sec_def->is_sleepable) in __bpf_object__open()
6990 prog->prog_flags |= BPF_F_SLEEPABLE; in __bpf_object__open()
6991 bpf_program__set_type(prog, prog->sec_def->prog_type); in __bpf_object__open()
6993 prog->sec_def->expected_attach_type); in __bpf_object__open()
6995 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || in __bpf_object__open()
6996 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) in __bpf_object__open()
6997 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); in __bpf_object__open()
7014 if (!attr->file) in __bpf_object__open_xattr()
7017 pr_debug("loading %s\n", attr->file); in __bpf_object__open_xattr()
7018 return __bpf_object__open(attr->file, NULL, 0, &opts); in __bpf_object__open_xattr()
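/* Usage sketch for the open step, assuming <bpf/libbpf.h> and a compiled
 * object file "prog.o" (file and object names here are hypothetical):
 */
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
	.object_name = "my_obj",	/* override the ELF-derived name */
);
struct bpf_object *obj = bpf_object__open_file("prog.o", &open_opts);
if (libbpf_get_error(obj))
	return -1;			/* handle open failure */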
7040 return ERR_PTR(-EINVAL); in bpf_object__open_file()
7052 return ERR_PTR(-EINVAL); in bpf_object__open_mem()
7063 /* wrong default, but backwards-compatible */ in bpf_object__open_buffer()
7067 /* returning NULL is wrong, but backwards-compatible */ in bpf_object__open_buffer()
7079 return -EINVAL; in bpf_object__unload()
7081 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__unload()
7082 zclose(obj->maps[i].fd); in bpf_object__unload()
7083 if (obj->maps[i].st_ops) in bpf_object__unload()
7084 zfree(&obj->maps[i].st_ops->kern_vdata); in bpf_object__unload()
7087 for (i = 0; i < obj->nr_programs; i++) in bpf_object__unload()
7088 bpf_program__unload(&obj->programs[i]); in bpf_object__unload()
7102 return -ENOTSUP; in bpf_object__sanitize_maps()
7105 m->def.map_flags ^= BPF_F_MMAPABLE; in bpf_object__sanitize_maps()
7121 err = -errno; in bpf_object__read_kallsyms_file()
7133 err = -EINVAL; in bpf_object__read_kallsyms_file()
7138 if (!ext || ext->type != EXT_KSYM) in bpf_object__read_kallsyms_file()
7141 if (ext->is_set && ext->ksym.addr != sym_addr) { in bpf_object__read_kallsyms_file()
7143 sym_name, ext->ksym.addr, sym_addr); in bpf_object__read_kallsyms_file()
7144 err = -EINVAL; in bpf_object__read_kallsyms_file()
7147 if (!ext->is_set) { in bpf_object__read_kallsyms_file()
7148 ext->is_set = true; in bpf_object__read_kallsyms_file()
7149 ext->ksym.addr = sym_addr; in bpf_object__read_kallsyms_file()
7164 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_ksyms_btf_id()
7170 ext = &obj->externs[i]; in bpf_object__resolve_ksyms_btf_id()
7171 if (ext->type != EXT_KSYM || !ext->ksym.type_id) in bpf_object__resolve_ksyms_btf_id()
7174 id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name, in bpf_object__resolve_ksyms_btf_id()
7178 ext->name); in bpf_object__resolve_ksyms_btf_id()
7179 return -ESRCH; in bpf_object__resolve_ksyms_btf_id()
7183 local_type_id = ext->ksym.type_id; in bpf_object__resolve_ksyms_btf_id()
7186 targ_var = btf__type_by_id(obj->btf_vmlinux, id); in bpf_object__resolve_ksyms_btf_id()
7187 targ_var_name = btf__name_by_offset(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7188 targ_var->name_off); in bpf_object__resolve_ksyms_btf_id()
7189 targ_type = skip_mods_and_typedefs(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7190 targ_var->type, in bpf_object__resolve_ksyms_btf_id()
7193 ret = bpf_core_types_are_compat(obj->btf, local_type_id, in bpf_object__resolve_ksyms_btf_id()
7194 obj->btf_vmlinux, targ_type_id); in bpf_object__resolve_ksyms_btf_id()
7199 local_type = btf__type_by_id(obj->btf, local_type_id); in bpf_object__resolve_ksyms_btf_id()
7200 local_name = btf__name_by_offset(obj->btf, in bpf_object__resolve_ksyms_btf_id()
7201 local_type->name_off); in bpf_object__resolve_ksyms_btf_id()
7202 targ_name = btf__name_by_offset(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7203 targ_type->name_off); in bpf_object__resolve_ksyms_btf_id()
7206 ext->name, local_type_id, in bpf_object__resolve_ksyms_btf_id()
7209 return -EINVAL; in bpf_object__resolve_ksyms_btf_id()
7212 ext->is_set = true; in bpf_object__resolve_ksyms_btf_id()
7213 ext->ksym.vmlinux_btf_id = id; in bpf_object__resolve_ksyms_btf_id()
7215 ext->name, id, btf_kind_str(targ_var), targ_var_name); in bpf_object__resolve_ksyms_btf_id()
7229 if (obj->nr_extern == 0) in bpf_object__resolve_externs()
7232 if (obj->kconfig_map_idx >= 0) in bpf_object__resolve_externs()
7233 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; in bpf_object__resolve_externs()
7235 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7236 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7238 if (ext->type == EXT_KCFG && in bpf_object__resolve_externs()
7239 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { in bpf_object__resolve_externs()
7240 void *ext_val = kcfg_data + ext->kcfg.data_off; in bpf_object__resolve_externs()
7245 return -EINVAL; in bpf_object__resolve_externs()
7250 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); in bpf_object__resolve_externs()
7251 } else if (ext->type == EXT_KCFG && in bpf_object__resolve_externs()
7252 strncmp(ext->name, "CONFIG_", 7) == 0) { in bpf_object__resolve_externs()
7254 } else if (ext->type == EXT_KSYM) { in bpf_object__resolve_externs()
7255 if (ext->ksym.type_id) in bpf_object__resolve_externs()
7260 pr_warn("unrecognized extern '%s'\n", ext->name); in bpf_object__resolve_externs()
7261 return -EINVAL; in bpf_object__resolve_externs()
7267 return -EINVAL; in bpf_object__resolve_externs()
7269 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7270 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7271 if (ext->type == EXT_KCFG && !ext->is_set) { in bpf_object__resolve_externs()
7280 return -EINVAL; in bpf_object__resolve_externs()
7285 return -EINVAL; in bpf_object__resolve_externs()
7290 return -EINVAL; in bpf_object__resolve_externs()
7292 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7293 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7295 if (!ext->is_set && !ext->is_weak) { in bpf_object__resolve_externs()
7296 pr_warn("extern %s (strong) not resolved\n", ext->name); in bpf_object__resolve_externs()
7297 return -ESRCH; in bpf_object__resolve_externs()
7298 } else if (!ext->is_set) { in bpf_object__resolve_externs()
7300 ext->name); in bpf_object__resolve_externs()
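/* The BPF-side declarations that produce these externs look roughly like
 * this (a sketch using the __kconfig/__ksym attributes from bpf_helpers.h;
 * the specific CONFIG_ and ksym names are just examples):
 */
extern unsigned int LINUX_KERNEL_VERSION __kconfig;	/* EXT_KCFG */
extern unsigned long CONFIG_HZ __kconfig __weak;	/* EXT_KCFG, weak */
extern const struct rq runqueues __ksym;		/* EXT_KSYM, typed */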
7313 return -EINVAL; in bpf_object__load_xattr()
7314 obj = attr->obj; in bpf_object__load_xattr()
7316 return -EINVAL; in bpf_object__load_xattr()
7318 if (obj->loaded) { in bpf_object__load_xattr()
7319 pr_warn("object '%s': load can't be attempted twice\n", obj->name); in bpf_object__load_xattr()
7320 return -EINVAL; in bpf_object__load_xattr()
7325 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); in bpf_object__load_xattr()
7330 err = err ? : bpf_object__relocate(obj, attr->target_btf_path); in bpf_object__load_xattr()
7331 err = err ? : bpf_object__load_progs(obj, attr->log_level); in bpf_object__load_xattr()
7333 btf__free(obj->btf_vmlinux); in bpf_object__load_xattr()
7334 obj->btf_vmlinux = NULL; in bpf_object__load_xattr()
7336 obj->loaded = true; /* doesn't matter whether load succeeded or not */ in bpf_object__load_xattr()
7343 /* unpin any maps that were auto-pinned during load */ in bpf_object__load_xattr()
7344 for (i = 0; i < obj->nr_maps; i++) in bpf_object__load_xattr()
7345 if (obj->maps[i].pinned && !obj->maps[i].reused) in bpf_object__load_xattr()
7346 bpf_map__unpin(&obj->maps[i], NULL); in bpf_object__load_xattr()
7349 pr_warn("failed to load object '%s'\n", obj->path); in bpf_object__load_xattr()
7370 return -ENOMEM; in make_parent_dir()
7374 err = -errno; in make_parent_dir()
7378 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in make_parent_dir()
7392 return -EINVAL; in check_path()
7396 return -ENOMEM; in check_path()
7402 err = -errno; in check_path()
7408 err = -EINVAL; in check_path()
7430 return -EINVAL; in bpf_program__pin_instance()
7433 if (instance < 0 || instance >= prog->instances.nr) { in bpf_program__pin_instance()
7435 instance, prog->name, prog->instances.nr); in bpf_program__pin_instance()
7436 return -EINVAL; in bpf_program__pin_instance()
7439 if (bpf_obj_pin(prog->instances.fds[instance], path)) { in bpf_program__pin_instance()
7440 err = -errno; in bpf_program__pin_instance()
7461 return -EINVAL; in bpf_program__unpin_instance()
7464 if (instance < 0 || instance >= prog->instances.nr) { in bpf_program__unpin_instance()
7466 instance, prog->name, prog->instances.nr); in bpf_program__unpin_instance()
7467 return -EINVAL; in bpf_program__unpin_instance()
7472 return -errno; in bpf_program__unpin_instance()
7492 return -EINVAL; in bpf_program__pin()
7495 if (prog->instances.nr <= 0) { in bpf_program__pin()
7496 pr_warn("no instances of prog %s to pin\n", prog->name); in bpf_program__pin()
7497 return -EINVAL; in bpf_program__pin()
7500 if (prog->instances.nr == 1) { in bpf_program__pin()
7505 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__pin()
7511 err = -EINVAL; in bpf_program__pin()
7514 err = -ENAMETOOLONG; in bpf_program__pin()
7526 for (i = i - 1; i >= 0; i--) { in bpf_program__pin()
7554 return -EINVAL; in bpf_program__unpin()
7557 if (prog->instances.nr <= 0) { in bpf_program__unpin()
7558 pr_warn("no instances of prog %s to pin\n", prog->name); in bpf_program__unpin()
7559 return -EINVAL; in bpf_program__unpin()
7562 if (prog->instances.nr == 1) { in bpf_program__unpin()
7567 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__unpin()
7573 return -EINVAL; in bpf_program__unpin()
7575 return -ENAMETOOLONG; in bpf_program__unpin()
7584 return -errno; in bpf_program__unpin()
7596 return -EINVAL; in bpf_map__pin()
7599 if (map->pin_path) { in bpf_map__pin()
7600 if (path && strcmp(path, map->pin_path)) { in bpf_map__pin()
7602 bpf_map__name(map), map->pin_path, path); in bpf_map__pin()
7603 return -EINVAL; in bpf_map__pin()
7604 } else if (map->pinned) { in bpf_map__pin()
7605 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", in bpf_map__pin()
7606 bpf_map__name(map), map->pin_path); in bpf_map__pin()
7613 return -EINVAL; in bpf_map__pin()
7614 } else if (map->pinned) { in bpf_map__pin()
7616 return -EEXIST; in bpf_map__pin()
7619 map->pin_path = strdup(path); in bpf_map__pin()
7620 if (!map->pin_path) { in bpf_map__pin()
7621 err = -errno; in bpf_map__pin()
7626 err = make_parent_dir(map->pin_path); in bpf_map__pin()
7630 err = check_path(map->pin_path); in bpf_map__pin()
7634 if (bpf_obj_pin(map->fd, map->pin_path)) { in bpf_map__pin()
7635 err = -errno; in bpf_map__pin()
7639 map->pinned = true; in bpf_map__pin()
7640 pr_debug("pinned map '%s'\n", map->pin_path); in bpf_map__pin()
7645 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in bpf_map__pin()
7656 return -EINVAL; in bpf_map__unpin()
7659 if (map->pin_path) { in bpf_map__unpin()
7660 if (path && strcmp(path, map->pin_path)) { in bpf_map__unpin()
7662 bpf_map__name(map), map->pin_path, path); in bpf_map__unpin()
7663 return -EINVAL; in bpf_map__unpin()
7665 path = map->pin_path; in bpf_map__unpin()
7669 return -EINVAL; in bpf_map__unpin()
7678 return -errno; in bpf_map__unpin()
7680 map->pinned = false; in bpf_map__unpin()
7693 return -errno; in bpf_map__set_pin_path()
7696 free(map->pin_path); in bpf_map__set_pin_path()
7697 map->pin_path = new; in bpf_map__set_pin_path()
7703 return map->pin_path; in bpf_map__get_pin_path()
7708 return map->pinned; in bpf_map__is_pinned()
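/* Pinning usage sketch; "/sys/fs/bpf/my_map" is a hypothetical bpffs path.
 * Setting a pin path before load makes the map auto-pin (and reuse an
 * existing pin); calling bpf_map__pin() after load pins explicitly.
 */
bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");	/* before load */
/* ... or, once the object is loaded: */
bpf_map__pin(map, "/sys/fs/bpf/my_map");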
7727 return -ENOENT; in bpf_object__pin_maps()
7729 if (!obj->loaded) { in bpf_object__pin_maps()
7731 return -ENOENT; in bpf_object__pin_maps()
7744 err = -EINVAL; in bpf_object__pin_maps()
7747 err = -ENAMETOOLONG; in bpf_object__pin_maps()
7752 } else if (!map->pin_path) { in bpf_object__pin_maps()
7765 if (!map->pin_path) in bpf_object__pin_maps()
7780 return -ENOENT; in bpf_object__unpin_maps()
7792 return -EINVAL; in bpf_object__unpin_maps()
7794 return -ENAMETOOLONG; in bpf_object__unpin_maps()
7797 } else if (!map->pin_path) { in bpf_object__unpin_maps()
7815 return -ENOENT; in bpf_object__pin_programs()
7817 if (!obj->loaded) { in bpf_object__pin_programs()
7819 return -ENOENT; in bpf_object__pin_programs()
7827 prog->pin_name); in bpf_object__pin_programs()
7829 err = -EINVAL; in bpf_object__pin_programs()
7832 err = -ENAMETOOLONG; in bpf_object__pin_programs()
7849 prog->pin_name); in bpf_object__pin_programs()
7867 return -ENOENT; in bpf_object__unpin_programs()
7874 prog->pin_name); in bpf_object__unpin_programs()
7876 return -EINVAL; in bpf_object__unpin_programs()
7878 return -ENAMETOOLONG; in bpf_object__unpin_programs()
7907 if (map->clear_priv) in bpf_map__destroy()
7908 map->clear_priv(map, map->priv); in bpf_map__destroy()
7909 map->priv = NULL; in bpf_map__destroy()
7910 map->clear_priv = NULL; in bpf_map__destroy()
7912 if (map->inner_map) { in bpf_map__destroy()
7913 bpf_map__destroy(map->inner_map); in bpf_map__destroy()
7914 zfree(&map->inner_map); in bpf_map__destroy()
7917 zfree(&map->init_slots); in bpf_map__destroy()
7918 map->init_slots_sz = 0; in bpf_map__destroy()
7920 if (map->mmaped) { in bpf_map__destroy()
7921 munmap(map->mmaped, bpf_map_mmap_sz(map)); in bpf_map__destroy()
7922 map->mmaped = NULL; in bpf_map__destroy()
7925 if (map->st_ops) { in bpf_map__destroy()
7926 zfree(&map->st_ops->data); in bpf_map__destroy()
7927 zfree(&map->st_ops->progs); in bpf_map__destroy()
7928 zfree(&map->st_ops->kern_func_off); in bpf_map__destroy()
7929 zfree(&map->st_ops); in bpf_map__destroy()
7932 zfree(&map->name); in bpf_map__destroy()
7933 zfree(&map->pin_path); in bpf_map__destroy()
7935 if (map->fd >= 0) in bpf_map__destroy()
7936 zclose(map->fd); in bpf_map__destroy()
7946 if (obj->clear_priv) in bpf_object__close()
7947 obj->clear_priv(obj, obj->priv); in bpf_object__close()
7951 btf__free(obj->btf); in bpf_object__close()
7952 btf_ext__free(obj->btf_ext); in bpf_object__close()
7954 for (i = 0; i < obj->nr_maps; i++) in bpf_object__close()
7955 bpf_map__destroy(&obj->maps[i]); in bpf_object__close()
7957 zfree(&obj->kconfig); in bpf_object__close()
7958 zfree(&obj->externs); in bpf_object__close()
7959 obj->nr_extern = 0; in bpf_object__close()
7961 zfree(&obj->maps); in bpf_object__close()
7962 obj->nr_maps = 0; in bpf_object__close()
7964 if (obj->programs && obj->nr_programs) { in bpf_object__close()
7965 for (i = 0; i < obj->nr_programs; i++) in bpf_object__close()
7966 bpf_program__exit(&obj->programs[i]); in bpf_object__close()
7968 zfree(&obj->programs); in bpf_object__close()
7970 list_del(&obj->list); in bpf_object__close()
7987 if (&next->list == &bpf_objects_list) in bpf_object__next()
7995 return obj ? obj->name : ERR_PTR(-EINVAL); in bpf_object__name()
8000 return obj ? obj->kern_version : 0; in bpf_object__kversion()
8005 return obj ? obj->btf : NULL; in bpf_object__btf()
8010 return obj->btf ? btf__fd(obj->btf) : -1; in bpf_object__btf_fd()
8016 if (obj->priv && obj->clear_priv) in bpf_object__set_priv()
8017 obj->clear_priv(obj, obj->priv); in bpf_object__set_priv()
8019 obj->priv = priv; in bpf_object__set_priv()
8020 obj->clear_priv = clear_priv; in bpf_object__set_priv()
8026 return obj ? obj->priv : ERR_PTR(-EINVAL); in bpf_object__priv()
8033 size_t nr_programs = obj->nr_programs; in __bpf_program__iter()
8041 return forward ? &obj->programs[0] : in __bpf_program__iter()
8042 &obj->programs[nr_programs - 1]; in __bpf_program__iter()
8044 if (p->obj != obj) { in __bpf_program__iter()
8049 idx = (p - obj->programs) + (forward ? 1 : -1); in __bpf_program__iter()
8050 if (idx >= obj->nr_programs || idx < 0) in __bpf_program__iter()
8052 return &obj->programs[idx]; in __bpf_program__iter()
8082 if (prog->priv && prog->clear_priv) in bpf_program__set_priv()
8083 prog->clear_priv(prog, prog->priv); in bpf_program__set_priv()
8085 prog->priv = priv; in bpf_program__set_priv()
8086 prog->clear_priv = clear_priv; in bpf_program__set_priv()
8092 return prog ? prog->priv : ERR_PTR(-EINVAL); in bpf_program__priv()
8097 prog->prog_ifindex = ifindex; in bpf_program__set_ifindex()
8102 return prog->name; in bpf_program__name()
8107 return prog->sec_name; in bpf_program__section_name()
8114 title = prog->sec_name; in bpf_program__title()
8119 return ERR_PTR(-ENOMEM); in bpf_program__title()
8128 return prog->load; in bpf_program__autoload()
8133 if (prog->obj->loaded) in bpf_program__set_autoload()
8134 return -EINVAL; in bpf_program__set_autoload()
8136 prog->load = autoload; in bpf_program__set_autoload()
8147 return prog->insns_cnt * BPF_INSN_SZ; in bpf_program__size()
8156 return -EINVAL; in bpf_program__set_prep()
8158 if (prog->instances.nr > 0 || prog->instances.fds) { in bpf_program__set_prep()
8159 pr_warn("Can't set pre-processor after loading\n"); in bpf_program__set_prep()
8160 return -EINVAL; in bpf_program__set_prep()
8166 return -ENOMEM; in bpf_program__set_prep()
8169 /* fill all fds with -1 */ in bpf_program__set_prep()
8170 memset(instances_fds, -1, sizeof(int) * nr_instances); in bpf_program__set_prep()
8172 prog->instances.nr = nr_instances; in bpf_program__set_prep()
8173 prog->instances.fds = instances_fds; in bpf_program__set_prep()
8174 prog->preprocessor = prep; in bpf_program__set_prep()
8183 return -EINVAL; in bpf_program__nth_fd()
8185 if (n >= prog->instances.nr || n < 0) { in bpf_program__nth_fd()
8187 n, prog->name, prog->instances.nr); in bpf_program__nth_fd()
8188 return -EINVAL; in bpf_program__nth_fd()
8191 fd = prog->instances.fds[n]; in bpf_program__nth_fd()
8194 n, prog->name); in bpf_program__nth_fd()
8195 return -ENOENT; in bpf_program__nth_fd()
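/* Sketch of the legacy multi-instance flow handled above: a preprocessor
 * decides what instructions each instance n loads. This trivial one loads
 * the unmodified program for both instances.
 */
static int prep_fn(struct bpf_program *prog, int n, struct bpf_insn *insns,
		   int insns_cnt, struct bpf_prog_prep_result *res)
{
	res->new_insn_ptr = insns;	/* keep original instructions */
	res->new_insn_cnt = insns_cnt;
	res->pfd = NULL;		/* no per-instance FD report needed */
	return 0;
}

bpf_program__set_prep(prog, 2, prep_fn);	/* before load */
/* ... then, after load: */
int fd0 = bpf_program__nth_fd(prog, 0);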
8203 return prog->type; in bpf_program__get_type()
8208 prog->type = type; in bpf_program__set_type()
8214 return prog ? (prog->type == type) : false; in bpf_program__is_type()
8221 return -EINVAL; \
8248 return prog->expected_attach_type; in bpf_program__get_expected_attach_type()
8254 prog->expected_attach_type = type; in bpf_program__set_expected_attach_type()
8261 .len = sizeof(string) - 1, \
8291 .len = sizeof(sec_pfx) - 1, \
8506 return -EINVAL; in libbpf_prog_type_by_name()
8510 *prog_type = sec_def->prog_type; in libbpf_prog_type_by_name()
8511 *expected_attach_type = sec_def->expected_attach_type; in libbpf_prog_type_by_name()
8522 return -ESRCH; in libbpf_prog_type_by_name()
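/* Usage sketch: resolving types from a section name via the SEC_DEF table
 * above.
 */
enum bpf_prog_type ptype;
enum bpf_attach_type atype;

if (!libbpf_prog_type_by_name("tracepoint/sched/sched_switch", &ptype, &atype))
	/* ptype is now BPF_PROG_TYPE_TRACEPOINT */;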
8531 for (i = 0; i < obj->nr_maps; i++) { in find_struct_ops_map_by_offset()
8532 map = &obj->maps[i]; in find_struct_ops_map_by_offset()
8535 if (map->sec_offset <= offset && in find_struct_ops_map_by_offset()
8536 offset - map->sec_offset < map->def.value_size) in find_struct_ops_map_by_offset()
8543 /* Collect the reloc from ELF and populate the st_ops->progs[] */
8561 symbols = obj->efile.symbols; in bpf_object__collect_st_ops_relos()
8562 btf = obj->btf; in bpf_object__collect_st_ops_relos()
8563 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_st_ops_relos()
8567 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8573 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8581 return -EINVAL; in bpf_object__collect_st_ops_relos()
8584 moff = rel.r_offset - map->sec_offset; in bpf_object__collect_st_ops_relos()
8586 st_ops = map->st_ops; in bpf_object__collect_st_ops_relos()
8587 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n", in bpf_object__collect_map_relos()
8588 map->name, in bpf_object__collect_st_ops_relos()
8592 map->sec_offset, sym.st_name, name); in bpf_object__collect_st_ops_relos()
8595 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n", in bpf_object__collect_st_ops_relos()
8596 map->name, (size_t)rel.r_offset, shdr_idx); in bpf_object__collect_st_ops_relos()
8597 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_st_ops_relos()
8601 map->name, (unsigned long long)sym.st_value); in bpf_object__collect_st_ops_relos()
8602 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8606 member = find_member_by_offset(st_ops->type, moff * 8); in bpf_object__collect_st_ops_relos()
8609 map->name, moff); in bpf_object__collect_st_ops_relos()
8610 return -EINVAL; in bpf_object__collect_st_ops_relos()
8612 member_idx = member - btf_members(st_ops->type); in bpf_object__collect_st_ops_relos()
8613 name = btf__name_by_offset(btf, member->name_off); in bpf_object__collect_st_ops_relos()
8615 if (!resolve_func_ptr(btf, member->type, NULL)) { in bpf_object__collect_st_ops_relos()
8617 map->name, name); in bpf_object__collect_st_ops_relos()
8618 return -EINVAL; in bpf_object__collect_st_ops_relos()
8624 map->name, shdr_idx, name); in bpf_object__collect_st_ops_relos()
8625 return -EINVAL; in bpf_object__collect_st_ops_relos()
8628 if (prog->type == BPF_PROG_TYPE_UNSPEC) { in bpf_object__collect_st_ops_relos()
8631 sec_def = find_sec_def(prog->sec_name); in bpf_object__collect_st_ops_relos()
8633 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) { in bpf_object__collect_st_ops_relos()
8635 prog->type = sec_def->prog_type; in bpf_object__collect_st_ops_relos()
8639 prog->type = BPF_PROG_TYPE_STRUCT_OPS; in bpf_object__collect_st_ops_relos()
8640 prog->attach_btf_id = st_ops->type_id; in bpf_object__collect_st_ops_relos()
8641 prog->expected_attach_type = member_idx; in bpf_object__collect_st_ops_relos()
8642 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || in bpf_object__collect_st_ops_relos()
8643 prog->attach_btf_id != st_ops->type_id || in bpf_object__collect_st_ops_relos()
8644 prog->expected_attach_type != member_idx) { in bpf_object__collect_st_ops_relos()
8647 st_ops->progs[member_idx] = prog; in bpf_object__collect_st_ops_relos()
8654 map->name, prog->name, prog->sec_name, prog->type, in bpf_object__collect_st_ops_relos()
8655 prog->attach_btf_id, prog->expected_attach_type, name); in bpf_object__collect_st_ops_relos()
8656 return -EINVAL; in bpf_object__collect_st_ops_relos()
8677 return -ENAMETOOLONG; in find_btf_by_prefix_kind()
8713 return -EINVAL; in libbpf_find_vmlinux_btf_id()
8726 int err = -EINVAL; in libbpf_find_prog_btf_id()
8732 return -EINVAL; in libbpf_find_prog_btf_id()
8734 info = &info_linear->info; in libbpf_find_prog_btf_id()
8735 if (!info->btf_id) { in libbpf_find_prog_btf_id()
8739 if (btf__get_from_id(info->btf_id, &btf)) { in libbpf_find_prog_btf_id()
8756 enum bpf_attach_type attach_type = prog->expected_attach_type; in libbpf_find_attach_btf_id()
8757 __u32 attach_prog_fd = prog->attach_prog_fd; in libbpf_find_attach_btf_id()
8758 const char *name = prog->sec_name; in libbpf_find_attach_btf_id()
8762 return -EINVAL; in libbpf_find_attach_btf_id()
8773 err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux, in libbpf_find_attach_btf_id()
8779 return -ESRCH; in libbpf_find_attach_btf_id()
8789 return -EINVAL; in libbpf_attach_type_by_name()
8795 return -EINVAL; in libbpf_attach_type_by_name()
8806 return -EINVAL; in libbpf_attach_type_by_name()
8811 return map ? map->fd : -EINVAL; in bpf_map__fd()
8816 return map ? &map->def : ERR_PTR(-EINVAL); in bpf_map__def()
8821 return map ? map->name : NULL; in bpf_map__name()
8826 return map->def.type; in bpf_map__type()
8831 if (map->fd >= 0) in bpf_map__set_type()
8832 return -EBUSY; in bpf_map__set_type()
8833 map->def.type = type; in bpf_map__set_type()
8839 return map->def.map_flags; in bpf_map__map_flags()
8844 if (map->fd >= 0) in bpf_map__set_map_flags()
8845 return -EBUSY; in bpf_map__set_map_flags()
8846 map->def.map_flags = flags; in bpf_map__set_map_flags()
8852 return map->numa_node; in bpf_map__numa_node()
8857 if (map->fd >= 0) in bpf_map__set_numa_node()
8858 return -EBUSY; in bpf_map__set_numa_node()
8859 map->numa_node = numa_node; in bpf_map__set_numa_node()
8865 return map->def.key_size; in bpf_map__key_size()
8870 if (map->fd >= 0) in bpf_map__set_key_size()
8871 return -EBUSY; in bpf_map__set_key_size()
8872 map->def.key_size = size; in bpf_map__set_key_size()
8878 return map->def.value_size; in bpf_map__value_size()
8883 if (map->fd >= 0) in bpf_map__set_value_size()
8884 return -EBUSY; in bpf_map__set_value_size()
8885 map->def.value_size = size; in bpf_map__set_value_size()
8891 return map ? map->btf_key_type_id : 0; in bpf_map__btf_key_type_id()
8896 return map ? map->btf_value_type_id : 0; in bpf_map__btf_value_type_id()
8903 return -EINVAL; in bpf_map__set_priv()
8905 if (map->priv) { in bpf_map__set_priv()
8906 if (map->clear_priv) in bpf_map__set_priv()
8907 map->clear_priv(map, map->priv); in bpf_map__set_priv()
8910 map->priv = priv; in bpf_map__set_priv()
8911 map->clear_priv = clear_priv; in bpf_map__set_priv()
8917 return map ? map->priv : ERR_PTR(-EINVAL); in bpf_map__priv()
8923 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || in bpf_map__set_initial_value()
8924 size != map->def.value_size || map->fd >= 0) in bpf_map__set_initial_value()
8925 return -EINVAL; in bpf_map__set_initial_value()
8927 memcpy(map->mmaped, data, size); in bpf_map__set_initial_value()
8933 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; in bpf_map__is_offload_neutral()
8938 return map->libbpf_type != LIBBPF_MAP_UNSPEC; in bpf_map__is_internal()
8943 return map->map_ifindex; in bpf_map__ifindex()
8948 if (map->fd >= 0) in bpf_map__set_ifindex()
8949 return -EBUSY; in bpf_map__set_ifindex()
8950 map->map_ifindex = ifindex; in bpf_map__set_ifindex()
8956 if (!bpf_map_type__is_map_in_map(map->def.type)) { in bpf_map__set_inner_map_fd()
8958 return -EINVAL; in bpf_map__set_inner_map_fd()
8960 if (map->inner_map_fd != -1) { in bpf_map__set_inner_map_fd()
8962 return -EINVAL; in bpf_map__set_inner_map_fd()
8964 map->inner_map_fd = fd; in bpf_map__set_inner_map_fd()
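/* Setter usage sketch: these only succeed while map->fd < 0, i.e. before
 * the object is loaded; afterwards they return -EBUSY. "my_map" is a
 * hypothetical map name.
 */
struct bpf_map *m = bpf_object__find_map_by_name(obj, "my_map");

bpf_map__set_map_flags(m, BPF_F_NO_PREALLOC);
bpf_map__set_value_size(m, 64);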
8974 if (!obj || !obj->maps) in __bpf_map__iter()
8977 s = obj->maps; in __bpf_map__iter()
8978 e = obj->maps + obj->nr_maps; in __bpf_map__iter()
8986 idx = (m - obj->maps) + i; in __bpf_map__iter()
8987 if (idx >= obj->nr_maps || idx < 0) in __bpf_map__iter()
8989 return &obj->maps[idx]; in __bpf_map__iter()
8996 return obj->maps; in bpf_map__next()
9005 if (!obj->nr_maps) in bpf_map__prev()
9007 return obj->maps + obj->nr_maps - 1; in bpf_map__prev()
9010 return __bpf_map__iter(next, obj, -1); in bpf_map__prev()
9019 if (pos->name && !strcmp(pos->name, name)) in bpf_object__find_map_by_name()
9034 return ERR_PTR(-ENOTSUP); in bpf_object__find_map_by_offset()
9065 return -EINVAL; in bpf_prog_load_xattr()
9066 if (!attr->file) in bpf_prog_load_xattr()
9067 return -EINVAL; in bpf_prog_load_xattr()
9069 open_attr.file = attr->file; in bpf_prog_load_xattr()
9070 open_attr.prog_type = attr->prog_type; in bpf_prog_load_xattr()
9074 return -ENOENT; in bpf_prog_load_xattr()
9077 enum bpf_attach_type attach_type = attr->expected_attach_type; in bpf_prog_load_xattr()
9080 * attr->prog_type, if specified, as an override to whatever in bpf_prog_load_xattr()
9083 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) { in bpf_prog_load_xattr()
9084 bpf_program__set_type(prog, attr->prog_type); in bpf_prog_load_xattr()
9094 return -EINVAL; in bpf_prog_load_xattr()
9097 prog->prog_ifindex = attr->ifindex; in bpf_prog_load_xattr()
9098 prog->log_level = attr->log_level; in bpf_prog_load_xattr()
9099 prog->prog_flags |= attr->prog_flags; in bpf_prog_load_xattr()
9106 map->map_ifindex = attr->ifindex; in bpf_prog_load_xattr()
9112 return -ENOENT; in bpf_prog_load_xattr()
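/* Usage sketch for the convenience loader above ("prog.o" hypothetical): */
struct bpf_prog_load_attr la = {
	.file = "prog.o",
	.prog_type = BPF_PROG_TYPE_XDP,	/* optional override, see above */
};
struct bpf_object *obj;
int prog_fd;

if (!bpf_prog_load_xattr(&la, &obj, &prog_fd))
	/* prog_fd is the FD of the first program in the object */;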
9130 int fd; /* hook FD, -1 if not applicable */
9152 link->disconnected = true; in bpf_link__disconnect()
9162 if (!link->disconnected && link->detach) in bpf_link__destroy()
9163 err = link->detach(link); in bpf_link__destroy()
9164 if (link->destroy) in bpf_link__destroy()
9165 link->destroy(link); in bpf_link__destroy()
9166 if (link->pin_path) in bpf_link__destroy()
9167 free(link->pin_path); in bpf_link__destroy()
9175 return link->fd; in bpf_link__fd()
9180 return link->pin_path; in bpf_link__pin_path()
9185 return close(link->fd); in bpf_link__detach_fd()
9195 fd = -errno; in bpf_link__open()
9203 return ERR_PTR(-ENOMEM); in bpf_link__open()
9205 link->detach = &bpf_link__detach_fd; in bpf_link__open()
9206 link->fd = fd; in bpf_link__open()
9208 link->pin_path = strdup(path); in bpf_link__open()
9209 if (!link->pin_path) { in bpf_link__open()
9211 return ERR_PTR(-ENOMEM); in bpf_link__open()
9219 return bpf_link_detach(link->fd) ? -errno : 0; in bpf_link__detach()
9226 if (link->pin_path) in bpf_link__pin()
9227 return -EBUSY; in bpf_link__pin()
9235 link->pin_path = strdup(path); in bpf_link__pin()
9236 if (!link->pin_path) in bpf_link__pin()
9237 return -ENOMEM; in bpf_link__pin()
9239 if (bpf_obj_pin(link->fd, link->pin_path)) { in bpf_link__pin()
9240 err = -errno; in bpf_link__pin()
9241 zfree(&link->pin_path); in bpf_link__pin()
9245 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); in bpf_link__pin()
9253 if (!link->pin_path) in bpf_link__unpin()
9254 return -EINVAL; in bpf_link__unpin()
9256 err = unlink(link->pin_path); in bpf_link__unpin()
9258 return -errno; in bpf_link__unpin()
9260 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); in bpf_link__unpin()
9261 zfree(&link->pin_path); in bpf_link__unpin()
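/* Link lifecycle sketch; the pin path is a hypothetical bpffs location.
 * Pinning keeps the attachment alive after the process exits.
 */
struct bpf_link *link = bpf_program__attach(prog);

bpf_link__pin(link, "/sys/fs/bpf/my_link");
/* ... later: */
bpf_link__unpin(link);
bpf_link__destroy(link);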
9269 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0); in bpf_link__detach_perf_event()
9271 err = -errno; in bpf_link__detach_perf_event()
9273 close(link->fd); in bpf_link__detach_perf_event()
9286 prog->name, pfd); in bpf_program__attach_perf_event()
9287 return ERR_PTR(-EINVAL); in bpf_program__attach_perf_event()
9292 prog->name); in bpf_program__attach_perf_event()
9293 return ERR_PTR(-EINVAL); in bpf_program__attach_perf_event()
9298 return ERR_PTR(-ENOMEM); in bpf_program__attach_perf_event()
9299 link->detach = &bpf_link__detach_perf_event; in bpf_program__attach_perf_event()
9300 link->fd = pfd; in bpf_program__attach_perf_event()
9303 err = -errno; in bpf_program__attach_perf_event()
9306 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); in bpf_program__attach_perf_event()
9307 if (err == -EPROTO) in bpf_program__attach_perf_event()
9309 prog->name, pfd); in bpf_program__attach_perf_event()
9313 err = -errno; in bpf_program__attach_perf_event()
9316 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); in bpf_program__attach_perf_event()
9323 * this function is expected to parse an integer in the range of [0, 2^31-1] from
9335 err = -errno; in parse_uint_from_file()
9342 err = err == EOF ? -EIO : -errno; in parse_uint_from_file()
9414 pid < 0 ? -1 : pid /* pid */, in perf_event_open_probe()
9415 pid == -1 ? 0 : -1 /* cpu */, in perf_event_open_probe()
9416 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); in perf_event_open_probe()
9418 err = -errno; in perf_event_open_probe()
9436 0 /* offset */, -1 /* pid */); in bpf_program__attach_kprobe()
9439 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, in bpf_program__attach_kprobe()
9448 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, in bpf_program__attach_kprobe()
9461 func_name = prog->sec_name + sec->len; in attach_kprobe()
9462 retprobe = strcmp(sec->sec, "kretprobe/") == 0; in attach_kprobe()
9480 prog->name, retprobe ? "uretprobe" : "uprobe", in bpf_program__attach_uprobe()
9490 prog->name, retprobe ? "uretprobe" : "uprobe", in bpf_program__attach_uprobe()
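/* Direct attach sketch, equivalent to what the SEC("kprobe/...") handler
 * above derives from the section name ("do_sys_open" is an example symbol):
 */
struct bpf_link *link =
	bpf_program__attach_kprobe(prog, false /* !retprobe */, "do_sys_open");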
9508 return -errno; in determine_tracepoint_id()
9512 return -E2BIG; in determine_tracepoint_id()
9536 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, in perf_event_open_tracepoint()
9537 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); in perf_event_open_tracepoint()
9539 err = -errno; in perf_event_open_tracepoint()
9559 prog->name, tp_category, tp_name, in bpf_program__attach_tracepoint()
9568 prog->name, tp_category, tp_name, in bpf_program__attach_tracepoint()
9581 sec_name = strdup(prog->sec_name); in attach_tp()
9583 return ERR_PTR(-ENOMEM); in attach_tp()
9586 tp_cat = sec_name + sec->len; in attach_tp()
9589 link = ERR_PTR(-EINVAL); in attach_tp()
9610 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_raw_tracepoint()
9611 return ERR_PTR(-EINVAL); in bpf_program__attach_raw_tracepoint()
9616 return ERR_PTR(-ENOMEM); in bpf_program__attach_raw_tracepoint()
9617 link->detach = &bpf_link__detach_fd; in bpf_program__attach_raw_tracepoint()
9621 pfd = -errno; in bpf_program__attach_raw_tracepoint()
9624 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); in bpf_program__attach_raw_tracepoint()
9627 link->fd = pfd; in bpf_program__attach_raw_tracepoint()
9634 const char *tp_name = prog->sec_name + sec->len; in attach_raw_tp()
9648 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_btf_id()
9649 return ERR_PTR(-EINVAL); in bpf_program__attach_btf_id()
9654 return ERR_PTR(-ENOMEM); in bpf_program__attach_btf_id()
9655 link->detach = &bpf_link__detach_fd; in bpf_program__attach_btf_id()
9659 pfd = -errno; in bpf_program__attach_btf_id()
9662 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); in bpf_program__attach_btf_id()
9665 link->fd = pfd; in bpf_program__attach_btf_id()
9710 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_fd()
9711 return ERR_PTR(-EINVAL); in bpf_program__attach_fd()
9716 return ERR_PTR(-ENOMEM); in bpf_program__attach_fd()
9717 link->detach = &bpf_link__detach_fd; in bpf_program__attach_fd()
9722 link_fd = -errno; in bpf_program__attach_fd()
9725 prog->name, target_name, in bpf_program__attach_fd()
9729 link->fd = link_fd; in bpf_program__attach_fd()
9759 prog->name); in bpf_program__attach_freplace()
9760 return ERR_PTR(-EINVAL); in bpf_program__attach_freplace()
9763 if (prog->type != BPF_PROG_TYPE_EXT) { in bpf_program__attach_freplace()
9765 prog->name); in bpf_program__attach_freplace()
9766 return ERR_PTR(-EINVAL); in bpf_program__attach_freplace()
9794 return ERR_PTR(-EINVAL); in bpf_program__attach_iter()
9801 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_iter()
9802 return ERR_PTR(-EINVAL); in bpf_program__attach_iter()
9807 return ERR_PTR(-ENOMEM); in bpf_program__attach_iter()
9808 link->detach = &bpf_link__detach_fd; in bpf_program__attach_iter()
9813 link_fd = -errno; in bpf_program__attach_iter()
9816 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); in bpf_program__attach_iter()
9819 link->fd = link_fd; in bpf_program__attach_iter()
9827 sec_def = find_sec_def(prog->sec_name); in bpf_program__attach()
9828 if (!sec_def || !sec_def->attach_fn) in bpf_program__attach()
9829 return ERR_PTR(-ESRCH); in bpf_program__attach()
9831 return sec_def->attach_fn(sec_def, prog); in bpf_program__attach()
9838 if (bpf_map_delete_elem(link->fd, &zero)) in bpf_link__detach_struct_ops()
9839 return -errno; in bpf_link__detach_struct_ops()
9851 if (!bpf_map__is_struct_ops(map) || map->fd == -1) in bpf_map__attach_struct_ops()
9852 return ERR_PTR(-EINVAL); in bpf_map__attach_struct_ops()
9856 return ERR_PTR(-EINVAL); in bpf_map__attach_struct_ops()
9858 st_ops = map->st_ops; in bpf_map__attach_struct_ops()
9859 for (i = 0; i < btf_vlen(st_ops->type); i++) { in bpf_map__attach_struct_ops()
9860 struct bpf_program *prog = st_ops->progs[i]; in bpf_map__attach_struct_ops()
9868 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; in bpf_map__attach_struct_ops()
9872 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0); in bpf_map__attach_struct_ops()
9874 err = -errno; in bpf_map__attach_struct_ops()
9879 link->detach = bpf_link__detach_struct_ops; in bpf_map__attach_struct_ops()
9880 link->fd = map->fd; in bpf_map__attach_struct_ops()
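/* Registration sketch: a struct_ops map named "my_ca" (hypothetical) is
 * activated by attaching it, which updates the map with kern_vdata above.
 */
struct bpf_map *st_map = bpf_object__find_map_by_name(obj, "my_ca");
struct bpf_link *st_link = bpf_map__attach_struct_ops(st_map);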
9892 __u64 data_tail = header->data_tail; in bpf_perf_event_read_simple()
9899 ehdr = base + (data_tail & (mmap_size - 1)); in bpf_perf_event_read_simple()
9900 ehdr_size = ehdr->size; in bpf_perf_event_read_simple()
9904 size_t len_first = base + mmap_size - copy_start; in bpf_perf_event_read_simple()
9905 size_t len_secnd = ehdr_size - len_first; in bpf_perf_event_read_simple()
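/* Worked example: with mmap_size = 4096 and a 200-byte record starting at
 * data_tail = 4000, len_first = 4096 - 4000 = 96 bytes come from the end
 * of the ring and len_secnd = 200 - 96 = 104 bytes from its start. The
 * (mmap_size - 1) mask is valid only because mmap_size is a power of two.
 */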
9939 /* sample_cb and lost_cb are higher-level common-case callbacks */
9978 if (cpu_buf->base && in perf_buffer__free_cpu_buf()
9979 munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) in perf_buffer__free_cpu_buf()
9980 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); in perf_buffer__free_cpu_buf()
9981 if (cpu_buf->fd >= 0) { in perf_buffer__free_cpu_buf()
9982 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); in perf_buffer__free_cpu_buf()
9983 close(cpu_buf->fd); in perf_buffer__free_cpu_buf()
9985 free(cpu_buf->buf); in perf_buffer__free_cpu_buf()
9995 if (pb->cpu_bufs) { in perf_buffer__free()
9996 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__free()
9997 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__free()
10002 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); in perf_buffer__free()
10005 free(pb->cpu_bufs); in perf_buffer__free()
10007 if (pb->epoll_fd >= 0) in perf_buffer__free()
10008 close(pb->epoll_fd); in perf_buffer__free()
10009 free(pb->events); in perf_buffer__free()
10023 return ERR_PTR(-ENOMEM); in perf_buffer__open_cpu_buf()
10025 cpu_buf->pb = pb; in perf_buffer__open_cpu_buf()
10026 cpu_buf->cpu = cpu; in perf_buffer__open_cpu_buf()
10027 cpu_buf->map_key = map_key; in perf_buffer__open_cpu_buf()
10029 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, in perf_buffer__open_cpu_buf()
10030 -1, PERF_FLAG_FD_CLOEXEC); in perf_buffer__open_cpu_buf()
10031 if (cpu_buf->fd < 0) { in perf_buffer__open_cpu_buf()
10032 err = -errno; in perf_buffer__open_cpu_buf()
10038 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, in perf_buffer__open_cpu_buf()
10040 cpu_buf->fd, 0); in perf_buffer__open_cpu_buf()
10041 if (cpu_buf->base == MAP_FAILED) { in perf_buffer__open_cpu_buf()
10042 cpu_buf->base = NULL; in perf_buffer__open_cpu_buf()
10043 err = -errno; in perf_buffer__open_cpu_buf()
10049 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { in perf_buffer__open_cpu_buf()
10050 err = -errno; in perf_buffer__open_cpu_buf()
10079 p.sample_cb = opts ? opts->sample_cb : NULL; in perf_buffer__new()
10080 p.lost_cb = opts ? opts->lost_cb : NULL; in perf_buffer__new()
10081 p.ctx = opts ? opts->ctx : NULL; in perf_buffer__new()
10092 p.attr = opts->attr; in perf_buffer__new_raw()
10093 p.event_cb = opts->event_cb; in perf_buffer__new_raw()
10094 p.ctx = opts->ctx; in perf_buffer__new_raw()
10095 p.cpu_cnt = opts->cpu_cnt; in perf_buffer__new_raw()
10096 p.cpus = opts->cpus; in perf_buffer__new_raw()
10097 p.map_keys = opts->map_keys; in perf_buffer__new_raw()
10113 if (page_cnt & (page_cnt - 1)) { in __perf_buffer__new()
10116 return ERR_PTR(-EINVAL); in __perf_buffer__new()
10119 /* best-effort sanity checks */ in __perf_buffer__new()
10124 err = -errno; in __perf_buffer__new()
10126 * -EBADFD, -EFAULT, or -E2BIG on real error in __perf_buffer__new()
10128 if (err != -EINVAL) { in __perf_buffer__new()
10139 return ERR_PTR(-EINVAL); in __perf_buffer__new()
10145 return ERR_PTR(-ENOMEM); in __perf_buffer__new()
10147 pb->event_cb = p->event_cb; in __perf_buffer__new()
10148 pb->sample_cb = p->sample_cb; in __perf_buffer__new()
10149 pb->lost_cb = p->lost_cb; in __perf_buffer__new()
10150 pb->ctx = p->ctx; in __perf_buffer__new()
10152 pb->page_size = getpagesize(); in __perf_buffer__new()
10153 pb->mmap_size = pb->page_size * page_cnt; in __perf_buffer__new()
10154 pb->map_fd = map_fd; in __perf_buffer__new()
10156 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); in __perf_buffer__new()
10157 if (pb->epoll_fd < 0) { in __perf_buffer__new()
10158 err = -errno; in __perf_buffer__new()
10164 if (p->cpu_cnt > 0) { in __perf_buffer__new()
10165 pb->cpu_cnt = p->cpu_cnt; in __perf_buffer__new()
10167 pb->cpu_cnt = libbpf_num_possible_cpus(); in __perf_buffer__new()
10168 if (pb->cpu_cnt < 0) { in __perf_buffer__new()
10169 err = pb->cpu_cnt; in __perf_buffer__new()
10172 if (map.max_entries && map.max_entries < pb->cpu_cnt) in __perf_buffer__new()
10173 pb->cpu_cnt = map.max_entries; in __perf_buffer__new()
10176 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); in __perf_buffer__new()
10177 if (!pb->events) { in __perf_buffer__new()
10178 err = -ENOMEM; in __perf_buffer__new()
10182 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); in __perf_buffer__new()
10183 if (!pb->cpu_bufs) { in __perf_buffer__new()
10184 err = -ENOMEM; in __perf_buffer__new()
10195 for (i = 0, j = 0; i < pb->cpu_cnt; i++) { in __perf_buffer__new()
10199 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; in __perf_buffer__new()
10200 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i; in __perf_buffer__new()
10205 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) in __perf_buffer__new()
10208 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); in __perf_buffer__new()
10214 pb->cpu_bufs[j] = cpu_buf; in __perf_buffer__new()
10216 err = bpf_map_update_elem(pb->map_fd, &map_key, in __perf_buffer__new()
10217 &cpu_buf->fd, 0); in __perf_buffer__new()
10219 err = -errno; in __perf_buffer__new()
10220 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", in __perf_buffer__new()
10221 cpu, map_key, cpu_buf->fd, in __perf_buffer__new()
10226 pb->events[j].events = EPOLLIN; in __perf_buffer__new()
10227 pb->events[j].data.ptr = cpu_buf; in __perf_buffer__new()
10228 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, in __perf_buffer__new()
10229 &pb->events[j]) < 0) { in __perf_buffer__new()
10230 err = -errno; in __perf_buffer__new()
10232 cpu, cpu_buf->fd, in __perf_buffer__new()
10238 pb->cpu_cnt = j; in __perf_buffer__new()
10267 struct perf_buffer *pb = cpu_buf->pb; in perf_buffer__process_record()
10271 if (pb->event_cb) in perf_buffer__process_record()
10272 return pb->event_cb(pb->ctx, cpu_buf->cpu, e); in perf_buffer__process_record()
10274 switch (e->type) { in perf_buffer__process_record()
10278 if (pb->sample_cb) in perf_buffer__process_record()
10279 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); in perf_buffer__process_record()
10285 if (pb->lost_cb) in perf_buffer__process_record()
10286 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); in perf_buffer__process_record()
10290 pr_warn("unknown perf sample type %d\n", e->type); in perf_buffer__process_record()
10301 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size, in perf_buffer__process_records()
10302 pb->page_size, &cpu_buf->buf, in perf_buffer__process_records()
10303 &cpu_buf->buf_size, in perf_buffer__process_records()
10312 return pb->epoll_fd; in perf_buffer__epoll_fd()
10319 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); in perf_buffer__poll()
10321 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; in perf_buffer__poll()
10329 return cnt < 0 ? -errno : cnt; in perf_buffer__poll()
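/* Consumer-loop sketch (map_fd is the FD of a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * map; on_sample is user code):
 */
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* handle one record */
}

struct perf_buffer_opts pb_opts = { .sample_cb = on_sample };
struct perf_buffer *pb = perf_buffer__new(map_fd, 8 /* pages, 2^n */, &pb_opts);

while (perf_buffer__poll(pb, 100 /* ms */) >= 0)
	;	/* callbacks fire from inside poll */
perf_buffer__free(pb);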
10337 return pb->cpu_cnt; in perf_buffer__buffer_cnt()
10349 if (buf_idx >= pb->cpu_cnt) in perf_buffer__buffer_fd()
10350 return -EINVAL; in perf_buffer__buffer_fd()
10352 cpu_buf = pb->cpu_bufs[buf_idx]; in perf_buffer__buffer_fd()
10354 return -ENOENT; in perf_buffer__buffer_fd()
10356 return cpu_buf->fd; in perf_buffer__buffer_fd()
10364 * - 0 on success;
10365 * - <0 on failure.
10371 if (buf_idx >= pb->cpu_cnt) in perf_buffer__consume_buffer()
10372 return -EINVAL; in perf_buffer__consume_buffer()
10374 cpu_buf = pb->cpu_bufs[buf_idx]; in perf_buffer__consume_buffer()
10376 return -ENOENT; in perf_buffer__consume_buffer()
10385 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__consume()
10386 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__consume()
10404 * < 0: fixed size of -size_offset
10412 -1,
10417 -1,
10422 -(int)sizeof(__u32),
10427 -(int)sizeof(__u64),
10432 -(int)sizeof(__u32),
10452 -(int)sizeof(__u8) * BPF_TAG_SIZE,
10464 return -(int)offset; in bpf_prog_info_read_offset_u32()
10474 return -(int)offset; in bpf_prog_info_read_offset_u64()
10506 return ERR_PTR(-EINVAL); in bpf_program__get_prog_info_linear()
10512 return ERR_PTR(-EFAULT); in bpf_program__get_prog_info_linear()
10524 if (info_len < desc->array_offset + sizeof(__u32) || in bpf_program__get_prog_info_linear()
10525 info_len < desc->count_offset + sizeof(__u32) || in bpf_program__get_prog_info_linear()
10526 (desc->size_offset > 0 && info_len < desc->size_offset)) in bpf_program__get_prog_info_linear()
10534 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10535 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10544 return ERR_PTR(-ENOMEM); in bpf_program__get_prog_info_linear()
10546 /* step 4: fill data to info_linear->info */ in bpf_program__get_prog_info_linear()
10547 info_linear->arrays = arrays; in bpf_program__get_prog_info_linear()
10548 memset(&info_linear->info, 0, sizeof(info)); in bpf_program__get_prog_info_linear()
10549 ptr = info_linear->data; in bpf_program__get_prog_info_linear()
10559 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10560 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10561 bpf_prog_info_set_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10562 desc->count_offset, count); in bpf_program__get_prog_info_linear()
10563 bpf_prog_info_set_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10564 desc->size_offset, size); in bpf_program__get_prog_info_linear()
10565 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__get_prog_info_linear()
10566 desc->array_offset, in bpf_program__get_prog_info_linear()
10572 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len); in bpf_program__get_prog_info_linear()
10576 return ERR_PTR(-EFAULT); in bpf_program__get_prog_info_linear()
10588 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10589 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10590 desc->count_offset); in bpf_program__get_prog_info_linear()
10594 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10595 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10596 desc->size_offset); in bpf_program__get_prog_info_linear()
10602 info_linear->info_len = sizeof(struct bpf_prog_info); in bpf_program__get_prog_info_linear()
10603 info_linear->data_len = data_len; in bpf_program__get_prog_info_linear()
10616 if ((info_linear->arrays & (1UL << i)) == 0) in bpf_program__bpil_addr_to_offs()
10620 addr = bpf_prog_info_read_offset_u64(&info_linear->info, in bpf_program__bpil_addr_to_offs()
10621 desc->array_offset); in bpf_program__bpil_addr_to_offs()
10622 offs = addr - ptr_to_u64(info_linear->data); in bpf_program__bpil_addr_to_offs()
10623 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__bpil_addr_to_offs()
10624 desc->array_offset, offs); in bpf_program__bpil_addr_to_offs()
10636 if ((info_linear->arrays & (1UL << i)) == 0) in bpf_program__bpil_offs_to_addr()
10640 offs = bpf_prog_info_read_offset_u64(&info_linear->info, in bpf_program__bpil_offs_to_addr()
10641 desc->array_offset); in bpf_program__bpil_offs_to_addr()
10642 addr = offs + ptr_to_u64(info_linear->data); in bpf_program__bpil_offs_to_addr()
10643 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__bpil_offs_to_addr()
10644 desc->array_offset, addr); in bpf_program__bpil_offs_to_addr()
10655 return -EINVAL; in bpf_program__set_attach_target()
10662 prog->expected_attach_type); in bpf_program__set_attach_target()
10667 prog->attach_btf_id = btf_id; in bpf_program__set_attach_target()
10668 prog->attach_prog_fd = attach_prog_fd; in bpf_program__set_attach_target()
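/* Usage sketch: retarget a tracing/freplace program before load;
 * attach_prog_fd = 0 targets a vmlinux BTF function by name.
 */
bpf_program__set_attach_target(prog, 0 /* vmlinux */, "tcp_v4_connect");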
10674 int err = 0, n, len, start, end = -1; in parse_cpu_mask_str()
10680 /* Each substring separated by ',' has format \d+-\d+ or \d+ */ in parse_cpu_mask_str()
10686 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); in parse_cpu_mask_str()
10689 err = -EINVAL; in parse_cpu_mask_str()
10697 err = -EINVAL; in parse_cpu_mask_str()
10702 err = -ENOMEM; in parse_cpu_mask_str()
10706 memset(tmp + *mask_sz, 0, start - *mask_sz); in parse_cpu_mask_str()
10707 memset(tmp + start, 1, end - start + 1); in parse_cpu_mask_str()
10713 return -EINVAL; in parse_cpu_mask_str()
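/* Worked example of the accepted format: "0-2,7" produces
 * *mask = {1,1,1,0,0,0,0,1} and *mask_sz = 8 (one bool per CPU index,
 * up to the highest CPU mentioned).
 */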
10729 err = -errno; in parse_cpu_mask_file()
10736 err = len ? -errno : -EINVAL; in parse_cpu_mask_file()
10742 return -E2BIG; in parse_cpu_mask_file()
10779 .object_name = s->name, in bpf_object__open_skeleton()
10784 /* Attempt to preserve opts->object_name, unless overridden by user in bpf_object__open_skeleton()
10792 if (!opts->object_name) in bpf_object__open_skeleton()
10793 skel_opts.object_name = s->name; in bpf_object__open_skeleton()
10796 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); in bpf_object__open_skeleton()
10799 s->name, PTR_ERR(obj)); in bpf_object__open_skeleton()
10803 *s->obj = obj; in bpf_object__open_skeleton()
10805 for (i = 0; i < s->map_cnt; i++) { in bpf_object__open_skeleton()
10806 struct bpf_map **map = s->maps[i].map; in bpf_object__open_skeleton()
10807 const char *name = s->maps[i].name; in bpf_object__open_skeleton()
10808 void **mmaped = s->maps[i].mmaped; in bpf_object__open_skeleton()
10813 return -ESRCH; in bpf_object__open_skeleton()
10816 /* externs shouldn't be pre-setup from user code */ in bpf_object__open_skeleton()
10817 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) in bpf_object__open_skeleton()
10818 *mmaped = (*map)->mmaped; in bpf_object__open_skeleton()
10821 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__open_skeleton()
10822 struct bpf_program **prog = s->progs[i].prog; in bpf_object__open_skeleton()
10823 const char *name = s->progs[i].name; in bpf_object__open_skeleton()
10828 return -ESRCH; in bpf_object__open_skeleton()
10839 err = bpf_object__load(*s->obj); in bpf_object__load_skeleton()
10841 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); in bpf_object__load_skeleton()
10845 for (i = 0; i < s->map_cnt; i++) { in bpf_object__load_skeleton()
10846 struct bpf_map *map = *s->maps[i].map; in bpf_object__load_skeleton()
10849 void **mmaped = s->maps[i].mmaped; in bpf_object__load_skeleton()
10854 if (!(map->def.map_flags & BPF_F_MMAPABLE)) { in bpf_object__load_skeleton()
10859 if (map->def.map_flags & BPF_F_RDONLY_PROG) in bpf_object__load_skeleton()
10864 /* Remap anonymous mmap()-ed "map initialization image" as in bpf_object__load_skeleton()
10865 * a BPF map-backed mmap()-ed memory, but preserving the same in bpf_object__load_skeleton()
10874 *mmaped = mmap(map->mmaped, mmap_sz, prot, in bpf_object__load_skeleton()
10877 err = -errno; in bpf_object__load_skeleton()
10879 pr_warn("failed to re-mmap() map '%s': %d\n", in bpf_object__load_skeleton()
10892 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__attach_skeleton()
10893 struct bpf_program *prog = *s->progs[i].prog; in bpf_object__attach_skeleton()
10894 struct bpf_link **link = s->progs[i].link; in bpf_object__attach_skeleton()
10897 if (!prog->load) in bpf_object__attach_skeleton()
10900 sec_def = find_sec_def(prog->sec_name); in bpf_object__attach_skeleton()
10901 if (!sec_def || !sec_def->attach_fn) in bpf_object__attach_skeleton()
10904 *link = sec_def->attach_fn(sec_def, prog); in bpf_object__attach_skeleton()
10906 pr_warn("failed to auto-attach program '%s': %ld\n", in bpf_object__attach_skeleton()
10919 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__detach_skeleton()
10920 struct bpf_link **link = s->progs[i].link; in bpf_object__detach_skeleton()
10929 if (s->progs) in bpf_object__destroy_skeleton()
10931 if (s->obj) in bpf_object__destroy_skeleton()
10932 bpf_object__close(*s->obj); in bpf_object__destroy_skeleton()
10933 free(s->maps); in bpf_object__destroy_skeleton()
10934 free(s->progs); in bpf_object__destroy_skeleton()
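/* Skeleton usage sketch: for an object built from myprog.bpf.c (name
 * hypothetical), bpftool gen skeleton emits wrappers over the helpers above:
 */
struct myprog_bpf *skel = myprog_bpf__open();	/* bpf_object__open_skeleton() */

myprog_bpf__load(skel);				/* bpf_object__load_skeleton() */
myprog_bpf__attach(skel);			/* bpf_object__attach_skeleton() */
/* ... */
myprog_bpf__destroy(skel);			/* bpf_object__destroy_skeleton() */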