Lines Matching +full:path +full:- +full:map

1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
71 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
116 if (err != -EPERM || geteuid() != 0) in pr_perm_msg()
133 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n", in pr_perm_msg()
149 fd = -1; \
159 /* v4.14: kernel support for program & map names. */
235 * program. For the entry-point (main) BPF program, this is always
236 * zero. For a sub-program, this gets reset before each of main BPF
238 * whether sub-program was already appended to the main program, and
256 * entry-point BPF programs this includes the size of main program
257 * itself plus all the used sub-programs, appended at the end
304 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
474 char path[]; member
476 #define obj_elf_valid(o) ((o)->efile.elf)
497 * it is possible that prog->instances.nr == -1. in bpf_program__unload()
499 if (prog->instances.nr > 0) { in bpf_program__unload()
500 for (i = 0; i < prog->instances.nr; i++) in bpf_program__unload()
501 zclose(prog->instances.fds[i]); in bpf_program__unload()
502 } else if (prog->instances.nr != -1) { in bpf_program__unload()
504 prog->instances.nr); in bpf_program__unload()
507 prog->instances.nr = -1; in bpf_program__unload()
508 zfree(&prog->instances.fds); in bpf_program__unload()
510 zfree(&prog->func_info); in bpf_program__unload()
511 zfree(&prog->line_info); in bpf_program__unload()
519 if (prog->clear_priv) in bpf_program__exit()
520 prog->clear_priv(prog, prog->priv); in bpf_program__exit()
522 prog->priv = NULL; in bpf_program__exit()
523 prog->clear_priv = NULL; in bpf_program__exit()
526 zfree(&prog->name); in bpf_program__exit()
527 zfree(&prog->sec_name); in bpf_program__exit()
528 zfree(&prog->pin_name); in bpf_program__exit()
529 zfree(&prog->insns); in bpf_program__exit()
530 zfree(&prog->reloc_desc); in bpf_program__exit()
532 prog->nr_reloc = 0; in bpf_program__exit()
533 prog->insns_cnt = 0; in bpf_program__exit()
534 prog->sec_idx = -1; in bpf_program__exit()
541 name = p = strdup(prog->sec_name); in __bpf_program__pin_name()
550 return BPF_CLASS(insn->code) == BPF_JMP && in insn_is_subprog_call()
551 BPF_OP(insn->code) == BPF_CALL && in insn_is_subprog_call()
552 BPF_SRC(insn->code) == BPF_K && in insn_is_subprog_call()
553 insn->src_reg == BPF_PSEUDO_CALL && in insn_is_subprog_call()
554 insn->dst_reg == 0 && in insn_is_subprog_call()
555 insn->off == 0; in insn_is_subprog_call()
566 return -EINVAL; in bpf_object__init_prog()
570 prog->obj = obj; in bpf_object__init_prog()
572 prog->sec_idx = sec_idx; in bpf_object__init_prog()
573 prog->sec_insn_off = sec_off / BPF_INSN_SZ; in bpf_object__init_prog()
574 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ; in bpf_object__init_prog()
576 prog->insns_cnt = prog->sec_insn_cnt; in bpf_object__init_prog()
578 prog->type = BPF_PROG_TYPE_UNSPEC; in bpf_object__init_prog()
579 prog->load = true; in bpf_object__init_prog()
581 prog->instances.fds = NULL; in bpf_object__init_prog()
582 prog->instances.nr = -1; in bpf_object__init_prog()
584 prog->sec_name = strdup(sec_name); in bpf_object__init_prog()
585 if (!prog->sec_name) in bpf_object__init_prog()
588 prog->name = strdup(name); in bpf_object__init_prog()
589 if (!prog->name) in bpf_object__init_prog()
592 prog->pin_name = __bpf_program__pin_name(prog); in bpf_object__init_prog()
593 if (!prog->pin_name) in bpf_object__init_prog()
596 prog->insns = malloc(insn_data_sz); in bpf_object__init_prog()
597 if (!prog->insns) in bpf_object__init_prog()
599 memcpy(prog->insns, insn_data, insn_data_sz); in bpf_object__init_prog()
605 return -ENOMEM; in bpf_object__init_prog()
613 void *data = sec_data->d_buf; in bpf_object__add_programs()
614 size_t sec_sz = sec_data->d_size, sec_off, prog_sz; in bpf_object__add_programs()
619 progs = obj->programs; in bpf_object__add_programs()
620 nr_progs = obj->nr_programs; in bpf_object__add_programs()
627 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
636 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
642 return -LIBBPF_ERRNO__FORMAT; in bpf_object__add_programs()
651 * In this case the original obj->programs in bpf_object__add_programs()
657 return -ENOMEM; in bpf_object__add_programs()
659 obj->programs = progs; in bpf_object__add_programs()
669 obj->nr_programs = nr_progs; in bpf_object__add_programs()
710 if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) in find_member_by_name()
762 if (kern_data_member->type == kern_type_id) in find_struct_ops_kern_types()
768 return -EINVAL; in find_struct_ops_kern_types()
780 static bool bpf_map__is_struct_ops(const struct bpf_map *map) in bpf_map__is_struct_ops() argument
782 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; in bpf_map__is_struct_ops()
785 /* Init the map's fields that depend on kern_btf */
786 static int bpf_map__init_kern_struct_ops(struct bpf_map *map, in bpf_map__init_kern_struct_ops() argument
798 st_ops = map->st_ops; in bpf_map__init_kern_struct_ops()
799 type = st_ops->type; in bpf_map__init_kern_struct_ops()
800 tname = st_ops->tname; in bpf_map__init_kern_struct_ops()
809 map->name, st_ops->type_id, kern_type_id, kern_vtype_id); in bpf_map__init_kern_struct_ops()
811 map->def.value_size = kern_vtype->size; in bpf_map__init_kern_struct_ops()
812 map->btf_vmlinux_value_type_id = kern_vtype_id; in bpf_map__init_kern_struct_ops()
814 st_ops->kern_vdata = calloc(1, kern_vtype->size); in bpf_map__init_kern_struct_ops()
815 if (!st_ops->kern_vdata) in bpf_map__init_kern_struct_ops()
816 return -ENOMEM; in bpf_map__init_kern_struct_ops()
818 data = st_ops->data; in bpf_map__init_kern_struct_ops()
819 kern_data_off = kern_data_member->offset / 8; in bpf_map__init_kern_struct_ops()
820 kern_data = st_ops->kern_vdata + kern_data_off; in bpf_map__init_kern_struct_ops()
832 mname = btf__name_by_offset(btf, member->name_off); in bpf_map__init_kern_struct_ops()
836 map->name, mname); in bpf_map__init_kern_struct_ops()
837 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
840 kern_member_idx = kern_member - btf_members(kern_type); in bpf_map__init_kern_struct_ops()
844 map->name, mname); in bpf_map__init_kern_struct_ops()
845 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
848 moff = member->offset / 8; in bpf_map__init_kern_struct_ops()
849 kern_moff = kern_member->offset / 8; in bpf_map__init_kern_struct_ops()
854 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id); in bpf_map__init_kern_struct_ops()
855 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type, in bpf_map__init_kern_struct_ops()
857 if (BTF_INFO_KIND(mtype->info) != in bpf_map__init_kern_struct_ops()
858 BTF_INFO_KIND(kern_mtype->info)) { in bpf_map__init_kern_struct_ops()
860 map->name, mname, BTF_INFO_KIND(mtype->info), in bpf_map__init_kern_struct_ops()
861 BTF_INFO_KIND(kern_mtype->info)); in bpf_map__init_kern_struct_ops()
862 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
868 prog = st_ops->progs[i]; in bpf_map__init_kern_struct_ops()
873 kern_mtype->type, in bpf_map__init_kern_struct_ops()
876 /* mtype->type must be a func_proto which was in bpf_map__init_kern_struct_ops()
882 map->name, mname); in bpf_map__init_kern_struct_ops()
883 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
886 prog->attach_btf_id = kern_type_id; in bpf_map__init_kern_struct_ops()
887 prog->expected_attach_type = kern_member_idx; in bpf_map__init_kern_struct_ops()
889 st_ops->kern_func_off[i] = kern_data_off + kern_moff; in bpf_map__init_kern_struct_ops()
892 map->name, mname, prog->name, moff, in bpf_map__init_kern_struct_ops()
902 map->name, mname, (ssize_t)msize, in bpf_map__init_kern_struct_ops()
904 return -ENOTSUP; in bpf_map__init_kern_struct_ops()
908 map->name, mname, (unsigned int)msize, in bpf_map__init_kern_struct_ops()
918 struct bpf_map *map; in bpf_object__init_kern_struct_ops_maps() local
922 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__init_kern_struct_ops_maps()
923 map = &obj->maps[i]; in bpf_object__init_kern_struct_ops_maps()
925 if (!bpf_map__is_struct_ops(map)) in bpf_object__init_kern_struct_ops_maps()
928 err = bpf_map__init_kern_struct_ops(map, obj->btf, in bpf_object__init_kern_struct_ops_maps()
929 obj->btf_vmlinux); in bpf_object__init_kern_struct_ops_maps()
945 struct bpf_map *map; in bpf_object__init_struct_ops_maps() local
948 if (obj->efile.st_ops_shndx == -1) in bpf_object__init_struct_ops_maps()
951 btf = obj->btf; in bpf_object__init_struct_ops_maps()
957 return -EINVAL; in bpf_object__init_struct_ops_maps()
963 type = btf__type_by_id(obj->btf, vsi->type); in bpf_object__init_struct_ops_maps()
964 var_name = btf__name_by_offset(obj->btf, type->name_off); in bpf_object__init_struct_ops_maps()
966 type_id = btf__resolve_type(obj->btf, vsi->type); in bpf_object__init_struct_ops_maps()
969 vsi->type, STRUCT_OPS_SEC); in bpf_object__init_struct_ops_maps()
970 return -EINVAL; in bpf_object__init_struct_ops_maps()
973 type = btf__type_by_id(obj->btf, type_id); in bpf_object__init_struct_ops_maps()
974 tname = btf__name_by_offset(obj->btf, type->name_off); in bpf_object__init_struct_ops_maps()
977 return -ENOTSUP; in bpf_object__init_struct_ops_maps()
981 return -EINVAL; in bpf_object__init_struct_ops_maps()
984 map = bpf_object__add_map(obj); in bpf_object__init_struct_ops_maps()
985 if (IS_ERR(map)) in bpf_object__init_struct_ops_maps()
986 return PTR_ERR(map); in bpf_object__init_struct_ops_maps()
988 map->sec_idx = obj->efile.st_ops_shndx; in bpf_object__init_struct_ops_maps()
989 map->sec_offset = vsi->offset; in bpf_object__init_struct_ops_maps()
990 map->name = strdup(var_name); in bpf_object__init_struct_ops_maps()
991 if (!map->name) in bpf_object__init_struct_ops_maps()
992 return -ENOMEM; in bpf_object__init_struct_ops_maps()
994 map->def.type = BPF_MAP_TYPE_STRUCT_OPS; in bpf_object__init_struct_ops_maps()
995 map->def.key_size = sizeof(int); in bpf_object__init_struct_ops_maps()
996 map->def.value_size = type->size; in bpf_object__init_struct_ops_maps()
997 map->def.max_entries = 1; in bpf_object__init_struct_ops_maps()
999 map->st_ops = calloc(1, sizeof(*map->st_ops)); in bpf_object__init_struct_ops_maps()
1000 if (!map->st_ops) in bpf_object__init_struct_ops_maps()
1001 return -ENOMEM; in bpf_object__init_struct_ops_maps()
1002 st_ops = map->st_ops; in bpf_object__init_struct_ops_maps()
1003 st_ops->data = malloc(type->size); in bpf_object__init_struct_ops_maps()
1004 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs)); in bpf_object__init_struct_ops_maps()
1005 st_ops->kern_func_off = malloc(btf_vlen(type) * in bpf_object__init_struct_ops_maps()
1006 sizeof(*st_ops->kern_func_off)); in bpf_object__init_struct_ops_maps()
1007 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off) in bpf_object__init_struct_ops_maps()
1008 return -ENOMEM; in bpf_object__init_struct_ops_maps()
1010 if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) { in bpf_object__init_struct_ops_maps()
1013 return -EINVAL; in bpf_object__init_struct_ops_maps()
1016 memcpy(st_ops->data, in bpf_object__init_struct_ops_maps()
1017 obj->efile.st_ops_data->d_buf + vsi->offset, in bpf_object__init_struct_ops_maps()
1018 type->size); in bpf_object__init_struct_ops_maps()
1019 st_ops->tname = tname; in bpf_object__init_struct_ops_maps()
1020 st_ops->type = type; in bpf_object__init_struct_ops_maps()
1021 st_ops->type_id = type_id; in bpf_object__init_struct_ops_maps()
1024 tname, type_id, var_name, vsi->offset); in bpf_object__init_struct_ops_maps()
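bpf_object__init_struct_ops_maps() turns each variable in the SEC(".struct_ops") datasec into a BPF_MAP_TYPE_STRUCT_OPS map, whose members bpf_map__init_kern_struct_ops() later matches by name against btf_vmlinux. A hedged BPF-side sketch of what they parse, loosely modeled on the selftests' TCP congestion-control ops (assumes vmlinux.h/bpf_tracing.h for the types and BPF_PROG macro; the callback body is a placeholder):

    SEC("struct_ops/sample_ssthresh")
    __u32 BPF_PROG(sample_ssthresh, struct sock *sk)
    {
            return 0; /* placeholder logic */
    }

    /* The variable name ("sample_cc") becomes the map name; members are
     * matched against the kernel's struct tcp_congestion_ops. */
    SEC(".struct_ops")
    struct tcp_congestion_ops sample_cc = {
            .ssthresh = (void *)sample_ssthresh,
            .name     = "sample_cc",
    };
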
1030 static struct bpf_object *bpf_object__new(const char *path, in bpf_object__new() argument
1038 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1); in bpf_object__new()
1040 pr_warn("alloc memory failed for %s\n", path); in bpf_object__new()
1041 return ERR_PTR(-ENOMEM); in bpf_object__new()
1044 strcpy(obj->path, path); in bpf_object__new()
1046 strncpy(obj->name, obj_name, sizeof(obj->name) - 1); in bpf_object__new()
1047 obj->name[sizeof(obj->name) - 1] = 0; in bpf_object__new()
1050 strncpy(obj->name, basename((void *)path), in bpf_object__new()
1051 sizeof(obj->name) - 1); in bpf_object__new()
1052 end = strchr(obj->name, '.'); in bpf_object__new()
1057 obj->efile.fd = -1; in bpf_object__new()
1064 obj->efile.obj_buf = obj_buf; in bpf_object__new()
1065 obj->efile.obj_buf_sz = obj_buf_sz; in bpf_object__new()
1066 obj->efile.maps_shndx = -1; in bpf_object__new()
1067 obj->efile.btf_maps_shndx = -1; in bpf_object__new()
1068 obj->efile.data_shndx = -1; in bpf_object__new()
1069 obj->efile.rodata_shndx = -1; in bpf_object__new()
1070 obj->efile.bss_shndx = -1; in bpf_object__new()
1071 obj->efile.st_ops_shndx = -1; in bpf_object__new()
1072 obj->kconfig_map_idx = -1; in bpf_object__new()
1073 obj->rodata_map_idx = -1; in bpf_object__new()
1075 obj->kern_version = get_kernel_version(); in bpf_object__new()
1076 obj->loaded = false; in bpf_object__new()
1078 INIT_LIST_HEAD(&obj->list); in bpf_object__new()
1079 list_add(&obj->list, &bpf_objects_list); in bpf_object__new()
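bpf_object__new() is internal; the same path/name derivation is reached from the public API through bpf_object__open_file() and friends. A minimal usage sketch, assuming an object file named "prog.bpf.o":

    #include <bpf/libbpf.h>

    static int open_and_load(void)
    {
            struct bpf_object *obj;

            obj = bpf_object__open_file("prog.bpf.o", NULL); /* name <- basename */
            if (libbpf_get_error(obj))
                    return -1;
            if (bpf_object__load(obj)) {
                    bpf_object__close(obj);
                    return -1;
            }
            return 0;
    }
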
1088 if (obj->efile.elf) { in bpf_object__elf_finish()
1089 elf_end(obj->efile.elf); in bpf_object__elf_finish()
1090 obj->efile.elf = NULL; in bpf_object__elf_finish()
1092 obj->efile.symbols = NULL; in bpf_object__elf_finish()
1093 obj->efile.data = NULL; in bpf_object__elf_finish()
1094 obj->efile.rodata = NULL; in bpf_object__elf_finish()
1095 obj->efile.bss = NULL; in bpf_object__elf_finish()
1096 obj->efile.st_ops_data = NULL; in bpf_object__elf_finish()
1098 zfree(&obj->efile.reloc_sects); in bpf_object__elf_finish()
1099 obj->efile.nr_reloc_sects = 0; in bpf_object__elf_finish()
1100 zclose(obj->efile.fd); in bpf_object__elf_finish()
1101 obj->efile.obj_buf = NULL; in bpf_object__elf_finish()
1102 obj->efile.obj_buf_sz = 0; in bpf_object__elf_finish()
1117 return -LIBBPF_ERRNO__LIBELF; in bpf_object__elf_init()
1120 if (obj->efile.obj_buf_sz > 0) { in bpf_object__elf_init()
1125 obj->efile.elf = elf_memory((char *)obj->efile.obj_buf, in bpf_object__elf_init()
1126 obj->efile.obj_buf_sz); in bpf_object__elf_init()
1128 obj->efile.fd = open(obj->path, O_RDONLY); in bpf_object__elf_init()
1129 if (obj->efile.fd < 0) { in bpf_object__elf_init()
1132 err = -errno; in bpf_object__elf_init()
1134 pr_warn("elf: failed to open %s: %s\n", obj->path, cp); in bpf_object__elf_init()
1138 obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL); in bpf_object__elf_init()
1141 if (!obj->efile.elf) { in bpf_object__elf_init()
1142 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1143 err = -LIBBPF_ERRNO__LIBELF; in bpf_object__elf_init()
1147 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { in bpf_object__elf_init()
1148 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1149 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1152 ep = &obj->efile.ehdr; in bpf_object__elf_init()
1154 if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) { in bpf_object__elf_init()
1156 obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1157 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1162 if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) { in bpf_object__elf_init()
1164 obj->path, elf_errmsg(-1)); in bpf_object__elf_init()
1165 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1170 if (ep->e_type != ET_REL || in bpf_object__elf_init()
1171 (ep->e_machine && ep->e_machine != EM_BPF)) { in bpf_object__elf_init()
1172 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path); in bpf_object__elf_init()
1173 err = -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_init()
1186 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB) in bpf_object__check_endianness()
1189 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB) in bpf_object__check_endianness()
1194 pr_warn("elf: endianness mismatch in %s.\n", obj->path); in bpf_object__check_endianness()
1195 return -LIBBPF_ERRNO__ENDIAN; in bpf_object__check_endianness()
1201 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1)); in bpf_object__init_license()
1202 pr_debug("license of %s is %s\n", obj->path, obj->license); in bpf_object__init_license()
1212 pr_warn("invalid kver section in %s\n", obj->path); in bpf_object__init_kversion()
1213 return -LIBBPF_ERRNO__FORMAT; in bpf_object__init_kversion()
1216 obj->kern_version = kver; in bpf_object__init_kversion()
1217 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); in bpf_object__init_kversion()
1232 int ret = -ENOENT; in bpf_object__section_size()
1236 return -EINVAL; in bpf_object__section_size()
1238 if (obj->efile.data) in bpf_object__section_size()
1239 *size = obj->efile.data->d_size; in bpf_object__section_size()
1241 if (obj->efile.bss) in bpf_object__section_size()
1242 *size = obj->efile.bss->d_size; in bpf_object__section_size()
1244 if (obj->efile.rodata) in bpf_object__section_size()
1245 *size = obj->efile.rodata->d_size; in bpf_object__section_size()
1247 if (obj->efile.st_ops_data) in bpf_object__section_size()
1248 *size = obj->efile.st_ops_data->d_size; in bpf_object__section_size()
1255 *size = data->d_size; in bpf_object__section_size()
1265 Elf_Data *symbols = obj->efile.symbols; in bpf_object__variable_offset()
1270 return -EINVAL; in bpf_object__variable_offset()
1272 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) { in bpf_object__variable_offset()
1285 return -EIO; in bpf_object__variable_offset()
1293 return -ENOENT; in bpf_object__variable_offset()
1302 if (obj->nr_maps < obj->maps_cap) in bpf_object__add_map()
1303 return &obj->maps[obj->nr_maps++]; in bpf_object__add_map()
1305 new_cap = max((size_t)4, obj->maps_cap * 3 / 2); in bpf_object__add_map()
1306 new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps)); in bpf_object__add_map()
1309 return ERR_PTR(-ENOMEM); in bpf_object__add_map()
1312 obj->maps_cap = new_cap; in bpf_object__add_map()
1313 obj->maps = new_maps; in bpf_object__add_map()
1316 memset(obj->maps + obj->nr_maps, 0, in bpf_object__add_map()
1317 (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps)); in bpf_object__add_map()
1319 * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin) in bpf_object__add_map()
1322 for (i = obj->nr_maps; i < obj->maps_cap; i++) { in bpf_object__add_map()
1323 obj->maps[i].fd = -1; in bpf_object__add_map()
1324 obj->maps[i].inner_map_fd = -1; in bpf_object__add_map()
1327 return &obj->maps[obj->nr_maps++]; in bpf_object__add_map()
1330 static size_t bpf_map_mmap_sz(const struct bpf_map *map) in bpf_map_mmap_sz() argument
1335 map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries; in bpf_map_mmap_sz()
1346 int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, in internal_map_name()
1347 strlen(obj->name)); in internal_map_name()
1349 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name, in internal_map_name()
1352 /* sanitise map name to characters allowed by kernel */ in internal_map_name()
1365 struct bpf_map *map; in bpf_object__init_internal_map() local
1368 map = bpf_object__add_map(obj); in bpf_object__init_internal_map()
1369 if (IS_ERR(map)) in bpf_object__init_internal_map()
1370 return PTR_ERR(map); in bpf_object__init_internal_map()
1372 map->libbpf_type = type; in bpf_object__init_internal_map()
1373 map->sec_idx = sec_idx; in bpf_object__init_internal_map()
1374 map->sec_offset = 0; in bpf_object__init_internal_map()
1375 map->name = internal_map_name(obj, type); in bpf_object__init_internal_map()
1376 if (!map->name) { in bpf_object__init_internal_map()
1377 pr_warn("failed to alloc map name\n"); in bpf_object__init_internal_map()
1378 return -ENOMEM; in bpf_object__init_internal_map()
1381 def = &map->def; in bpf_object__init_internal_map()
1382 def->type = BPF_MAP_TYPE_ARRAY; in bpf_object__init_internal_map()
1383 def->key_size = sizeof(int); in bpf_object__init_internal_map()
1384 def->value_size = data_sz; in bpf_object__init_internal_map()
1385 def->max_entries = 1; in bpf_object__init_internal_map()
1386 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG in bpf_object__init_internal_map()
1388 def->map_flags |= BPF_F_MMAPABLE; in bpf_object__init_internal_map()
1390 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n", in bpf_object__init_internal_map()
1391 map->name, map->sec_idx, map->sec_offset, def->map_flags); in bpf_object__init_internal_map()
1393 map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE, in bpf_object__init_internal_map()
1394 MAP_SHARED | MAP_ANONYMOUS, -1, 0); in bpf_object__init_internal_map()
1395 if (map->mmaped == MAP_FAILED) { in bpf_object__init_internal_map()
1396 err = -errno; in bpf_object__init_internal_map()
1397 map->mmaped = NULL; in bpf_object__init_internal_map()
1398 pr_warn("failed to alloc map '%s' content buffer: %d\n", in bpf_object__init_internal_map()
1399 map->name, err); in bpf_object__init_internal_map()
1400 zfree(&map->name); in bpf_object__init_internal_map()
1405 memcpy(map->mmaped, data, data_sz); in bpf_object__init_internal_map()
1407 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); in bpf_object__init_internal_map()
1416 * Populate obj->maps with libbpf internal maps. in bpf_object__init_global_data_maps()
1418 if (obj->efile.data_shndx >= 0) { in bpf_object__init_global_data_maps()
1420 obj->efile.data_shndx, in bpf_object__init_global_data_maps()
1421 obj->efile.data->d_buf, in bpf_object__init_global_data_maps()
1422 obj->efile.data->d_size); in bpf_object__init_global_data_maps()
1426 if (obj->efile.rodata_shndx >= 0) { in bpf_object__init_global_data_maps()
1428 obj->efile.rodata_shndx, in bpf_object__init_global_data_maps()
1429 obj->efile.rodata->d_buf, in bpf_object__init_global_data_maps()
1430 obj->efile.rodata->d_size); in bpf_object__init_global_data_maps()
1434 obj->rodata_map_idx = obj->nr_maps - 1; in bpf_object__init_global_data_maps()
1436 if (obj->efile.bss_shndx >= 0) { in bpf_object__init_global_data_maps()
1438 obj->efile.bss_shndx, in bpf_object__init_global_data_maps()
1440 obj->efile.bss->d_size); in bpf_object__init_global_data_maps()
1453 for (i = 0; i < obj->nr_extern; i++) { in find_extern_by_name()
1454 if (strcmp(obj->externs[i].name, name) == 0) in find_extern_by_name()
1455 return &obj->externs[i]; in find_extern_by_name()
1463 switch (ext->kcfg.type) { in set_kcfg_value_tri()
1467 ext->name, value); in set_kcfg_value_tri()
1468 return -EINVAL; in set_kcfg_value_tri()
1488 ext->name, value); in set_kcfg_value_tri()
1489 return -EINVAL; in set_kcfg_value_tri()
1491 ext->is_set = true; in set_kcfg_value_tri()
1500 if (ext->kcfg.type != KCFG_CHAR_ARR) { in set_kcfg_value_str()
1501 pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); in set_kcfg_value_str()
1502 return -EINVAL; in set_kcfg_value_str()
1506 if (value[len - 1] != '"') { in set_kcfg_value_str()
1508 ext->name, value); in set_kcfg_value_str()
1509 return -EINVAL; in set_kcfg_value_str()
1513 len -= 2; in set_kcfg_value_str()
1514 if (len >= ext->kcfg.sz) { in set_kcfg_value_str()
1516 ext->name, value, len, ext->kcfg.sz - 1); in set_kcfg_value_str()
1517 len = ext->kcfg.sz - 1; in set_kcfg_value_str()
1521 ext->is_set = true; in set_kcfg_value_str()
1533 err = -errno; in parse_u64()
1539 return -EINVAL; in parse_u64()
1546 int bit_sz = ext->kcfg.sz * 8; in is_kcfg_value_in_range()
1548 if (ext->kcfg.sz == 8) in is_kcfg_value_in_range()
1551 /* Validate that value stored in u64 fits in integer of `ext->sz` in is_kcfg_value_in_range()
1556 * -2^(Y-1) <= X <= 2^(Y-1) - 1 in is_kcfg_value_in_range()
1557 * 0 <= X + 2^(Y-1) <= 2^Y - 1 in is_kcfg_value_in_range()
1558 * 0 <= X + 2^(Y-1) < 2^Y in is_kcfg_value_in_range()
1560 * For unsigned target integer, check that all the (64 - Y) bits are in is_kcfg_value_in_range()
1563 if (ext->kcfg.is_signed) in is_kcfg_value_in_range()
1564 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); in is_kcfg_value_in_range()
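The signed branch folds both bounds into one unsigned comparison: biasing by 2^(Y-1) maps [-2^(Y-1), 2^(Y-1) - 1] onto [0, 2^Y - 1]. A hypothetical worked check for a 2-byte target (Y = 16), mirroring the expression above:

    #include <assert.h>
    #include <stdint.h>

    /* fits_s16() mirrors is_kcfg_value_in_range() for ext->kcfg.sz == 2. */
    static int fits_s16(uint64_t v)
    {
            return v + (1ULL << 15) < (1ULL << 16);
    }

    int main(void)
    {
            assert(fits_s16((uint64_t)-32768)); /* INT16_MIN: wraps to 0   */
            assert(fits_s16(32767));            /* INT16_MAX: 65535 < 2^16 */
            assert(!fits_s16(32768));           /* 65536 is rejected       */
            return 0;
    }
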
1572 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { in set_kcfg_value_num()
1574 ext->name, (unsigned long long)value); in set_kcfg_value_num()
1575 return -EINVAL; in set_kcfg_value_num()
1579 ext->name, (unsigned long long)value, ext->kcfg.sz); in set_kcfg_value_num()
1580 return -ERANGE; in set_kcfg_value_num()
1582 switch (ext->kcfg.sz) { in set_kcfg_value_num()
1588 return -EINVAL; in set_kcfg_value_num()
1590 ext->is_set = true; in set_kcfg_value_num()
1609 return -EINVAL; in bpf_object__process_kconfig_line()
1614 if (buf[len - 1] == '\n') in bpf_object__process_kconfig_line()
1615 buf[len - 1] = '\0'; in bpf_object__process_kconfig_line()
1621 return -EINVAL; in bpf_object__process_kconfig_line()
1625 if (!ext || ext->is_set) in bpf_object__process_kconfig_line()
1628 ext_val = data + ext->kcfg.data_off; in bpf_object__process_kconfig_line()
1643 ext->name, value); in bpf_object__process_kconfig_line()
1651 pr_debug("extern (kcfg) %s=%s\n", ext->name, value); in bpf_object__process_kconfig_line()
1663 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release); in bpf_object__read_kconfig_file()
1665 return -EINVAL; in bpf_object__read_kconfig_file()
1667 return -ENAMETOOLONG; in bpf_object__read_kconfig_file()
1676 return -ENOENT; in bpf_object__read_kconfig_file()
1702 err = -errno; in bpf_object__read_kconfig_mem()
1703 pr_warn("failed to open in-memory Kconfig: %d\n", err); in bpf_object__read_kconfig_mem()
1710 pr_warn("error parsing in-memory Kconfig line '%s': %d\n", in bpf_object__read_kconfig_mem()
1726 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__init_kconfig_map()
1727 ext = &obj->externs[i]; in bpf_object__init_kconfig_map()
1728 if (ext->type == EXT_KCFG) in bpf_object__init_kconfig_map()
1735 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; in bpf_object__init_kconfig_map()
1737 obj->efile.symbols_shndx, in bpf_object__init_kconfig_map()
1742 obj->kconfig_map_idx = obj->nr_maps - 1; in bpf_object__init_kconfig_map()
1749 Elf_Data *symbols = obj->efile.symbols; in bpf_object__init_user_maps()
1754 if (obj->efile.maps_shndx < 0) in bpf_object__init_user_maps()
1758 return -EINVAL; in bpf_object__init_user_maps()
1761 scn = elf_sec_by_idx(obj, obj->efile.maps_shndx); in bpf_object__init_user_maps()
1764 pr_warn("elf: failed to get legacy map definitions for %s\n", in bpf_object__init_user_maps()
1765 obj->path); in bpf_object__init_user_maps()
1766 return -EINVAL; in bpf_object__init_user_maps()
1770 * Count number of maps. Each map has a name. in bpf_object__init_user_maps()
1774 * TODO: Detect array of map and report error. in bpf_object__init_user_maps()
1776 nr_syms = symbols->d_size / sizeof(GElf_Sym); in bpf_object__init_user_maps()
1782 if (sym.st_shndx != obj->efile.maps_shndx) in bpf_object__init_user_maps()
1786 /* Assume equally sized map definitions */ in bpf_object__init_user_maps()
1787 pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n", in bpf_object__init_user_maps()
1788 nr_maps, data->d_size, obj->path); in bpf_object__init_user_maps()
1790 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) { in bpf_object__init_user_maps()
1791 pr_warn("elf: unable to determine legacy map definition size in %s\n", in bpf_object__init_user_maps()
1792 obj->path); in bpf_object__init_user_maps()
1793 return -EINVAL; in bpf_object__init_user_maps()
1795 map_def_sz = data->d_size / nr_maps; in bpf_object__init_user_maps()
1797 /* Fill obj->maps using data in "maps" section. */ in bpf_object__init_user_maps()
1802 struct bpf_map *map; in bpf_object__init_user_maps() local
1806 if (sym.st_shndx != obj->efile.maps_shndx) in bpf_object__init_user_maps()
1809 map = bpf_object__add_map(obj); in bpf_object__init_user_maps()
1810 if (IS_ERR(map)) in bpf_object__init_user_maps()
1811 return PTR_ERR(map); in bpf_object__init_user_maps()
1815 pr_warn("failed to get map #%d name sym string for obj %s\n", in bpf_object__init_user_maps()
1816 i, obj->path); in bpf_object__init_user_maps()
1817 return -LIBBPF_ERRNO__FORMAT; in bpf_object__init_user_maps()
1820 map->libbpf_type = LIBBPF_MAP_UNSPEC; in bpf_object__init_user_maps()
1821 map->sec_idx = sym.st_shndx; in bpf_object__init_user_maps()
1822 map->sec_offset = sym.st_value; in bpf_object__init_user_maps()
1823 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n", in bpf_object__init_user_maps()
1824 map_name, map->sec_idx, map->sec_offset); in bpf_object__init_user_maps()
1825 if (sym.st_value + map_def_sz > data->d_size) { in bpf_object__init_user_maps()
1826 pr_warn("corrupted maps section in %s: last map \"%s\" too small\n", in bpf_object__init_user_maps()
1827 obj->path, map_name); in bpf_object__init_user_maps()
1828 return -EINVAL; in bpf_object__init_user_maps()
1831 map->name = strdup(map_name); in bpf_object__init_user_maps()
1832 if (!map->name) { in bpf_object__init_user_maps()
1833 pr_warn("failed to alloc map name\n"); in bpf_object__init_user_maps()
1834 return -ENOMEM; in bpf_object__init_user_maps()
1836 pr_debug("map %d is \"%s\"\n", i, map->name); in bpf_object__init_user_maps()
1837 def = (struct bpf_map_def *)(data->d_buf + sym.st_value); in bpf_object__init_user_maps()
1839 * If the definition of the map in the object file fits in in bpf_object__init_user_maps()
1845 memcpy(&map->def, def, map_def_sz); in bpf_object__init_user_maps()
1848 * Here the map structure being read is bigger than what in bpf_object__init_user_maps()
1850 * If they are not zero, reject this map as in bpf_object__init_user_maps()
1858 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n", in bpf_object__init_user_maps()
1859 obj->path, map_name); in bpf_object__init_user_maps()
1861 return -EINVAL; in bpf_object__init_user_maps()
1864 memcpy(&map->def, def, sizeof(struct bpf_map_def)); in bpf_object__init_user_maps()
1880 *res_id = t->type; in skip_mods_and_typedefs()
1881 t = btf__type_by_id(btf, t->type); in skip_mods_and_typedefs()
1896 t = skip_mods_and_typedefs(btf, t->type, res_id); in resolve_func_ptr()
1925 * Fetch integer attribute of BTF map definition. Such attributes are
1934 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); in get_map_field_int()
1935 const char *name = btf__name_by_offset(btf, m->name_off); in get_map_field_int()
1940 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", in get_map_field_int()
1945 arr_t = btf__type_by_id(btf, t->type); in get_map_field_int()
1947 pr_warn("map '%s': attr '%s': type [%u] not found.\n", in get_map_field_int()
1948 map_name, name, t->type); in get_map_field_int()
1952 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", in get_map_field_int()
1957 *res = arr_info->nelems; in get_map_field_int()
1961 static int build_map_pin_path(struct bpf_map *map, const char *path) in build_map_pin_path() argument
1966 if (!path) in build_map_pin_path()
1967 path = "/sys/fs/bpf"; in build_map_pin_path()
1969 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); in build_map_pin_path()
1971 return -EINVAL; in build_map_pin_path()
1973 return -ENAMETOOLONG; in build_map_pin_path()
1975 return bpf_map__set_pin_path(map, buf); in build_map_pin_path()
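build_map_pin_path() just joins the pin root (defaulting to "/sys/fs/bpf") with the map name, so a map named "my_counters" pinned by name lands at /sys/fs/bpf/my_counters. A hedged userspace override using the same public setter the helper calls (the path is hypothetical; it must be set before load):

    err = bpf_map__set_pin_path(map, "/sys/fs/bpf/tc/globals/my_counters");
    if (err)
            return err;
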
1980 struct bpf_map *map, in parse_btf_map_def() argument
1992 const char *name = btf__name_by_offset(obj->btf, m->name_off); in parse_btf_map_def()
1995 pr_warn("map '%s': invalid field #%d.\n", map->name, i); in parse_btf_map_def()
1996 return -EINVAL; in parse_btf_map_def()
1999 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2000 &map->def.type)) in parse_btf_map_def()
2001 return -EINVAL; in parse_btf_map_def()
2002 pr_debug("map '%s': found type = %u.\n", in parse_btf_map_def()
2003 map->name, map->def.type); in parse_btf_map_def()
2005 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2006 &map->def.max_entries)) in parse_btf_map_def()
2007 return -EINVAL; in parse_btf_map_def()
2008 pr_debug("map '%s': found max_entries = %u.\n", in parse_btf_map_def()
2009 map->name, map->def.max_entries); in parse_btf_map_def()
2011 if (!get_map_field_int(map->name, obj->btf, m, in parse_btf_map_def()
2012 &map->def.map_flags)) in parse_btf_map_def()
2013 return -EINVAL; in parse_btf_map_def()
2014 pr_debug("map '%s': found map_flags = %u.\n", in parse_btf_map_def()
2015 map->name, map->def.map_flags); in parse_btf_map_def()
2017 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node)) in parse_btf_map_def()
2018 return -EINVAL; in parse_btf_map_def()
2019 pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node); in parse_btf_map_def()
2023 if (!get_map_field_int(map->name, obj->btf, m, &sz)) in parse_btf_map_def()
2024 return -EINVAL; in parse_btf_map_def()
2025 pr_debug("map '%s': found key_size = %u.\n", in parse_btf_map_def()
2026 map->name, sz); in parse_btf_map_def()
2027 if (map->def.key_size && map->def.key_size != sz) { in parse_btf_map_def()
2028 pr_warn("map '%s': conflicting key size %u != %u.\n", in parse_btf_map_def()
2029 map->name, map->def.key_size, sz); in parse_btf_map_def()
2030 return -EINVAL; in parse_btf_map_def()
2032 map->def.key_size = sz; in parse_btf_map_def()
2036 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2038 pr_warn("map '%s': key type [%d] not found.\n", in parse_btf_map_def()
2039 map->name, m->type); in parse_btf_map_def()
2040 return -EINVAL; in parse_btf_map_def()
2043 pr_warn("map '%s': key spec is not PTR: %s.\n", in parse_btf_map_def()
2044 map->name, btf_kind_str(t)); in parse_btf_map_def()
2045 return -EINVAL; in parse_btf_map_def()
2047 sz = btf__resolve_size(obj->btf, t->type); in parse_btf_map_def()
2049 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n", in parse_btf_map_def()
2050 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2053 pr_debug("map '%s': found key [%u], sz = %zd.\n", in parse_btf_map_def()
2054 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2055 if (map->def.key_size && map->def.key_size != sz) { in parse_btf_map_def()
2056 pr_warn("map '%s': conflicting key size %u != %zd.\n", in parse_btf_map_def()
2057 map->name, map->def.key_size, (ssize_t)sz); in parse_btf_map_def()
2058 return -EINVAL; in parse_btf_map_def()
2060 map->def.key_size = sz; in parse_btf_map_def()
2061 map->btf_key_type_id = t->type; in parse_btf_map_def()
2065 if (!get_map_field_int(map->name, obj->btf, m, &sz)) in parse_btf_map_def()
2066 return -EINVAL; in parse_btf_map_def()
2067 pr_debug("map '%s': found value_size = %u.\n", in parse_btf_map_def()
2068 map->name, sz); in parse_btf_map_def()
2069 if (map->def.value_size && map->def.value_size != sz) { in parse_btf_map_def()
2070 pr_warn("map '%s': conflicting value size %u != %u.\n", in parse_btf_map_def()
2071 map->name, map->def.value_size, sz); in parse_btf_map_def()
2072 return -EINVAL; in parse_btf_map_def()
2074 map->def.value_size = sz; in parse_btf_map_def()
2078 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2080 pr_warn("map '%s': value type [%d] not found.\n", in parse_btf_map_def()
2081 map->name, m->type); in parse_btf_map_def()
2082 return -EINVAL; in parse_btf_map_def()
2085 pr_warn("map '%s': value spec is not PTR: %s.\n", in parse_btf_map_def()
2086 map->name, btf_kind_str(t)); in parse_btf_map_def()
2087 return -EINVAL; in parse_btf_map_def()
2089 sz = btf__resolve_size(obj->btf, t->type); in parse_btf_map_def()
2091 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n", in parse_btf_map_def()
2092 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2095 pr_debug("map '%s': found value [%u], sz = %zd.\n", in parse_btf_map_def()
2096 map->name, t->type, (ssize_t)sz); in parse_btf_map_def()
2097 if (map->def.value_size && map->def.value_size != sz) { in parse_btf_map_def()
2098 pr_warn("map '%s': conflicting value size %u != %zd.\n", in parse_btf_map_def()
2099 map->name, map->def.value_size, (ssize_t)sz); in parse_btf_map_def()
2100 return -EINVAL; in parse_btf_map_def()
2102 map->def.value_size = sz; in parse_btf_map_def()
2103 map->btf_value_type_id = t->type; in parse_btf_map_def()
2109 pr_warn("map '%s': multi-level inner maps not supported.\n", in parse_btf_map_def()
2110 map->name); in parse_btf_map_def()
2111 return -ENOTSUP; in parse_btf_map_def()
2113 if (i != vlen - 1) { in parse_btf_map_def()
2114 pr_warn("map '%s': '%s' member should be last.\n", in parse_btf_map_def()
2115 map->name, name); in parse_btf_map_def()
2116 return -EINVAL; in parse_btf_map_def()
2118 if (!bpf_map_type__is_map_in_map(map->def.type)) { in parse_btf_map_def()
2119 pr_warn("map '%s': should be map-in-map.\n", in parse_btf_map_def()
2120 map->name); in parse_btf_map_def()
2121 return -ENOTSUP; in parse_btf_map_def()
2123 if (map->def.value_size && map->def.value_size != 4) { in parse_btf_map_def()
2124 pr_warn("map '%s': conflicting value size %u != 4.\n", in parse_btf_map_def()
2125 map->name, map->def.value_size); in parse_btf_map_def()
2126 return -EINVAL; in parse_btf_map_def()
2128 map->def.value_size = 4; in parse_btf_map_def()
2129 t = btf__type_by_id(obj->btf, m->type); in parse_btf_map_def()
2131 pr_warn("map '%s': map-in-map inner type [%d] not found.\n", in parse_btf_map_def()
2132 map->name, m->type); in parse_btf_map_def()
2133 return -EINVAL; in parse_btf_map_def()
2135 if (!btf_is_array(t) || btf_array(t)->nelems) { in parse_btf_map_def()
2136 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n", in parse_btf_map_def()
2137 map->name); in parse_btf_map_def()
2138 return -EINVAL; in parse_btf_map_def()
2140 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type, in parse_btf_map_def()
2143 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", in parse_btf_map_def()
2144 map->name, btf_kind_str(t)); in parse_btf_map_def()
2145 return -EINVAL; in parse_btf_map_def()
2147 t = skip_mods_and_typedefs(obj->btf, t->type, NULL); in parse_btf_map_def()
2149 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n", in parse_btf_map_def()
2150 map->name, btf_kind_str(t)); in parse_btf_map_def()
2151 return -EINVAL; in parse_btf_map_def()
2154 map->inner_map = calloc(1, sizeof(*map->inner_map)); in parse_btf_map_def()
2155 if (!map->inner_map) in parse_btf_map_def()
2156 return -ENOMEM; in parse_btf_map_def()
2157 map->inner_map->sec_idx = obj->efile.btf_maps_shndx; in parse_btf_map_def()
2158 map->inner_map->name = malloc(strlen(map->name) + in parse_btf_map_def()
2160 if (!map->inner_map->name) in parse_btf_map_def()
2161 return -ENOMEM; in parse_btf_map_def()
2162 sprintf(map->inner_map->name, "%s.inner", map->name); in parse_btf_map_def()
2164 err = parse_btf_map_def(obj, map->inner_map, t, strict, in parse_btf_map_def()
2173 pr_debug("map '%s': inner def can't be pinned.\n", in parse_btf_map_def()
2174 map->name); in parse_btf_map_def()
2175 return -EINVAL; in parse_btf_map_def()
2177 if (!get_map_field_int(map->name, obj->btf, m, &val)) in parse_btf_map_def()
2178 return -EINVAL; in parse_btf_map_def()
2179 pr_debug("map '%s': found pinning = %u.\n", in parse_btf_map_def()
2180 map->name, val); in parse_btf_map_def()
2184 pr_warn("map '%s': invalid pinning value %u.\n", in parse_btf_map_def()
2185 map->name, val); in parse_btf_map_def()
2186 return -EINVAL; in parse_btf_map_def()
2189 err = build_map_pin_path(map, pin_root_path); in parse_btf_map_def()
2191 pr_warn("map '%s': couldn't build pin path.\n", in parse_btf_map_def()
2192 map->name); in parse_btf_map_def()
2198 pr_warn("map '%s': unknown field '%s'.\n", in parse_btf_map_def()
2199 map->name, name); in parse_btf_map_def()
2200 return -ENOTSUP; in parse_btf_map_def()
2202 pr_debug("map '%s': ignoring unknown field '%s'.\n", in parse_btf_map_def()
2203 map->name, name); in parse_btf_map_def()
2207 if (map->def.type == BPF_MAP_TYPE_UNSPEC) { in parse_btf_map_def()
2208 pr_warn("map '%s': map type isn't specified.\n", map->name); in parse_btf_map_def()
2209 return -EINVAL; in parse_btf_map_def()
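parse_btf_map_def() walks the __uint/__type-encoded members of a map declaration in SEC(".maps"). A hedged example exercising the fields handled above, including by-name pinning and a map-in-map whose inner definition rides in the mandatory last member (macros from bpf_helpers.h):

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u32);            /* key_size resolved via BTF   */
            __type(value, __u64);          /* value_size resolved via BTF */
            __uint(pinning, LIBBPF_PIN_BY_NAME);
    } events SEC(".maps");

    struct inner_arr {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, int);
            __type(value, int);
    };

    /* map-in-map: 'values' must be a zero-sized array and the last member */
    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
            __uint(max_entries, 8);
            __type(key, int);
            __array(values, struct inner_arr);
    } outer SEC(".maps");
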
2225 struct bpf_map *map; in bpf_object__init_user_btf_map() local
2228 var = btf__type_by_id(obj->btf, vi->type); in bpf_object__init_user_btf_map()
2230 map_name = btf__name_by_offset(obj->btf, var->name_off); in bpf_object__init_user_btf_map()
2233 pr_warn("map #%d: empty name.\n", var_idx); in bpf_object__init_user_btf_map()
2234 return -EINVAL; in bpf_object__init_user_btf_map()
2236 if ((__u64)vi->offset + vi->size > data->d_size) { in bpf_object__init_user_btf_map()
2237 pr_warn("map '%s' BTF data is corrupted.\n", map_name); in bpf_object__init_user_btf_map()
2238 return -EINVAL; in bpf_object__init_user_btf_map()
2241 pr_warn("map '%s': unexpected var kind %s.\n", in bpf_object__init_user_btf_map()
2243 return -EINVAL; in bpf_object__init_user_btf_map()
2245 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED && in bpf_object__init_user_btf_map()
2246 var_extra->linkage != BTF_VAR_STATIC) { in bpf_object__init_user_btf_map()
2247 pr_warn("map '%s': unsupported var linkage %u.\n", in bpf_object__init_user_btf_map()
2248 map_name, var_extra->linkage); in bpf_object__init_user_btf_map()
2249 return -EOPNOTSUPP; in bpf_object__init_user_btf_map()
2252 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); in bpf_object__init_user_btf_map()
2254 pr_warn("map '%s': unexpected def kind %s.\n", in bpf_object__init_user_btf_map()
2256 return -EINVAL; in bpf_object__init_user_btf_map()
2258 if (def->size > vi->size) { in bpf_object__init_user_btf_map()
2259 pr_warn("map '%s': invalid def size.\n", map_name); in bpf_object__init_user_btf_map()
2260 return -EINVAL; in bpf_object__init_user_btf_map()
2263 map = bpf_object__add_map(obj); in bpf_object__init_user_btf_map()
2264 if (IS_ERR(map)) in bpf_object__init_user_btf_map()
2265 return PTR_ERR(map); in bpf_object__init_user_btf_map()
2266 map->name = strdup(map_name); in bpf_object__init_user_btf_map()
2267 if (!map->name) { in bpf_object__init_user_btf_map()
2268 pr_warn("map '%s': failed to alloc map name.\n", map_name); in bpf_object__init_user_btf_map()
2269 return -ENOMEM; in bpf_object__init_user_btf_map()
2271 map->libbpf_type = LIBBPF_MAP_UNSPEC; in bpf_object__init_user_btf_map()
2272 map->def.type = BPF_MAP_TYPE_UNSPEC; in bpf_object__init_user_btf_map()
2273 map->sec_idx = sec_idx; in bpf_object__init_user_btf_map()
2274 map->sec_offset = vi->offset; in bpf_object__init_user_btf_map()
2275 map->btf_var_idx = var_idx; in bpf_object__init_user_btf_map()
2276 pr_debug("map '%s': at sec_idx %d, offset %zu.\n", in bpf_object__init_user_btf_map()
2277 map_name, map->sec_idx, map->sec_offset); in bpf_object__init_user_btf_map()
2279 return parse_btf_map_def(obj, map, def, strict, false, pin_root_path); in bpf_object__init_user_btf_map()
2292 if (obj->efile.btf_maps_shndx < 0) in bpf_object__init_user_btf_maps()
2295 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx); in bpf_object__init_user_btf_maps()
2298 pr_warn("elf: failed to get %s map definitions for %s\n", in bpf_object__init_user_btf_maps()
2299 MAPS_ELF_SEC, obj->path); in bpf_object__init_user_btf_maps()
2300 return -EINVAL; in bpf_object__init_user_btf_maps()
2303 nr_types = btf__get_nr_types(obj->btf); in bpf_object__init_user_btf_maps()
2305 t = btf__type_by_id(obj->btf, i); in bpf_object__init_user_btf_maps()
2308 name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__init_user_btf_maps()
2311 obj->efile.btf_maps_sec_btf_id = i; in bpf_object__init_user_btf_maps()
2318 return -ENOENT; in bpf_object__init_user_btf_maps()
2324 obj->efile.btf_maps_shndx, in bpf_object__init_user_btf_maps()
2387 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); in bpf_object__sanitize_btf()
2393 t->size = 1; in bpf_object__sanitize_btf()
2402 name = (char *)btf__name_by_offset(btf, t->name_off); in bpf_object__sanitize_btf()
2410 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen); in bpf_object__sanitize_btf()
2413 m->offset = v->offset * 8; in bpf_object__sanitize_btf()
2414 m->type = v->type; in bpf_object__sanitize_btf()
2416 vt = (void *)btf__type_by_id(btf, v->type); in bpf_object__sanitize_btf()
2417 m->name_off = vt->name_off; in bpf_object__sanitize_btf()
2422 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen); in bpf_object__sanitize_btf()
2423 t->size = sizeof(__u32); /* kernel enforced */ in bpf_object__sanitize_btf()
2426 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0); in bpf_object__sanitize_btf()
2429 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0); in bpf_object__sanitize_btf()
2436 return obj->efile.btf_maps_shndx >= 0 || in libbpf_needs_btf()
2437 obj->efile.st_ops_shndx >= 0 || in libbpf_needs_btf()
2438 obj->nr_extern > 0; in libbpf_needs_btf()
2443 return obj->efile.st_ops_shndx >= 0; in kernel_needs_btf()
2450 int err = -ENOENT; in bpf_object__init_btf()
2453 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); in bpf_object__init_btf()
2454 if (IS_ERR(obj->btf)) { in bpf_object__init_btf()
2455 err = PTR_ERR(obj->btf); in bpf_object__init_btf()
2456 obj->btf = NULL; in bpf_object__init_btf()
2461 /* enforce 8-byte pointers for BPF-targeted BTFs */ in bpf_object__init_btf()
2462 btf__set_pointer_size(obj->btf, 8); in bpf_object__init_btf()
2466 if (!obj->btf) { in bpf_object__init_btf()
2471 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, in bpf_object__init_btf()
2472 btf_ext_data->d_size); in bpf_object__init_btf()
2473 if (IS_ERR(obj->btf_ext)) { in bpf_object__init_btf()
2475 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext)); in bpf_object__init_btf()
2476 obj->btf_ext = NULL; in bpf_object__init_btf()
2492 if (!obj->btf) in bpf_object__finalize_btf()
2495 err = btf__finalize_data(obj, obj->btf); in bpf_object__finalize_btf()
2506 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || in libbpf_prog_needs_vmlinux_btf()
2507 prog->type == BPF_PROG_TYPE_LSM) in libbpf_prog_needs_vmlinux_btf()
2513 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) in libbpf_prog_needs_vmlinux_btf()
2525 /* CO-RE relocations need kernel BTF */ in bpf_object__load_vmlinux_btf()
2526 if (obj->btf_ext && obj->btf_ext->core_relo_info.len) in bpf_object__load_vmlinux_btf()
2530 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__load_vmlinux_btf()
2533 ext = &obj->externs[i]; in bpf_object__load_vmlinux_btf()
2534 if (ext->type == EXT_KSYM && ext->ksym.type_id) { in bpf_object__load_vmlinux_btf()
2541 if (!prog->load) in bpf_object__load_vmlinux_btf()
2552 obj->btf_vmlinux = libbpf_find_kernel_btf(); in bpf_object__load_vmlinux_btf()
2553 if (IS_ERR(obj->btf_vmlinux)) { in bpf_object__load_vmlinux_btf()
2554 err = PTR_ERR(obj->btf_vmlinux); in bpf_object__load_vmlinux_btf()
2556 obj->btf_vmlinux = NULL; in bpf_object__load_vmlinux_btf()
2564 struct btf *kern_btf = obj->btf; in bpf_object__sanitize_and_load_btf()
2568 if (!obj->btf) in bpf_object__sanitize_and_load_btf()
2573 err = -EOPNOTSUPP; in bpf_object__sanitize_and_load_btf()
2586 raw_data = btf__get_raw_data(obj->btf, &sz); in bpf_object__sanitize_and_load_btf()
2591 /* enforce 8-byte pointers for BPF-targeted BTFs */ in bpf_object__sanitize_and_load_btf()
2592 btf__set_pointer_size(obj->btf, 8); in bpf_object__sanitize_and_load_btf()
2600 btf__set_fd(obj->btf, btf__fd(kern_btf)); in bpf_object__sanitize_and_load_btf()
2601 btf__set_fd(kern_btf, -1); in bpf_object__sanitize_and_load_btf()
2621 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); in elf_sym_str()
2624 off, obj->path, elf_errmsg(-1)); in elf_sym_str()
2635 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); in elf_sec_str()
2638 off, obj->path, elf_errmsg(-1)); in elf_sec_str()
2649 scn = elf_getscn(obj->efile.elf, idx); in elf_sec_by_idx()
2652 idx, obj->path, elf_errmsg(-1)); in elf_sec_by_idx()
2661 Elf *elf = obj->efile.elf; in elf_sec_by_name()
2680 return -EINVAL; in elf_sec_hdr()
2684 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); in elf_sec_hdr()
2685 return -EINVAL; in elf_sec_hdr()
2705 elf_ndxscn(scn), obj->path, elf_errmsg(-1)); in elf_sec_name()
2723 obj->path, elf_errmsg(-1)); in elf_sec_data()
2733 Elf_Data *symbols = obj->efile.symbols; in elf_sym_by_sec_off()
2734 size_t n = symbols->d_size / sizeof(GElf_Sym); in elf_sym_by_sec_off()
2740 if (sym->st_shndx != sec_idx || sym->st_value != off) in elf_sym_by_sec_off()
2742 if (GELF_ST_TYPE(sym->st_info) != sym_type) in elf_sym_by_sec_off()
2747 return -ENOENT; in elf_sym_by_sec_off()
2753 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0; in is_sec_name_dwarf()
2759 if (hdr->sh_type == SHT_STRTAB) in ignore_elf_section()
2763 if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */) in ignore_elf_section()
2767 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && in ignore_elf_section()
2775 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) { in ignore_elf_section()
2776 name += sizeof(".rel") - 1; in ignore_elf_section()
2795 if (a->sec_idx != b->sec_idx) in cmp_progs()
2796 return a->sec_idx < b->sec_idx ? -1 : 1; in cmp_progs()
2799 return a->sec_insn_off < b->sec_insn_off ? -1 : 1; in cmp_progs()
2804 Elf *elf = obj->efile.elf; in bpf_object__elf_collect()
2819 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2822 if (obj->efile.symbols) { in bpf_object__elf_collect()
2823 pr_warn("elf: multiple symbol tables in %s\n", obj->path); in bpf_object__elf_collect()
2824 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2829 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2831 obj->efile.symbols = data; in bpf_object__elf_collect()
2832 obj->efile.symbols_shndx = elf_ndxscn(scn); in bpf_object__elf_collect()
2833 obj->efile.strtabidx = sh.sh_link; in bpf_object__elf_collect()
2842 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2846 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2853 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2856 idx, name, (unsigned long)data->d_size, in bpf_object__elf_collect()
2861 err = bpf_object__init_license(obj, data->d_buf, data->d_size); in bpf_object__elf_collect()
2865 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size); in bpf_object__elf_collect()
2869 obj->efile.maps_shndx = idx; in bpf_object__elf_collect()
2871 obj->efile.btf_maps_shndx = idx; in bpf_object__elf_collect()
2878 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { in bpf_object__elf_collect()
2881 obj->efile.text_shndx = idx; in bpf_object__elf_collect()
2886 obj->efile.data = data; in bpf_object__elf_collect()
2887 obj->efile.data_shndx = idx; in bpf_object__elf_collect()
2889 obj->efile.rodata = data; in bpf_object__elf_collect()
2890 obj->efile.rodata_shndx = idx; in bpf_object__elf_collect()
2892 obj->efile.st_ops_data = data; in bpf_object__elf_collect()
2893 obj->efile.st_ops_shndx = idx; in bpf_object__elf_collect()
2899 int nr_sects = obj->efile.nr_reloc_sects; in bpf_object__elf_collect()
2900 void *sects = obj->efile.reloc_sects; in bpf_object__elf_collect()
2914 sizeof(*obj->efile.reloc_sects)); in bpf_object__elf_collect()
2916 return -ENOMEM; in bpf_object__elf_collect()
2918 obj->efile.reloc_sects = sects; in bpf_object__elf_collect()
2919 obj->efile.nr_reloc_sects++; in bpf_object__elf_collect()
2921 obj->efile.reloc_sects[nr_sects].shdr = sh; in bpf_object__elf_collect()
2922 obj->efile.reloc_sects[nr_sects].data = data; in bpf_object__elf_collect()
2924 obj->efile.bss = data; in bpf_object__elf_collect()
2925 obj->efile.bss_shndx = idx; in bpf_object__elf_collect()
2932 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { in bpf_object__elf_collect()
2933 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path); in bpf_object__elf_collect()
2934 return -LIBBPF_ERRNO__FORMAT; in bpf_object__elf_collect()
2937 /* sort BPF programs by section name and in-section instruction offset in bpf_object__elf_collect()
2939 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs); in bpf_object__elf_collect()
2946 int bind = GELF_ST_BIND(sym->st_info); in sym_is_extern()
2948 return sym->st_shndx == SHN_UNDEF && in sym_is_extern()
2950 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE; in sym_is_extern()
2960 return -ESRCH; in find_extern_btf_id()
2969 var_name = btf__name_by_offset(btf, t->name_off); in find_extern_btf_id()
2973 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) in find_extern_btf_id()
2974 return -EINVAL; in find_extern_btf_id()
2979 return -ENOENT; in find_extern_btf_id()
2988 return -ESRCH; in find_extern_sec_btf_id()
2999 if (vs->type == ext_btf_id) in find_extern_sec_btf_id()
3004 return -ENOENT; in find_extern_sec_btf_id()
3014 name = btf__name_by_offset(btf, t->name_off); in find_kcfg_type()
3023 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; in find_kcfg_type()
3026 if (t->size == 1) in find_kcfg_type()
3028 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) in find_kcfg_type()
3033 if (t->size != 4) in find_kcfg_type()
3039 if (btf_array(t)->nelems == 0) in find_kcfg_type()
3041 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) in find_kcfg_type()
3054 if (a->type != b->type) in cmp_externs()
3055 return a->type < b->type ? -1 : 1; in cmp_externs()
3057 if (a->type == EXT_KCFG) { in cmp_externs()
3059 if (a->kcfg.align != b->kcfg.align) in cmp_externs()
3060 return a->kcfg.align > b->kcfg.align ? -1 : 1; in cmp_externs()
3062 if (a->kcfg.sz != b->kcfg.sz) in cmp_externs()
3063 return a->kcfg.sz < b->kcfg.sz ? -1 : 1; in cmp_externs()
3067 return strcmp(a->name, b->name); in cmp_externs()
3096 if (!obj->efile.symbols) in bpf_object__collect_externs()
3099 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx); in bpf_object__collect_externs()
3101 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_externs()
3109 if (!gelf_getsym(obj->efile.symbols, i, &sym)) in bpf_object__collect_externs()
3110 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_externs()
3117 ext = obj->externs; in bpf_object__collect_externs()
3118 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext)); in bpf_object__collect_externs()
3120 return -ENOMEM; in bpf_object__collect_externs()
3121 obj->externs = ext; in bpf_object__collect_externs()
3122 ext = &ext[obj->nr_extern]; in bpf_object__collect_externs()
3124 obj->nr_extern++; in bpf_object__collect_externs()
3126 ext->btf_id = find_extern_btf_id(obj->btf, ext_name); in bpf_object__collect_externs()
3127 if (ext->btf_id <= 0) { in bpf_object__collect_externs()
3129 ext_name, ext->btf_id); in bpf_object__collect_externs()
3130 return ext->btf_id; in bpf_object__collect_externs()
3132 t = btf__type_by_id(obj->btf, ext->btf_id); in bpf_object__collect_externs()
3133 ext->name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__collect_externs()
3134 ext->sym_idx = i; in bpf_object__collect_externs()
3135 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; in bpf_object__collect_externs()
3137 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); in bpf_object__collect_externs()
3138 if (ext->sec_btf_id <= 0) { in bpf_object__collect_externs()
3140 ext_name, ext->btf_id, ext->sec_btf_id); in bpf_object__collect_externs()
3141 return ext->sec_btf_id; in bpf_object__collect_externs()
3143 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); in bpf_object__collect_externs()
3144 sec_name = btf__name_by_offset(obj->btf, sec->name_off); in bpf_object__collect_externs()
3148 ext->type = EXT_KCFG; in bpf_object__collect_externs()
3149 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); in bpf_object__collect_externs()
3150 if (ext->kcfg.sz <= 0) { in bpf_object__collect_externs()
3152 ext_name, ext->kcfg.sz); in bpf_object__collect_externs()
3153 return ext->kcfg.sz; in bpf_object__collect_externs()
3155 ext->kcfg.align = btf__align_of(obj->btf, t->type); in bpf_object__collect_externs()
3156 if (ext->kcfg.align <= 0) { in bpf_object__collect_externs()
3158 ext_name, ext->kcfg.align); in bpf_object__collect_externs()
3159 return -EINVAL; in bpf_object__collect_externs()
3161 ext->kcfg.type = find_kcfg_type(obj->btf, t->type, in bpf_object__collect_externs()
3162 &ext->kcfg.is_signed); in bpf_object__collect_externs()
3163 if (ext->kcfg.type == KCFG_UNKNOWN) { in bpf_object__collect_externs()
3165 return -ENOTSUP; in bpf_object__collect_externs()
3169 ext->type = EXT_KSYM; in bpf_object__collect_externs()
3170 skip_mods_and_typedefs(obj->btf, t->type, in bpf_object__collect_externs()
3171 &ext->ksym.type_id); in bpf_object__collect_externs()
3174 return -ENOTSUP; in bpf_object__collect_externs()
3177 pr_debug("collected %d externs total\n", obj->nr_extern); in bpf_object__collect_externs()
3179 if (!obj->nr_extern) in bpf_object__collect_externs()
3183 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); in bpf_object__collect_externs()
3187 * pretending that each extern is an 8-byte variable in bpf_object__collect_externs()
3190 /* find existing 4-byte integer type in BTF to use for fake in bpf_object__collect_externs()
3193 int int_btf_id = find_int_btf_id(obj->btf); in bpf_object__collect_externs()
3195 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__collect_externs()
3196 ext = &obj->externs[i]; in bpf_object__collect_externs()
3197 if (ext->type != EXT_KSYM) in bpf_object__collect_externs()
3200 i, ext->sym_idx, ext->name); in bpf_object__collect_externs()
3209 vt = (void *)btf__type_by_id(obj->btf, vs->type); in bpf_object__collect_externs()
3210 ext_name = btf__name_by_offset(obj->btf, vt->name_off); in bpf_object__collect_externs()
3215 return -ESRCH; in bpf_object__collect_externs()
3217 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; in bpf_object__collect_externs()
3218 vt->type = int_btf_id; in bpf_object__collect_externs()
3219 vs->offset = off; in bpf_object__collect_externs()
3220 vs->size = sizeof(int); in bpf_object__collect_externs()
3222 sec->size = off; in bpf_object__collect_externs()
3227 /* for kcfg externs calculate their offsets within a .kconfig map */ in bpf_object__collect_externs()
3229 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__collect_externs()
3230 ext = &obj->externs[i]; in bpf_object__collect_externs()
3231 if (ext->type != EXT_KCFG) in bpf_object__collect_externs()
3234 ext->kcfg.data_off = roundup(off, ext->kcfg.align); in bpf_object__collect_externs()
3235 off = ext->kcfg.data_off + ext->kcfg.sz; in bpf_object__collect_externs()
3237 i, ext->sym_idx, ext->kcfg.data_off, ext->name); in bpf_object__collect_externs()
3239 sec->size = off; in bpf_object__collect_externs()
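/* Editor's note: a worked example (hypothetical values) of the packing loop
 * above. With externs pre-sorted by cmp_externs(), a u64 followed by a bool:
 *   u64:  data_off = roundup(0, 8) = 0, off = 0 + 8 = 8
 *   bool: data_off = roundup(8, 1) = 8, off = 8 + 1 = 9
 * so sec->size becomes 9 with no padding bytes wasted. roundup(x, y) rounds
 * x up to the next multiple of y, i.e. the same idea as: */
#define ROUNDUP_DEMO(x, y) ((((x) + (y) - 1) / (y)) * (y))  /* ROUNDUP_DEMO(9, 8) == 16 */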
3244 t = btf__type_by_id(obj->btf, vs->type); in bpf_object__collect_externs()
3245 ext_name = btf__name_by_offset(obj->btf, t->name_off); in bpf_object__collect_externs()
3250 return -ESRCH; in bpf_object__collect_externs()
3252 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; in bpf_object__collect_externs()
3253 vs->offset = ext->kcfg.data_off; in bpf_object__collect_externs()
3266 if (pos->sec_name && !strcmp(pos->sec_name, title)) in bpf_object__find_program_by_title()
3275 /* For legacy reasons, libbpf supports entry-point BPF programs in prog_is_subprog()
3278 * must be subprograms called from entry-point BPF programs in in prog_is_subprog()
3285 * SEC()-designated BPF programs and .text entry-point BPF programs. in prog_is_subprog()
3287 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; in prog_is_subprog()
3299 if (!strcmp(prog->name, name)) in bpf_object__find_program_by_name()
3308 return shndx == obj->efile.data_shndx || in bpf_object__shndx_is_data()
3309 shndx == obj->efile.bss_shndx || in bpf_object__shndx_is_data()
3310 shndx == obj->efile.rodata_shndx; in bpf_object__shndx_is_data()
3316 return shndx == obj->efile.maps_shndx || in bpf_object__shndx_is_maps()
3317 shndx == obj->efile.btf_maps_shndx; in bpf_object__shndx_is_maps()
3323 if (shndx == obj->efile.data_shndx) in bpf_object__section_to_libbpf_map_type()
3325 else if (shndx == obj->efile.bss_shndx) in bpf_object__section_to_libbpf_map_type()
3327 else if (shndx == obj->efile.rodata_shndx) in bpf_object__section_to_libbpf_map_type()
3329 else if (shndx == obj->efile.symbols_shndx) in bpf_object__section_to_libbpf_map_type()
3340 struct bpf_insn *insn = &prog->insns[insn_idx]; in bpf_program__record_reloc()
3341 size_t map_idx, nr_maps = prog->obj->nr_maps; in bpf_program__record_reloc()
3342 struct bpf_object *obj = prog->obj; in bpf_program__record_reloc()
3343 __u32 shdr_idx = sym->st_shndx; in bpf_program__record_reloc()
3346 struct bpf_map *map; in bpf_program__record_reloc() local
3348 reloc_desc->processed = false; in bpf_program__record_reloc()
3350 /* sub-program call relocation */ in bpf_program__record_reloc()
3351 if (insn->code == (BPF_JMP | BPF_CALL)) { in bpf_program__record_reloc()
3352 if (insn->src_reg != BPF_PSEUDO_CALL) { in bpf_program__record_reloc()
3353 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name); in bpf_program__record_reloc()
3354 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3357 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) { in bpf_program__record_reloc()
3360 prog->name, sym_name, sym_sec_name); in bpf_program__record_reloc()
3361 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3363 if (sym->st_value % BPF_INSN_SZ) { in bpf_program__record_reloc()
3365 prog->name, sym_name, (size_t)sym->st_value); in bpf_program__record_reloc()
3366 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3368 reloc_desc->type = RELO_CALL; in bpf_program__record_reloc()
3369 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3370 reloc_desc->sym_off = sym->st_value; in bpf_program__record_reloc()
3374 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) { in bpf_program__record_reloc()
3376 prog->name, sym_name, insn_idx, insn->code); in bpf_program__record_reloc()
3377 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3381 int sym_idx = GELF_R_SYM(rel->r_info); in bpf_program__record_reloc()
3382 int i, n = obj->nr_extern; in bpf_program__record_reloc()
3386 ext = &obj->externs[i]; in bpf_program__record_reloc()
3387 if (ext->sym_idx == sym_idx) in bpf_program__record_reloc()
3392 prog->name, sym_name, sym_idx); in bpf_program__record_reloc()
3393 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3396 prog->name, i, ext->name, ext->sym_idx, insn_idx); in bpf_program__record_reloc()
3397 reloc_desc->type = RELO_EXTERN; in bpf_program__record_reloc()
3398 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3399 reloc_desc->sym_off = i; /* sym_off stores extern index */ in bpf_program__record_reloc()
3405 prog->name, sym_name, shdr_idx); in bpf_program__record_reloc()
3406 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3412 /* generic map reference relocation */ in bpf_program__record_reloc()
3415 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n", in bpf_program__record_reloc()
3416 prog->name, sym_name, sym_sec_name); in bpf_program__record_reloc()
3417 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3420 map = &obj->maps[map_idx]; in bpf_program__record_reloc()
3421 if (map->libbpf_type != type || in bpf_program__record_reloc()
3422 map->sec_idx != sym->st_shndx || in bpf_program__record_reloc()
3423 map->sec_offset != sym->st_value) in bpf_program__record_reloc()
3425 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n", in bpf_program__record_reloc()
3426 prog->name, map_idx, map->name, map->sec_idx, in bpf_program__record_reloc()
3427 map->sec_offset, insn_idx); in bpf_program__record_reloc()
3431 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n", in bpf_program__record_reloc()
3432 prog->name, sym_sec_name, (size_t)sym->st_value); in bpf_program__record_reloc()
3433 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3435 reloc_desc->type = RELO_LD64; in bpf_program__record_reloc()
3436 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3437 reloc_desc->map_idx = map_idx; in bpf_program__record_reloc()
3438 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */ in bpf_program__record_reloc()
3442 /* global data map relocation */ in bpf_program__record_reloc()
3445 prog->name, sym_sec_name); in bpf_program__record_reloc()
3446 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3449 map = &obj->maps[map_idx]; in bpf_program__record_reloc()
3450 if (map->libbpf_type != type) in bpf_program__record_reloc()
3452 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n", in bpf_program__record_reloc()
3453 prog->name, map_idx, map->name, map->sec_idx, in bpf_program__record_reloc()
3454 map->sec_offset, insn_idx); in bpf_program__record_reloc()
3458 pr_warn("prog '%s': data relo failed to find map for section '%s'\n", in bpf_program__record_reloc()
3459 prog->name, sym_sec_name); in bpf_program__record_reloc()
3460 return -LIBBPF_ERRNO__RELOC; in bpf_program__record_reloc()
3463 reloc_desc->type = RELO_DATA; in bpf_program__record_reloc()
3464 reloc_desc->insn_idx = insn_idx; in bpf_program__record_reloc()
3465 reloc_desc->map_idx = map_idx; in bpf_program__record_reloc()
3466 reloc_desc->sym_off = sym->st_value; in bpf_program__record_reloc()
3472 return insn_idx >= prog->sec_insn_off && in prog_contains_insn()
3473 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; in prog_contains_insn()
3479 int l = 0, r = obj->nr_programs - 1, m; in find_prog_by_sec_insn()
3482 if (!obj->nr_programs) in find_prog_by_sec_insn()
3486 m = l + (r - l + 1) / 2; in find_prog_by_sec_insn()
3487 prog = &obj->programs[m]; in find_prog_by_sec_insn()
3489 if (prog->sec_idx < sec_idx || in find_prog_by_sec_insn()
3490 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx)) in find_prog_by_sec_insn()
3493 r = m - 1; in find_prog_by_sec_insn()
3498 prog = &obj->programs[l]; in find_prog_by_sec_insn()
3499 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx)) in find_prog_by_sec_insn()
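/* Editor's note: the loop above is a "rightmost match" binary search -- it
 * converges on the last program whose (sec_idx, sec_insn_off) is <= the
 * target. The upper-middle split m = l + (r - l + 1) / 2 is what keeps
 * 'l = m' from looping forever when l and r become adjacent. The same
 * pattern in isolation (sketch): */
static int last_leq_demo(const int *a, int n, int key)
{
    int l = 0, r = n - 1, m;

    if (n == 0 || a[0] > key)
        return -1;                      /* nothing <= key */
    while (l < r) {
        m = l + (r - l + 1) / 2;        /* bias up so 'l = m' makes progress */
        if (a[m] <= key)
            l = m;
        else
            r = m - 1;
    }
    return l;                           /* index of last element <= key */
}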
3507 Elf_Data *symbols = obj->efile.symbols; in bpf_object__collect_prog_relos()
3509 size_t sec_idx = shdr->sh_info; in bpf_object__collect_prog_relos()
3518 relo_sec_name = elf_sec_str(obj, shdr->sh_name); in bpf_object__collect_prog_relos()
3521 return -EINVAL; in bpf_object__collect_prog_relos()
3525 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_prog_relos()
3530 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3535 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3540 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_prog_relos()
3563 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_prog_relos()
3566 relos = libbpf_reallocarray(prog->reloc_desc, in bpf_object__collect_prog_relos()
3567 prog->nr_reloc + 1, sizeof(*relos)); in bpf_object__collect_prog_relos()
3569 return -ENOMEM; in bpf_object__collect_prog_relos()
3570 prog->reloc_desc = relos; in bpf_object__collect_prog_relos()
3573 insn_idx -= prog->sec_insn_off; in bpf_object__collect_prog_relos()
3574 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], in bpf_object__collect_prog_relos()
3579 prog->nr_reloc++; in bpf_object__collect_prog_relos()
3584 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) in bpf_map_find_btf_info() argument
3586 struct bpf_map_def *def = &map->def; in bpf_map_find_btf_info()
3590 /* if it's BTF-defined map, we don't need to search for type IDs. in bpf_map_find_btf_info()
3591 * For struct_ops map, it does not need btf_key_type_id and in bpf_map_find_btf_info()
3594 if (map->sec_idx == obj->efile.btf_maps_shndx || in bpf_map_find_btf_info()
3595 bpf_map__is_struct_ops(map)) in bpf_map_find_btf_info()
3598 if (!bpf_map__is_internal(map)) { in bpf_map_find_btf_info()
3599 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, in bpf_map_find_btf_info()
3600 def->value_size, &key_type_id, in bpf_map_find_btf_info()
3607 ret = btf__find_by_name(obj->btf, in bpf_map_find_btf_info()
3608 libbpf_type_to_btf_name[map->libbpf_type]); in bpf_map_find_btf_info()
3613 map->btf_key_type_id = key_type_id; in bpf_map_find_btf_info()
3614 map->btf_value_type_id = bpf_map__is_internal(map) ? in bpf_map_find_btf_info()
3631 err = -errno; in bpf_get_map_info_from_fdinfo()
3639 info->type = val; in bpf_get_map_info_from_fdinfo()
3641 info->key_size = val; in bpf_get_map_info_from_fdinfo()
3643 info->value_size = val; in bpf_get_map_info_from_fdinfo()
3645 info->max_entries = val; in bpf_get_map_info_from_fdinfo()
3647 info->map_flags = val; in bpf_get_map_info_from_fdinfo()
3655 int bpf_map__reuse_fd(struct bpf_map *map, int fd) in bpf_map__reuse_fd() argument
3669 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0) in bpf_map__reuse_fd()
3670 new_name = strdup(map->name); in bpf_map__reuse_fd()
3675 return -errno; in bpf_map__reuse_fd()
3679 err = -errno; in bpf_map__reuse_fd()
3685 err = -errno; in bpf_map__reuse_fd()
3689 err = zclose(map->fd); in bpf_map__reuse_fd()
3691 err = -errno; in bpf_map__reuse_fd()
3694 free(map->name); in bpf_map__reuse_fd()
3696 map->fd = new_fd; in bpf_map__reuse_fd()
3697 map->name = new_name; in bpf_map__reuse_fd()
3698 map->def.type = info.type; in bpf_map__reuse_fd()
3699 map->def.key_size = info.key_size; in bpf_map__reuse_fd()
3700 map->def.value_size = info.value_size; in bpf_map__reuse_fd()
3701 map->def.max_entries = info.max_entries; in bpf_map__reuse_fd()
3702 map->def.map_flags = info.map_flags; in bpf_map__reuse_fd()
3703 map->btf_key_type_id = info.btf_key_type_id; in bpf_map__reuse_fd()
3704 map->btf_value_type_id = info.btf_value_type_id; in bpf_map__reuse_fd()
3705 map->reused = true; in bpf_map__reuse_fd()
3716 __u32 bpf_map__max_entries(const struct bpf_map *map) in bpf_map__max_entries() argument
3718 return map->def.max_entries; in bpf_map__max_entries()
3721 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) in bpf_map__set_max_entries() argument
3723 if (map->fd >= 0) in bpf_map__set_max_entries()
3724 return -EBUSY; in bpf_map__set_max_entries()
3725 map->def.max_entries = max_entries; in bpf_map__set_max_entries()
3729 int bpf_map__resize(struct bpf_map *map, __u32 max_entries) in bpf_map__resize() argument
3731 if (!map || !max_entries) in bpf_map__resize()
3732 return -EINVAL; in bpf_map__resize()
3734 return bpf_map__set_max_entries(map, max_entries); in bpf_map__resize()
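/* Editor's note: usage sketch for the setters above. max_entries can only
 * change while the map has no fd yet, i.e. after bpf_object__open() and
 * before bpf_object__load(); afterwards the call returns -EBUSY. File and
 * map names below are hypothetical, and error checks are omitted: */
static void resize_before_load_demo(void)
{
    struct bpf_object *obj = bpf_object__open("prog.bpf.o");
    struct bpf_map *map = bpf_object__find_map_by_name(obj, "events");

    bpf_map__set_max_entries(map, 4096);    /* ok: map not created yet */
    bpf_object__load(obj);                  /* creates map with 4096 entries */
    /* calling bpf_map__set_max_entries() now would return -EBUSY */
}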
3764 return -ret; in bpf_object__probe_loading()
3810 int ret, map; in probe_kern_global_data() local
3818 map = bpf_create_map_xattr(&map_attr); in probe_kern_global_data()
3819 if (map < 0) { in probe_kern_global_data()
3820 ret = -errno; in probe_kern_global_data()
3822 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", in probe_kern_global_data()
3823 __func__, cp, -ret); in probe_kern_global_data()
3827 insns[0].imm = map; in probe_kern_global_data()
3836 close(map); in probe_kern_global_data()
3930 * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) in probe_kern_exp_attach_type()
3948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ in probe_kern_probe_read_kernel()
3973 int ret, map, prog; in probe_prog_bind_map() local
3981 map = bpf_create_map_xattr(&map_attr); in probe_prog_bind_map()
3982 if (map < 0) { in probe_prog_bind_map()
3983 ret = -errno; in probe_prog_bind_map()
3985 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", in probe_prog_bind_map()
3986 __func__, cp, -ret); in probe_prog_bind_map()
3998 close(map); in probe_prog_bind_map()
4002 ret = bpf_prog_bind_map(prog, map, NULL); in probe_prog_bind_map()
4004 close(map); in probe_prog_bind_map()
4042 "ARRAY map mmap()", probe_kern_array_mmap,
4061 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) { in kernel_supports()
4062 ret = feat->probe(); in kernel_supports()
4064 WRITE_ONCE(feat->res, FEAT_SUPPORTED); in kernel_supports()
4066 WRITE_ONCE(feat->res, FEAT_MISSING); in kernel_supports()
4068 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret); in kernel_supports()
4069 WRITE_ONCE(feat->res, FEAT_MISSING); in kernel_supports()
4073 return READ_ONCE(feat->res) == FEAT_SUPPORTED; in kernel_supports()
4076 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) in map_is_reuse_compat() argument
4089 pr_warn("failed to get map info for map FD %d: %s\n", map_fd, in map_is_reuse_compat()
4094 return (map_info.type == map->def.type && in map_is_reuse_compat()
4095 map_info.key_size == map->def.key_size && in map_is_reuse_compat()
4096 map_info.value_size == map->def.value_size && in map_is_reuse_compat()
4097 map_info.max_entries == map->def.max_entries && in map_is_reuse_compat()
4098 map_info.map_flags == map->def.map_flags); in map_is_reuse_compat()
4102 bpf_object__reuse_map(struct bpf_map *map) in bpf_object__reuse_map() argument
4107 pin_fd = bpf_obj_get(map->pin_path); in bpf_object__reuse_map()
4109 err = -errno; in bpf_object__reuse_map()
4110 if (err == -ENOENT) { in bpf_object__reuse_map()
4111 pr_debug("found no pinned map to reuse at '%s'\n", in bpf_object__reuse_map()
4112 map->pin_path); in bpf_object__reuse_map()
4116 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in bpf_object__reuse_map()
4117 pr_warn("couldn't retrieve pinned map '%s': %s\n", in bpf_object__reuse_map()
4118 map->pin_path, cp); in bpf_object__reuse_map()
4122 if (!map_is_reuse_compat(map, pin_fd)) { in bpf_object__reuse_map()
4123 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", in bpf_object__reuse_map()
4124 map->pin_path); in bpf_object__reuse_map()
4126 return -EINVAL; in bpf_object__reuse_map()
4129 err = bpf_map__reuse_fd(map, pin_fd); in bpf_object__reuse_map()
4134 map->pinned = true; in bpf_object__reuse_map()
4135 pr_debug("reused pinned map at '%s'\n", map->pin_path); in bpf_object__reuse_map()
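/* Editor's note: usage sketch for the reuse path above. Setting a pin path
 * before load makes libbpf try bpf_obj_get() on it first: a compatible
 * pinned map is reused via bpf_map__reuse_fd(), while -ENOENT falls through
 * to normal creation. Path and map name here are hypothetical: */
static void reuse_pinned_map_demo(struct bpf_object *obj)
{
    struct bpf_map *map = bpf_object__find_map_by_name(obj, "counters");

    bpf_map__set_pin_path(map, "/sys/fs/bpf/counters");
    /* a later bpf_object__load(obj) reuses the pinned map if present */
}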
4141 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) in bpf_object__populate_internal_map() argument
4143 enum libbpf_map_type map_type = map->libbpf_type; in bpf_object__populate_internal_map()
4147 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); in bpf_object__populate_internal_map()
4149 err = -errno; in bpf_object__populate_internal_map()
4151 pr_warn("Error setting initial map(%s) contents: %s\n", in bpf_object__populate_internal_map()
4152 map->name, cp); in bpf_object__populate_internal_map()
4156 /* Freeze .rodata and .kconfig map as read-only from syscall side. */ in bpf_object__populate_internal_map()
4158 err = bpf_map_freeze(map->fd); in bpf_object__populate_internal_map()
4160 err = -errno; in bpf_object__populate_internal_map()
4162 pr_warn("Error freezing map(%s) as read-only: %s\n", in bpf_object__populate_internal_map()
4163 map->name, cp); in bpf_object__populate_internal_map()
4170 static void bpf_map__destroy(struct bpf_map *map);
4172 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) in bpf_object__create_map() argument
4175 struct bpf_map_def *def = &map->def; in bpf_object__create_map()
4181 create_attr.name = map->name; in bpf_object__create_map()
4182 create_attr.map_ifindex = map->map_ifindex; in bpf_object__create_map()
4183 create_attr.map_type = def->type; in bpf_object__create_map()
4184 create_attr.map_flags = def->map_flags; in bpf_object__create_map()
4185 create_attr.key_size = def->key_size; in bpf_object__create_map()
4186 create_attr.value_size = def->value_size; in bpf_object__create_map()
4187 create_attr.numa_node = map->numa_node; in bpf_object__create_map()
4189 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { in bpf_object__create_map()
4194 pr_warn("map '%s': failed to determine number of system CPUs: %d\n", in bpf_object__create_map()
4195 map->name, nr_cpus); in bpf_object__create_map()
4198 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); in bpf_object__create_map()
4201 create_attr.max_entries = def->max_entries; in bpf_object__create_map()
4204 if (bpf_map__is_struct_ops(map)) in bpf_object__create_map()
4206 map->btf_vmlinux_value_type_id; in bpf_object__create_map()
4211 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { in bpf_object__create_map()
4212 create_attr.btf_fd = btf__fd(obj->btf); in bpf_object__create_map()
4213 create_attr.btf_key_type_id = map->btf_key_type_id; in bpf_object__create_map()
4214 create_attr.btf_value_type_id = map->btf_value_type_id; in bpf_object__create_map()
4217 if (bpf_map_type__is_map_in_map(def->type)) { in bpf_object__create_map()
4218 if (map->inner_map) { in bpf_object__create_map()
4219 err = bpf_object__create_map(obj, map->inner_map); in bpf_object__create_map()
4221 pr_warn("map '%s': failed to create inner map: %d\n", in bpf_object__create_map()
4222 map->name, err); in bpf_object__create_map()
4225 map->inner_map_fd = bpf_map__fd(map->inner_map); in bpf_object__create_map()
4227 if (map->inner_map_fd >= 0) in bpf_object__create_map()
4228 create_attr.inner_map_fd = map->inner_map_fd; in bpf_object__create_map()
4231 map->fd = bpf_create_map_xattr(&create_attr); in bpf_object__create_map()
4232 if (map->fd < 0 && (create_attr.btf_key_type_id || in bpf_object__create_map()
4236 err = -errno; in bpf_object__create_map()
4239 map->name, cp, err); in bpf_object__create_map()
4243 map->btf_key_type_id = 0; in bpf_object__create_map()
4244 map->btf_value_type_id = 0; in bpf_object__create_map()
4245 map->fd = bpf_create_map_xattr(&create_attr); in bpf_object__create_map()
4248 err = map->fd < 0 ? -errno : 0; in bpf_object__create_map()
4250 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) { in bpf_object__create_map()
4251 bpf_map__destroy(map->inner_map); in bpf_object__create_map()
4252 zfree(&map->inner_map); in bpf_object__create_map()
4258 static int init_map_slots(struct bpf_map *map) in init_map_slots() argument
4264 for (i = 0; i < map->init_slots_sz; i++) { in init_map_slots()
4265 if (!map->init_slots[i]) in init_map_slots()
4268 targ_map = map->init_slots[i]; in init_map_slots()
4270 err = bpf_map_update_elem(map->fd, &i, &fd, 0); in init_map_slots()
4272 err = -errno; in init_map_slots()
4273 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", in init_map_slots()
4274 map->name, i, targ_map->name, in init_map_slots()
4278 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", in init_map_slots()
4279 map->name, i, targ_map->name, fd); in init_map_slots()
4282 zfree(&map->init_slots); in init_map_slots()
4283 map->init_slots_sz = 0; in init_map_slots()
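/* Editor's note: the BPF-side declaration (sketch) that init_map_slots()
 * services: inner maps named in .values are created first by
 * bpf_object__create_map(), then their fds are written into the outer map's
 * slots. Shown disabled, since it needs bpf_helpers.h macros and belongs in
 * BPF program source, not here: */
#if 0 /* BPF program source, for illustration only */
struct inner_map {
    __uint(type, BPF_MAP_TYPE_ARRAY);
    __uint(max_entries, 1);
    __type(key, __u32);
    __type(value, __u64);
} inner SEC(".maps");

struct {
    __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
    __uint(max_entries, 4);
    __type(key, __u32);
    __array(values, struct inner_map);
} outer SEC(".maps") = {
    .values = { [0] = &inner }, /* slot 0 -> fd of 'inner' at load time */
};
#endif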
4291 struct bpf_map *map; in bpf_object__create_maps() local
4297 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__create_maps()
4298 map = &obj->maps[i]; in bpf_object__create_maps()
4302 if (map->pin_path) { in bpf_object__create_maps()
4303 err = bpf_object__reuse_map(map); in bpf_object__create_maps()
4305 pr_warn("map '%s': error reusing pinned map\n", in bpf_object__create_maps()
4306 map->name); in bpf_object__create_maps()
4309 if (retried && map->fd < 0) { in bpf_object__create_maps()
4310 pr_warn("map '%s': cannot find pinned map\n", in bpf_object__create_maps()
4311 map->name); in bpf_object__create_maps()
4312 err = -ENOENT; in bpf_object__create_maps()
4317 if (map->fd >= 0) { in bpf_object__create_maps()
4318 pr_debug("map '%s': skipping creation (preset fd=%d)\n", in bpf_object__create_maps()
4319 map->name, map->fd); in bpf_object__create_maps()
4321 err = bpf_object__create_map(obj, map); in bpf_object__create_maps()
4325 pr_debug("map '%s': created successfully, fd=%d\n", in bpf_object__create_maps()
4326 map->name, map->fd); in bpf_object__create_maps()
4328 if (bpf_map__is_internal(map)) { in bpf_object__create_maps()
4329 err = bpf_object__populate_internal_map(obj, map); in bpf_object__create_maps()
4331 zclose(map->fd); in bpf_object__create_maps()
4336 if (map->init_slots_sz) { in bpf_object__create_maps()
4337 err = init_map_slots(map); in bpf_object__create_maps()
4339 zclose(map->fd); in bpf_object__create_maps()
4345 if (map->pin_path && !map->pinned) { in bpf_object__create_maps()
4346 err = bpf_map__pin(map, NULL); in bpf_object__create_maps()
4348 zclose(map->fd); in bpf_object__create_maps()
4349 if (!retried && err == -EEXIST) { in bpf_object__create_maps()
4353 pr_warn("map '%s': failed to auto-pin at '%s': %d\n", in bpf_object__create_maps()
4354 map->name, map->pin_path, err); in bpf_object__create_maps()
4364 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err); in bpf_object__create_maps()
4367 zclose(obj->maps[j].fd); in bpf_object__create_maps()
4373 /* represents BPF CO-RE field or array element accessor */
4382 /* high-level spec: named fields and array indices only */
4386 /* CO-RE relocation kind */
4388 /* high-level spec length */
4390 /* raw, low-level spec: 1-to-1 with accessor spec string */
4409 /* not a flexible array if it's not inside a struct or has non-zero size */ in is_flex_arr()
4410 if (!acc->name || arr->nelems > 0) in is_flex_arr()
4414 t = btf__type_by_id(btf, acc->type_id); in is_flex_arr()
4415 return acc->idx == btf_vlen(t) - 1; in is_flex_arr()
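/* Editor's note: what is_flex_arr() recognizes, by example: */
struct flex_demo {
    int cnt;
    int data[]; /* nelems == 0 and last member: flexible array, so a spec
                 * may index past its declared (zero) size; a fixed-size
                 * 'int data[4]' still rejects any idx >= 4 */
};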
4477 * Turn bpf_core_relo into a low- and high-level spec representation,
4479 * field bit offset, specified by accessor string. Low-level spec captures
4481 * struct/union members. High-level one only captures semantically meaningful
4496 * int x = &s->a[3]; // access string = '0:1:2:3'
4498 * Low-level spec has 1:1 mapping with each element of access string (it's
4501 * High-level spec will capture only 3 points:
4502 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4503 * - field 'a' access (corresponds to '2' in low-level spec);
4504 * - array element #3 access (corresponds to '3' in low-level spec).
4506 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4510 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
4527 return -EINVAL; in bpf_core_parse_spec()
4530 spec->btf = btf; in bpf_core_parse_spec()
4531 spec->root_type_id = type_id; in bpf_core_parse_spec()
4532 spec->relo_kind = relo_kind; in bpf_core_parse_spec()
4534 /* type-based relocations don't have a field access string */ in bpf_core_parse_spec()
4537 return -EINVAL; in bpf_core_parse_spec()
4546 return -EINVAL; in bpf_core_parse_spec()
4547 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_parse_spec()
4548 return -E2BIG; in bpf_core_parse_spec()
4550 spec->raw_spec[spec->raw_len++] = access_idx; in bpf_core_parse_spec()
4553 if (spec->raw_len == 0) in bpf_core_parse_spec()
4554 return -EINVAL; in bpf_core_parse_spec()
4558 return -EINVAL; in bpf_core_parse_spec()
4560 access_idx = spec->raw_spec[0]; in bpf_core_parse_spec()
4561 acc = &spec->spec[0]; in bpf_core_parse_spec()
4562 acc->type_id = id; in bpf_core_parse_spec()
4563 acc->idx = access_idx; in bpf_core_parse_spec()
4564 spec->len++; in bpf_core_parse_spec()
4567 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t)) in bpf_core_parse_spec()
4568 return -EINVAL; in bpf_core_parse_spec()
4571 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off); in bpf_core_parse_spec()
4576 return -EINVAL; in bpf_core_parse_spec()
4581 spec->bit_offset = access_idx * sz * 8; in bpf_core_parse_spec()
4583 for (i = 1; i < spec->raw_len; i++) { in bpf_core_parse_spec()
4586 return -EINVAL; in bpf_core_parse_spec()
4588 access_idx = spec->raw_spec[i]; in bpf_core_parse_spec()
4589 acc = &spec->spec[spec->len]; in bpf_core_parse_spec()
4596 return -EINVAL; in bpf_core_parse_spec()
4599 spec->bit_offset += bit_offset; in bpf_core_parse_spec()
4602 if (m->name_off) { in bpf_core_parse_spec()
4603 name = btf__name_by_offset(btf, m->name_off); in bpf_core_parse_spec()
4605 return -EINVAL; in bpf_core_parse_spec()
4607 acc->type_id = id; in bpf_core_parse_spec()
4608 acc->idx = access_idx; in bpf_core_parse_spec()
4609 acc->name = name; in bpf_core_parse_spec()
4610 spec->len++; in bpf_core_parse_spec()
4613 id = m->type; in bpf_core_parse_spec()
4618 t = skip_mods_and_typedefs(btf, a->type, &id); in bpf_core_parse_spec()
4620 return -EINVAL; in bpf_core_parse_spec()
4622 flex = is_flex_arr(btf, acc - 1, a); in bpf_core_parse_spec()
4623 if (!flex && access_idx >= a->nelems) in bpf_core_parse_spec()
4624 return -EINVAL; in bpf_core_parse_spec()
4626 spec->spec[spec->len].type_id = id; in bpf_core_parse_spec()
4627 spec->spec[spec->len].idx = access_idx; in bpf_core_parse_spec()
4628 spec->len++; in bpf_core_parse_spec()
4633 spec->bit_offset += access_idx * sz * 8; in bpf_core_parse_spec()
4637 return -EINVAL; in bpf_core_parse_spec()
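/* Editor's note: a self-contained trace of the encoding bpf_core_parse_spec()
 * produces (this struct is hypothetical, not the one from the elided comment
 * above): */
struct spec_demo {
    int pad;                    /* member #0 */
    struct { int v[8]; } a;     /* member #1; 'v' is member #0 within */
};
/* For &((struct spec_demo *)p)->a.v[3], the access string is "0:1:0:3":
 *   raw_spec   = [0, 1, 0, 3]           (one entry per ':'-separated index)
 *   spec       = [0] .a .v [3]          (high-level: named fields + indices)
 *   bit_offset = 0 + 4*8 + 0 + 3*4*8 = 128   (i.e. byte offset 16)
 */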
4654 * underscore is ignored by BPF CO-RE relocation during relocation matching.
4661 for (i = n - 5; i >= 0; i--) { in bpf_core_essential_name_len()
4676 free(cand_ids->data); in bpf_core_free_cands()
4693 return ERR_PTR(-EINVAL); in bpf_core_find_cands()
4695 local_name = btf__name_by_offset(local_btf, local_t->name_off); in bpf_core_find_cands()
4697 return ERR_PTR(-EINVAL); in bpf_core_find_cands()
4702 return ERR_PTR(-ENOMEM); in bpf_core_find_cands()
4710 targ_name = btf__name_by_offset(targ_btf, t->name_off); in bpf_core_find_cands()
4719 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n", in bpf_core_find_cands()
4722 new_ids = libbpf_reallocarray(cand_ids->data, in bpf_core_find_cands()
4723 cand_ids->len + 1, in bpf_core_find_cands()
4724 sizeof(*cand_ids->data)); in bpf_core_find_cands()
4726 err = -ENOMEM; in bpf_core_find_cands()
4729 cand_ids->data = new_ids; in bpf_core_find_cands()
4730 cand_ids->data[cand_ids->len++] = i; in bpf_core_find_cands()
4742 * - any two STRUCTs/UNIONs are compatible and can be mixed;
4743 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
4744 * - any two PTRs are always compatible;
4745 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
4747 * least one of enum values should match;
4748 * - for INT, size and signedness are ignored;
4749 * - for ARRAY, dimensionality is ignored, element types are checked for
4751 * - everything else shouldn't be ever a target of relocation.
4753 * more experience with using BPF CO-RE relocations.
4766 return -EINVAL; in bpf_core_fields_are_compat()
4782 local_type->name_off); in bpf_core_fields_are_compat()
4783 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off); in bpf_core_fields_are_compat()
4786 /* one of them is anonymous or both w/ same flavor-less names */ in bpf_core_fields_are_compat()
4792 /* just reject deprecated bitfield-like integers; all other in bpf_core_fields_are_compat()
4798 local_id = btf_array(local_type)->type; in bpf_core_fields_are_compat()
4799 targ_id = btf_array(targ_type)->type; in bpf_core_fields_are_compat()
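/* Editor's note: concrete instances (sketch) of the compatibility rules
 * listed above, for leaf types that bpf_core_fields_are_compat() compares:
 *   'unsigned int' vs 'long'     -> compatible (INT size/signedness ignored)
 *   'int[4]'       vs 'int[16]'  -> compatible (ARRAY dimensions ignored,
 *                                   element types compared recursively)
 *   'enum e___v1'  vs 'enum e'   -> names match once flavor suffix is dropped
 *   'struct s *'   vs 'int'      -> not compatible (different BTF kinds)
 */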
4809 * Given single high-level named field accessor in local type, find
4810 * corresponding high-level accessor for a target type. Along the way,
4811 * maintain low-level spec for target as well. Also keep updating target
4839 return -EINVAL; in bpf_core_match_member()
4843 local_id = local_acc->type_id; in bpf_core_match_member()
4845 local_member = btf_members(local_type) + local_acc->idx; in bpf_core_match_member()
4846 local_name = btf__name_by_offset(local_btf, local_member->name_off); in bpf_core_match_member()
4856 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_match_member()
4857 return -E2BIG; in bpf_core_match_member()
4860 spec->bit_offset += bit_offset; in bpf_core_match_member()
4861 spec->raw_spec[spec->raw_len++] = i; in bpf_core_match_member()
4863 targ_name = btf__name_by_offset(targ_btf, m->name_off); in bpf_core_match_member()
4867 targ_btf, m->type, in bpf_core_match_member()
4875 targ_acc = &spec->spec[spec->len++]; in bpf_core_match_member()
4876 targ_acc->type_id = targ_id; in bpf_core_match_member()
4877 targ_acc->idx = i; in bpf_core_match_member()
4878 targ_acc->name = targ_name; in bpf_core_match_member()
4880 *next_targ_id = m->type; in bpf_core_match_member()
4882 local_member->type, in bpf_core_match_member()
4883 targ_btf, m->type); in bpf_core_match_member()
4885 spec->len--; /* pop accessor */ in bpf_core_match_member()
4889 spec->bit_offset -= bit_offset; in bpf_core_match_member()
4890 spec->raw_len--; in bpf_core_match_member()
4897 * type-based CO-RE relocations and follow slightly different rules than
4898 * field-based relocations. This function assumes that root types were already
4899 * checked for name match. Beyond that initial root-level name check, names
4901 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
4904 * - for ENUMs, the size is ignored;
4905 * - for INT, size and signedness are ignored;
4906 * - for ARRAY, dimensionality is ignored, element types are checked for
4908 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
4909 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
4910 * - FUNC_PROTOs are compatible if they have compatible signature: same
4913 * more experience with using BPF CO-RE relocations.
4928 depth--; in bpf_core_types_are_compat()
4930 return -EINVAL; in bpf_core_types_are_compat()
4935 return -EINVAL; in bpf_core_types_are_compat()
4948 /* just reject deprecated bitfield-like integers; all other in bpf_core_types_are_compat()
4953 local_id = local_type->type; in bpf_core_types_are_compat()
4954 targ_id = targ_type->type; in bpf_core_types_are_compat()
4957 local_id = btf_array(local_type)->type; in bpf_core_types_are_compat()
4958 targ_id = btf_array(targ_type)->type; in bpf_core_types_are_compat()
4971 skip_mods_and_typedefs(local_btf, local_p->type, &local_id); in bpf_core_types_are_compat()
4972 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id); in bpf_core_types_are_compat()
4979 skip_mods_and_typedefs(local_btf, local_type->type, &local_id); in bpf_core_types_are_compat()
4980 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id); in bpf_core_types_are_compat()
4992 * target spec (high-level, low-level + bit offset).
5004 targ_spec->btf = targ_btf; in bpf_core_spec_match()
5005 targ_spec->root_type_id = targ_id; in bpf_core_spec_match()
5006 targ_spec->relo_kind = local_spec->relo_kind; in bpf_core_spec_match()
5008 if (core_relo_is_type_based(local_spec->relo_kind)) { in bpf_core_spec_match()
5009 return bpf_core_types_are_compat(local_spec->btf, in bpf_core_spec_match()
5010 local_spec->root_type_id, in bpf_core_spec_match()
5014 local_acc = &local_spec->spec[0]; in bpf_core_spec_match()
5015 targ_acc = &targ_spec->spec[0]; in bpf_core_spec_match()
5017 if (core_relo_is_enumval_based(local_spec->relo_kind)) { in bpf_core_spec_match()
5023 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id); in bpf_core_spec_match()
5027 local_essent_len = bpf_core_essential_name_len(local_acc->name); in bpf_core_spec_match()
5030 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off); in bpf_core_spec_match()
5034 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) { in bpf_core_spec_match()
5035 targ_acc->type_id = targ_id; in bpf_core_spec_match()
5036 targ_acc->idx = i; in bpf_core_spec_match()
5037 targ_acc->name = targ_name; in bpf_core_spec_match()
5038 targ_spec->len++; in bpf_core_spec_match()
5039 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; in bpf_core_spec_match()
5040 targ_spec->raw_len++; in bpf_core_spec_match()
5047 if (!core_relo_is_field_based(local_spec->relo_kind)) in bpf_core_spec_match()
5048 return -EINVAL; in bpf_core_spec_match()
5050 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) { in bpf_core_spec_match()
5051 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, in bpf_core_spec_match()
5054 return -EINVAL; in bpf_core_spec_match()
5056 if (local_acc->name) { in bpf_core_spec_match()
5057 matched = bpf_core_match_member(local_spec->btf, in bpf_core_spec_match()
5076 flex = is_flex_arr(targ_btf, targ_acc - 1, a); in bpf_core_spec_match()
5077 if (!flex && local_acc->idx >= a->nelems) in bpf_core_spec_match()
5079 if (!skip_mods_and_typedefs(targ_btf, a->type, in bpf_core_spec_match()
5081 return -EINVAL; in bpf_core_spec_match()
5085 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN) in bpf_core_spec_match()
5086 return -E2BIG; in bpf_core_spec_match()
5088 targ_acc->type_id = targ_id; in bpf_core_spec_match()
5089 targ_acc->idx = local_acc->idx; in bpf_core_spec_match()
5090 targ_acc->name = NULL; in bpf_core_spec_match()
5091 targ_spec->len++; in bpf_core_spec_match()
5092 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx; in bpf_core_spec_match()
5093 targ_spec->raw_len++; in bpf_core_spec_match()
5098 targ_spec->bit_offset += local_acc->idx * sz * 8; in bpf_core_spec_match()
5121 if (relo->kind == BPF_FIELD_EXISTS) { in bpf_core_calc_field_relo()
5127 return -EUCLEAN; /* request instruction poisoning */ in bpf_core_calc_field_relo()
5129 acc = &spec->spec[spec->len - 1]; in bpf_core_calc_field_relo()
5130 t = btf__type_by_id(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5133 if (!acc->name) { in bpf_core_calc_field_relo()
5134 if (relo->kind == BPF_FIELD_BYTE_OFFSET) { in bpf_core_calc_field_relo()
5135 *val = spec->bit_offset / 8; in bpf_core_calc_field_relo()
5137 sz = btf__resolve_size(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5139 return -EINVAL; in bpf_core_calc_field_relo()
5141 *type_id = acc->type_id; in bpf_core_calc_field_relo()
5142 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) { in bpf_core_calc_field_relo()
5143 sz = btf__resolve_size(spec->btf, acc->type_id); in bpf_core_calc_field_relo()
5145 return -EINVAL; in bpf_core_calc_field_relo()
5149 prog->name, relo->kind, relo->insn_off / 8); in bpf_core_calc_field_relo()
5150 return -EINVAL; in bpf_core_calc_field_relo()
5157 m = btf_members(t) + acc->idx; in bpf_core_calc_field_relo()
5158 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id); in bpf_core_calc_field_relo()
5159 bit_off = spec->bit_offset; in bpf_core_calc_field_relo()
5160 bit_sz = btf_member_bitfield_size(t, acc->idx); in bpf_core_calc_field_relo()
5164 byte_sz = mt->size; in bpf_core_calc_field_relo()
5167 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) { in bpf_core_calc_field_relo()
5169 /* bitfield can't be read with 64-bit read */ in bpf_core_calc_field_relo()
5171 prog->name, relo->kind, relo->insn_off / 8); in bpf_core_calc_field_relo()
5172 return -E2BIG; in bpf_core_calc_field_relo()
5178 sz = btf__resolve_size(spec->btf, field_type_id); in bpf_core_calc_field_relo()
5180 return -EINVAL; in bpf_core_calc_field_relo()
5182 byte_off = spec->bit_offset / 8; in bpf_core_calc_field_relo()
5193 switch (relo->kind) { in bpf_core_calc_field_relo()
5213 *val = 64 - (bit_off + bit_sz - byte_off * 8); in bpf_core_calc_field_relo()
5215 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8); in bpf_core_calc_field_relo()
5219 *val = 64 - bit_sz; in bpf_core_calc_field_relo()
5225 return -EOPNOTSUPP; in bpf_core_calc_field_relo()
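/* Editor's note: worked example (assuming little-endian) of the bitfield
 * math above. For a u32 bitfield with bit_off = 30 and bit_sz = 5 (it
 * straddles a 4-byte boundary): byte_sz = 4 gives byte_off = 0, and since
 * 30 + 5 - 0 > 32 the load window doubles to byte_sz = 8 (35 <= 64). The
 * emitted relocations are LSHIFT_U64 = 64 - 35 = 29 and RSHIFT_U64 =
 * 64 - 5 = 59, so BPF code extracts the field from the 8-byte load as: */
static unsigned long long bitfield_extract_demo(unsigned long long v)
{
    return (v << 29) >> 59;     /* isolates bits [30, 35) of v */
}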
5237 /* type-based relos return zero when target type is not found */ in bpf_core_calc_type_relo()
5243 switch (relo->kind) { in bpf_core_calc_type_relo()
5245 *val = spec->root_type_id; in bpf_core_calc_type_relo()
5251 sz = btf__resolve_size(spec->btf, spec->root_type_id); in bpf_core_calc_type_relo()
5253 return -EINVAL; in bpf_core_calc_type_relo()
5259 return -EOPNOTSUPP; in bpf_core_calc_type_relo()
5272 switch (relo->kind) { in bpf_core_calc_enumval_relo()
5278 return -EUCLEAN; /* request instruction poisoning */ in bpf_core_calc_enumval_relo()
5279 t = btf__type_by_id(spec->btf, spec->spec[0].type_id); in bpf_core_calc_enumval_relo()
5280 e = btf_enum(t) + spec->spec[0].idx; in bpf_core_calc_enumval_relo()
5281 *val = e->val; in bpf_core_calc_enumval_relo()
5284 return -EOPNOTSUPP; in bpf_core_calc_enumval_relo()
5304 * memory loads of pointers and integers; this is necessary for 32-bit
5328 int err = -EOPNOTSUPP; in bpf_core_calc_relo()
5330 res->orig_val = 0; in bpf_core_calc_relo()
5331 res->new_val = 0; in bpf_core_calc_relo()
5332 res->poison = false; in bpf_core_calc_relo()
5333 res->validate = true; in bpf_core_calc_relo()
5334 res->fail_memsz_adjust = false; in bpf_core_calc_relo()
5335 res->orig_sz = res->new_sz = 0; in bpf_core_calc_relo()
5336 res->orig_type_id = res->new_type_id = 0; in bpf_core_calc_relo()
5338 if (core_relo_is_field_based(relo->kind)) { in bpf_core_calc_relo()
5340 &res->orig_val, &res->orig_sz, in bpf_core_calc_relo()
5341 &res->orig_type_id, &res->validate); in bpf_core_calc_relo()
5343 &res->new_val, &res->new_sz, in bpf_core_calc_relo()
5344 &res->new_type_id, NULL); in bpf_core_calc_relo()
5351 res->fail_memsz_adjust = false; in bpf_core_calc_relo()
5352 if (res->orig_sz != res->new_sz) { in bpf_core_calc_relo()
5355 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id); in bpf_core_calc_relo()
5356 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id); in bpf_core_calc_relo()
5360 * - reading a 32-bit kernel pointer, while on BPF in bpf_core_calc_relo()
5361 * side pointers are always 64-bit; in this case in bpf_core_calc_relo()
5364 * zero-extended upper 32-bits; in bpf_core_calc_relo()
5365 * - reading unsigned integers, again due to in bpf_core_calc_relo()
5366 * zero-extension preserving the value correctly. in bpf_core_calc_relo()
5382 res->fail_memsz_adjust = true; in bpf_core_calc_relo()
5384 } else if (core_relo_is_type_based(relo->kind)) { in bpf_core_calc_relo()
5385 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val); in bpf_core_calc_relo()
5386 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val); in bpf_core_calc_relo()
5387 } else if (core_relo_is_enumval_based(relo->kind)) { in bpf_core_calc_relo()
5388 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val); in bpf_core_calc_relo()
5389 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val); in bpf_core_calc_relo()
5393 if (err == -EUCLEAN) { in bpf_core_calc_relo()
5395 res->poison = true; in bpf_core_calc_relo()
5397 } else if (err == -EOPNOTSUPP) { in bpf_core_calc_relo()
5399 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n", in bpf_core_calc_relo()
5400 prog->name, relo_idx, core_relo_kind_str(relo->kind), in bpf_core_calc_relo()
5401 relo->kind, relo->insn_off / 8); in bpf_core_calc_relo()
5415 prog->name, relo_idx, insn_idx); in bpf_core_poison_insn()
5416 insn->code = BPF_JMP | BPF_CALL; in bpf_core_poison_insn()
5417 insn->dst_reg = 0; in bpf_core_poison_insn()
5418 insn->src_reg = 0; in bpf_core_poison_insn()
5419 insn->off = 0; in bpf_core_poison_insn()
5424 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */ in bpf_core_poison_insn()
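/* Editor's note: the poisoned instruction as the kernel verifier sees it --
 * a call to nonexistent helper #195896080 (0xbad2310), turning a failed
 * relocation into a clear load-time error instead of silent misbehavior: */
static const struct bpf_insn poisoned_call_demo = {
    .code = BPF_JMP | BPF_CALL, /* helper call */
    .imm  = 0xbad2310,          /* = 195896080, no such helper id */
};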
5429 return insn->code == (BPF_LD | BPF_IMM | BPF_DW); in is_ldimm64()
5434 switch (BPF_SIZE(insn->code)) { in insn_bpf_size_to_bytes()
5439 default: return -1; in insn_bpf_size_to_bytes()
5450 default: return -1; in insn_bytes_to_bpf_size()
5459 * Expected insn->imm value is determined using relocation kind and local
5460 * spec, and is checked before patching instruction. If actual insn->imm value
5466 * 3. rX = <imm64> (load with 64-bit immediate value);
5481 if (relo->insn_off % BPF_INSN_SZ) in bpf_core_patch_insn()
5482 return -EINVAL; in bpf_core_patch_insn()
5483 insn_idx = relo->insn_off / BPF_INSN_SZ; in bpf_core_patch_insn()
5485 * program's frame of reference; (sub-)program code is not yet in bpf_core_patch_insn()
5486 * relocated, so it's enough to just subtract in-section offset in bpf_core_patch_insn()
5488 insn_idx = insn_idx - prog->sec_insn_off; in bpf_core_patch_insn()
5489 insn = &prog->insns[insn_idx]; in bpf_core_patch_insn()
5490 class = BPF_CLASS(insn->code); in bpf_core_patch_insn()
5492 if (res->poison) { in bpf_core_patch_insn()
5503 orig_val = res->orig_val; in bpf_core_patch_insn()
5504 new_val = res->new_val; in bpf_core_patch_insn()
5509 if (BPF_SRC(insn->code) != BPF_K) in bpf_core_patch_insn()
5510 return -EINVAL; in bpf_core_patch_insn()
5511 if (res->validate && insn->imm != orig_val) { in bpf_core_patch_insn()
5512 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n", in bpf_core_patch_insn()
5513 prog->name, relo_idx, in bpf_core_patch_insn()
5514 insn_idx, insn->imm, orig_val, new_val); in bpf_core_patch_insn()
5515 return -EINVAL; in bpf_core_patch_insn()
5517 orig_val = insn->imm; in bpf_core_patch_insn()
5518 insn->imm = new_val; in bpf_core_patch_insn()
5519 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n", in bpf_core_patch_insn()
5520 prog->name, relo_idx, insn_idx, in bpf_core_patch_insn()
5526 if (res->validate && insn->off != orig_val) { in bpf_core_patch_insn()
5527 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n", in bpf_core_patch_insn()
5528 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val); in bpf_core_patch_insn()
5529 return -EINVAL; in bpf_core_patch_insn()
5533 prog->name, relo_idx, insn_idx, new_val); in bpf_core_patch_insn()
5534 return -ERANGE; in bpf_core_patch_insn()
5536 if (res->fail_memsz_adjust) { in bpf_core_patch_insn()
5539 prog->name, relo_idx, insn_idx); in bpf_core_patch_insn()
5543 orig_val = insn->off; in bpf_core_patch_insn()
5544 insn->off = new_val; in bpf_core_patch_insn()
5545 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n", in bpf_core_patch_insn()
5546 prog->name, relo_idx, insn_idx, orig_val, new_val); in bpf_core_patch_insn()
5548 if (res->new_sz != res->orig_sz) { in bpf_core_patch_insn()
5552 if (insn_bytes_sz != res->orig_sz) { in bpf_core_patch_insn()
5554 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz); in bpf_core_patch_insn()
5555 return -EINVAL; in bpf_core_patch_insn()
5558 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz); in bpf_core_patch_insn()
5561 prog->name, relo_idx, insn_idx, res->new_sz); in bpf_core_patch_insn()
5562 return -EINVAL; in bpf_core_patch_insn()
5565 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code); in bpf_core_patch_insn()
5566 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n", in bpf_core_patch_insn()
5567 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz); in bpf_core_patch_insn()
5575 insn_idx + 1 >= prog->insns_cnt || in bpf_core_patch_insn()
5579 prog->name, relo_idx, insn_idx); in bpf_core_patch_insn()
5580 return -EINVAL; in bpf_core_patch_insn()
5584 if (res->validate && imm != orig_val) { in bpf_core_patch_insn()
5585 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n", in bpf_core_patch_insn()
5586 prog->name, relo_idx, in bpf_core_patch_insn()
5589 return -EINVAL; in bpf_core_patch_insn()
5593 insn[1].imm = 0; /* currently only 32-bit values are supported */ in bpf_core_patch_insn()
5594 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n", in bpf_core_patch_insn()
5595 prog->name, relo_idx, insn_idx, in bpf_core_patch_insn()
5601 prog->name, relo_idx, insn_idx, insn->code, in bpf_core_patch_insn()
5602 insn->src_reg, insn->dst_reg, insn->off, insn->imm); in bpf_core_patch_insn()
5603 return -EINVAL; in bpf_core_patch_insn()
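/* Editor's note: net effect of the two LDX/ST/STX adjustments above on a
 * hypothetical load instruction (offsets invented for illustration): a
 * BPF_FIELD_BYTE_OFFSET relocation rewrites insn->off, and a mem-size
 * adjustment swaps only the BPF_SIZE bits of insn->code: */
static void patch_load_demo(struct bpf_insn *insn)
{
    insn->off = 16;     /* field was at +8 in local BTF, +16 on target */
    insn->code = BPF_MODE(insn->code) | BPF_DW | BPF_CLASS(insn->code);
                        /* e.g. u32 -> u64 read; mode/class preserved */
}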
5610 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
5611 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
5621 type_id = spec->root_type_id; in bpf_core_dump_spec()
5622 t = btf__type_by_id(spec->btf, type_id); in bpf_core_dump_spec()
5623 s = btf__name_by_offset(spec->btf, t->name_off); in bpf_core_dump_spec()
5627 if (core_relo_is_type_based(spec->relo_kind)) in bpf_core_dump_spec()
5630 if (core_relo_is_enumval_based(spec->relo_kind)) { in bpf_core_dump_spec()
5631 t = skip_mods_and_typedefs(spec->btf, type_id, NULL); in bpf_core_dump_spec()
5632 e = btf_enum(t) + spec->raw_spec[0]; in bpf_core_dump_spec()
5633 s = btf__name_by_offset(spec->btf, e->name_off); in bpf_core_dump_spec()
5635 libbpf_print(level, "::%s = %u", s, e->val); in bpf_core_dump_spec()
5639 if (core_relo_is_field_based(spec->relo_kind)) { in bpf_core_dump_spec()
5640 for (i = 0; i < spec->len; i++) { in bpf_core_dump_spec()
5641 if (spec->spec[i].name) in bpf_core_dump_spec()
5642 libbpf_print(level, ".%s", spec->spec[i].name); in bpf_core_dump_spec()
5643 else if (i > 0 || spec->spec[i].idx > 0) in bpf_core_dump_spec()
5644 libbpf_print(level, "[%u]", spec->spec[i].idx); in bpf_core_dump_spec()
5648 for (i = 0; i < spec->raw_len; i++) in bpf_core_dump_spec()
5649 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]); in bpf_core_dump_spec()
5651 if (spec->bit_offset % 8) in bpf_core_dump_spec()
5653 spec->bit_offset / 8, spec->bit_offset % 8); in bpf_core_dump_spec()
5655 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8); in bpf_core_dump_spec()
5676 * CO-RE relocate single instruction.
5689 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
5701 * high-level spec accessors, meaning that all named fields should match,
5707 * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
5717 * efficient memory-wise and not significantly worse (if not better)
5718 * CPU-wise compared to prebuilding a map from all local type names to
5733 const void *type_key = u32_as_hash_key(relo->type_id); in bpf_core_apply_relo()
5742 local_id = relo->type_id; in bpf_core_apply_relo()
5745 return -EINVAL; in bpf_core_apply_relo()
5747 local_name = btf__name_by_offset(local_btf, local_type->name_off); in bpf_core_apply_relo()
5749 return -EINVAL; in bpf_core_apply_relo()
5751 spec_str = btf__name_by_offset(local_btf, relo->access_str_off); in bpf_core_apply_relo()
5753 return -EINVAL; in bpf_core_apply_relo()
5755 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec); in bpf_core_apply_relo()
5758 prog->name, relo_idx, local_id, btf_kind_str(local_type), in bpf_core_apply_relo()
5761 return -EINVAL; in bpf_core_apply_relo()
5764 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name, in bpf_core_apply_relo()
5765 relo_idx, core_relo_kind_str(relo->kind), relo->kind); in bpf_core_apply_relo()
5770 if (relo->kind == BPF_TYPE_ID_LOCAL) { in bpf_core_apply_relo()
5781 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind); in bpf_core_apply_relo()
5782 return -EOPNOTSUPP; in bpf_core_apply_relo()
5789 prog->name, relo_idx, local_id, btf_kind_str(local_type), in bpf_core_apply_relo()
5800 for (i = 0, j = 0; i < cand_ids->len; i++) { in bpf_core_apply_relo()
5801 cand_id = cand_ids->data[i]; in bpf_core_apply_relo()
5805 prog->name, relo_idx, i); in bpf_core_apply_relo()
5811 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name, in bpf_core_apply_relo()
5812 relo_idx, err == 0 ? "non-matching" : "matching", i); in bpf_core_apply_relo()
5831 prog->name, relo_idx, cand_spec.bit_offset, in bpf_core_apply_relo()
5833 return -EINVAL; in bpf_core_apply_relo()
5840 prog->name, relo_idx, in bpf_core_apply_relo()
5843 return -EINVAL; in bpf_core_apply_relo()
5846 cand_ids->data[j++] = cand_spec.root_type_id; in bpf_core_apply_relo()
5858 cand_ids->len = j; in bpf_core_apply_relo()
5873 prog->name, relo_idx); in bpf_core_apply_relo()
5886 prog->name, relo_idx, relo->insn_off, err); in bpf_core_apply_relo()
5887 return -EINVAL; in bpf_core_apply_relo()
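/* Editor's note: a struct "flavor" as consumed by the candidate matching
 * above (sketch). The triple-underscore suffix is dropped during matching
 * against kernel BTF, so this local type is relocated against the kernel's
 * plain 'struct task_struct': */
struct task_struct___demo_flavor {
    long state;     /* only the fields the program actually accesses */
};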
5906 if (obj->btf_ext->core_relo_info.len == 0) in bpf_object__relocate_core()
5912 targ_btf = obj->btf_vmlinux; in bpf_object__relocate_core()
5924 seg = &obj->btf_ext->core_relo_info; in bpf_object__relocate_core()
5926 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); in bpf_object__relocate_core()
5928 err = -EINVAL; in bpf_object__relocate_core()
5934 * prog->sec_idx to do a proper search by section index and in bpf_object__relocate_core()
5938 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate_core()
5939 if (strcmp(obj->programs[i].sec_name, sec_name) == 0) { in bpf_object__relocate_core()
5940 prog = &obj->programs[i]; in bpf_object__relocate_core()
5946 return -ENOENT; in bpf_object__relocate_core()
5948 sec_idx = prog->sec_idx; in bpf_object__relocate_core()
5950 pr_debug("sec '%s': found %d CO-RE relocations\n", in bpf_object__relocate_core()
5951 sec_name, sec->num_info); in bpf_object__relocate_core()
5954 insn_idx = rec->insn_off / BPF_INSN_SZ; in bpf_object__relocate_core()
5961 * This is similar to what the x86-64 linker does for relocations. in bpf_object__relocate_core()
5965 …pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subpro… in bpf_object__relocate_core()
5969 /* no need to apply CO-RE relocation if the program is in bpf_object__relocate_core()
5972 if (!prog->load) in bpf_object__relocate_core()
5975 err = bpf_core_apply_relo(prog, rec, i, obj->btf, in bpf_object__relocate_core()
5979 prog->name, i, err); in bpf_object__relocate_core()
5986 /* obj->btf_vmlinux is freed at the end of object load phase */ in bpf_object__relocate_core()
5987 if (targ_btf != obj->btf_vmlinux) in bpf_object__relocate_core()
5991 bpf_core_free_cands(entry->value); in bpf_object__relocate_core()
5999 * - map references;
6000 * - global variable references;
6001 * - extern references.
6008 for (i = 0; i < prog->nr_reloc; i++) { in bpf_object__relocate_data()
6009 struct reloc_desc *relo = &prog->reloc_desc[i]; in bpf_object__relocate_data()
6010 struct bpf_insn *insn = &prog->insns[relo->insn_idx]; in bpf_object__relocate_data()
6013 switch (relo->type) { in bpf_object__relocate_data()
6016 insn[0].imm = obj->maps[relo->map_idx].fd; in bpf_object__relocate_data()
6017 relo->processed = true; in bpf_object__relocate_data()
6021 insn[1].imm = insn[0].imm + relo->sym_off; in bpf_object__relocate_data()
6022 insn[0].imm = obj->maps[relo->map_idx].fd; in bpf_object__relocate_data()
6023 relo->processed = true; in bpf_object__relocate_data()
6026 ext = &obj->externs[relo->sym_off]; in bpf_object__relocate_data()
6027 if (ext->type == EXT_KCFG) { in bpf_object__relocate_data()
6029 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; in bpf_object__relocate_data()
6030 insn[1].imm = ext->kcfg.data_off; in bpf_object__relocate_data()
6032 if (ext->ksym.type_id) { /* typed ksyms */ in bpf_object__relocate_data()
6034 insn[0].imm = ext->ksym.vmlinux_btf_id; in bpf_object__relocate_data()
6036 insn[0].imm = (__u32)ext->ksym.addr; in bpf_object__relocate_data()
6037 insn[1].imm = ext->ksym.addr >> 32; in bpf_object__relocate_data()
6040 relo->processed = true; in bpf_object__relocate_data()
6047 prog->name, i, relo->type); in bpf_object__relocate_data()
6048 return -EINVAL; in bpf_object__relocate_data()
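/* Editor's note: resulting shape of a patched RELO_DATA ld_imm64 (sketch;
 * the src_reg assignment sits on a line elided from the listing above).
 * 'map_fd' and 'var_off' are placeholders for the data map's fd and the
 * variable's offset inside the map value: */
static void reloc_data_demo(struct bpf_insn insn[2], int map_fd, __u32 var_off)
{
    insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;  /* imm pair = (map fd, offset) */
    insn[1].imm = var_off;                   /* offset into map value */
    insn[0].imm = map_fd;                    /* map to load from */
}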
6069 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); in adjust_prog_btf_ext_info()
6071 return -EINVAL; in adjust_prog_btf_ext_info()
6072 if (strcmp(sec_name, prog->sec_name) != 0) in adjust_prog_btf_ext_info()
6078 if (insn_off < prog->sec_insn_off) in adjust_prog_btf_ext_info()
6080 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt) in adjust_prog_btf_ext_info()
6085 copy_end = rec + ext_info->rec_size; in adjust_prog_btf_ext_info()
6089 return -ENOENT; in adjust_prog_btf_ext_info()
6091 /* append func/line info of a given (sub-)program to the main in adjust_prog_btf_ext_info()
6094 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size; in adjust_prog_btf_ext_info()
6095 new_sz = old_sz + (copy_end - copy_start); in adjust_prog_btf_ext_info()
6098 return -ENOMEM; in adjust_prog_btf_ext_info()
6100 *prog_rec_cnt = new_sz / ext_info->rec_size; in adjust_prog_btf_ext_info()
6101 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start); in adjust_prog_btf_ext_info()
6103 /* Kernel instruction offsets are in units of 8-byte in adjust_prog_btf_ext_info()
6109 off_adj = prog->sub_insn_off - prog->sec_insn_off; in adjust_prog_btf_ext_info()
6112 for (; rec < rec_end; rec += ext_info->rec_size) { in adjust_prog_btf_ext_info()
6117 *prog_rec_sz = ext_info->rec_size; in adjust_prog_btf_ext_info()
6121 return -ENOENT; in adjust_prog_btf_ext_info()
6134 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC)) in reloc_prog_func_and_line_info()
6140 if (main_prog != prog && !main_prog->func_info) in reloc_prog_func_and_line_info()
6143 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info, in reloc_prog_func_and_line_info()
6144 &main_prog->func_info, in reloc_prog_func_and_line_info()
6145 &main_prog->func_info_cnt, in reloc_prog_func_and_line_info()
6146 &main_prog->func_info_rec_size); in reloc_prog_func_and_line_info()
6148 if (err != -ENOENT) { in reloc_prog_func_and_line_info()
6150 prog->name, err); in reloc_prog_func_and_line_info()
6153 if (main_prog->func_info) { in reloc_prog_func_and_line_info()
6158 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name); in reloc_prog_func_and_line_info()
6163 prog->name); in reloc_prog_func_and_line_info()
6168 if (main_prog != prog && !main_prog->line_info) in reloc_prog_func_and_line_info()
6171 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info, in reloc_prog_func_and_line_info()
6172 &main_prog->line_info, in reloc_prog_func_and_line_info()
6173 &main_prog->line_info_cnt, in reloc_prog_func_and_line_info()
6174 &main_prog->line_info_rec_size); in reloc_prog_func_and_line_info()
6176 if (err != -ENOENT) { in reloc_prog_func_and_line_info()
6178 prog->name, err); in reloc_prog_func_and_line_info()
6181 if (main_prog->line_info) { in reloc_prog_func_and_line_info()
6186 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name); in reloc_prog_func_and_line_info()
6191 prog->name); in reloc_prog_func_and_line_info()
6201 if (insn_idx == relo->insn_idx) in cmp_relo_by_insn_idx()
6203 return insn_idx < relo->insn_idx ? -1 : 1; in cmp_relo_by_insn_idx()
6208 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc, in find_prog_insn_relo()
6209 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx); in find_prog_insn_relo()
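/* The bsearch() above assumes prog->reloc_desc is sorted by insn_idx;
 * bpf_object__collect_relos() below establishes that invariant by
 * qsort()ing each program's relocations with cmp_relocs().
 */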
6226 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) { in bpf_object__reloc_code()
6227 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; in bpf_object__reloc_code()
6232 if (relo && relo->type != RELO_CALL) { in bpf_object__reloc_code()
6234 prog->name, insn_idx, relo->type); in bpf_object__reloc_code()
6235 return -LIBBPF_ERRNO__RELOC; in bpf_object__reloc_code()
6238 /* sub-program instruction index is a combination of in bpf_object__reloc_code()
6241 * call always has imm = -1, but for static functions in bpf_object__reloc_code()
6242 * relocation is against STT_SECTION and insn->imm in bpf_object__reloc_code()
6245 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1; in bpf_object__reloc_code()
6250 * offset necessary, insns->imm is relative to in bpf_object__reloc_code()
6253 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1; in bpf_object__reloc_code()
6256 /* we enforce that sub-programs should be in .text section */ in bpf_object__reloc_code()
6257 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx); in bpf_object__reloc_code()
6259 pr_warn("prog '%s': no .text section found yet sub-program call exists\n", in bpf_object__reloc_code()
6260 prog->name); in bpf_object__reloc_code()
6261 return -LIBBPF_ERRNO__RELOC; in bpf_object__reloc_code()
6267  * - append it at the end of main program's instructions block; in bpf_object__reloc_code()
6268  * - process it recursively, while current program is put on hold; in bpf_object__reloc_code()
6269  * - if that subprogram calls some other not yet processed in bpf_object__reloc_code()
6274 if (subprog->sub_insn_off == 0) { in bpf_object__reloc_code()
6275 subprog->sub_insn_off = main_prog->insns_cnt; in bpf_object__reloc_code()
6277 new_cnt = main_prog->insns_cnt + subprog->insns_cnt; in bpf_object__reloc_code()
6278 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns)); in bpf_object__reloc_code()
6280 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name); in bpf_object__reloc_code()
6281 return -ENOMEM; in bpf_object__reloc_code()
6283 main_prog->insns = insns; in bpf_object__reloc_code()
6284 main_prog->insns_cnt = new_cnt; in bpf_object__reloc_code()
6286 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns, in bpf_object__reloc_code()
6287 subprog->insns_cnt * sizeof(*insns)); in bpf_object__reloc_code()
6289 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n", in bpf_object__reloc_code()
6290 main_prog->name, subprog->insns_cnt, subprog->name); in bpf_object__reloc_code()
6297 /* main_prog->insns memory could have been re-allocated, so in bpf_object__reloc_code()
6300 insn = &main_prog->insns[prog->sub_insn_off + insn_idx]; in bpf_object__reloc_code()
6306 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1; in bpf_object__reloc_code()
6309 relo->processed = true; in bpf_object__reloc_code()
6312 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off); in bpf_object__reloc_code()
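/* Worked example for the imm computation above (numbers assumed): with
 * the relocated call sitting at main-program instruction 7
 * (prog->sub_insn_off == 0, insn_idx == 7) and the callee appended at
 * instruction 20 (subprog->sub_insn_off == 20), imm becomes
 * 20 - (0 + 7) - 1 = 12, because a BPF pseudo-call offset is relative
 * to the instruction following the call.
 */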
6319 * Relocate sub-program calls.
6321 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6322 * main prog) is processed separately. For each subprog (non-entry functions,
6331 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6347 * subprog->sub_insn_off as zero at all times and won't be appended to current
6356  * [ASCII-art diagram elided: only its box borders survived the search-line filter; it showed the final layout of each entry-point program with its appended sub-programs]
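/* An illustrative sketch of what the lost diagram conveyed (program
 * names are hypothetical): if mainA calls subB, and subB in turn calls
 * subC, mainA's final loadable image is laid out as
 *
 *	[ mainA insns | subB insns | subC insns ]
 *
 * while a mainB that calls only subC is laid out as
 *
 *	[ mainB insns | subC insns ]
 *
 * i.e. each entry-point program carries its own private copy of every
 * subprogram it transitively calls.
 */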
6408 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate_calls()
6409 subprog = &obj->programs[i]; in bpf_object__relocate_calls()
6413 subprog->sub_insn_off = 0; in bpf_object__relocate_calls()
6414 for (j = 0; j < subprog->nr_reloc; j++) in bpf_object__relocate_calls()
6415 if (subprog->reloc_desc[j].type == RELO_CALL) in bpf_object__relocate_calls()
6416 subprog->reloc_desc[j].processed = false; in bpf_object__relocate_calls()
6434 if (obj->btf_ext) { in bpf_object__relocate()
6437 pr_warn("failed to perform CO-RE relocations: %d\n", in bpf_object__relocate()
6442 /* relocate data references first for all programs and sub-programs, in bpf_object__relocate()
6444 * subprogram processing won't need to re-calculate any of them in bpf_object__relocate()
6446 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6447 prog = &obj->programs[i]; in bpf_object__relocate()
6451 prog->name, err); in bpf_object__relocate()
6460 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6461 prog = &obj->programs[i]; in bpf_object__relocate()
6462 /* sub-program's sub-calls are relocated within the context of in bpf_object__relocate()
6471 prog->name, err); in bpf_object__relocate()
6476 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__relocate()
6477 prog = &obj->programs[i]; in bpf_object__relocate()
6478 zfree(&prog->reloc_desc); in bpf_object__relocate()
6479 prog->nr_reloc = 0; in bpf_object__relocate()
6494 struct bpf_map *map = NULL, *targ_map; in bpf_object__collect_map_relos() local
6503 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf) in bpf_object__collect_map_relos()
6504 return -EINVAL; in bpf_object__collect_map_relos()
6505 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id); in bpf_object__collect_map_relos()
6507 return -EINVAL; in bpf_object__collect_map_relos()
6509 symbols = obj->efile.symbols; in bpf_object__collect_map_relos()
6510 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_map_relos()
6514 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_map_relos()
6519 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_map_relos()
6522 if (sym.st_shndx != obj->efile.btf_maps_shndx) { in bpf_object__collect_map_relos()
6523 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", in bpf_object__collect_map_relos()
6525 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_map_relos()
6532 for (j = 0; j < obj->nr_maps; j++) { in bpf_object__collect_map_relos()
6533 map = &obj->maps[j]; in bpf_object__collect_map_relos()
6534 if (map->sec_idx != obj->efile.btf_maps_shndx) in bpf_object__collect_map_relos()
6537 vi = btf_var_secinfos(sec) + map->btf_var_idx; in bpf_object__collect_map_relos()
6538 if (vi->offset <= rel.r_offset && in bpf_object__collect_map_relos()
6539 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) in bpf_object__collect_map_relos()
6542 if (j == obj->nr_maps) { in bpf_object__collect_map_relos()
6543 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n", in bpf_object__collect_map_relos()
6545 return -EINVAL; in bpf_object__collect_map_relos()
6548 if (!bpf_map_type__is_map_in_map(map->def.type)) in bpf_object__collect_map_relos()
6549 return -EINVAL; in bpf_object__collect_map_relos()
6550 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS && in bpf_object__collect_map_relos()
6551 map->def.key_size != sizeof(int)) { in bpf_object__collect_map_relos()
6552 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n", in bpf_object__collect_map_relos()
6553 i, map->name, sizeof(int)); in bpf_object__collect_map_relos()
6554 return -EINVAL; in bpf_object__collect_map_relos()
6559 return -ESRCH; in bpf_object__collect_map_relos()
6561 var = btf__type_by_id(obj->btf, vi->type); in bpf_object__collect_map_relos()
6562 def = skip_mods_and_typedefs(obj->btf, var->type, NULL); in bpf_object__collect_map_relos()
6564 return -EINVAL; in bpf_object__collect_map_relos()
6565 member = btf_members(def) + btf_vlen(def) - 1; in bpf_object__collect_map_relos()
6566 mname = btf__name_by_offset(obj->btf, member->name_off); in bpf_object__collect_map_relos()
6568 return -EINVAL; in bpf_object__collect_map_relos()
6570 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8; in bpf_object__collect_map_relos()
6571 if (rel.r_offset - vi->offset < moff) in bpf_object__collect_map_relos()
6572 return -EINVAL; in bpf_object__collect_map_relos()
6574 moff = rel.r_offset - vi->offset - moff; in bpf_object__collect_map_relos()
6579 return -EINVAL; in bpf_object__collect_map_relos()
6581 if (moff >= map->init_slots_sz) { in bpf_object__collect_map_relos()
6583 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz); in bpf_object__collect_map_relos()
6585 return -ENOMEM; in bpf_object__collect_map_relos()
6586 map->init_slots = tmp; in bpf_object__collect_map_relos()
6587 memset(map->init_slots + map->init_slots_sz, 0, in bpf_object__collect_map_relos()
6588 (new_sz - map->init_slots_sz) * host_ptr_sz); in bpf_object__collect_map_relos()
6589 map->init_slots_sz = new_sz; in bpf_object__collect_map_relos()
6591 map->init_slots[moff] = targ_map; in bpf_object__collect_map_relos()
6593 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n", in bpf_object__collect_map_relos()
6594 i, map->name, moff, name); in bpf_object__collect_map_relos()
6605 if (a->insn_idx != b->insn_idx) in cmp_relocs()
6606 return a->insn_idx < b->insn_idx ? -1 : 1; in cmp_relocs()
6609 if (a->type != b->type) in cmp_relocs()
6610 return a->type < b->type ? -1 : 1; in cmp_relocs()
6619 for (i = 0; i < obj->efile.nr_reloc_sects; i++) { in bpf_object__collect_relos()
6620 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr; in bpf_object__collect_relos()
6621 Elf_Data *data = obj->efile.reloc_sects[i].data; in bpf_object__collect_relos()
6622 int idx = shdr->sh_info; in bpf_object__collect_relos()
6624 if (shdr->sh_type != SHT_REL) { in bpf_object__collect_relos()
6626 return -LIBBPF_ERRNO__INTERNAL; in bpf_object__collect_relos()
6629 if (idx == obj->efile.st_ops_shndx) in bpf_object__collect_relos()
6631 else if (idx == obj->efile.btf_maps_shndx) in bpf_object__collect_relos()
6639 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__collect_relos()
6640 struct bpf_program *p = &obj->programs[i]; in bpf_object__collect_relos()
6642 if (!p->nr_reloc) in bpf_object__collect_relos()
6645 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); in bpf_object__collect_relos()
6652 if (BPF_CLASS(insn->code) == BPF_JMP && in insn_is_helper_call()
6653 BPF_OP(insn->code) == BPF_CALL && in insn_is_helper_call()
6654 BPF_SRC(insn->code) == BPF_K && in insn_is_helper_call()
6655 insn->src_reg == 0 && in insn_is_helper_call()
6656 insn->dst_reg == 0) { in insn_is_helper_call()
6657 *func_id = insn->imm; in insn_is_helper_call()
6665 struct bpf_insn *insn = prog->insns; in bpf_object__sanitize_prog()
6669 for (i = 0; i < prog->insns_cnt; i++, insn++) { in bpf_object__sanitize_prog()
6681 insn->imm = BPF_FUNC_probe_read; in bpf_object__sanitize_prog()
6686 insn->imm = BPF_FUNC_probe_read_str; in bpf_object__sanitize_prog()
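			/* Judging by the surviving case arms, these rewrites are a
			 * compatibility fallback: on kernels that predate the split
			 * bpf_probe_read_kernel()/bpf_probe_read_user() helpers (and
			 * their _str variants), the newer helper IDs are patched back
			 * to the generic bpf_probe_read()/bpf_probe_read_str() so the
			 * program still loads.
			 */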
6706 return -EINVAL; in load_program()
6709 load_attr.prog_type = prog->type; in load_program()
6711 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def && in load_program()
6712 prog->sec_def->is_exp_attach_type_optional) in load_program()
6715 load_attr.expected_attach_type = prog->expected_attach_type; in load_program()
6717 load_attr.name = prog->name; in load_program()
6721 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || in load_program()
6722 prog->type == BPF_PROG_TYPE_LSM) { in load_program()
6723 load_attr.attach_btf_id = prog->attach_btf_id; in load_program()
6724 } else if (prog->type == BPF_PROG_TYPE_TRACING || in load_program()
6725 prog->type == BPF_PROG_TYPE_EXT) { in load_program()
6726 load_attr.attach_prog_fd = prog->attach_prog_fd; in load_program()
6727 load_attr.attach_btf_id = prog->attach_btf_id; in load_program()
6730 load_attr.prog_ifindex = prog->prog_ifindex; in load_program()
6733 btf_fd = bpf_object__btf_fd(prog->obj); in load_program()
6736 load_attr.func_info = prog->func_info; in load_program()
6737 load_attr.func_info_rec_size = prog->func_info_rec_size; in load_program()
6738 load_attr.func_info_cnt = prog->func_info_cnt; in load_program()
6739 load_attr.line_info = prog->line_info; in load_program()
6740 load_attr.line_info_rec_size = prog->line_info_rec_size; in load_program()
6741 load_attr.line_info_cnt = prog->line_info_cnt; in load_program()
6743 load_attr.log_level = prog->log_level; in load_program()
6744 load_attr.prog_flags = prog->prog_flags; in load_program()
6750 return -ENOMEM; in load_program()
6761 if (prog->obj->rodata_map_idx >= 0 && in load_program()
6764 &prog->obj->maps[prog->obj->rodata_map_idx]; in load_program()
6768 pr_warn("prog '%s': failed to bind .rodata map: %s\n", in load_program()
6769 prog->name, cp); in load_program()
6786 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; in load_program()
6792 ret = -LIBBPF_ERRNO__VERIFY; in load_program()
6793 pr_warn("-- BEGIN DUMP LOG ---\n"); in load_program()
6795 pr_warn("-- END LOG --\n"); in load_program()
6799 ret = -LIBBPF_ERRNO__PROG2BIG; in load_program()
6809 ret = -LIBBPF_ERRNO__PROGTYPE; in load_program()
6825 if (prog->obj->loaded) { in bpf_program__load()
6826 pr_warn("prog '%s': can't load after object was loaded\n", prog->name); in bpf_program__load()
6827 return -EINVAL; in bpf_program__load()
6830 if ((prog->type == BPF_PROG_TYPE_TRACING || in bpf_program__load()
6831 prog->type == BPF_PROG_TYPE_LSM || in bpf_program__load()
6832 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { in bpf_program__load()
6836 prog->attach_btf_id = btf_id; in bpf_program__load()
6839 if (prog->instances.nr < 0 || !prog->instances.fds) { in bpf_program__load()
6840 if (prog->preprocessor) { in bpf_program__load()
6842 prog->name); in bpf_program__load()
6843 return -LIBBPF_ERRNO__INTERNAL; in bpf_program__load()
6846 prog->instances.fds = malloc(sizeof(int)); in bpf_program__load()
6847 if (!prog->instances.fds) { in bpf_program__load()
6849 return -ENOMEM; in bpf_program__load()
6851 prog->instances.nr = 1; in bpf_program__load()
6852 prog->instances.fds[0] = -1; in bpf_program__load()
6855 if (!prog->preprocessor) { in bpf_program__load()
6856 if (prog->instances.nr != 1) { in bpf_program__load()
6858 prog->name, prog->instances.nr); in bpf_program__load()
6860 err = load_program(prog, prog->insns, prog->insns_cnt, in bpf_program__load()
6863 prog->instances.fds[0] = fd; in bpf_program__load()
6867 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__load()
6869 bpf_program_prep_t preprocessor = prog->preprocessor; in bpf_program__load()
6872 err = preprocessor(prog, i, prog->insns, in bpf_program__load()
6873 prog->insns_cnt, &result); in bpf_program__load()
6876 i, prog->name); in bpf_program__load()
6882 i, prog->name); in bpf_program__load()
6883 prog->instances.fds[i] = -1; in bpf_program__load()
6885 *result.pfd = -1; in bpf_program__load()
6893 i, prog->name); in bpf_program__load()
6899 prog->instances.fds[i] = fd; in bpf_program__load()
6903 pr_warn("failed to load program '%s'\n", prog->name); in bpf_program__load()
6904 zfree(&prog->insns); in bpf_program__load()
6905 prog->insns_cnt = 0; in bpf_program__load()
6916 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__load_progs()
6917 prog = &obj->programs[i]; in bpf_object__load_progs()
6923 for (i = 0; i < obj->nr_programs; i++) { in bpf_object__load_progs()
6924 prog = &obj->programs[i]; in bpf_object__load_progs()
6927 if (!prog->load) { in bpf_object__load_progs()
6928 pr_debug("prog '%s': skipped loading\n", prog->name); in bpf_object__load_progs()
6931 prog->log_level |= log_level; in bpf_object__load_progs()
6932 err = bpf_program__load(prog, obj->license, obj->kern_version); in bpf_object__load_progs()
6942 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, in __bpf_object__open() argument
6953 path ? : "(mem buf)"); in __bpf_object__open()
6954 return ERR_PTR(-LIBBPF_ERRNO__LIBELF); in __bpf_object__open()
6958 return ERR_PTR(-EINVAL); in __bpf_object__open()
6963 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", in __bpf_object__open()
6968 path = obj_name; in __bpf_object__open()
6972 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name); in __bpf_object__open()
6978 obj->kconfig = strdup(kconfig); in __bpf_object__open()
6979 if (!obj->kconfig) { in __bpf_object__open()
6980 err = -ENOMEM; in __bpf_object__open()
6997 prog->sec_def = find_sec_def(prog->sec_name); in __bpf_object__open()
6998 if (!prog->sec_def) in __bpf_object__open()
7002 if (prog->sec_def->is_sleepable) in __bpf_object__open()
7003 prog->prog_flags |= BPF_F_SLEEPABLE; in __bpf_object__open()
7004 bpf_program__set_type(prog, prog->sec_def->prog_type); in __bpf_object__open()
7006 prog->sec_def->expected_attach_type); in __bpf_object__open()
7008 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || in __bpf_object__open()
7009 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) in __bpf_object__open()
7010 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); in __bpf_object__open()
7027 if (!attr->file) in __bpf_object__open_xattr()
7030 pr_debug("loading %s\n", attr->file); in __bpf_object__open_xattr()
7031 return __bpf_object__open(attr->file, NULL, 0, &opts); in __bpf_object__open_xattr()
7039 struct bpf_object *bpf_object__open(const char *path) in bpf_object__open() argument
7042 .file = path, in bpf_object__open()
7050 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) in bpf_object__open_file() argument
7052 if (!path) in bpf_object__open_file()
7053 return ERR_PTR(-EINVAL); in bpf_object__open_file()
7055 pr_debug("loading %s\n", path); in bpf_object__open_file()
7057 return __bpf_object__open(path, NULL, 0, opts); in bpf_object__open_file()
7065 return ERR_PTR(-EINVAL); in bpf_object__open_mem()
7076 /* wrong default, but backwards-compatible */ in bpf_object__open_buffer()
7080 /* returning NULL is wrong, but backwards-compatible */ in bpf_object__open_buffer()
7092 return -EINVAL; in bpf_object__unload()
7094 for (i = 0; i < obj->nr_maps; i++) { in bpf_object__unload()
7095 zclose(obj->maps[i].fd); in bpf_object__unload()
7096 if (obj->maps[i].st_ops) in bpf_object__unload()
7097 zfree(&obj->maps[i].st_ops->kern_vdata); in bpf_object__unload()
7100 for (i = 0; i < obj->nr_programs; i++) in bpf_object__unload()
7101 bpf_program__unload(&obj->programs[i]); in bpf_object__unload()
7115 return -ENOTSUP; in bpf_object__sanitize_maps()
7118 m->def.map_flags ^= BPF_F_MMAPABLE; in bpf_object__sanitize_maps()
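		/* The toggle above effectively clears BPF_F_MMAPABLE: internal
		 * (.data/.rodata/.bss-backed) maps are created with that flag
		 * set, and the elided guards appear to select exactly those maps
		 * on kernels without mmap()-able array support, so the XOR
		 * removes the flag.
		 */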
7134 err = -errno; in bpf_object__read_kallsyms_file()
7146 err = -EINVAL; in bpf_object__read_kallsyms_file()
7151 if (!ext || ext->type != EXT_KSYM) in bpf_object__read_kallsyms_file()
7154 if (ext->is_set && ext->ksym.addr != sym_addr) { in bpf_object__read_kallsyms_file()
7156 sym_name, ext->ksym.addr, sym_addr); in bpf_object__read_kallsyms_file()
7157 err = -EINVAL; in bpf_object__read_kallsyms_file()
7160 if (!ext->is_set) { in bpf_object__read_kallsyms_file()
7161 ext->is_set = true; in bpf_object__read_kallsyms_file()
7162 ext->ksym.addr = sym_addr; in bpf_object__read_kallsyms_file()
7177 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_ksyms_btf_id()
7183 ext = &obj->externs[i]; in bpf_object__resolve_ksyms_btf_id()
7184 if (ext->type != EXT_KSYM || !ext->ksym.type_id) in bpf_object__resolve_ksyms_btf_id()
7187 id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name, in bpf_object__resolve_ksyms_btf_id()
7191 ext->name); in bpf_object__resolve_ksyms_btf_id()
7192 return -ESRCH; in bpf_object__resolve_ksyms_btf_id()
7196 local_type_id = ext->ksym.type_id; in bpf_object__resolve_ksyms_btf_id()
7199 targ_var = btf__type_by_id(obj->btf_vmlinux, id); in bpf_object__resolve_ksyms_btf_id()
7200 targ_var_name = btf__name_by_offset(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7201 targ_var->name_off); in bpf_object__resolve_ksyms_btf_id()
7202 targ_type = skip_mods_and_typedefs(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7203 targ_var->type, in bpf_object__resolve_ksyms_btf_id()
7206 ret = bpf_core_types_are_compat(obj->btf, local_type_id, in bpf_object__resolve_ksyms_btf_id()
7207 obj->btf_vmlinux, targ_type_id); in bpf_object__resolve_ksyms_btf_id()
7212 local_type = btf__type_by_id(obj->btf, local_type_id); in bpf_object__resolve_ksyms_btf_id()
7213 local_name = btf__name_by_offset(obj->btf, in bpf_object__resolve_ksyms_btf_id()
7214 local_type->name_off); in bpf_object__resolve_ksyms_btf_id()
7215 targ_name = btf__name_by_offset(obj->btf_vmlinux, in bpf_object__resolve_ksyms_btf_id()
7216 targ_type->name_off); in bpf_object__resolve_ksyms_btf_id()
7219 ext->name, local_type_id, in bpf_object__resolve_ksyms_btf_id()
7222 return -EINVAL; in bpf_object__resolve_ksyms_btf_id()
7225 ext->is_set = true; in bpf_object__resolve_ksyms_btf_id()
7226 ext->ksym.vmlinux_btf_id = id; in bpf_object__resolve_ksyms_btf_id()
7228 ext->name, id, btf_kind_str(targ_var), targ_var_name); in bpf_object__resolve_ksyms_btf_id()
7242 if (obj->nr_extern == 0) in bpf_object__resolve_externs()
7245 if (obj->kconfig_map_idx >= 0) in bpf_object__resolve_externs()
7246 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; in bpf_object__resolve_externs()
7248 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7249 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7251 if (ext->type == EXT_KCFG && in bpf_object__resolve_externs()
7252 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { in bpf_object__resolve_externs()
7253 void *ext_val = kcfg_data + ext->kcfg.data_off; in bpf_object__resolve_externs()
7258 return -EINVAL; in bpf_object__resolve_externs()
7263 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); in bpf_object__resolve_externs()
7264 } else if (ext->type == EXT_KCFG && in bpf_object__resolve_externs()
7265 strncmp(ext->name, "CONFIG_", 7) == 0) { in bpf_object__resolve_externs()
7267 } else if (ext->type == EXT_KSYM) { in bpf_object__resolve_externs()
7268 if (ext->ksym.type_id) in bpf_object__resolve_externs()
7273 pr_warn("unrecognized extern '%s'\n", ext->name); in bpf_object__resolve_externs()
7274 return -EINVAL; in bpf_object__resolve_externs()
7280 return -EINVAL; in bpf_object__resolve_externs()
7282 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7283 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7284 if (ext->type == EXT_KCFG && !ext->is_set) { in bpf_object__resolve_externs()
7293 return -EINVAL; in bpf_object__resolve_externs()
7298 return -EINVAL; in bpf_object__resolve_externs()
7303 return -EINVAL; in bpf_object__resolve_externs()
7305 for (i = 0; i < obj->nr_extern; i++) { in bpf_object__resolve_externs()
7306 ext = &obj->externs[i]; in bpf_object__resolve_externs()
7308 if (!ext->is_set && !ext->is_weak) { in bpf_object__resolve_externs()
7309 pr_warn("extern %s (strong) not resolved\n", ext->name); in bpf_object__resolve_externs()
7310 return -ESRCH; in bpf_object__resolve_externs()
7311 } else if (!ext->is_set) { in bpf_object__resolve_externs()
7313 ext->name); in bpf_object__resolve_externs()
7326 return -EINVAL; in bpf_object__load_xattr()
7327 obj = attr->obj; in bpf_object__load_xattr()
7329 return -EINVAL; in bpf_object__load_xattr()
7331 if (obj->loaded) { in bpf_object__load_xattr()
7332 pr_warn("object '%s': load can't be attempted twice\n", obj->name); in bpf_object__load_xattr()
7333 return -EINVAL; in bpf_object__load_xattr()
7338 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig); in bpf_object__load_xattr()
7343 err = err ? : bpf_object__relocate(obj, attr->target_btf_path); in bpf_object__load_xattr()
7344 err = err ? : bpf_object__load_progs(obj, attr->log_level); in bpf_object__load_xattr()
7346 btf__free(obj->btf_vmlinux); in bpf_object__load_xattr()
7347 obj->btf_vmlinux = NULL; in bpf_object__load_xattr()
7349 	obj->loaded = true; /* doesn't matter if successful or not */ in bpf_object__load_xattr()
7356 /* unpin any maps that were auto-pinned during load */ in bpf_object__load_xattr()
7357 for (i = 0; i < obj->nr_maps; i++) in bpf_object__load_xattr()
7358 if (obj->maps[i].pinned && !obj->maps[i].reused) in bpf_object__load_xattr()
7359 bpf_map__unpin(&obj->maps[i], NULL); in bpf_object__load_xattr()
7362 pr_warn("failed to load object '%s'\n", obj->path); in bpf_object__load_xattr()
7375 static int make_parent_dir(const char *path) in make_parent_dir() argument
7381 dname = strdup(path); in make_parent_dir()
7383 return -ENOMEM; in make_parent_dir()
7387 err = -errno; in make_parent_dir()
7391 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in make_parent_dir()
7392 pr_warn("failed to mkdir %s: %s\n", path, cp); in make_parent_dir()
7397 static int check_path(const char *path) in check_path() argument
7404 if (path == NULL) in check_path()
7405 return -EINVAL; in check_path()
7407 dname = strdup(path); in check_path()
7409 return -ENOMEM; in check_path()
7415 err = -errno; in check_path()
7420 pr_warn("specified path %s is not on BPF FS\n", path); in check_path()
7421 err = -EINVAL; in check_path()
7427 int bpf_program__pin_instance(struct bpf_program *prog, const char *path, in bpf_program__pin_instance() argument
7433 err = make_parent_dir(path); in bpf_program__pin_instance()
7437 err = check_path(path); in bpf_program__pin_instance()
7443 return -EINVAL; in bpf_program__pin_instance()
7446 if (instance < 0 || instance >= prog->instances.nr) { in bpf_program__pin_instance()
7448 instance, prog->name, prog->instances.nr); in bpf_program__pin_instance()
7449 return -EINVAL; in bpf_program__pin_instance()
7452 if (bpf_obj_pin(prog->instances.fds[instance], path)) { in bpf_program__pin_instance()
7453 err = -errno; in bpf_program__pin_instance()
7458 pr_debug("pinned program '%s'\n", path); in bpf_program__pin_instance()
7463 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, in bpf_program__unpin_instance() argument
7468 err = check_path(path); in bpf_program__unpin_instance()
7474 return -EINVAL; in bpf_program__unpin_instance()
7477 if (instance < 0 || instance >= prog->instances.nr) { in bpf_program__unpin_instance()
7479 instance, prog->name, prog->instances.nr); in bpf_program__unpin_instance()
7480 return -EINVAL; in bpf_program__unpin_instance()
7483 err = unlink(path); in bpf_program__unpin_instance()
7485 return -errno; in bpf_program__unpin_instance()
7486 pr_debug("unpinned program '%s'\n", path); in bpf_program__unpin_instance()
7491 int bpf_program__pin(struct bpf_program *prog, const char *path) in bpf_program__pin() argument
7495 err = make_parent_dir(path); in bpf_program__pin()
7499 err = check_path(path); in bpf_program__pin()
7505 return -EINVAL; in bpf_program__pin()
7508 if (prog->instances.nr <= 0) { in bpf_program__pin()
7509 pr_warn("no instances of prog %s to pin\n", prog->name); in bpf_program__pin()
7510 return -EINVAL; in bpf_program__pin()
7513 if (prog->instances.nr == 1) { in bpf_program__pin()
7515 return bpf_program__pin_instance(prog, path, 0); in bpf_program__pin()
7518 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__pin()
7522 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); in bpf_program__pin()
7524 err = -EINVAL; in bpf_program__pin()
7527 err = -ENAMETOOLONG; in bpf_program__pin()
7539 for (i = i - 1; i >= 0; i--) { in bpf_program__pin()
7543 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); in bpf_program__pin()
7552 rmdir(path); in bpf_program__pin()
7557 int bpf_program__unpin(struct bpf_program *prog, const char *path) in bpf_program__unpin() argument
7561 err = check_path(path); in bpf_program__unpin()
7567 return -EINVAL; in bpf_program__unpin()
7570 if (prog->instances.nr <= 0) { in bpf_program__unpin()
7571 		pr_warn("no instances of prog %s to unpin\n", prog->name); in bpf_program__unpin()
7572 return -EINVAL; in bpf_program__unpin()
7575 if (prog->instances.nr == 1) { in bpf_program__unpin()
7577 return bpf_program__unpin_instance(prog, path, 0); in bpf_program__unpin()
7580 for (i = 0; i < prog->instances.nr; i++) { in bpf_program__unpin()
7584 len = snprintf(buf, PATH_MAX, "%s/%d", path, i); in bpf_program__unpin()
7586 return -EINVAL; in bpf_program__unpin()
7588 return -ENAMETOOLONG; in bpf_program__unpin()
7595 err = rmdir(path); in bpf_program__unpin()
7597 return -errno; in bpf_program__unpin()
7602 int bpf_map__pin(struct bpf_map *map, const char *path) in bpf_map__pin() argument
7607 if (map == NULL) { in bpf_map__pin()
7608 pr_warn("invalid map pointer\n"); in bpf_map__pin()
7609 return -EINVAL; in bpf_map__pin()
7612 if (map->pin_path) { in bpf_map__pin()
7613 if (path && strcmp(path, map->pin_path)) { in bpf_map__pin()
7614 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", in bpf_map__pin()
7615 bpf_map__name(map), map->pin_path, path); in bpf_map__pin()
7616 return -EINVAL; in bpf_map__pin()
7617 } else if (map->pinned) { in bpf_map__pin()
7618 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", in bpf_map__pin()
7619 bpf_map__name(map), map->pin_path); in bpf_map__pin()
7623 if (!path) { in bpf_map__pin()
7624 pr_warn("missing a path to pin map '%s' at\n", in bpf_map__pin()
7625 bpf_map__name(map)); in bpf_map__pin()
7626 return -EINVAL; in bpf_map__pin()
7627 } else if (map->pinned) { in bpf_map__pin()
7628 pr_warn("map '%s' already pinned\n", bpf_map__name(map)); in bpf_map__pin()
7629 return -EEXIST; in bpf_map__pin()
7632 map->pin_path = strdup(path); in bpf_map__pin()
7633 if (!map->pin_path) { in bpf_map__pin()
7634 err = -errno; in bpf_map__pin()
7639 err = make_parent_dir(map->pin_path); in bpf_map__pin()
7643 err = check_path(map->pin_path); in bpf_map__pin()
7647 if (bpf_obj_pin(map->fd, map->pin_path)) { in bpf_map__pin()
7648 err = -errno; in bpf_map__pin()
7652 map->pinned = true; in bpf_map__pin()
7653 pr_debug("pinned map '%s'\n", map->pin_path); in bpf_map__pin()
7658 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); in bpf_map__pin()
7659 pr_warn("failed to pin map: %s\n", cp); in bpf_map__pin()
7663 int bpf_map__unpin(struct bpf_map *map, const char *path) in bpf_map__unpin() argument
7667 if (map == NULL) { in bpf_map__unpin()
7668 pr_warn("invalid map pointer\n"); in bpf_map__unpin()
7669 return -EINVAL; in bpf_map__unpin()
7672 if (map->pin_path) { in bpf_map__unpin()
7673 if (path && strcmp(path, map->pin_path)) { in bpf_map__unpin()
7674 pr_warn("map '%s' already has pin path '%s' different from '%s'\n", in bpf_map__unpin()
7675 bpf_map__name(map), map->pin_path, path); in bpf_map__unpin()
7676 return -EINVAL; in bpf_map__unpin()
7678 path = map->pin_path; in bpf_map__unpin()
7679 } else if (!path) { in bpf_map__unpin()
7680 pr_warn("no path to unpin map '%s' from\n", in bpf_map__unpin()
7681 bpf_map__name(map)); in bpf_map__unpin()
7682 return -EINVAL; in bpf_map__unpin()
7685 err = check_path(path); in bpf_map__unpin()
7689 err = unlink(path); in bpf_map__unpin()
7691 return -errno; in bpf_map__unpin()
7693 map->pinned = false; in bpf_map__unpin()
7694 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); in bpf_map__unpin()
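/* Typical use of the pinning API above (illustrative only; /sys/fs/bpf
 * is the conventional bpffs mount point, not a requirement):
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *	int err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 *	...
 *	err = bpf_map__unpin(map, NULL); // NULL falls back to map->pin_path
 */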
7699 int bpf_map__set_pin_path(struct bpf_map *map, const char *path) in bpf_map__set_pin_path() argument
7703 if (path) { in bpf_map__set_pin_path()
7704 new = strdup(path); in bpf_map__set_pin_path()
7706 return -errno; in bpf_map__set_pin_path()
7709 free(map->pin_path); in bpf_map__set_pin_path()
7710 map->pin_path = new; in bpf_map__set_pin_path()
7714 const char *bpf_map__get_pin_path(const struct bpf_map *map) in bpf_map__get_pin_path() argument
7716 return map->pin_path; in bpf_map__get_pin_path()
7719 bool bpf_map__is_pinned(const struct bpf_map *map) in bpf_map__is_pinned() argument
7721 return map->pinned; in bpf_map__is_pinned()
7726 /* bpffs disallows periods in path names */ in sanitize_pin_path()
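/* The body of sanitize_pin_path() is elided by the line filter; a
 * minimal sketch of such sanitization, assuming (as the comment above
 * implies) that offending periods are mapped to a character bpffs does
 * accept (strchr() is from <string.h>):
 */
static void sanitize_pin_path_sketch(char *s)
{
	/* rewrite every '.' so the bpffs path component stays legal */
	while ((s = strchr(s, '.')))
		*s = '_';
}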
7734 int bpf_object__pin_maps(struct bpf_object *obj, const char *path) in bpf_object__pin_maps() argument
7736 struct bpf_map *map; in bpf_object__pin_maps() local
7740 return -ENOENT; in bpf_object__pin_maps()
7742 if (!obj->loaded) { in bpf_object__pin_maps()
7744 return -ENOENT; in bpf_object__pin_maps()
7747 bpf_object__for_each_map(map, obj) { in bpf_object__pin_maps()
7751 if (path) { in bpf_object__pin_maps()
7754 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__pin_maps()
7755 bpf_map__name(map)); in bpf_object__pin_maps()
7757 err = -EINVAL; in bpf_object__pin_maps()
7760 err = -ENAMETOOLONG; in bpf_object__pin_maps()
7765 } else if (!map->pin_path) { in bpf_object__pin_maps()
7769 err = bpf_map__pin(map, pin_path); in bpf_object__pin_maps()
7777 while ((map = bpf_map__prev(map, obj))) { in bpf_object__pin_maps()
7778 if (!map->pin_path) in bpf_object__pin_maps()
7781 bpf_map__unpin(map, NULL); in bpf_object__pin_maps()
7787 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) in bpf_object__unpin_maps() argument
7789 struct bpf_map *map; in bpf_object__unpin_maps() local
7793 return -ENOENT; in bpf_object__unpin_maps()
7795 bpf_object__for_each_map(map, obj) { in bpf_object__unpin_maps()
7799 if (path) { in bpf_object__unpin_maps()
7802 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__unpin_maps()
7803 bpf_map__name(map)); in bpf_object__unpin_maps()
7805 return -EINVAL; in bpf_object__unpin_maps()
7807 return -ENAMETOOLONG; in bpf_object__unpin_maps()
7810 } else if (!map->pin_path) { in bpf_object__unpin_maps()
7814 err = bpf_map__unpin(map, pin_path); in bpf_object__unpin_maps()
7822 int bpf_object__pin_programs(struct bpf_object *obj, const char *path) in bpf_object__pin_programs() argument
7828 return -ENOENT; in bpf_object__pin_programs()
7830 if (!obj->loaded) { in bpf_object__pin_programs()
7832 return -ENOENT; in bpf_object__pin_programs()
7839 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__pin_programs()
7840 prog->pin_name); in bpf_object__pin_programs()
7842 err = -EINVAL; in bpf_object__pin_programs()
7845 err = -ENAMETOOLONG; in bpf_object__pin_programs()
7861 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__pin_programs()
7862 prog->pin_name); in bpf_object__pin_programs()
7874 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path) in bpf_object__unpin_programs() argument
7880 return -ENOENT; in bpf_object__unpin_programs()
7886 len = snprintf(buf, PATH_MAX, "%s/%s", path, in bpf_object__unpin_programs()
7887 prog->pin_name); in bpf_object__unpin_programs()
7889 return -EINVAL; in bpf_object__unpin_programs()
7891 return -ENAMETOOLONG; in bpf_object__unpin_programs()
7901 int bpf_object__pin(struct bpf_object *obj, const char *path) in bpf_object__pin() argument
7905 err = bpf_object__pin_maps(obj, path); in bpf_object__pin()
7909 err = bpf_object__pin_programs(obj, path); in bpf_object__pin()
7911 bpf_object__unpin_maps(obj, path); in bpf_object__pin()
7918 static void bpf_map__destroy(struct bpf_map *map) in bpf_map__destroy() argument
7920 if (map->clear_priv) in bpf_map__destroy()
7921 map->clear_priv(map, map->priv); in bpf_map__destroy()
7922 map->priv = NULL; in bpf_map__destroy()
7923 map->clear_priv = NULL; in bpf_map__destroy()
7925 if (map->inner_map) { in bpf_map__destroy()
7926 bpf_map__destroy(map->inner_map); in bpf_map__destroy()
7927 zfree(&map->inner_map); in bpf_map__destroy()
7930 zfree(&map->init_slots); in bpf_map__destroy()
7931 map->init_slots_sz = 0; in bpf_map__destroy()
7933 if (map->mmaped) { in bpf_map__destroy()
7934 munmap(map->mmaped, bpf_map_mmap_sz(map)); in bpf_map__destroy()
7935 map->mmaped = NULL; in bpf_map__destroy()
7938 if (map->st_ops) { in bpf_map__destroy()
7939 zfree(&map->st_ops->data); in bpf_map__destroy()
7940 zfree(&map->st_ops->progs); in bpf_map__destroy()
7941 zfree(&map->st_ops->kern_func_off); in bpf_map__destroy()
7942 zfree(&map->st_ops); in bpf_map__destroy()
7945 zfree(&map->name); in bpf_map__destroy()
7946 zfree(&map->pin_path); in bpf_map__destroy()
7948 if (map->fd >= 0) in bpf_map__destroy()
7949 zclose(map->fd); in bpf_map__destroy()
7959 if (obj->clear_priv) in bpf_object__close()
7960 obj->clear_priv(obj, obj->priv); in bpf_object__close()
7964 btf__free(obj->btf); in bpf_object__close()
7965 btf_ext__free(obj->btf_ext); in bpf_object__close()
7967 for (i = 0; i < obj->nr_maps; i++) in bpf_object__close()
7968 bpf_map__destroy(&obj->maps[i]); in bpf_object__close()
7970 zfree(&obj->kconfig); in bpf_object__close()
7971 zfree(&obj->externs); in bpf_object__close()
7972 obj->nr_extern = 0; in bpf_object__close()
7974 zfree(&obj->maps); in bpf_object__close()
7975 obj->nr_maps = 0; in bpf_object__close()
7977 if (obj->programs && obj->nr_programs) { in bpf_object__close()
7978 for (i = 0; i < obj->nr_programs; i++) in bpf_object__close()
7979 bpf_program__exit(&obj->programs[i]); in bpf_object__close()
7981 zfree(&obj->programs); in bpf_object__close()
7983 list_del(&obj->list); in bpf_object__close()
8000 if (&next->list == &bpf_objects_list) in bpf_object__next()
8008 return obj ? obj->name : ERR_PTR(-EINVAL); in bpf_object__name()
8013 return obj ? obj->kern_version : 0; in bpf_object__kversion()
8018 return obj ? obj->btf : NULL; in bpf_object__btf()
8023 return obj->btf ? btf__fd(obj->btf) : -1; in bpf_object__btf_fd()
8029 if (obj->priv && obj->clear_priv) in bpf_object__set_priv()
8030 obj->clear_priv(obj, obj->priv); in bpf_object__set_priv()
8032 obj->priv = priv; in bpf_object__set_priv()
8033 obj->clear_priv = clear_priv; in bpf_object__set_priv()
8039 return obj ? obj->priv : ERR_PTR(-EINVAL); in bpf_object__priv()
8046 size_t nr_programs = obj->nr_programs; in __bpf_program__iter()
8054 return forward ? &obj->programs[0] : in __bpf_program__iter()
8055 &obj->programs[nr_programs - 1]; in __bpf_program__iter()
8057 if (p->obj != obj) { in __bpf_program__iter()
8062 idx = (p - obj->programs) + (forward ? 1 : -1); in __bpf_program__iter()
8063 if (idx >= obj->nr_programs || idx < 0) in __bpf_program__iter()
8065 return &obj->programs[idx]; in __bpf_program__iter()
8095 if (prog->priv && prog->clear_priv) in bpf_program__set_priv()
8096 prog->clear_priv(prog, prog->priv); in bpf_program__set_priv()
8098 prog->priv = priv; in bpf_program__set_priv()
8099 prog->clear_priv = clear_priv; in bpf_program__set_priv()
8105 return prog ? prog->priv : ERR_PTR(-EINVAL); in bpf_program__priv()
8110 prog->prog_ifindex = ifindex; in bpf_program__set_ifindex()
8115 return prog->name; in bpf_program__name()
8120 return prog->sec_name; in bpf_program__section_name()
8127 title = prog->sec_name; in bpf_program__title()
8132 return ERR_PTR(-ENOMEM); in bpf_program__title()
8141 return prog->load; in bpf_program__autoload()
8146 if (prog->obj->loaded) in bpf_program__set_autoload()
8147 return -EINVAL; in bpf_program__set_autoload()
8149 prog->load = autoload; in bpf_program__set_autoload()
8160 return prog->insns_cnt * BPF_INSN_SZ; in bpf_program__size()
8169 return -EINVAL; in bpf_program__set_prep()
8171 if (prog->instances.nr > 0 || prog->instances.fds) { in bpf_program__set_prep()
8172 pr_warn("Can't set pre-processor after loading\n"); in bpf_program__set_prep()
8173 return -EINVAL; in bpf_program__set_prep()
8179 return -ENOMEM; in bpf_program__set_prep()
8182 	/* fill all fds with -1 */ in bpf_program__set_prep()
8183 memset(instances_fds, -1, sizeof(int) * nr_instances); in bpf_program__set_prep()
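	/* memset() with -1 works byte-wise, and -1 is all-ones in every
	 * byte, so each 4-byte int in the array reads back as -1 ("no FD
	 * assigned yet").
	 */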
8185 prog->instances.nr = nr_instances; in bpf_program__set_prep()
8186 prog->instances.fds = instances_fds; in bpf_program__set_prep()
8187 prog->preprocessor = prep; in bpf_program__set_prep()
8196 return -EINVAL; in bpf_program__nth_fd()
8198 if (n >= prog->instances.nr || n < 0) { in bpf_program__nth_fd()
8200 n, prog->name, prog->instances.nr); in bpf_program__nth_fd()
8201 return -EINVAL; in bpf_program__nth_fd()
8204 fd = prog->instances.fds[n]; in bpf_program__nth_fd()
8207 n, prog->name); in bpf_program__nth_fd()
8208 return -ENOENT; in bpf_program__nth_fd()
8216 return prog->type; in bpf_program__get_type()
8221 prog->type = type; in bpf_program__set_type()
8227 return prog ? (prog->type == type) : false; in bpf_program__is_type()
8234 return -EINVAL; \
8261 return prog->expected_attach_type; in bpf_program__get_expected_attach_type()
8267 prog->expected_attach_type = type; in bpf_program__set_expected_attach_type()
8274 .len = sizeof(string) - 1, \
8304 .len = sizeof(sec_pfx) - 1, \
8519 return -EINVAL; in libbpf_prog_type_by_name()
8523 *prog_type = sec_def->prog_type; in libbpf_prog_type_by_name()
8524 *expected_attach_type = sec_def->expected_attach_type; in libbpf_prog_type_by_name()
8535 return -ESRCH; in libbpf_prog_type_by_name()
8541 struct bpf_map *map; in find_struct_ops_map_by_offset() local
8544 for (i = 0; i < obj->nr_maps; i++) { in find_struct_ops_map_by_offset()
8545 map = &obj->maps[i]; in find_struct_ops_map_by_offset()
8546 if (!bpf_map__is_struct_ops(map)) in find_struct_ops_map_by_offset()
8548 if (map->sec_offset <= offset && in find_struct_ops_map_by_offset()
8549 offset - map->sec_offset < map->def.value_size) in find_struct_ops_map_by_offset()
8550 return map; in find_struct_ops_map_by_offset()
8556 /* Collect the reloc from ELF and populate the st_ops->progs[] */
8565 struct bpf_map *map; in bpf_object__collect_st_ops_relos() local
8574 symbols = obj->efile.symbols; in bpf_object__collect_st_ops_relos()
8575 btf = obj->btf; in bpf_object__collect_st_ops_relos()
8576 nrels = shdr->sh_size / shdr->sh_entsize; in bpf_object__collect_st_ops_relos()
8580 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8586 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8590 map = find_struct_ops_map_by_offset(obj, rel.r_offset); in bpf_object__collect_st_ops_relos()
8591 if (!map) { in bpf_object__collect_st_ops_relos()
8592 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n", in bpf_object__collect_st_ops_relos()
8594 return -EINVAL; in bpf_object__collect_st_ops_relos()
8597 moff = rel.r_offset - map->sec_offset; in bpf_object__collect_st_ops_relos()
8599 st_ops = map->st_ops; in bpf_object__collect_st_ops_relos()
8600 …pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %z… in bpf_object__collect_st_ops_relos()
8601 map->name, in bpf_object__collect_st_ops_relos()
8605 map->sec_offset, sym.st_name, name); in bpf_object__collect_st_ops_relos()
8608 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n", in bpf_object__collect_st_ops_relos()
8609 map->name, (size_t)rel.r_offset, shdr_idx); in bpf_object__collect_st_ops_relos()
8610 return -LIBBPF_ERRNO__RELOC; in bpf_object__collect_st_ops_relos()
8614 map->name, (unsigned long long)sym.st_value); in bpf_object__collect_st_ops_relos()
8615 return -LIBBPF_ERRNO__FORMAT; in bpf_object__collect_st_ops_relos()
8619 member = find_member_by_offset(st_ops->type, moff * 8); in bpf_object__collect_st_ops_relos()
8622 map->name, moff); in bpf_object__collect_st_ops_relos()
8623 return -EINVAL; in bpf_object__collect_st_ops_relos()
8625 member_idx = member - btf_members(st_ops->type); in bpf_object__collect_st_ops_relos()
8626 name = btf__name_by_offset(btf, member->name_off); in bpf_object__collect_st_ops_relos()
8628 if (!resolve_func_ptr(btf, member->type, NULL)) { in bpf_object__collect_st_ops_relos()
8630 map->name, name); in bpf_object__collect_st_ops_relos()
8631 return -EINVAL; in bpf_object__collect_st_ops_relos()
8637 map->name, shdr_idx, name); in bpf_object__collect_st_ops_relos()
8638 return -EINVAL; in bpf_object__collect_st_ops_relos()
8641 if (prog->type == BPF_PROG_TYPE_UNSPEC) { in bpf_object__collect_st_ops_relos()
8644 sec_def = find_sec_def(prog->sec_name); in bpf_object__collect_st_ops_relos()
8646 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) { in bpf_object__collect_st_ops_relos()
8648 prog->type = sec_def->prog_type; in bpf_object__collect_st_ops_relos()
8652 prog->type = BPF_PROG_TYPE_STRUCT_OPS; in bpf_object__collect_st_ops_relos()
8653 prog->attach_btf_id = st_ops->type_id; in bpf_object__collect_st_ops_relos()
8654 prog->expected_attach_type = member_idx; in bpf_object__collect_st_ops_relos()
8655 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || in bpf_object__collect_st_ops_relos()
8656 prog->attach_btf_id != st_ops->type_id || in bpf_object__collect_st_ops_relos()
8657 prog->expected_attach_type != member_idx) { in bpf_object__collect_st_ops_relos()
8660 st_ops->progs[member_idx] = prog; in bpf_object__collect_st_ops_relos()
8667 map->name, prog->name, prog->sec_name, prog->type, in bpf_object__collect_st_ops_relos()
8668 prog->attach_btf_id, prog->expected_attach_type, name); in bpf_object__collect_st_ops_relos()
8669 return -EINVAL; in bpf_object__collect_st_ops_relos()
8690 return -ENAMETOOLONG; in find_btf_by_prefix_kind()
8726 return -EINVAL; in libbpf_find_vmlinux_btf_id()
8739 int err = -EINVAL; in libbpf_find_prog_btf_id()
8745 return -EINVAL; in libbpf_find_prog_btf_id()
8747 info = &info_linear->info; in libbpf_find_prog_btf_id()
8748 if (!info->btf_id) { in libbpf_find_prog_btf_id()
8752 if (btf__get_from_id(info->btf_id, &btf)) { in libbpf_find_prog_btf_id()
8769 enum bpf_attach_type attach_type = prog->expected_attach_type; in libbpf_find_attach_btf_id()
8770 __u32 attach_prog_fd = prog->attach_prog_fd; in libbpf_find_attach_btf_id()
8771 const char *name = prog->sec_name; in libbpf_find_attach_btf_id()
8775 return -EINVAL; in libbpf_find_attach_btf_id()
8786 err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux, in libbpf_find_attach_btf_id()
8792 return -ESRCH; in libbpf_find_attach_btf_id()
8802 return -EINVAL; in libbpf_attach_type_by_name()
8808 return -EINVAL; in libbpf_attach_type_by_name()
8819 return -EINVAL; in libbpf_attach_type_by_name()
8822 int bpf_map__fd(const struct bpf_map *map) in bpf_map__fd() argument
8824 return map ? map->fd : -EINVAL; in bpf_map__fd()
8827 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map) in bpf_map__def() argument
8829 return map ? &map->def : ERR_PTR(-EINVAL); in bpf_map__def()
8832 const char *bpf_map__name(const struct bpf_map *map) in bpf_map__name() argument
8834 return map ? map->name : NULL; in bpf_map__name()
8837 enum bpf_map_type bpf_map__type(const struct bpf_map *map) in bpf_map__type() argument
8839 return map->def.type; in bpf_map__type()
8842 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) in bpf_map__set_type() argument
8844 if (map->fd >= 0) in bpf_map__set_type()
8845 return -EBUSY; in bpf_map__set_type()
8846 map->def.type = type; in bpf_map__set_type()
8850 __u32 bpf_map__map_flags(const struct bpf_map *map) in bpf_map__map_flags() argument
8852 return map->def.map_flags; in bpf_map__map_flags()
8855 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) in bpf_map__set_map_flags() argument
8857 if (map->fd >= 0) in bpf_map__set_map_flags()
8858 return -EBUSY; in bpf_map__set_map_flags()
8859 map->def.map_flags = flags; in bpf_map__set_map_flags()
8863 __u32 bpf_map__numa_node(const struct bpf_map *map) in bpf_map__numa_node() argument
8865 return map->numa_node; in bpf_map__numa_node()
8868 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) in bpf_map__set_numa_node() argument
8870 if (map->fd >= 0) in bpf_map__set_numa_node()
8871 return -EBUSY; in bpf_map__set_numa_node()
8872 map->numa_node = numa_node; in bpf_map__set_numa_node()
8876 __u32 bpf_map__key_size(const struct bpf_map *map) in bpf_map__key_size() argument
8878 return map->def.key_size; in bpf_map__key_size()
8881 int bpf_map__set_key_size(struct bpf_map *map, __u32 size) in bpf_map__set_key_size() argument
8883 if (map->fd >= 0) in bpf_map__set_key_size()
8884 return -EBUSY; in bpf_map__set_key_size()
8885 map->def.key_size = size; in bpf_map__set_key_size()
8889 __u32 bpf_map__value_size(const struct bpf_map *map) in bpf_map__value_size() argument
8891 return map->def.value_size; in bpf_map__value_size()
8894 int bpf_map__set_value_size(struct bpf_map *map, __u32 size) in bpf_map__set_value_size() argument
8896 if (map->fd >= 0) in bpf_map__set_value_size()
8897 return -EBUSY; in bpf_map__set_value_size()
8898 map->def.value_size = size; in bpf_map__set_value_size()
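/* Pattern behind the -EBUSY checks in the setters above and below:
 * map->fd >= 0 means the map has already been created in the kernel,
 * at which point its definition can no longer be changed.
 */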
8902 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) in bpf_map__btf_key_type_id() argument
8904 return map ? map->btf_key_type_id : 0; in bpf_map__btf_key_type_id()
8907 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) in bpf_map__btf_value_type_id() argument
8909 return map ? map->btf_value_type_id : 0; in bpf_map__btf_value_type_id()
8912 int bpf_map__set_priv(struct bpf_map *map, void *priv, in bpf_map__set_priv() argument
8915 if (!map) in bpf_map__set_priv()
8916 return -EINVAL; in bpf_map__set_priv()
8918 if (map->priv) { in bpf_map__set_priv()
8919 if (map->clear_priv) in bpf_map__set_priv()
8920 map->clear_priv(map, map->priv); in bpf_map__set_priv()
8923 map->priv = priv; in bpf_map__set_priv()
8924 map->clear_priv = clear_priv; in bpf_map__set_priv()
8928 void *bpf_map__priv(const struct bpf_map *map) in bpf_map__priv() argument
8930 return map ? map->priv : ERR_PTR(-EINVAL); in bpf_map__priv()
8933 int bpf_map__set_initial_value(struct bpf_map *map, in bpf_map__set_initial_value() argument
8936 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || in bpf_map__set_initial_value()
8937 size != map->def.value_size || map->fd >= 0) in bpf_map__set_initial_value()
8938 return -EINVAL; in bpf_map__set_initial_value()
8940 memcpy(map->mmaped, data, size); in bpf_map__set_initial_value()
8944 bool bpf_map__is_offload_neutral(const struct bpf_map *map) in bpf_map__is_offload_neutral() argument
8946 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; in bpf_map__is_offload_neutral()
8949 bool bpf_map__is_internal(const struct bpf_map *map) in bpf_map__is_internal() argument
8951 return map->libbpf_type != LIBBPF_MAP_UNSPEC; in bpf_map__is_internal()
8954 __u32 bpf_map__ifindex(const struct bpf_map *map) in bpf_map__ifindex() argument
8956 return map->map_ifindex; in bpf_map__ifindex()
8959 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) in bpf_map__set_ifindex() argument
8961 if (map->fd >= 0) in bpf_map__set_ifindex()
8962 return -EBUSY; in bpf_map__set_ifindex()
8963 map->map_ifindex = ifindex; in bpf_map__set_ifindex()
8967 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) in bpf_map__set_inner_map_fd() argument
8969 if (!bpf_map_type__is_map_in_map(map->def.type)) { in bpf_map__set_inner_map_fd()
8970 pr_warn("error: unsupported map type\n"); in bpf_map__set_inner_map_fd()
8971 return -EINVAL; in bpf_map__set_inner_map_fd()
8973 if (map->inner_map_fd != -1) { in bpf_map__set_inner_map_fd()
8975 return -EINVAL; in bpf_map__set_inner_map_fd()
8977 map->inner_map_fd = fd; in bpf_map__set_inner_map_fd()
8987 if (!obj || !obj->maps) in __bpf_map__iter()
8990 s = obj->maps; in __bpf_map__iter()
8991 e = obj->maps + obj->nr_maps; in __bpf_map__iter()
8994 pr_warn("error in %s: map handler doesn't belong to object\n", in __bpf_map__iter()
8999 idx = (m - obj->maps) + i; in __bpf_map__iter()
9000 if (idx >= obj->nr_maps || idx < 0) in __bpf_map__iter()
9002 return &obj->maps[idx]; in __bpf_map__iter()
9009 return obj->maps; in bpf_map__next()
9018 if (!obj->nr_maps) in bpf_map__prev()
9020 return obj->maps + obj->nr_maps - 1; in bpf_map__prev()
9023 return __bpf_map__iter(next, obj, -1); in bpf_map__prev()
9032 if (pos->name && !strcmp(pos->name, name)) in bpf_object__find_map_by_name()
9047 return ERR_PTR(-ENOTSUP); in bpf_object__find_map_by_offset()
9074 struct bpf_map *map; in bpf_prog_load_xattr() local
9078 return -EINVAL; in bpf_prog_load_xattr()
9079 if (!attr->file) in bpf_prog_load_xattr()
9080 return -EINVAL; in bpf_prog_load_xattr()
9082 open_attr.file = attr->file; in bpf_prog_load_xattr()
9083 open_attr.prog_type = attr->prog_type; in bpf_prog_load_xattr()
9087 return -ENOENT; in bpf_prog_load_xattr()
9090 enum bpf_attach_type attach_type = attr->expected_attach_type; in bpf_prog_load_xattr()
9093 * attr->prog_type, if specified, as an override to whatever in bpf_prog_load_xattr()
9096 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) { in bpf_prog_load_xattr()
9097 bpf_program__set_type(prog, attr->prog_type); in bpf_prog_load_xattr()
9107 return -EINVAL; in bpf_prog_load_xattr()
9110 prog->prog_ifindex = attr->ifindex; in bpf_prog_load_xattr()
9111 prog->log_level = attr->log_level; in bpf_prog_load_xattr()
9112 prog->prog_flags |= attr->prog_flags; in bpf_prog_load_xattr()
9117 bpf_object__for_each_map(map, obj) { in bpf_prog_load_xattr()
9118 if (!bpf_map__is_offload_neutral(map)) in bpf_prog_load_xattr()
9119 map->map_ifindex = attr->ifindex; in bpf_prog_load_xattr()
9125 return -ENOENT; in bpf_prog_load_xattr()
9143 int fd; /* hook FD, -1 if not applicable */
9165 link->disconnected = true; in bpf_link__disconnect()
9175 if (!link->disconnected && link->detach) in bpf_link__destroy()
9176 err = link->detach(link); in bpf_link__destroy()
9177 if (link->destroy) in bpf_link__destroy()
9178 link->destroy(link); in bpf_link__destroy()
9179 if (link->pin_path) in bpf_link__destroy()
9180 free(link->pin_path); in bpf_link__destroy()
9188 return link->fd; in bpf_link__fd()
9193 return link->pin_path; in bpf_link__pin_path()
9198 return close(link->fd); in bpf_link__detach_fd()
9201 struct bpf_link *bpf_link__open(const char *path) in bpf_link__open() argument
9206 fd = bpf_obj_get(path); in bpf_link__open()
9208 fd = -errno; in bpf_link__open()
9209 pr_warn("failed to open link at %s: %d\n", path, fd); in bpf_link__open()
9216 return ERR_PTR(-ENOMEM); in bpf_link__open()
9218 link->detach = &bpf_link__detach_fd; in bpf_link__open()
9219 link->fd = fd; in bpf_link__open()
9221 link->pin_path = strdup(path); in bpf_link__open()
9222 if (!link->pin_path) { in bpf_link__open()
9224 return ERR_PTR(-ENOMEM); in bpf_link__open()
9232 return bpf_link_detach(link->fd) ? -errno : 0; in bpf_link__detach()
9235 int bpf_link__pin(struct bpf_link *link, const char *path) in bpf_link__pin() argument
9239 if (link->pin_path) in bpf_link__pin()
9240 return -EBUSY; in bpf_link__pin()
9241 err = make_parent_dir(path); in bpf_link__pin()
9244 err = check_path(path); in bpf_link__pin()
9248 link->pin_path = strdup(path); in bpf_link__pin()
9249 if (!link->pin_path) in bpf_link__pin()
9250 return -ENOMEM; in bpf_link__pin()
9252 if (bpf_obj_pin(link->fd, link->pin_path)) { in bpf_link__pin()
9253 err = -errno; in bpf_link__pin()
9254 zfree(&link->pin_path); in bpf_link__pin()
9258 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); in bpf_link__pin()
9266 if (!link->pin_path) in bpf_link__unpin()
9267 return -EINVAL; in bpf_link__unpin()
9269 err = unlink(link->pin_path); in bpf_link__unpin()
9271 return -errno; in bpf_link__unpin()
9273 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); in bpf_link__unpin()
9274 zfree(&link->pin_path); in bpf_link__unpin()
9282 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0); in bpf_link__detach_perf_event()
9284 err = -errno; in bpf_link__detach_perf_event()
9286 close(link->fd); in bpf_link__detach_perf_event()
9299 prog->name, pfd); in bpf_program__attach_perf_event()
9300 return ERR_PTR(-EINVAL); in bpf_program__attach_perf_event()
9305 prog->name); in bpf_program__attach_perf_event()
9306 return ERR_PTR(-EINVAL); in bpf_program__attach_perf_event()
9311 return ERR_PTR(-ENOMEM); in bpf_program__attach_perf_event()
9312 link->detach = &bpf_link__detach_perf_event; in bpf_program__attach_perf_event()
9313 link->fd = pfd; in bpf_program__attach_perf_event()
9316 err = -errno; in bpf_program__attach_perf_event()
9319 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); in bpf_program__attach_perf_event()
9320 if (err == -EPROTO) in bpf_program__attach_perf_event()
9322 prog->name, pfd); in bpf_program__attach_perf_event()
9326 err = -errno; in bpf_program__attach_perf_event()
9329 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); in bpf_program__attach_perf_event()
9336  * this function is expected to parse an integer in the range of [0, 2^31-1] from
9348 err = -errno; in parse_uint_from_file()
9355 err = err == EOF ? -EIO : -errno; in parse_uint_from_file()
9427 pid < 0 ? -1 : pid /* pid */, in perf_event_open_probe()
9428 pid == -1 ? 0 : -1 /* cpu */, in perf_event_open_probe()
9429 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); in perf_event_open_probe()
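	/* The pid/cpu juggling above follows perf_event_open(2) semantics:
	 * pid >= 0 with cpu == -1 follows one task on any CPU, while
	 * pid == -1 with cpu >= 0 measures system-wide on a single CPU;
	 * pid == -1 together with cpu == -1 is invalid, hence a negative
	 * pid request is translated into the (-1 pid, cpu 0) pair.
	 */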
9431 err = -errno; in perf_event_open_probe()
9449 0 /* offset */, -1 /* pid */); in bpf_program__attach_kprobe()
9452 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, in bpf_program__attach_kprobe()
9461 prog->name, retprobe ? "kretprobe" : "kprobe", func_name, in bpf_program__attach_kprobe()
9474 func_name = prog->sec_name + sec->len; in attach_kprobe()
9475 retprobe = strcmp(sec->sec, "kretprobe/") == 0; in attach_kprobe()
9493 prog->name, retprobe ? "uretprobe" : "uprobe", in bpf_program__attach_uprobe()
9503 prog->name, retprobe ? "uretprobe" : "uprobe", in bpf_program__attach_uprobe()
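/* Sketches of both probe flavors above; the kernel symbol, binary path,
 * and function offset are illustrative assumptions (a real uprobe offset
 * is normally resolved from the target ELF's symbol table).
 */
        struct bpf_link *klink, *ulink;

        klink = bpf_program__attach_kprobe(kprog, false /* !retprobe */,
                                           "do_sys_openat2");
        ulink = bpf_program__attach_uprobe(uprog, true /* retprobe */,
                                           -1 /* any pid */, "/bin/bash",
                                           0x1234 /* func offset */);
        if (libbpf_get_error(klink) || libbpf_get_error(ulink))
                return -1;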
9521 return -errno; in determine_tracepoint_id()
9523 pr_debug("tracepoint %s/%s path is too long\n", in determine_tracepoint_id()
9525 return -E2BIG; in determine_tracepoint_id()
9549 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, in perf_event_open_tracepoint()
9550 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); in perf_event_open_tracepoint()
9552 err = -errno; in perf_event_open_tracepoint()
9572 prog->name, tp_category, tp_name, in bpf_program__attach_tracepoint()
9581 prog->name, tp_category, tp_name, in bpf_program__attach_tracepoint()
9594 sec_name = strdup(prog->sec_name); in attach_tp()
9596 return ERR_PTR(-ENOMEM); in attach_tp()
9599 tp_cat = sec_name + sec->len; in attach_tp()
9602 link = ERR_PTR(-EINVAL); in attach_tp()
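/* attach_tp() above splits a "tp/<category>/<name>" section name at '/';
 * the same attachment can be requested explicitly, as in this sketch
 * (the sched/sched_switch pair is just an example):
 */
        struct bpf_link *link;

        link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
        if (libbpf_get_error(link))
                return -1;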
9623 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_raw_tracepoint()
9624 return ERR_PTR(-EINVAL); in bpf_program__attach_raw_tracepoint()
9629 return ERR_PTR(-ENOMEM); in bpf_program__attach_raw_tracepoint()
9630 link->detach = &bpf_link__detach_fd; in bpf_program__attach_raw_tracepoint()
9634 pfd = -errno; in bpf_program__attach_raw_tracepoint()
9637 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); in bpf_program__attach_raw_tracepoint()
9640 link->fd = pfd; in bpf_program__attach_raw_tracepoint()
9647 const char *tp_name = prog->sec_name + sec->len; in attach_raw_tp()
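/* Raw tracepoints bypass the perf layer: attachment goes straight through
 * BPF_RAW_TRACEPOINT_OPEN, keyed by tracepoint name only. A hedged
 * one-liner (sched_switch is illustrative):
 */
        struct bpf_link *link;

        link = bpf_program__attach_raw_tracepoint(prog, "sched_switch");
        if (libbpf_get_error(link))
                return -1;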
9661 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_btf_id()
9662 return ERR_PTR(-EINVAL); in bpf_program__attach_btf_id()
9667 return ERR_PTR(-ENOMEM); in bpf_program__attach_btf_id()
9668 link->detach = &bpf_link__detach_fd; in bpf_program__attach_btf_id()
9672 pfd = -errno; in bpf_program__attach_btf_id()
9675 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); in bpf_program__attach_btf_id()
9678 link->fd = pfd; in bpf_program__attach_btf_id()
9723 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_fd()
9724 return ERR_PTR(-EINVAL); in bpf_program__attach_fd()
9729 return ERR_PTR(-ENOMEM); in bpf_program__attach_fd()
9730 link->detach = &bpf_link__detach_fd; in bpf_program__attach_fd()
9735 link_fd = -errno; in bpf_program__attach_fd()
9738 prog->name, target_name, in bpf_program__attach_fd()
9742 link->fd = link_fd; in bpf_program__attach_fd()
9772 prog->name); in bpf_program__attach_freplace()
9773 return ERR_PTR(-EINVAL); in bpf_program__attach_freplace()
9776 if (prog->type != BPF_PROG_TYPE_EXT) { in bpf_program__attach_freplace()
9778 prog->name); in bpf_program__attach_freplace()
9779 return ERR_PTR(-EINVAL); in bpf_program__attach_freplace()
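/* A sketch of the freplace flow guarded above: an EXT-type program
 * replaces a function inside an already-loaded target program. The target
 * FD and the function name are illustrative assumptions.
 */
        struct bpf_link *link;

        link = bpf_program__attach_freplace(ext_prog, target_prog_fd,
                                            "xdp_dispatcher_fn" /* example */);
        if (libbpf_get_error(link))
                return -1;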
9807 return ERR_PTR(-EINVAL); in bpf_program__attach_iter()
9814 pr_warn("prog '%s': can't attach before loaded\n", prog->name); in bpf_program__attach_iter()
9815 return ERR_PTR(-EINVAL); in bpf_program__attach_iter()
9820 return ERR_PTR(-ENOMEM); in bpf_program__attach_iter()
9821 link->detach = &bpf_link__detach_fd; in bpf_program__attach_iter()
9826 link_fd = -errno; in bpf_program__attach_iter()
9829 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); in bpf_program__attach_iter()
9832 link->fd = link_fd; in bpf_program__attach_iter()
9840 sec_def = find_sec_def(prog->sec_name); in bpf_program__attach()
9841 if (!sec_def || !sec_def->attach_fn) in bpf_program__attach()
9842 return ERR_PTR(-ESRCH); in bpf_program__attach()
9844 return sec_def->attach_fn(sec_def, prog); in bpf_program__attach()
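/* bpf_program__attach() is the generic entry point: it resolves the
 * section definition for prog->sec_name and dispatches to its attach_fn,
 * returning -ESRCH for sections with no auto-attach handler (e.g. plain
 * "xdp" programs, which need an explicit attach API instead).
 */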
9851 if (bpf_map_delete_elem(link->fd, &zero)) in bpf_link__detach_struct_ops()
9852 return -errno; in bpf_link__detach_struct_ops()
9857 struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map) in bpf_map__attach_struct_ops() argument
9864 if (!bpf_map__is_struct_ops(map) || map->fd == -1) in bpf_map__attach_struct_ops()
9865 return ERR_PTR(-EINVAL); in bpf_map__attach_struct_ops()
9869 return ERR_PTR(-EINVAL); in bpf_map__attach_struct_ops()
9871 st_ops = map->st_ops; in bpf_map__attach_struct_ops()
9872 for (i = 0; i < btf_vlen(st_ops->type); i++) { in bpf_map__attach_struct_ops()
9873 struct bpf_program *prog = st_ops->progs[i]; in bpf_map__attach_struct_ops()
9881 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; in bpf_map__attach_struct_ops()
9885 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0); in bpf_map__attach_struct_ops()
9887 err = -errno; in bpf_map__attach_struct_ops()
9892 link->detach = bpf_link__detach_struct_ops; in bpf_map__attach_struct_ops()
9893 link->fd = map->fd; in bpf_map__attach_struct_ops()
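/* A sketch of registering a struct_ops map, e.g. a BPF TCP congestion
 * control; the map name is an illustrative assumption and would come from
 * the object's .struct_ops section.
 */
        struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_cc_ops");
        struct bpf_link *link = bpf_map__attach_struct_ops(map);

        if (libbpf_get_error(link))
                return -1;
        /* destroying the link deletes the map's single element, detaching it */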
9905 __u64 data_tail = header->data_tail; in bpf_perf_event_read_simple()
9912 ehdr = base + (data_tail & (mmap_size - 1)); in bpf_perf_event_read_simple()
9913 ehdr_size = ehdr->size; in bpf_perf_event_read_simple()
9917 size_t len_first = base + mmap_size - copy_start; in bpf_perf_event_read_simple()
9918 size_t len_secnd = ehdr_size - len_first; in bpf_perf_event_read_simple()
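/* The len_first/len_secnd split above handles a record that wraps past the
 * end of the ring: mmap_size is a power of two, so data_tail &
 * (mmap_size - 1) locates the record's start, len_first covers the bytes
 * up to the buffer's end and len_secnd the remainder from its start; both
 * chunks are copied into the caller-owned bounce buffer (*copy_mem) so the
 * callback always sees a contiguous record.
 */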
9952 /* sample_cb and lost_cb are higher-level common-case callbacks */
9983 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
9991 if (cpu_buf->base && in perf_buffer__free_cpu_buf()
9992 munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) in perf_buffer__free_cpu_buf()
9993 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); in perf_buffer__free_cpu_buf()
9994 if (cpu_buf->fd >= 0) { in perf_buffer__free_cpu_buf()
9995 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); in perf_buffer__free_cpu_buf()
9996 close(cpu_buf->fd); in perf_buffer__free_cpu_buf()
9998 free(cpu_buf->buf); in perf_buffer__free_cpu_buf()
10008 if (pb->cpu_bufs) { in perf_buffer__free()
10009 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__free()
10010 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__free()
10015 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); in perf_buffer__free()
10018 free(pb->cpu_bufs); in perf_buffer__free()
10020 if (pb->epoll_fd >= 0) in perf_buffer__free()
10021 close(pb->epoll_fd); in perf_buffer__free()
10022 free(pb->events); in perf_buffer__free()
10036 return ERR_PTR(-ENOMEM); in perf_buffer__open_cpu_buf()
10038 cpu_buf->pb = pb; in perf_buffer__open_cpu_buf()
10039 cpu_buf->cpu = cpu; in perf_buffer__open_cpu_buf()
10040 cpu_buf->map_key = map_key; in perf_buffer__open_cpu_buf()
10042 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu, in perf_buffer__open_cpu_buf()
10043 -1, PERF_FLAG_FD_CLOEXEC); in perf_buffer__open_cpu_buf()
10044 if (cpu_buf->fd < 0) { in perf_buffer__open_cpu_buf()
10045 err = -errno; in perf_buffer__open_cpu_buf()
10051 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size, in perf_buffer__open_cpu_buf()
10053 cpu_buf->fd, 0); in perf_buffer__open_cpu_buf()
10054 if (cpu_buf->base == MAP_FAILED) { in perf_buffer__open_cpu_buf()
10055 cpu_buf->base = NULL; in perf_buffer__open_cpu_buf()
10056 err = -errno; in perf_buffer__open_cpu_buf()
10062 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) { in perf_buffer__open_cpu_buf()
10063 err = -errno; in perf_buffer__open_cpu_buf()
10092 p.sample_cb = opts ? opts->sample_cb : NULL; in perf_buffer__new()
10093 p.lost_cb = opts ? opts->lost_cb : NULL; in perf_buffer__new()
10094 p.ctx = opts ? opts->ctx : NULL; in perf_buffer__new()
10105 p.attr = opts->attr; in perf_buffer__new_raw()
10106 p.event_cb = opts->event_cb; in perf_buffer__new_raw()
10107 p.ctx = opts->ctx; in perf_buffer__new_raw()
10108 p.cpu_cnt = opts->cpu_cnt; in perf_buffer__new_raw()
10109 p.cpus = opts->cpus; in perf_buffer__new_raw()
10110 p.map_keys = opts->map_keys; in perf_buffer__new_raw()
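/* A minimal end-to-end sketch of the higher-level API above (assumes
 * <bpf/libbpf.h>), with a BPF_MAP_TYPE_PERF_EVENT_ARRAY FD in `map_fd`;
 * the callback names and the 8-page per-CPU ring size are illustrative.
 */
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
        /* `data` points into a per-CPU bounce buffer, valid only here */
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
        /* `cnt` samples were dropped on `cpu` because the ring was full */
}

int consume_events(int map_fd)
{
        struct perf_buffer_opts pb_opts = {
                .sample_cb = on_sample,
                .lost_cb = on_lost,
        };
        struct perf_buffer *pb;
        int err;

        pb = perf_buffer__new(map_fd, 8 /* pages per CPU ring */, &pb_opts);
        if (libbpf_get_error(pb))
                return -1;
        /* loop until error; real code would add an exit condition */
        while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
                ; /* callbacks fire from inside poll */
        perf_buffer__free(pb);
        return err;
}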
10119 struct bpf_map_info map; in __perf_buffer__new() local
10126 if (page_cnt & (page_cnt - 1)) { in __perf_buffer__new()
10129 return ERR_PTR(-EINVAL); in __perf_buffer__new()
10132 /* best-effort sanity checks */ in __perf_buffer__new()
10133 memset(&map, 0, sizeof(map)); in __perf_buffer__new()
10134 map_info_len = sizeof(map); in __perf_buffer__new()
10135 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); in __perf_buffer__new()
10137 err = -errno; in __perf_buffer__new()
10139 * -EBADFD, -EFAULT, or -E2BIG on real error in __perf_buffer__new()
10141 if (err != -EINVAL) { in __perf_buffer__new()
10142 pr_warn("failed to get map info for map FD %d: %s\n", in __perf_buffer__new()
10146 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", in __perf_buffer__new()
10149 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { in __perf_buffer__new()
10150 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", in __perf_buffer__new()
10151 map.name); in __perf_buffer__new()
10152 return ERR_PTR(-EINVAL); in __perf_buffer__new()
10158 return ERR_PTR(-ENOMEM); in __perf_buffer__new()
10160 pb->event_cb = p->event_cb; in __perf_buffer__new()
10161 pb->sample_cb = p->sample_cb; in __perf_buffer__new()
10162 pb->lost_cb = p->lost_cb; in __perf_buffer__new()
10163 pb->ctx = p->ctx; in __perf_buffer__new()
10165 pb->page_size = getpagesize(); in __perf_buffer__new()
10166 pb->mmap_size = pb->page_size * page_cnt; in __perf_buffer__new()
10167 pb->map_fd = map_fd; in __perf_buffer__new()
10169 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC); in __perf_buffer__new()
10170 if (pb->epoll_fd < 0) { in __perf_buffer__new()
10171 err = -errno; in __perf_buffer__new()
10177 if (p->cpu_cnt > 0) { in __perf_buffer__new()
10178 pb->cpu_cnt = p->cpu_cnt; in __perf_buffer__new()
10180 pb->cpu_cnt = libbpf_num_possible_cpus(); in __perf_buffer__new()
10181 if (pb->cpu_cnt < 0) { in __perf_buffer__new()
10182 err = pb->cpu_cnt; in __perf_buffer__new()
10185 if (map.max_entries && map.max_entries < pb->cpu_cnt) in __perf_buffer__new()
10186 pb->cpu_cnt = map.max_entries; in __perf_buffer__new()
10189 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events)); in __perf_buffer__new()
10190 if (!pb->events) { in __perf_buffer__new()
10191 err = -ENOMEM; in __perf_buffer__new()
10195 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs)); in __perf_buffer__new()
10196 if (!pb->cpu_bufs) { in __perf_buffer__new()
10197 err = -ENOMEM; in __perf_buffer__new()
10208 for (i = 0, j = 0; i < pb->cpu_cnt; i++) { in __perf_buffer__new()
10212 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i; in __perf_buffer__new()
10213 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i; in __perf_buffer__new()
10218 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu])) in __perf_buffer__new()
10221 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key); in __perf_buffer__new()
10227 pb->cpu_bufs[j] = cpu_buf; in __perf_buffer__new()
10229 err = bpf_map_update_elem(pb->map_fd, &map_key, in __perf_buffer__new()
10230 &cpu_buf->fd, 0); in __perf_buffer__new()
10232 err = -errno; in __perf_buffer__new()
10233 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n", in __perf_buffer__new()
10234 cpu, map_key, cpu_buf->fd, in __perf_buffer__new()
10239 pb->events[j].events = EPOLLIN; in __perf_buffer__new()
10240 pb->events[j].data.ptr = cpu_buf; in __perf_buffer__new()
10241 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd, in __perf_buffer__new()
10242 &pb->events[j]) < 0) { in __perf_buffer__new()
10243 err = -errno; in __perf_buffer__new()
10245 cpu, cpu_buf->fd, in __perf_buffer__new()
10251 pb->cpu_cnt = j; in __perf_buffer__new()
10280 struct perf_buffer *pb = cpu_buf->pb; in perf_buffer__process_record()
10284 if (pb->event_cb) in perf_buffer__process_record()
10285 return pb->event_cb(pb->ctx, cpu_buf->cpu, e); in perf_buffer__process_record()
10287 switch (e->type) { in perf_buffer__process_record()
10291 if (pb->sample_cb) in perf_buffer__process_record()
10292 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); in perf_buffer__process_record()
10298 if (pb->lost_cb) in perf_buffer__process_record()
10299 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); in perf_buffer__process_record()
10303 pr_warn("unknown perf sample type %d\n", e->type); in perf_buffer__process_record()
10314 ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size, in perf_buffer__process_records()
10315 pb->page_size, &cpu_buf->buf, in perf_buffer__process_records()
10316 &cpu_buf->buf_size, in perf_buffer__process_records()
10325 return pb->epoll_fd; in perf_buffer__epoll_fd()
10332 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); in perf_buffer__poll()
10334 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; in perf_buffer__poll()
10342 return cnt < 0 ? -errno : cnt; in perf_buffer__poll()
10345 /* Return the number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
10350 return pb->cpu_cnt; in perf_buffer__buffer_cnt()
10355 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
10362 if (buf_idx >= pb->cpu_cnt) in perf_buffer__buffer_fd()
10363 return -EINVAL; in perf_buffer__buffer_fd()
10365 cpu_buf = pb->cpu_bufs[buf_idx]; in perf_buffer__buffer_fd()
10367 return -ENOENT; in perf_buffer__buffer_fd()
10369 return cpu_buf->fd; in perf_buffer__buffer_fd()
10374 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
10377 * - 0 on success;
10378 * - <0 on failure.
10384 if (buf_idx >= pb->cpu_cnt) in perf_buffer__consume_buffer()
10385 return -EINVAL; in perf_buffer__consume_buffer()
10387 cpu_buf = pb->cpu_bufs[buf_idx]; in perf_buffer__consume_buffer()
10389 return -ENOENT; in perf_buffer__consume_buffer()
10398 for (i = 0; i < pb->cpu_cnt; i++) { in perf_buffer__consume()
10399 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; in perf_buffer__consume()
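/* perf_buffer__poll() blocks in epoll_wait() and drains only the rings
 * that signaled readiness, while perf_buffer__consume() above drains every
 * ring unconditionally without blocking; the latter is the right call once
 * producers are known to be quiesced, e.g. at shutdown.
 */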
10417 * < 0: fixed size of -size_offset
10425 -1,
10430 -1,
10435 -(int)sizeof(__u32),
10440 -(int)sizeof(__u64),
10445 -(int)sizeof(__u32),
10465 -(int)sizeof(__u8) * BPF_TAG_SIZE,
10477 return -(int)offset; in bpf_prog_info_read_offset_u32()
10487 return -(int)offset; in bpf_prog_info_read_offset_u64()
10519 return ERR_PTR(-EINVAL); in bpf_program__get_prog_info_linear()
10525 return ERR_PTR(-EFAULT); in bpf_program__get_prog_info_linear()
10537 if (info_len < desc->array_offset + sizeof(__u32) || in bpf_program__get_prog_info_linear()
10538 info_len < desc->count_offset + sizeof(__u32) || in bpf_program__get_prog_info_linear()
10539 (desc->size_offset > 0 && info_len < desc->size_offset)) in bpf_program__get_prog_info_linear()
10547 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10548 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10557 return ERR_PTR(-ENOMEM); in bpf_program__get_prog_info_linear()
10559 /* step 4: fill data to info_linear->info */ in bpf_program__get_prog_info_linear()
10560 info_linear->arrays = arrays; in bpf_program__get_prog_info_linear()
10561 memset(&info_linear->info, 0, sizeof(info)); in bpf_program__get_prog_info_linear()
10562 ptr = info_linear->data; in bpf_program__get_prog_info_linear()
10572 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10573 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10574 bpf_prog_info_set_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10575 desc->count_offset, count); in bpf_program__get_prog_info_linear()
10576 bpf_prog_info_set_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10577 desc->size_offset, size); in bpf_program__get_prog_info_linear()
10578 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__get_prog_info_linear()
10579 desc->array_offset, in bpf_program__get_prog_info_linear()
10585 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len); in bpf_program__get_prog_info_linear()
10589 return ERR_PTR(-EFAULT); in bpf_program__get_prog_info_linear()
10601 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset); in bpf_program__get_prog_info_linear()
10602 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10603 desc->count_offset); in bpf_program__get_prog_info_linear()
10607 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset); in bpf_program__get_prog_info_linear()
10608 v2 = bpf_prog_info_read_offset_u32(&info_linear->info, in bpf_program__get_prog_info_linear()
10609 desc->size_offset); in bpf_program__get_prog_info_linear()
10615 info_linear->info_len = sizeof(struct bpf_prog_info); in bpf_program__get_prog_info_linear()
10616 info_linear->data_len = data_len; in bpf_program__get_prog_info_linear()
10629 if ((info_linear->arrays & (1UL << i)) == 0) in bpf_program__bpil_addr_to_offs()
10633 addr = bpf_prog_info_read_offset_u64(&info_linear->info, in bpf_program__bpil_addr_to_offs()
10634 desc->array_offset); in bpf_program__bpil_addr_to_offs()
10635 offs = addr - ptr_to_u64(info_linear->data); in bpf_program__bpil_addr_to_offs()
10636 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__bpil_addr_to_offs()
10637 desc->array_offset, offs); in bpf_program__bpil_addr_to_offs()
10649 if ((info_linear->arrays & (1UL << i)) == 0) in bpf_program__bpil_offs_to_addr()
10653 offs = bpf_prog_info_read_offset_u64(&info_linear->info, in bpf_program__bpil_offs_to_addr()
10654 desc->array_offset); in bpf_program__bpil_offs_to_addr()
10655 addr = offs + ptr_to_u64(info_linear->data); in bpf_program__bpil_offs_to_addr()
10656 bpf_prog_info_set_offset_u64(&info_linear->info, in bpf_program__bpil_offs_to_addr()
10657 desc->array_offset, addr); in bpf_program__bpil_offs_to_addr()
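/* A sketch of the info_linear round trip: fetch selected arrays for a
 * loaded program FD, convert kernel pointers into self-relative offsets
 * for serialization, then back. `prog_fd` is an assumed valid FD.
 */
        struct bpf_prog_info_linear *il;

        il = bpf_program__get_prog_info_linear(prog_fd,
                                               1UL << BPF_PROG_INFO_MAP_IDS);
        if (libbpf_get_error(il))
                return -1;
        bpf_program__bpil_addr_to_offs(il);     /* safe to write to disk now */
        bpf_program__bpil_offs_to_addr(il);     /* restore pointers after read */
        free(il);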
10668 return -EINVAL; in bpf_program__set_attach_target()
10675 prog->expected_attach_type); in bpf_program__set_attach_target()
10680 prog->attach_btf_id = btf_id; in bpf_program__set_attach_target()
10681 prog->attach_prog_fd = attach_prog_fd; in bpf_program__set_attach_target()
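/* A sketch of retargeting before load, which the setter above enables:
 * attach_prog_fd == 0 resolves the name against vmlinux BTF (fentry/fexit),
 * while a non-zero FD resolves it against another BPF program's BTF. The
 * function name is an example; this must run before bpf_object__load().
 */
        int err = bpf_program__set_attach_target(prog, 0 /* vmlinux */,
                                                 "tcp_v4_connect");
        if (err)
                return err;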
10687 int err = 0, n, len, start, end = -1; in parse_cpu_mask_str()
10693 /* Each substring separated by ',' has format \d+-\d+ or \d+ */ in parse_cpu_mask_str()
10699 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len); in parse_cpu_mask_str()
10702 err = -EINVAL; in parse_cpu_mask_str()
10710 err = -EINVAL; in parse_cpu_mask_str()
10715 err = -ENOMEM; in parse_cpu_mask_str()
10719 memset(tmp + *mask_sz, 0, start - *mask_sz); in parse_cpu_mask_str()
10720 memset(tmp + start, 1, end - start + 1); in parse_cpu_mask_str()
10726 return -EINVAL; in parse_cpu_mask_str()
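/* A self-contained sketch of the "%d%n-%d%n" scanf trick used above:
 * sscanf() returns how many values matched (1 for "5", 2 for "0-3"),
 * while %n records the characters consumed so the loop can step past ','.
 */
#include <stdio.h>

int main(void)
{
        const char *s = "0-2,4";        /* example CPU list */
        int start, end, len, n;

        while (*s) {
                n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
                if (n <= 0)
                        return 1;
                if (n == 1)
                        end = start;    /* single CPU, no range */
                printf("cpus %d..%d\n", start, end);
                s += len;
                if (*s == ',')
                        s++;
        }
        return 0;
}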
10742 err = -errno; in parse_cpu_mask_file()
10749 err = len ? -errno : -EINVAL; in parse_cpu_mask_file()
10755 return -E2BIG; in parse_cpu_mask_file()
10792 .object_name = s->name, in bpf_object__open_skeleton()
10797 /* Attempt to preserve opts->object_name, unless overridden by user in bpf_object__open_skeleton()
10800 * prefix as their own map name prefix. When skeleton is generated, in bpf_object__open_skeleton()
10805 if (!opts->object_name) in bpf_object__open_skeleton()
10806 skel_opts.object_name = s->name; in bpf_object__open_skeleton()
10809 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts); in bpf_object__open_skeleton()
10812 s->name, PTR_ERR(obj)); in bpf_object__open_skeleton()
10816 *s->obj = obj; in bpf_object__open_skeleton()
10818 for (i = 0; i < s->map_cnt; i++) { in bpf_object__open_skeleton()
10819 struct bpf_map **map = s->maps[i].map; in bpf_object__open_skeleton() local
10820 const char *name = s->maps[i].name; in bpf_object__open_skeleton()
10821 void **mmaped = s->maps[i].mmaped; in bpf_object__open_skeleton()
10823 *map = bpf_object__find_map_by_name(obj, name); in bpf_object__open_skeleton()
10824 if (!*map) { in bpf_object__open_skeleton()
10825 pr_warn("failed to find skeleton map '%s'\n", name); in bpf_object__open_skeleton()
10826 return -ESRCH; in bpf_object__open_skeleton()
10829 /* externs shouldn't be pre-initialized by user code */ in bpf_object__open_skeleton()
10830 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) in bpf_object__open_skeleton()
10831 *mmaped = (*map)->mmaped; in bpf_object__open_skeleton()
10834 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__open_skeleton()
10835 struct bpf_program **prog = s->progs[i].prog; in bpf_object__open_skeleton()
10836 const char *name = s->progs[i].name; in bpf_object__open_skeleton()
10841 return -ESRCH; in bpf_object__open_skeleton()
10852 err = bpf_object__load(*s->obj); in bpf_object__load_skeleton()
10854 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err); in bpf_object__load_skeleton()
10858 for (i = 0; i < s->map_cnt; i++) { in bpf_object__load_skeleton()
10859 struct bpf_map *map = *s->maps[i].map; in bpf_object__load_skeleton() local
10860 size_t mmap_sz = bpf_map_mmap_sz(map); in bpf_object__load_skeleton()
10861 int prot, map_fd = bpf_map__fd(map); in bpf_object__load_skeleton()
10862 void **mmaped = s->maps[i].mmaped; in bpf_object__load_skeleton()
10867 if (!(map->def.map_flags & BPF_F_MMAPABLE)) { in bpf_object__load_skeleton()
10872 if (map->def.map_flags & BPF_F_RDONLY_PROG) in bpf_object__load_skeleton()
10877 /* Remap anonymous mmap()-ed "map initialization image" as in bpf_object__load_skeleton()
10878 * a BPF map-backed mmap()-ed memory, but preserving the same in bpf_object__load_skeleton()
10887 *mmaped = mmap(map->mmaped, mmap_sz, prot, in bpf_object__load_skeleton()
10890 err = -errno; in bpf_object__load_skeleton()
10892 pr_warn("failed to re-mmap() map '%s': %d\n", in bpf_object__load_skeleton()
10893 bpf_map__name(map), err); in bpf_object__load_skeleton()
10905 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__attach_skeleton()
10906 struct bpf_program *prog = *s->progs[i].prog; in bpf_object__attach_skeleton()
10907 struct bpf_link **link = s->progs[i].link; in bpf_object__attach_skeleton()
10910 if (!prog->load) in bpf_object__attach_skeleton()
10913 sec_def = find_sec_def(prog->sec_name); in bpf_object__attach_skeleton()
10914 if (!sec_def || !sec_def->attach_fn) in bpf_object__attach_skeleton()
10917 *link = sec_def->attach_fn(sec_def, prog); in bpf_object__attach_skeleton()
10919 pr_warn("failed to auto-attach program '%s': %ld\n", in bpf_object__attach_skeleton()
10932 for (i = 0; i < s->prog_cnt; i++) { in bpf_object__detach_skeleton()
10933 struct bpf_link **link = s->progs[i].link; in bpf_object__detach_skeleton()
10945 if (s->progs) in bpf_object__destroy_skeleton()
10947 if (s->obj) in bpf_object__destroy_skeleton()
10948 bpf_object__close(*s->obj); in bpf_object__destroy_skeleton()
10949 free(s->maps); in bpf_object__destroy_skeleton()
10950 free(s->progs); in bpf_object__destroy_skeleton()
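/* A sketch of the generated-skeleton lifecycle these four helpers back.
 * The `minimal_bpf` type and its functions are hypothetical names, as
 * emitted by `bpftool gen skeleton minimal.bpf.o`.
 */
int run(void)
{
        struct minimal_bpf *skel;
        int err;

        skel = minimal_bpf__open();             /* bpf_object__open_skeleton() */
        if (!skel)
                return -1;
        err = minimal_bpf__load(skel);          /* bpf_object__load_skeleton() */
        if (!err)
                err = minimal_bpf__attach(skel);/* bpf_object__attach_skeleton() */
        /* mmap-ed globals are reachable via skel->bss / skel->data here */
        minimal_bpf__destroy(skel);             /* bpf_object__destroy_skeleton() */
        return err;
}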