1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  * Copyright (C) 2019 Isovalent, Inc.
11  */
12 
13 #ifndef _GNU_SOURCE
14 #define _GNU_SOURCE
15 #endif
16 #include <stdlib.h>
17 #include <stdio.h>
18 #include <stdarg.h>
19 #include <libgen.h>
20 #include <inttypes.h>
21 #include <string.h>
22 #include <unistd.h>
23 #include <endian.h>
24 #include <fcntl.h>
25 #include <errno.h>
26 #include <asm/unistd.h>
27 #include <linux/err.h>
28 #include <linux/kernel.h>
29 #include <linux/bpf.h>
30 #include <linux/btf.h>
31 #include <linux/filter.h>
32 #include <linux/list.h>
33 #include <linux/limits.h>
34 #include <linux/perf_event.h>
35 #include <linux/ring_buffer.h>
36 #include <sys/epoll.h>
37 #include <sys/ioctl.h>
38 #include <sys/mman.h>
39 #include <sys/stat.h>
40 #include <sys/types.h>
41 #include <sys/vfs.h>
42 #include <sys/utsname.h>
43 #include <tools/libc_compat.h>
44 #include <libelf.h>
45 #include <gelf.h>
46 
47 #include "libbpf.h"
48 #include "bpf.h"
49 #include "btf.h"
50 #include "str_error.h"
51 #include "libbpf_internal.h"
52 #include "hashmap.h"
53 
54 #ifndef EM_BPF
55 #define EM_BPF 247
56 #endif
57 
58 #ifndef BPF_FS_MAGIC
59 #define BPF_FS_MAGIC		0xcafe4a11
60 #endif
61 
62 /* vfprintf() in __base_pr() uses a nonliteral format string. It may break
63  * compilation if the user enables the corresponding warning, so disable it explicitly.
64  */
65 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
66 
67 #define __printf(a, b)	__attribute__((format(printf, a, b)))
68 
69 static int __base_pr(enum libbpf_print_level level, const char *format,
70 		     va_list args)
71 {
72 	if (level == LIBBPF_DEBUG)
73 		return 0;
74 
75 	return vfprintf(stderr, format, args);
76 }
77 
78 static libbpf_print_fn_t __libbpf_pr = __base_pr;
79 
80 libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
81 {
82 	libbpf_print_fn_t old_print_fn = __libbpf_pr;
83 
84 	__libbpf_pr = fn;
85 	return old_print_fn;
86 }
87 
88 __printf(2, 3)
89 void libbpf_print(enum libbpf_print_level level, const char *format, ...)
90 {
91 	va_list args;
92 
93 	if (!__libbpf_pr)
94 		return;
95 
96 	va_start(args, format);
97 	__libbpf_pr(level, format, args);
98 	va_end(args);
99 }
100 
101 #define STRERR_BUFSIZE  128
102 
103 #define CHECK_ERR(action, err, out) do {	\
104 	err = action;			\
105 	if (err)			\
106 		goto out;		\
107 } while(0)
108 
109 
110 /* Copied from tools/perf/util/util.h */
111 #ifndef zfree
112 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
113 #endif
114 
115 #ifndef zclose
116 # define zclose(fd) ({			\
117 	int ___err = 0;			\
118 	if ((fd) >= 0)			\
119 		___err = close((fd));	\
120 	fd = -1;			\
121 	___err; })
122 #endif
123 
124 #ifdef HAVE_LIBELF_MMAP_SUPPORT
125 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
126 #else
127 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
128 #endif
129 
130 static inline __u64 ptr_to_u64(const void *ptr)
131 {
132 	return (__u64) (unsigned long) ptr;
133 }
134 
135 struct bpf_capabilities {
136 	/* v4.14: kernel support for program & map names. */
137 	__u32 name:1;
138 	/* v5.2: kernel support for global data sections. */
139 	__u32 global_data:1;
140 	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
141 	__u32 btf_func:1;
142 	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
143 	__u32 btf_datasec:1;
144 };
145 
146 /*
147  * bpf_prog should be a better name but it has been used in
148  * linux/filter.h.
149  */
150 struct bpf_program {
151 	/* Index in elf obj file, for relocation use. */
152 	int idx;
153 	char *name;
154 	int prog_ifindex;
155 	char *section_name;
156 	/* section_name with / replaced by _ (e.g. "cgroup/skb" -> "cgroup_skb");
157 	 * makes recursive pinning in bpf_object__pin_programs easier
158 	 */
159 	char *pin_name;
160 	struct bpf_insn *insns;
161 	size_t insns_cnt, main_prog_cnt;
162 	enum bpf_prog_type type;
163 
164 	struct reloc_desc {
165 		enum {
166 			RELO_LD64,
167 			RELO_CALL,
168 			RELO_DATA,
169 		} type;
170 		int insn_idx;
171 		union {
172 			int map_idx;
173 			int text_off;
174 		};
175 	} *reloc_desc;
176 	int nr_reloc;
177 	int log_level;
178 
179 	struct {
180 		int nr;
181 		int *fds;
182 	} instances;
183 	bpf_program_prep_t preprocessor;
184 
185 	struct bpf_object *obj;
186 	void *priv;
187 	bpf_program_clear_priv_t clear_priv;
188 
189 	enum bpf_attach_type expected_attach_type;
190 	void *func_info;
191 	__u32 func_info_rec_size;
192 	__u32 func_info_cnt;
193 
194 	struct bpf_capabilities *caps;
195 
196 	void *line_info;
197 	__u32 line_info_rec_size;
198 	__u32 line_info_cnt;
199 	__u32 prog_flags;
200 };
201 
202 enum libbpf_map_type {
203 	LIBBPF_MAP_UNSPEC,
204 	LIBBPF_MAP_DATA,
205 	LIBBPF_MAP_BSS,
206 	LIBBPF_MAP_RODATA,
207 };
208 
209 static const char * const libbpf_type_to_btf_name[] = {
210 	[LIBBPF_MAP_DATA]	= ".data",
211 	[LIBBPF_MAP_BSS]	= ".bss",
212 	[LIBBPF_MAP_RODATA]	= ".rodata",
213 };
214 
215 struct bpf_map {
216 	int fd;
217 	char *name;
218 	int sec_idx;
219 	size_t sec_offset;
220 	int map_ifindex;
221 	int inner_map_fd;
222 	struct bpf_map_def def;
223 	__u32 btf_key_type_id;
224 	__u32 btf_value_type_id;
225 	void *priv;
226 	bpf_map_clear_priv_t clear_priv;
227 	enum libbpf_map_type libbpf_type;
228 };
229 
230 struct bpf_secdata {
231 	void *rodata;
232 	void *data;
233 };
234 
235 static LIST_HEAD(bpf_objects_list);
236 
237 struct bpf_object {
238 	char name[BPF_OBJ_NAME_LEN];
239 	char license[64];
240 	__u32 kern_version;
241 
242 	struct bpf_program *programs;
243 	size_t nr_programs;
244 	struct bpf_map *maps;
245 	size_t nr_maps;
246 	size_t maps_cap;
247 	struct bpf_secdata sections;
248 
249 	bool loaded;
250 	bool has_pseudo_calls;
251 
252 	/*
253 	 * Information when doing elf related work. Only valid if fd
254 	 * is valid.
255 	 */
256 	struct {
257 		int fd;
258 		void *obj_buf;
259 		size_t obj_buf_sz;
260 		Elf *elf;
261 		GElf_Ehdr ehdr;
262 		Elf_Data *symbols;
263 		Elf_Data *data;
264 		Elf_Data *rodata;
265 		Elf_Data *bss;
266 		size_t strtabidx;
267 		struct {
268 			GElf_Shdr shdr;
269 			Elf_Data *data;
270 		} *reloc;
271 		int nr_reloc;
272 		int maps_shndx;
273 		int btf_maps_shndx;
274 		int text_shndx;
275 		int data_shndx;
276 		int rodata_shndx;
277 		int bss_shndx;
278 	} efile;
279 	/*
280 	 * All loaded bpf_object instances are linked in a list, which is
281 	 * hidden from the caller. bpf_objects__<func> handlers deal with
282 	 * all objects.
283 	 */
284 	struct list_head list;
285 
286 	struct btf *btf;
287 	struct btf_ext *btf_ext;
288 
289 	void *priv;
290 	bpf_object_clear_priv_t clear_priv;
291 
292 	struct bpf_capabilities caps;
293 
294 	char path[];
295 };
296 #define obj_elf_valid(o)	((o)->efile.elf)
297 
298 void bpf_program__unload(struct bpf_program *prog)
299 {
300 	int i;
301 
302 	if (!prog)
303 		return;
304 
305 	/*
306 	 * If the object is opened but the program was never loaded,
307 	 * it is possible that prog->instances.nr == -1.
308 	 */
309 	if (prog->instances.nr > 0) {
310 		for (i = 0; i < prog->instances.nr; i++)
311 			zclose(prog->instances.fds[i]);
312 	} else if (prog->instances.nr != -1) {
313 		pr_warning("Internal error: instances.nr is %d\n",
314 			   prog->instances.nr);
315 	}
316 
317 	prog->instances.nr = -1;
318 	zfree(&prog->instances.fds);
319 
320 	zfree(&prog->func_info);
321 	zfree(&prog->line_info);
322 }
323 
324 static void bpf_program__exit(struct bpf_program *prog)
325 {
326 	if (!prog)
327 		return;
328 
329 	if (prog->clear_priv)
330 		prog->clear_priv(prog, prog->priv);
331 
332 	prog->priv = NULL;
333 	prog->clear_priv = NULL;
334 
335 	bpf_program__unload(prog);
336 	zfree(&prog->name);
337 	zfree(&prog->section_name);
338 	zfree(&prog->pin_name);
339 	zfree(&prog->insns);
340 	zfree(&prog->reloc_desc);
341 
342 	prog->nr_reloc = 0;
343 	prog->insns_cnt = 0;
344 	prog->idx = -1;
345 }
346 
347 static char *__bpf_program__pin_name(struct bpf_program *prog)
348 {
349 	char *name, *p;
350 
351 	name = p = strdup(prog->section_name);
352 	while ((p = strchr(p, '/')))
353 		*p = '_';
354 
355 	return name;
356 }
357 
358 static int
359 bpf_program__init(void *data, size_t size, char *section_name, int idx,
360 		  struct bpf_program *prog)
361 {
362 	const size_t bpf_insn_sz = sizeof(struct bpf_insn);
363 
364 	if (size == 0 || size % bpf_insn_sz) {
365 		pr_warning("corrupted section '%s', size: %zu\n",
366 			   section_name, size);
367 		return -EINVAL;
368 	}
369 
370 	memset(prog, 0, sizeof(*prog));
371 
372 	prog->section_name = strdup(section_name);
373 	if (!prog->section_name) {
374 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
375 			   idx, section_name);
376 		goto errout;
377 	}
378 
379 	prog->pin_name = __bpf_program__pin_name(prog);
380 	if (!prog->pin_name) {
381 		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
382 			   idx, section_name);
383 		goto errout;
384 	}
385 
386 	prog->insns = malloc(size);
387 	if (!prog->insns) {
388 		pr_warning("failed to alloc insns for prog under section %s\n",
389 			   section_name);
390 		goto errout;
391 	}
392 	prog->insns_cnt = size / bpf_insn_sz;
393 	memcpy(prog->insns, data, size);
394 	prog->idx = idx;
395 	prog->instances.fds = NULL;
396 	prog->instances.nr = -1;
397 	prog->type = BPF_PROG_TYPE_UNSPEC;
398 
399 	return 0;
400 errout:
401 	bpf_program__exit(prog);
402 	return -ENOMEM;
403 }
404 
405 static int
406 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
407 			char *section_name, int idx)
408 {
409 	struct bpf_program prog, *progs;
410 	int nr_progs, err;
411 
412 	err = bpf_program__init(data, size, section_name, idx, &prog);
413 	if (err)
414 		return err;
415 
416 	prog.caps = &obj->caps;
417 	progs = obj->programs;
418 	nr_progs = obj->nr_programs;
419 
420 	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
421 	if (!progs) {
422 		/*
423 		 * In this case the original obj->programs
424 		 * is still valid, so no special treatment is needed
425 		 * in bpf_close_object().
426 		 */
427 		pr_warning("failed to alloc a new program under section '%s'\n",
428 			   section_name);
429 		bpf_program__exit(&prog);
430 		return -ENOMEM;
431 	}
432 
433 	pr_debug("found program %s\n", prog.section_name);
434 	obj->programs = progs;
435 	obj->nr_programs = nr_progs + 1;
436 	prog.obj = obj;
437 	progs[nr_progs] = prog;
438 	return 0;
439 }
440 
441 static int
442 bpf_object__init_prog_names(struct bpf_object *obj)
443 {
444 	Elf_Data *symbols = obj->efile.symbols;
445 	struct bpf_program *prog;
446 	size_t pi, si;
447 
448 	for (pi = 0; pi < obj->nr_programs; pi++) {
449 		const char *name = NULL;
450 
451 		prog = &obj->programs[pi];
452 
453 		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
454 		     si++) {
455 			GElf_Sym sym;
456 
457 			if (!gelf_getsym(symbols, si, &sym))
458 				continue;
459 			if (sym.st_shndx != prog->idx)
460 				continue;
461 			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
462 				continue;
463 
464 			name = elf_strptr(obj->efile.elf,
465 					  obj->efile.strtabidx,
466 					  sym.st_name);
467 			if (!name) {
468 				pr_warning("failed to get sym name string for prog %s\n",
469 					   prog->section_name);
470 				return -LIBBPF_ERRNO__LIBELF;
471 			}
472 		}
473 
474 		if (!name && prog->idx == obj->efile.text_shndx)
475 			name = ".text";
476 
477 		if (!name) {
478 			pr_warning("failed to find sym for prog %s\n",
479 				   prog->section_name);
480 			return -EINVAL;
481 		}
482 
483 		prog->name = strdup(name);
484 		if (!prog->name) {
485 			pr_warning("failed to allocate memory for prog sym %s\n",
486 				   name);
487 			return -ENOMEM;
488 		}
489 	}
490 
491 	return 0;
492 }
493 
494 static struct bpf_object *bpf_object__new(const char *path,
495 					  void *obj_buf,
496 					  size_t obj_buf_sz)
497 {
498 	struct bpf_object *obj;
499 	char *end;
500 
501 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
502 	if (!obj) {
503 		pr_warning("alloc memory failed for %s\n", path);
504 		return ERR_PTR(-ENOMEM);
505 	}
506 
507 	strcpy(obj->path, path);
508 	/* Using basename() GNU version which doesn't modify arg. */
509 	strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
510 	end = strchr(obj->name, '.');
511 	if (end)
512 		*end = 0;
513 
514 	obj->efile.fd = -1;
515 	/*
516 	 * The caller of this function should also call
517 	 * bpf_object__elf_finish() after data collection to return
518 	 * obj_buf to the user. If not, we would have to duplicate the
519 	 * buffer to avoid the user freeing it before ELF processing finishes.
520 	 */
521 	obj->efile.obj_buf = obj_buf;
522 	obj->efile.obj_buf_sz = obj_buf_sz;
523 	obj->efile.maps_shndx = -1;
524 	obj->efile.btf_maps_shndx = -1;
525 	obj->efile.data_shndx = -1;
526 	obj->efile.rodata_shndx = -1;
527 	obj->efile.bss_shndx = -1;
528 
529 	obj->loaded = false;
530 
531 	INIT_LIST_HEAD(&obj->list);
532 	list_add(&obj->list, &bpf_objects_list);
533 	return obj;
534 }
535 
536 static void bpf_object__elf_finish(struct bpf_object *obj)
537 {
538 	if (!obj_elf_valid(obj))
539 		return;
540 
541 	if (obj->efile.elf) {
542 		elf_end(obj->efile.elf);
543 		obj->efile.elf = NULL;
544 	}
545 	obj->efile.symbols = NULL;
546 	obj->efile.data = NULL;
547 	obj->efile.rodata = NULL;
548 	obj->efile.bss = NULL;
549 
550 	zfree(&obj->efile.reloc);
551 	obj->efile.nr_reloc = 0;
552 	zclose(obj->efile.fd);
553 	obj->efile.obj_buf = NULL;
554 	obj->efile.obj_buf_sz = 0;
555 }
556 
557 static int bpf_object__elf_init(struct bpf_object *obj)
558 {
559 	int err = 0;
560 	GElf_Ehdr *ep;
561 
562 	if (obj_elf_valid(obj)) {
563 		pr_warning("elf init: internal error\n");
564 		return -LIBBPF_ERRNO__LIBELF;
565 	}
566 
567 	if (obj->efile.obj_buf_sz > 0) {
568 		/*
569 		 * obj_buf should have been validated by
570 		 * bpf_object__open_buffer().
571 		 */
572 		obj->efile.elf = elf_memory(obj->efile.obj_buf,
573 					    obj->efile.obj_buf_sz);
574 	} else {
575 		obj->efile.fd = open(obj->path, O_RDONLY);
576 		if (obj->efile.fd < 0) {
577 			char errmsg[STRERR_BUFSIZE], *cp;
578 
579 			err = -errno;
580 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
581 			pr_warning("failed to open %s: %s\n", obj->path, cp);
582 			return err;
583 		}
584 
585 		obj->efile.elf = elf_begin(obj->efile.fd,
586 					   LIBBPF_ELF_C_READ_MMAP, NULL);
587 	}
588 
589 	if (!obj->efile.elf) {
590 		pr_warning("failed to open %s as ELF file\n", obj->path);
591 		err = -LIBBPF_ERRNO__LIBELF;
592 		goto errout;
593 	}
594 
595 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
596 		pr_warning("failed to get EHDR from %s\n", obj->path);
597 		err = -LIBBPF_ERRNO__FORMAT;
598 		goto errout;
599 	}
600 	ep = &obj->efile.ehdr;
601 
602 	/* Old LLVM set e_machine to EM_NONE */
603 	if (ep->e_type != ET_REL ||
604 	    (ep->e_machine && ep->e_machine != EM_BPF)) {
605 		pr_warning("%s is not an eBPF object file\n", obj->path);
606 		err = -LIBBPF_ERRNO__FORMAT;
607 		goto errout;
608 	}
609 
610 	return 0;
611 errout:
612 	bpf_object__elf_finish(obj);
613 	return err;
614 }
615 
616 static int bpf_object__check_endianness(struct bpf_object *obj)
617 {
618 #if __BYTE_ORDER == __LITTLE_ENDIAN
619 	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
620 		return 0;
621 #elif __BYTE_ORDER == __BIG_ENDIAN
622 	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
623 		return 0;
624 #else
625 # error "Unrecognized __BYTE_ORDER__"
626 #endif
627 	pr_warning("endianness mismatch.\n");
628 	return -LIBBPF_ERRNO__ENDIAN;
629 }
630 
631 static int
632 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
633 {
634 	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
635 	pr_debug("license of %s is %s\n", obj->path, obj->license);
636 	return 0;
637 }
638 
639 static int
640 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
641 {
642 	__u32 kver;
643 
644 	if (size != sizeof(kver)) {
645 		pr_warning("invalid kver section in %s\n", obj->path);
646 		return -LIBBPF_ERRNO__FORMAT;
647 	}
648 	memcpy(&kver, data, sizeof(kver));
649 	obj->kern_version = kver;
650 	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
651 	return 0;
652 }
653 
654 static int compare_bpf_map(const void *_a, const void *_b)
655 {
656 	const struct bpf_map *a = _a;
657 	const struct bpf_map *b = _b;
658 
659 	if (a->sec_idx != b->sec_idx)
660 		return a->sec_idx - b->sec_idx;
661 	return a->sec_offset - b->sec_offset;
662 }
663 
664 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
665 {
666 	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
667 	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
668 		return true;
669 	return false;
670 }
671 
672 static int bpf_object_search_section_size(const struct bpf_object *obj,
673 					  const char *name, size_t *d_size)
674 {
675 	const GElf_Ehdr *ep = &obj->efile.ehdr;
676 	Elf *elf = obj->efile.elf;
677 	Elf_Scn *scn = NULL;
678 	int idx = 0;
679 
680 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
681 		const char *sec_name;
682 		Elf_Data *data;
683 		GElf_Shdr sh;
684 
685 		idx++;
686 		if (gelf_getshdr(scn, &sh) != &sh) {
687 			pr_warning("failed to get section(%d) header from %s\n",
688 				   idx, obj->path);
689 			return -EIO;
690 		}
691 
692 		sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
693 		if (!sec_name) {
694 			pr_warning("failed to get section(%d) name from %s\n",
695 				   idx, obj->path);
696 			return -EIO;
697 		}
698 
699 		if (strcmp(name, sec_name))
700 			continue;
701 
702 		data = elf_getdata(scn, 0);
703 		if (!data) {
704 			pr_warning("failed to get section(%d) data from %s(%s)\n",
705 				   idx, name, obj->path);
706 			return -EIO;
707 		}
708 
709 		*d_size = data->d_size;
710 		return 0;
711 	}
712 
713 	return -ENOENT;
714 }
715 
716 int bpf_object__section_size(const struct bpf_object *obj, const char *name,
717 			     __u32 *size)
718 {
719 	int ret = -ENOENT;
720 	size_t d_size;
721 
722 	*size = 0;
723 	if (!name) {
724 		return -EINVAL;
725 	} else if (!strcmp(name, ".data")) {
726 		if (obj->efile.data)
727 			*size = obj->efile.data->d_size;
728 	} else if (!strcmp(name, ".bss")) {
729 		if (obj->efile.bss)
730 			*size = obj->efile.bss->d_size;
731 	} else if (!strcmp(name, ".rodata")) {
732 		if (obj->efile.rodata)
733 			*size = obj->efile.rodata->d_size;
734 	} else {
735 		ret = bpf_object_search_section_size(obj, name, &d_size);
736 		if (!ret)
737 			*size = d_size;
738 	}
739 
740 	return *size ? 0 : ret;
741 }
742 
743 int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
744 				__u32 *off)
745 {
746 	Elf_Data *symbols = obj->efile.symbols;
747 	const char *sname;
748 	size_t si;
749 
750 	if (!name || !off)
751 		return -EINVAL;
752 
753 	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
754 		GElf_Sym sym;
755 
756 		if (!gelf_getsym(symbols, si, &sym))
757 			continue;
758 		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
759 		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
760 			continue;
761 
762 		sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
763 				   sym.st_name);
764 		if (!sname) {
765 			pr_warning("failed to get sym name string for var %s\n",
766 				   name);
767 			return -EIO;
768 		}
769 		if (strcmp(name, sname) == 0) {
770 			*off = sym.st_value;
771 			return 0;
772 		}
773 	}
774 
775 	return -ENOENT;
776 }
777 
778 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
779 {
780 	struct bpf_map *new_maps;
781 	size_t new_cap;
782 	int i;
783 
784 	if (obj->nr_maps < obj->maps_cap)
785 		return &obj->maps[obj->nr_maps++];
786 
787 	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
788 	new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
789 	if (!new_maps) {
790 		pr_warning("alloc maps for object failed\n");
791 		return ERR_PTR(-ENOMEM);
792 	}
793 
794 	obj->maps_cap = new_cap;
795 	obj->maps = new_maps;
796 
797 	/* zero out new maps */
798 	memset(obj->maps + obj->nr_maps, 0,
799 	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
800 	/*
801 	 * fill all fds with -1 so we won't close an incorrect fd (fd=0 is stdin)
802 	 * on failure (zclose won't close a negative fd).
803 	 */
804 	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
805 		obj->maps[i].fd = -1;
806 		obj->maps[i].inner_map_fd = -1;
807 	}
808 
809 	return &obj->maps[obj->nr_maps++];
810 }
811 
812 static int
813 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
814 			      int sec_idx, Elf_Data *data, void **data_buff)
815 {
816 	char map_name[BPF_OBJ_NAME_LEN];
817 	struct bpf_map_def *def;
818 	struct bpf_map *map;
819 
820 	map = bpf_object__add_map(obj);
821 	if (IS_ERR(map))
822 		return PTR_ERR(map);
823 
824 	map->libbpf_type = type;
825 	map->sec_idx = sec_idx;
826 	map->sec_offset = 0;
827 	snprintf(map_name, sizeof(map_name), "%.8s%.7s", obj->name,
828 		 libbpf_type_to_btf_name[type]);
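	/* Editor's note (illustrative, not from the original source): the
	 * "%.8s%.7s" format above caps the internal map name at 8 + 7 chars,
	 * so e.g. object "test_obj" plus ".bss" yields map name "test_obj.bss",
	 * which still fits in BPF_OBJ_NAME_LEN together with the terminator.
	 */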
829 	map->name = strdup(map_name);
830 	if (!map->name) {
831 		pr_warning("failed to alloc map name\n");
832 		return -ENOMEM;
833 	}
834 	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
835 		 map_name, map->sec_idx, map->sec_offset);
836 
837 	def = &map->def;
838 	def->type = BPF_MAP_TYPE_ARRAY;
839 	def->key_size = sizeof(int);
840 	def->value_size = data->d_size;
841 	def->max_entries = 1;
842 	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
843 	if (data_buff) {
844 		*data_buff = malloc(data->d_size);
845 		if (!*data_buff) {
846 			zfree(&map->name);
847 			pr_warning("failed to alloc map content buffer\n");
848 			return -ENOMEM;
849 		}
850 		memcpy(*data_buff, data->d_buf, data->d_size);
851 	}
852 
853 	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
854 	return 0;
855 }
856 
857 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
858 {
859 	int err;
860 
861 	if (!obj->caps.global_data)
862 		return 0;
863 	/*
864 	 * Populate obj->maps with libbpf internal maps.
865 	 */
866 	if (obj->efile.data_shndx >= 0) {
867 		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
868 						    obj->efile.data_shndx,
869 						    obj->efile.data,
870 						    &obj->sections.data);
871 		if (err)
872 			return err;
873 	}
874 	if (obj->efile.rodata_shndx >= 0) {
875 		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
876 						    obj->efile.rodata_shndx,
877 						    obj->efile.rodata,
878 						    &obj->sections.rodata);
879 		if (err)
880 			return err;
881 	}
882 	if (obj->efile.bss_shndx >= 0) {
883 		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
884 						    obj->efile.bss_shndx,
885 						    obj->efile.bss, NULL);
886 		if (err)
887 			return err;
888 	}
889 	return 0;
890 }
891 
892 static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
893 {
894 	Elf_Data *symbols = obj->efile.symbols;
895 	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
896 	Elf_Data *data = NULL;
897 	Elf_Scn *scn;
898 
899 	if (obj->efile.maps_shndx < 0)
900 		return 0;
901 
902 	if (!symbols)
903 		return -EINVAL;
904 
905 	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
906 	if (scn)
907 		data = elf_getdata(scn, NULL);
908 	if (!scn || !data) {
909 		pr_warning("failed to get Elf_Data from map section %d\n",
910 			   obj->efile.maps_shndx);
911 		return -EINVAL;
912 	}
913 
914 	/*
915 	 * Count number of maps. Each map has a name.
916 	 * Array of maps is not supported: only the first element is
917 	 * considered.
918 	 *
919 	 * TODO: Detect array of map and report error.
920 	 */
921 	nr_syms = symbols->d_size / sizeof(GElf_Sym);
922 	for (i = 0; i < nr_syms; i++) {
923 		GElf_Sym sym;
924 
925 		if (!gelf_getsym(symbols, i, &sym))
926 			continue;
927 		if (sym.st_shndx != obj->efile.maps_shndx)
928 			continue;
929 		nr_maps++;
930 	}
931 	/* Assume equally sized map definitions */
932 	pr_debug("maps in %s: %d maps in %zd bytes\n",
933 		 obj->path, nr_maps, data->d_size);
934 
935 	map_def_sz = data->d_size / nr_maps;
936 	if (!data->d_size || (data->d_size % nr_maps) != 0) {
937 		pr_warning("unable to determine map definition size "
938 			   "section %s, %d maps in %zd bytes\n",
939 			   obj->path, nr_maps, data->d_size);
940 		return -EINVAL;
941 	}
942 
943 	/* Fill obj->maps using data in "maps" section.  */
944 	for (i = 0; i < nr_syms; i++) {
945 		GElf_Sym sym;
946 		const char *map_name;
947 		struct bpf_map_def *def;
948 		struct bpf_map *map;
949 
950 		if (!gelf_getsym(symbols, i, &sym))
951 			continue;
952 		if (sym.st_shndx != obj->efile.maps_shndx)
953 			continue;
954 
955 		map = bpf_object__add_map(obj);
956 		if (IS_ERR(map))
957 			return PTR_ERR(map);
958 
959 		map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
960 				      sym.st_name);
961 		if (!map_name) {
962 			pr_warning("failed to get map #%d name sym string for obj %s\n",
963 				   i, obj->path);
964 			return -LIBBPF_ERRNO__FORMAT;
965 		}
966 
967 		map->libbpf_type = LIBBPF_MAP_UNSPEC;
968 		map->sec_idx = sym.st_shndx;
969 		map->sec_offset = sym.st_value;
970 		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
971 			 map_name, map->sec_idx, map->sec_offset);
972 		if (sym.st_value + map_def_sz > data->d_size) {
973 			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
974 				   obj->path, map_name);
975 			return -EINVAL;
976 		}
977 
978 		map->name = strdup(map_name);
979 		if (!map->name) {
980 			pr_warning("failed to alloc map name\n");
981 			return -ENOMEM;
982 		}
983 		pr_debug("map %d is \"%s\"\n", i, map->name);
984 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
985 		/*
986 		 * If the definition of the map in the object file fits in
987 		 * bpf_map_def, copy it.  Any extra fields in our version
988 		 * of bpf_map_def will default to zero as a result of the
989 		 * calloc above.
990 		 */
991 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
992 			memcpy(&map->def, def, map_def_sz);
993 		} else {
994 			/*
995 			 * Here the map structure being read is bigger than what
996 			 * we expect, so truncate it if the excess bytes are all
997 			 * zero. If they are not zero, reject this map as
998 			 * incompatible.
999 			 */
1000 			char *b;
1001 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
1002 			     b < ((char *)def) + map_def_sz; b++) {
1003 				if (*b != 0) {
1004 					pr_warning("maps section in %s: \"%s\" "
1005 						   "has unrecognized, non-zero "
1006 						   "options\n",
1007 						   obj->path, map_name);
1008 					if (strict)
1009 						return -EINVAL;
1010 				}
1011 			}
1012 			memcpy(&map->def, def, sizeof(struct bpf_map_def));
1013 		}
1014 	}
1015 	return 0;
1016 }
1017 
1018 static const struct btf_type *
1019 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
1020 {
1021 	const struct btf_type *t = btf__type_by_id(btf, id);
1022 
1023 	if (res_id)
1024 		*res_id = id;
1025 
1026 	while (btf_is_mod(t) || btf_is_typedef(t)) {
1027 		if (res_id)
1028 			*res_id = t->type;
1029 		t = btf__type_by_id(btf, t->type);
1030 	}
1031 
1032 	return t;
1033 }
1034 
1035 /*
1036  * Fetch integer attribute of BTF map definition. Such attributes are
1037  * represented using a pointer to an array, in which dimensionality of array
1038  * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
1039  * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
1040  * type definition, while using only sizeof(void *) space in ELF data section.
1041  */
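/* For illustration (editor's sketch, not part of the original file): with this
 * encoding, a BTF-defined map in a BPF program's ".maps" section could be
 * declared roughly as
 *
 *	struct {
 *		int (*type)[BPF_MAP_TYPE_ARRAY];
 *		int (*max_entries)[64];
 *		__u32 *key;
 *		long *value;
 *	} my_map SEC(".maps");
 *
 * get_map_field_int() below recovers the integer attributes (type, max_entries)
 * from the array dimensions, while "key" and "value" carry full BTF type
 * information through their pointee types.
 */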
1042 static bool get_map_field_int(const char *map_name, const struct btf *btf,
1043 			      const struct btf_type *def,
1044 			      const struct btf_member *m, __u32 *res) {
1045 	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
1046 	const char *name = btf__name_by_offset(btf, m->name_off);
1047 	const struct btf_array *arr_info;
1048 	const struct btf_type *arr_t;
1049 
1050 	if (!btf_is_ptr(t)) {
1051 		pr_warning("map '%s': attr '%s': expected PTR, got %u.\n",
1052 			   map_name, name, btf_kind(t));
1053 		return false;
1054 	}
1055 
1056 	arr_t = btf__type_by_id(btf, t->type);
1057 	if (!arr_t) {
1058 		pr_warning("map '%s': attr '%s': type [%u] not found.\n",
1059 			   map_name, name, t->type);
1060 		return false;
1061 	}
1062 	if (!btf_is_array(arr_t)) {
1063 		pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n",
1064 			   map_name, name, btf_kind(arr_t));
1065 		return false;
1066 	}
1067 	arr_info = btf_array(arr_t);
1068 	*res = arr_info->nelems;
1069 	return true;
1070 }
1071 
1072 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
1073 					 const struct btf_type *sec,
1074 					 int var_idx, int sec_idx,
1075 					 const Elf_Data *data, bool strict)
1076 {
1077 	const struct btf_type *var, *def, *t;
1078 	const struct btf_var_secinfo *vi;
1079 	const struct btf_var *var_extra;
1080 	const struct btf_member *m;
1081 	const char *map_name;
1082 	struct bpf_map *map;
1083 	int vlen, i;
1084 
1085 	vi = btf_var_secinfos(sec) + var_idx;
1086 	var = btf__type_by_id(obj->btf, vi->type);
1087 	var_extra = btf_var(var);
1088 	map_name = btf__name_by_offset(obj->btf, var->name_off);
1089 	vlen = btf_vlen(var);
1090 
1091 	if (map_name == NULL || map_name[0] == '\0') {
1092 		pr_warning("map #%d: empty name.\n", var_idx);
1093 		return -EINVAL;
1094 	}
1095 	if ((__u64)vi->offset + vi->size > data->d_size) {
1096 		pr_warning("map '%s' BTF data is corrupted.\n", map_name);
1097 		return -EINVAL;
1098 	}
1099 	if (!btf_is_var(var)) {
1100 		pr_warning("map '%s': unexpected var kind %u.\n",
1101 			   map_name, btf_kind(var));
1102 		return -EINVAL;
1103 	}
1104 	if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
1105 	    var_extra->linkage != BTF_VAR_STATIC) {
1106 		pr_warning("map '%s': unsupported var linkage %u.\n",
1107 			   map_name, var_extra->linkage);
1108 		return -EOPNOTSUPP;
1109 	}
1110 
1111 	def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
1112 	if (!btf_is_struct(def)) {
1113 		pr_warning("map '%s': unexpected def kind %u.\n",
1114 			   map_name, btf_kind(var));
1115 		return -EINVAL;
1116 	}
1117 	if (def->size > vi->size) {
1118 		pr_warning("map '%s': invalid def size.\n", map_name);
1119 		return -EINVAL;
1120 	}
1121 
1122 	map = bpf_object__add_map(obj);
1123 	if (IS_ERR(map))
1124 		return PTR_ERR(map);
1125 	map->name = strdup(map_name);
1126 	if (!map->name) {
1127 		pr_warning("map '%s': failed to alloc map name.\n", map_name);
1128 		return -ENOMEM;
1129 	}
1130 	map->libbpf_type = LIBBPF_MAP_UNSPEC;
1131 	map->def.type = BPF_MAP_TYPE_UNSPEC;
1132 	map->sec_idx = sec_idx;
1133 	map->sec_offset = vi->offset;
1134 	pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
1135 		 map_name, map->sec_idx, map->sec_offset);
1136 
1137 	vlen = btf_vlen(def);
1138 	m = btf_members(def);
1139 	for (i = 0; i < vlen; i++, m++) {
1140 		const char *name = btf__name_by_offset(obj->btf, m->name_off);
1141 
1142 		if (!name) {
1143 			pr_warning("map '%s': invalid field #%d.\n",
1144 				   map_name, i);
1145 			return -EINVAL;
1146 		}
1147 		if (strcmp(name, "type") == 0) {
1148 			if (!get_map_field_int(map_name, obj->btf, def, m,
1149 					       &map->def.type))
1150 				return -EINVAL;
1151 			pr_debug("map '%s': found type = %u.\n",
1152 				 map_name, map->def.type);
1153 		} else if (strcmp(name, "max_entries") == 0) {
1154 			if (!get_map_field_int(map_name, obj->btf, def, m,
1155 					       &map->def.max_entries))
1156 				return -EINVAL;
1157 			pr_debug("map '%s': found max_entries = %u.\n",
1158 				 map_name, map->def.max_entries);
1159 		} else if (strcmp(name, "map_flags") == 0) {
1160 			if (!get_map_field_int(map_name, obj->btf, def, m,
1161 					       &map->def.map_flags))
1162 				return -EINVAL;
1163 			pr_debug("map '%s': found map_flags = %u.\n",
1164 				 map_name, map->def.map_flags);
1165 		} else if (strcmp(name, "key_size") == 0) {
1166 			__u32 sz;
1167 
1168 			if (!get_map_field_int(map_name, obj->btf, def, m,
1169 					       &sz))
1170 				return -EINVAL;
1171 			pr_debug("map '%s': found key_size = %u.\n",
1172 				 map_name, sz);
1173 			if (map->def.key_size && map->def.key_size != sz) {
1174 				pr_warning("map '%s': conflicting key size %u != %u.\n",
1175 					   map_name, map->def.key_size, sz);
1176 				return -EINVAL;
1177 			}
1178 			map->def.key_size = sz;
1179 		} else if (strcmp(name, "key") == 0) {
1180 			__s64 sz;
1181 
1182 			t = btf__type_by_id(obj->btf, m->type);
1183 			if (!t) {
1184 				pr_warning("map '%s': key type [%d] not found.\n",
1185 					   map_name, m->type);
1186 				return -EINVAL;
1187 			}
1188 			if (!btf_is_ptr(t)) {
1189 				pr_warning("map '%s': key spec is not PTR: %u.\n",
1190 					   map_name, btf_kind(t));
1191 				return -EINVAL;
1192 			}
1193 			sz = btf__resolve_size(obj->btf, t->type);
1194 			if (sz < 0) {
1195 				pr_warning("map '%s': can't determine key size for type [%u]: %lld.\n",
1196 					   map_name, t->type, sz);
1197 				return sz;
1198 			}
1199 			pr_debug("map '%s': found key [%u], sz = %lld.\n",
1200 				 map_name, t->type, sz);
1201 			if (map->def.key_size && map->def.key_size != sz) {
1202 				pr_warning("map '%s': conflicting key size %u != %lld.\n",
1203 					   map_name, map->def.key_size, sz);
1204 				return -EINVAL;
1205 			}
1206 			map->def.key_size = sz;
1207 			map->btf_key_type_id = t->type;
1208 		} else if (strcmp(name, "value_size") == 0) {
1209 			__u32 sz;
1210 
1211 			if (!get_map_field_int(map_name, obj->btf, def, m,
1212 					       &sz))
1213 				return -EINVAL;
1214 			pr_debug("map '%s': found value_size = %u.\n",
1215 				 map_name, sz);
1216 			if (map->def.value_size && map->def.value_size != sz) {
1217 				pr_warning("map '%s': conflicting value size %u != %u.\n",
1218 					   map_name, map->def.value_size, sz);
1219 				return -EINVAL;
1220 			}
1221 			map->def.value_size = sz;
1222 		} else if (strcmp(name, "value") == 0) {
1223 			__s64 sz;
1224 
1225 			t = btf__type_by_id(obj->btf, m->type);
1226 			if (!t) {
1227 				pr_warning("map '%s': value type [%d] not found.\n",
1228 					   map_name, m->type);
1229 				return -EINVAL;
1230 			}
1231 			if (!btf_is_ptr(t)) {
1232 				pr_warning("map '%s': value spec is not PTR: %u.\n",
1233 					   map_name, btf_kind(t));
1234 				return -EINVAL;
1235 			}
1236 			sz = btf__resolve_size(obj->btf, t->type);
1237 			if (sz < 0) {
1238 				pr_warning("map '%s': can't determine value size for type [%u]: %lld.\n",
1239 					   map_name, t->type, sz);
1240 				return sz;
1241 			}
1242 			pr_debug("map '%s': found value [%u], sz = %lld.\n",
1243 				 map_name, t->type, sz);
1244 			if (map->def.value_size && map->def.value_size != sz) {
1245 				pr_warning("map '%s': conflicting value size %u != %lld.\n",
1246 					   map_name, map->def.value_size, sz);
1247 				return -EINVAL;
1248 			}
1249 			map->def.value_size = sz;
1250 			map->btf_value_type_id = t->type;
1251 		} else {
1252 			if (strict) {
1253 				pr_warning("map '%s': unknown field '%s'.\n",
1254 					   map_name, name);
1255 				return -ENOTSUP;
1256 			}
1257 			pr_debug("map '%s': ignoring unknown field '%s'.\n",
1258 				 map_name, name);
1259 		}
1260 	}
1261 
1262 	if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
1263 		pr_warning("map '%s': map type isn't specified.\n", map_name);
1264 		return -EINVAL;
1265 	}
1266 
1267 	return 0;
1268 }
1269 
1270 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
1271 {
1272 	const struct btf_type *sec = NULL;
1273 	int nr_types, i, vlen, err;
1274 	const struct btf_type *t;
1275 	const char *name;
1276 	Elf_Data *data;
1277 	Elf_Scn *scn;
1278 
1279 	if (obj->efile.btf_maps_shndx < 0)
1280 		return 0;
1281 
1282 	scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
1283 	if (scn)
1284 		data = elf_getdata(scn, NULL);
1285 	if (!scn || !data) {
1286 		pr_warning("failed to get Elf_Data from map section %d (%s)\n",
1287 			   obj->efile.maps_shndx, MAPS_ELF_SEC);
1288 		return -EINVAL;
1289 	}
1290 
1291 	nr_types = btf__get_nr_types(obj->btf);
1292 	for (i = 1; i <= nr_types; i++) {
1293 		t = btf__type_by_id(obj->btf, i);
1294 		if (!btf_is_datasec(t))
1295 			continue;
1296 		name = btf__name_by_offset(obj->btf, t->name_off);
1297 		if (strcmp(name, MAPS_ELF_SEC) == 0) {
1298 			sec = t;
1299 			break;
1300 		}
1301 	}
1302 
1303 	if (!sec) {
1304 		pr_warning("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
1305 		return -ENOENT;
1306 	}
1307 
1308 	vlen = btf_vlen(sec);
1309 	for (i = 0; i < vlen; i++) {
1310 		err = bpf_object__init_user_btf_map(obj, sec, i,
1311 						    obj->efile.btf_maps_shndx,
1312 						    data, strict);
1313 		if (err)
1314 			return err;
1315 	}
1316 
1317 	return 0;
1318 }
1319 
1320 static int bpf_object__init_maps(struct bpf_object *obj, int flags)
1321 {
1322 	bool strict = !(flags & MAPS_RELAX_COMPAT);
1323 	int err;
1324 
1325 	err = bpf_object__init_user_maps(obj, strict);
1326 	if (err)
1327 		return err;
1328 
1329 	err = bpf_object__init_user_btf_maps(obj, strict);
1330 	if (err)
1331 		return err;
1332 
1333 	err = bpf_object__init_global_data_maps(obj);
1334 	if (err)
1335 		return err;
1336 
1337 	if (obj->nr_maps) {
1338 		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
1339 		      compare_bpf_map);
1340 	}
1341 	return 0;
1342 }
1343 
1344 static bool section_have_execinstr(struct bpf_object *obj, int idx)
1345 {
1346 	Elf_Scn *scn;
1347 	GElf_Shdr sh;
1348 
1349 	scn = elf_getscn(obj->efile.elf, idx);
1350 	if (!scn)
1351 		return false;
1352 
1353 	if (gelf_getshdr(scn, &sh) != &sh)
1354 		return false;
1355 
1356 	if (sh.sh_flags & SHF_EXECINSTR)
1357 		return true;
1358 
1359 	return false;
1360 }
1361 
1362 static void bpf_object__sanitize_btf(struct bpf_object *obj)
1363 {
1364 	bool has_datasec = obj->caps.btf_datasec;
1365 	bool has_func = obj->caps.btf_func;
1366 	struct btf *btf = obj->btf;
1367 	struct btf_type *t;
1368 	int i, j, vlen;
1369 
1370 	if (!obj->btf || (has_func && has_datasec))
1371 		return;
1372 
1373 	for (i = 1; i <= btf__get_nr_types(btf); i++) {
1374 		t = (struct btf_type *)btf__type_by_id(btf, i);
1375 
1376 		if (!has_datasec && btf_is_var(t)) {
1377 			/* replace VAR with INT */
1378 			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
1379 			/*
1380 			 * using size = 1 is the safest choice, 4 will be too
1381 			 * big and cause kernel BTF validation failure if
1382 			 * original variable took less than 4 bytes
1383 			 */
1384 			t->size = 1;
1385 			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
1386 		} else if (!has_datasec && btf_is_datasec(t)) {
1387 			/* replace DATASEC with STRUCT */
1388 			const struct btf_var_secinfo *v = btf_var_secinfos(t);
1389 			struct btf_member *m = btf_members(t);
1390 			struct btf_type *vt;
1391 			char *name;
1392 
1393 			name = (char *)btf__name_by_offset(btf, t->name_off);
1394 			while (*name) {
1395 				if (*name == '.')
1396 					*name = '_';
1397 				name++;
1398 			}
1399 
1400 			vlen = btf_vlen(t);
1401 			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
1402 			for (j = 0; j < vlen; j++, v++, m++) {
1403 				/* order of field assignments is important */
1404 				m->offset = v->offset * 8;
1405 				m->type = v->type;
1406 				/* preserve variable name as member name */
1407 				vt = (void *)btf__type_by_id(btf, v->type);
1408 				m->name_off = vt->name_off;
1409 			}
1410 		} else if (!has_func && btf_is_func_proto(t)) {
1411 			/* replace FUNC_PROTO with ENUM */
1412 			vlen = btf_vlen(t);
1413 			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
1414 			t->size = sizeof(__u32); /* kernel enforced */
1415 		} else if (!has_func && btf_is_func(t)) {
1416 			/* replace FUNC with TYPEDEF */
1417 			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
1418 		}
1419 	}
1420 }
1421 
1422 static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
1423 {
1424 	if (!obj->btf_ext)
1425 		return;
1426 
1427 	if (!obj->caps.btf_func) {
1428 		btf_ext__free(obj->btf_ext);
1429 		obj->btf_ext = NULL;
1430 	}
1431 }
1432 
1433 static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
1434 {
1435 	return obj->efile.btf_maps_shndx >= 0;
1436 }
1437 
1438 static int bpf_object__init_btf(struct bpf_object *obj,
1439 				Elf_Data *btf_data,
1440 				Elf_Data *btf_ext_data)
1441 {
1442 	bool btf_required = bpf_object__is_btf_mandatory(obj);
1443 	int err = 0;
1444 
1445 	if (btf_data) {
1446 		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
1447 		if (IS_ERR(obj->btf)) {
1448 			pr_warning("Error loading ELF section %s: %d.\n",
1449 				   BTF_ELF_SEC, err);
1450 			goto out;
1451 		}
1452 		err = btf__finalize_data(obj, obj->btf);
1453 		if (err) {
1454 			pr_warning("Error finalizing %s: %d.\n",
1455 				   BTF_ELF_SEC, err);
1456 			goto out;
1457 		}
1458 	}
1459 	if (btf_ext_data) {
1460 		if (!obj->btf) {
1461 			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
1462 				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
1463 			goto out;
1464 		}
1465 		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
1466 					    btf_ext_data->d_size);
1467 		if (IS_ERR(obj->btf_ext)) {
1468 			pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
1469 				   BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
1470 			obj->btf_ext = NULL;
1471 			goto out;
1472 		}
1473 	}
1474 out:
1475 	if (err || IS_ERR(obj->btf)) {
1476 		if (btf_required)
1477 			err = err ? : PTR_ERR(obj->btf);
1478 		else
1479 			err = 0;
1480 		if (!IS_ERR_OR_NULL(obj->btf))
1481 			btf__free(obj->btf);
1482 		obj->btf = NULL;
1483 	}
1484 	if (btf_required && !obj->btf) {
1485 		pr_warning("BTF is required, but is missing or corrupted.\n");
1486 		return err == 0 ? -ENOENT : err;
1487 	}
1488 	return 0;
1489 }
1490 
1491 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
1492 {
1493 	int err = 0;
1494 
1495 	if (!obj->btf)
1496 		return 0;
1497 
1498 	bpf_object__sanitize_btf(obj);
1499 	bpf_object__sanitize_btf_ext(obj);
1500 
1501 	err = btf__load(obj->btf);
1502 	if (err) {
1503 		pr_warning("Error loading %s into kernel: %d.\n",
1504 			   BTF_ELF_SEC, err);
1505 		btf__free(obj->btf);
1506 		obj->btf = NULL;
1507 		/* btf_ext can't exist without btf, so free it as well */
1508 		if (obj->btf_ext) {
1509 			btf_ext__free(obj->btf_ext);
1510 			obj->btf_ext = NULL;
1511 		}
1512 
1513 		if (bpf_object__is_btf_mandatory(obj))
1514 			return err;
1515 	}
1516 	return 0;
1517 }
1518 
1519 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
1520 {
1521 	Elf *elf = obj->efile.elf;
1522 	GElf_Ehdr *ep = &obj->efile.ehdr;
1523 	Elf_Data *btf_ext_data = NULL;
1524 	Elf_Data *btf_data = NULL;
1525 	Elf_Scn *scn = NULL;
1526 	int idx = 0, err = 0;
1527 
1528 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
1529 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
1530 		pr_warning("failed to get e_shstrndx from %s\n", obj->path);
1531 		return -LIBBPF_ERRNO__FORMAT;
1532 	}
1533 
1534 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1535 		char *name;
1536 		GElf_Shdr sh;
1537 		Elf_Data *data;
1538 
1539 		idx++;
1540 		if (gelf_getshdr(scn, &sh) != &sh) {
1541 			pr_warning("failed to get section(%d) header from %s\n",
1542 				   idx, obj->path);
1543 			return -LIBBPF_ERRNO__FORMAT;
1544 		}
1545 
1546 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
1547 		if (!name) {
1548 			pr_warning("failed to get section(%d) name from %s\n",
1549 				   idx, obj->path);
1550 			return -LIBBPF_ERRNO__FORMAT;
1551 		}
1552 
1553 		data = elf_getdata(scn, 0);
1554 		if (!data) {
1555 			pr_warning("failed to get section(%d) data from %s(%s)\n",
1556 				   idx, name, obj->path);
1557 			return -LIBBPF_ERRNO__FORMAT;
1558 		}
1559 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
1560 			 idx, name, (unsigned long)data->d_size,
1561 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
1562 			 (int)sh.sh_type);
1563 
1564 		if (strcmp(name, "license") == 0) {
1565 			err = bpf_object__init_license(obj,
1566 						       data->d_buf,
1567 						       data->d_size);
1568 			if (err)
1569 				return err;
1570 		} else if (strcmp(name, "version") == 0) {
1571 			err = bpf_object__init_kversion(obj,
1572 							data->d_buf,
1573 							data->d_size);
1574 			if (err)
1575 				return err;
1576 		} else if (strcmp(name, "maps") == 0) {
1577 			obj->efile.maps_shndx = idx;
1578 		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
1579 			obj->efile.btf_maps_shndx = idx;
1580 		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
1581 			btf_data = data;
1582 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1583 			btf_ext_data = data;
1584 		} else if (sh.sh_type == SHT_SYMTAB) {
1585 			if (obj->efile.symbols) {
1586 				pr_warning("bpf: multiple SYMTAB in %s\n",
1587 					   obj->path);
1588 				return -LIBBPF_ERRNO__FORMAT;
1589 			}
1590 			obj->efile.symbols = data;
1591 			obj->efile.strtabidx = sh.sh_link;
1592 		} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
1593 			if (sh.sh_flags & SHF_EXECINSTR) {
1594 				if (strcmp(name, ".text") == 0)
1595 					obj->efile.text_shndx = idx;
1596 				err = bpf_object__add_program(obj, data->d_buf,
1597 							      data->d_size, name, idx);
1598 				if (err) {
1599 					char errmsg[STRERR_BUFSIZE];
1600 					char *cp = libbpf_strerror_r(-err, errmsg,
1601 								     sizeof(errmsg));
1602 
1603 					pr_warning("failed to alloc program %s (%s): %s",
1604 						   name, obj->path, cp);
1605 					return err;
1606 				}
1607 			} else if (strcmp(name, ".data") == 0) {
1608 				obj->efile.data = data;
1609 				obj->efile.data_shndx = idx;
1610 			} else if (strcmp(name, ".rodata") == 0) {
1611 				obj->efile.rodata = data;
1612 				obj->efile.rodata_shndx = idx;
1613 			} else {
1614 				pr_debug("skip section(%d) %s\n", idx, name);
1615 			}
1616 		} else if (sh.sh_type == SHT_REL) {
1617 			int nr_reloc = obj->efile.nr_reloc;
1618 			void *reloc = obj->efile.reloc;
1619 			int sec = sh.sh_info; /* points to other section */
1620 
1621 			/* Only do relo for section with exec instructions */
1622 			if (!section_have_execinstr(obj, sec)) {
1623 				pr_debug("skip relo %s(%d) for section(%d)\n",
1624 					 name, idx, sec);
1625 				continue;
1626 			}
1627 
1628 			reloc = reallocarray(reloc, nr_reloc + 1,
1629 					     sizeof(*obj->efile.reloc));
1630 			if (!reloc) {
1631 				pr_warning("realloc failed\n");
1632 				return -ENOMEM;
1633 			}
1634 
1635 			obj->efile.reloc = reloc;
1636 			obj->efile.nr_reloc++;
1637 
1638 			obj->efile.reloc[nr_reloc].shdr = sh;
1639 			obj->efile.reloc[nr_reloc].data = data;
1640 		} else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) {
1641 			obj->efile.bss = data;
1642 			obj->efile.bss_shndx = idx;
1643 		} else {
1644 			pr_debug("skip section(%d) %s\n", idx, name);
1645 		}
1646 	}
1647 
1648 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
1649 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
1650 		return -LIBBPF_ERRNO__FORMAT;
1651 	}
1652 	err = bpf_object__init_btf(obj, btf_data, btf_ext_data);
1653 	if (!err)
1654 		err = bpf_object__init_maps(obj, flags);
1655 	if (!err)
1656 		err = bpf_object__sanitize_and_load_btf(obj);
1657 	if (!err)
1658 		err = bpf_object__init_prog_names(obj);
1659 	return err;
1660 }
1661 
1662 static struct bpf_program *
1663 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
1664 {
1665 	struct bpf_program *prog;
1666 	size_t i;
1667 
1668 	for (i = 0; i < obj->nr_programs; i++) {
1669 		prog = &obj->programs[i];
1670 		if (prog->idx == idx)
1671 			return prog;
1672 	}
1673 	return NULL;
1674 }
1675 
1676 struct bpf_program *
1677 bpf_object__find_program_by_title(const struct bpf_object *obj,
1678 				  const char *title)
1679 {
1680 	struct bpf_program *pos;
1681 
1682 	bpf_object__for_each_program(pos, obj) {
1683 		if (pos->section_name && !strcmp(pos->section_name, title))
1684 			return pos;
1685 	}
1686 	return NULL;
1687 }
1688 
1689 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
1690 				      int shndx)
1691 {
1692 	return shndx == obj->efile.data_shndx ||
1693 	       shndx == obj->efile.bss_shndx ||
1694 	       shndx == obj->efile.rodata_shndx;
1695 }
1696 
1697 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
1698 				      int shndx)
1699 {
1700 	return shndx == obj->efile.maps_shndx ||
1701 	       shndx == obj->efile.btf_maps_shndx;
1702 }
1703 
1704 static bool bpf_object__relo_in_known_section(const struct bpf_object *obj,
1705 					      int shndx)
1706 {
1707 	return shndx == obj->efile.text_shndx ||
1708 	       bpf_object__shndx_is_maps(obj, shndx) ||
1709 	       bpf_object__shndx_is_data(obj, shndx);
1710 }
1711 
1712 static enum libbpf_map_type
1713 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
1714 {
1715 	if (shndx == obj->efile.data_shndx)
1716 		return LIBBPF_MAP_DATA;
1717 	else if (shndx == obj->efile.bss_shndx)
1718 		return LIBBPF_MAP_BSS;
1719 	else if (shndx == obj->efile.rodata_shndx)
1720 		return LIBBPF_MAP_RODATA;
1721 	else
1722 		return LIBBPF_MAP_UNSPEC;
1723 }
1724 
1725 static int
1726 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1727 			   Elf_Data *data, struct bpf_object *obj)
1728 {
1729 	Elf_Data *symbols = obj->efile.symbols;
1730 	struct bpf_map *maps = obj->maps;
1731 	size_t nr_maps = obj->nr_maps;
1732 	int i, nrels;
1733 
1734 	pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
1735 	nrels = shdr->sh_size / shdr->sh_entsize;
1736 
1737 	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
1738 	if (!prog->reloc_desc) {
1739 		pr_warning("failed to alloc memory in relocation\n");
1740 		return -ENOMEM;
1741 	}
1742 	prog->nr_reloc = nrels;
1743 
1744 	for (i = 0; i < nrels; i++) {
1745 		struct bpf_insn *insns = prog->insns;
1746 		enum libbpf_map_type type;
1747 		unsigned int insn_idx;
1748 		unsigned int shdr_idx;
1749 		const char *name;
1750 		size_t map_idx;
1751 		GElf_Sym sym;
1752 		GElf_Rel rel;
1753 
1754 		if (!gelf_getrel(data, i, &rel)) {
1755 			pr_warning("relocation: failed to get %d reloc\n", i);
1756 			return -LIBBPF_ERRNO__FORMAT;
1757 		}
1758 
1759 		if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
1760 			pr_warning("relocation: symbol %"PRIx64" not found\n",
1761 				   GELF_R_SYM(rel.r_info));
1762 			return -LIBBPF_ERRNO__FORMAT;
1763 		}
1764 
1765 		name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
1766 				  sym.st_name) ? : "<?>";
1767 
1768 		pr_debug("relo for %lld value %lld name %d (\'%s\')\n",
1769 			 (long long) (rel.r_info >> 32),
1770 			 (long long) sym.st_value, sym.st_name, name);
1771 
1772 		shdr_idx = sym.st_shndx;
1773 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
1774 		pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
1775 			 insn_idx, shdr_idx);
1776 
1777 		if (shdr_idx >= SHN_LORESERVE) {
1778 			pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
1779 				   name, shdr_idx, insn_idx,
1780 				   insns[insn_idx].code);
1781 			return -LIBBPF_ERRNO__RELOC;
1782 		}
1783 		if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
1784 			pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
1785 				   prog->section_name, shdr_idx);
1786 			return -LIBBPF_ERRNO__RELOC;
1787 		}
1788 
1789 		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
1790 			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
1791 				pr_warning("incorrect bpf_call opcode\n");
1792 				return -LIBBPF_ERRNO__RELOC;
1793 			}
1794 			prog->reloc_desc[i].type = RELO_CALL;
1795 			prog->reloc_desc[i].insn_idx = insn_idx;
1796 			prog->reloc_desc[i].text_off = sym.st_value;
1797 			obj->has_pseudo_calls = true;
1798 			continue;
1799 		}
1800 
1801 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
1802 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
1803 				   insn_idx, insns[insn_idx].code);
1804 			return -LIBBPF_ERRNO__RELOC;
1805 		}
1806 
1807 		if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
1808 		    bpf_object__shndx_is_data(obj, shdr_idx)) {
1809 			type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
1810 			if (type != LIBBPF_MAP_UNSPEC) {
1811 				if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
1812 					pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
1813 						   name, insn_idx, insns[insn_idx].code);
1814 					return -LIBBPF_ERRNO__RELOC;
1815 				}
1816 				if (!obj->caps.global_data) {
1817 					pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
1818 						   name, insn_idx);
1819 					return -LIBBPF_ERRNO__RELOC;
1820 				}
1821 			}
1822 
1823 			for (map_idx = 0; map_idx < nr_maps; map_idx++) {
1824 				if (maps[map_idx].libbpf_type != type)
1825 					continue;
1826 				if (type != LIBBPF_MAP_UNSPEC ||
1827 				    (maps[map_idx].sec_idx == sym.st_shndx &&
1828 				     maps[map_idx].sec_offset == sym.st_value)) {
1829 					pr_debug("relocation: found map %zd (%s, sec_idx %d, offset %zu) for insn %u\n",
1830 						 map_idx, maps[map_idx].name,
1831 						 maps[map_idx].sec_idx,
1832 						 maps[map_idx].sec_offset,
1833 						 insn_idx);
1834 					break;
1835 				}
1836 			}
1837 
1838 			if (map_idx >= nr_maps) {
1839 				pr_warning("bpf relocation: map_idx %d larger than %d\n",
1840 					   (int)map_idx, (int)nr_maps - 1);
1841 				return -LIBBPF_ERRNO__RELOC;
1842 			}
1843 
1844 			prog->reloc_desc[i].type = type != LIBBPF_MAP_UNSPEC ?
1845 						   RELO_DATA : RELO_LD64;
1846 			prog->reloc_desc[i].insn_idx = insn_idx;
1847 			prog->reloc_desc[i].map_idx = map_idx;
1848 		}
1849 	}
1850 	return 0;
1851 }
1852 
1853 static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
1854 {
1855 	struct bpf_map_def *def = &map->def;
1856 	__u32 key_type_id = 0, value_type_id = 0;
1857 	int ret;
1858 
1859 	/* if it's BTF-defined map, we don't need to search for type IDs */
1860 	if (map->sec_idx == obj->efile.btf_maps_shndx)
1861 		return 0;
1862 
1863 	if (!bpf_map__is_internal(map)) {
1864 		ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
1865 					   def->value_size, &key_type_id,
1866 					   &value_type_id);
1867 	} else {
1868 		/*
1869 		 * LLVM annotates global data differently in BTF, that is,
1870 		 * only as '.data', '.bss' or '.rodata'.
1871 		 */
1872 		ret = btf__find_by_name(obj->btf,
1873 				libbpf_type_to_btf_name[map->libbpf_type]);
1874 	}
1875 	if (ret < 0)
1876 		return ret;
1877 
1878 	map->btf_key_type_id = key_type_id;
1879 	map->btf_value_type_id = bpf_map__is_internal(map) ?
1880 				 ret : value_type_id;
1881 	return 0;
1882 }
1883 
1884 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1885 {
1886 	struct bpf_map_info info = {};
1887 	__u32 len = sizeof(info), name_len;
1888 	int new_fd, err;
1889 	char *new_name;
1890 
1891 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
1892 	if (err)
1893 		return err;
1894 
1895 	name_len = strlen(info.name);
1896 	if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
1897 		new_name = strdup(map->name);
1898 	else
1899 		new_name = strdup(info.name);
1900 
1901 	if (!new_name)
1902 		return -errno;
1903 
1904 	new_fd = open("/", O_RDONLY | O_CLOEXEC);
1905 	if (new_fd < 0) {
1906 		err = -errno;
1907 		goto err_free_new_name;
1908 	}
1909 
1910 	new_fd = dup3(fd, new_fd, O_CLOEXEC);
1911 	if (new_fd < 0) {
1912 		err = -errno;
1913 		goto err_close_new_fd;
1914 	}
1915 
1916 	err = zclose(map->fd);
1917 	if (err) {
1918 		err = -errno;
1919 		goto err_close_new_fd;
1920 	}
1921 	free(map->name);
1922 
1923 	map->fd = new_fd;
1924 	map->name = new_name;
1925 	map->def.type = info.type;
1926 	map->def.key_size = info.key_size;
1927 	map->def.value_size = info.value_size;
1928 	map->def.max_entries = info.max_entries;
1929 	map->def.map_flags = info.map_flags;
1930 	map->btf_key_type_id = info.btf_key_type_id;
1931 	map->btf_value_type_id = info.btf_value_type_id;
1932 
1933 	return 0;
1934 
1935 err_close_new_fd:
1936 	close(new_fd);
1937 err_free_new_name:
1938 	free(new_name);
1939 	return err;
1940 }
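
/* Illustrative usage sketch (not part of libbpf itself; the pin path and map
 * name below are hypothetical): bpf_map__reuse_fd() is typically called
 * before bpf_object__load() so that the object reuses an already existing
 * (e.g. pinned) map instead of creating a new one:
 *
 *	struct bpf_map *map;
 *	int pinned_fd;
 *
 *	pinned_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *	if (pinned_fd < 0)
 *		return pinned_fd;
 *	map = bpf_object__find_map_by_name(obj, "my_map");
 *	if (!map || bpf_map__reuse_fd(map, pinned_fd))
 *		return -1;
 *	close(pinned_fd);	// reuse_fd dup()s the fd, so the original can be closed
 *	return bpf_object__load(obj);
 */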
1941 
1942 int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
1943 {
1944 	if (!map || !max_entries)
1945 		return -EINVAL;
1946 
1947 	/* If map already created, its attributes can't be changed. */
1948 	if (map->fd >= 0)
1949 		return -EBUSY;
1950 
1951 	map->def.max_entries = max_entries;
1952 
1953 	return 0;
1954 }
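
/* Illustrative sketch (hypothetical map name): since map attributes are fixed
 * once the map is created, bpf_map__resize() has to be called between
 * bpf_object__open() and bpf_object__load(), e.g. to size a map to the number
 * of possible CPUs:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "per_cpu_buf");
 *	int nr_cpus = libbpf_num_possible_cpus();
 *
 *	if (!map || nr_cpus < 0 || bpf_map__resize(map, nr_cpus))
 *		return -1;
 */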
1955 
1956 static int
1957 bpf_object__probe_name(struct bpf_object *obj)
1958 {
1959 	struct bpf_load_program_attr attr;
1960 	char *cp, errmsg[STRERR_BUFSIZE];
1961 	struct bpf_insn insns[] = {
1962 		BPF_MOV64_IMM(BPF_REG_0, 0),
1963 		BPF_EXIT_INSN(),
1964 	};
1965 	int ret;
1966 
1967 	/* make sure basic loading works */
1968 
1969 	memset(&attr, 0, sizeof(attr));
1970 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1971 	attr.insns = insns;
1972 	attr.insns_cnt = ARRAY_SIZE(insns);
1973 	attr.license = "GPL";
1974 
1975 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1976 	if (ret < 0) {
1977 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1978 		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1979 			   __func__, cp, errno);
1980 		return -errno;
1981 	}
1982 	close(ret);
1983 
1984 	/* now try the same program, but with a name */
1985 
1986 	attr.name = "test";
1987 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1988 	if (ret >= 0) {
1989 		obj->caps.name = 1;
1990 		close(ret);
1991 	}
1992 
1993 	return 0;
1994 }
1995 
1996 static int
1997 bpf_object__probe_global_data(struct bpf_object *obj)
1998 {
1999 	struct bpf_load_program_attr prg_attr;
2000 	struct bpf_create_map_attr map_attr;
2001 	char *cp, errmsg[STRERR_BUFSIZE];
2002 	struct bpf_insn insns[] = {
2003 		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
2004 		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
2005 		BPF_MOV64_IMM(BPF_REG_0, 0),
2006 		BPF_EXIT_INSN(),
2007 	};
2008 	int ret, map;
2009 
2010 	memset(&map_attr, 0, sizeof(map_attr));
2011 	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
2012 	map_attr.key_size = sizeof(int);
2013 	map_attr.value_size = 32;
2014 	map_attr.max_entries = 1;
2015 
2016 	map = bpf_create_map_xattr(&map_attr);
2017 	if (map < 0) {
2018 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2019 		pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
2020 			   __func__, cp, errno);
2021 		return -errno;
2022 	}
2023 
2024 	insns[0].imm = map;
2025 
2026 	memset(&prg_attr, 0, sizeof(prg_attr));
2027 	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
2028 	prg_attr.insns = insns;
2029 	prg_attr.insns_cnt = ARRAY_SIZE(insns);
2030 	prg_attr.license = "GPL";
2031 
2032 	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
2033 	if (ret >= 0) {
2034 		obj->caps.global_data = 1;
2035 		close(ret);
2036 	}
2037 
2038 	close(map);
2039 	return 0;
2040 }
2041 
2042 static int bpf_object__probe_btf_func(struct bpf_object *obj)
2043 {
2044 	const char strs[] = "\0int\0x\0a";
2045 	/* void x(int a) {} */
2046 	__u32 types[] = {
2047 		/* int */
2048 		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
2049 		/* FUNC_PROTO */                                /* [2] */
2050 		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
2051 		BTF_PARAM_ENC(7, 1),
2052 		/* FUNC x */                                    /* [3] */
2053 		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
2054 	};
2055 	int btf_fd;
2056 
2057 	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2058 				      strs, sizeof(strs));
2059 	if (btf_fd >= 0) {
2060 		obj->caps.btf_func = 1;
2061 		close(btf_fd);
2062 		return 1;
2063 	}
2064 
2065 	return 0;
2066 }
2067 
2068 static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
2069 {
2070 	const char strs[] = "\0x\0.data";
2071 	/* static int a; */
2072 	__u32 types[] = {
2073 		/* int */
2074 		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
2075 		/* VAR x */                                     /* [2] */
2076 		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
2077 		BTF_VAR_STATIC,
2078 		/* DATASEC val */                               /* [3] */
2079 		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
2080 		BTF_VAR_SECINFO_ENC(2, 0, 4),
2081 	};
2082 	int btf_fd;
2083 
2084 	btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
2085 				      strs, sizeof(strs));
2086 	if (btf_fd >= 0) {
2087 		obj->caps.btf_datasec = 1;
2088 		close(btf_fd);
2089 		return 1;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 static int
2096 bpf_object__probe_caps(struct bpf_object *obj)
2097 {
2098 	int (*probe_fn[])(struct bpf_object *obj) = {
2099 		bpf_object__probe_name,
2100 		bpf_object__probe_global_data,
2101 		bpf_object__probe_btf_func,
2102 		bpf_object__probe_btf_datasec,
2103 	};
2104 	int i, ret;
2105 
2106 	for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
2107 		ret = probe_fn[i](obj);
2108 		if (ret < 0)
2109 			pr_debug("Probe #%d failed with %d.\n", i, ret);
2110 	}
2111 
2112 	return 0;
2113 }
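
/* The probes above are internal and run as part of loading a bpf_object.
 * Applications wanting to do similar feature detection themselves can use
 * libbpf's public probing API instead (a minimal sketch; error handling
 * omitted):
 *
 *	#include <bpf/libbpf.h>
 *
 *	if (bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0))
 *		printf("XDP programs are supported\n");
 *	if (bpf_probe_map_type(BPF_MAP_TYPE_ARRAY_OF_MAPS, 0))
 *		printf("array-of-maps is supported\n");
 */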
2114 
2115 static int
2116 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
2117 {
2118 	char *cp, errmsg[STRERR_BUFSIZE];
2119 	int err, zero = 0;
2120 	__u8 *data;
2121 
2122 	/* Nothing to do here since kernel already zero-initializes .bss map. */
2123 	if (map->libbpf_type == LIBBPF_MAP_BSS)
2124 		return 0;
2125 
2126 	data = map->libbpf_type == LIBBPF_MAP_DATA ?
2127 	       obj->sections.data : obj->sections.rodata;
2128 
2129 	err = bpf_map_update_elem(map->fd, &zero, data, 0);
2130 	/* Freeze .rodata map as read-only from syscall side. */
2131 	if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) {
2132 		err = bpf_map_freeze(map->fd);
2133 		if (err) {
2134 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2135 			pr_warning("Error freezing map(%s) as read-only: %s\n",
2136 				   map->name, cp);
2137 			err = 0;
2138 		}
2139 	}
2140 	return err;
2141 }
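
/* For context, an illustrative BPF-side source fragment (hypothetical names)
 * that produces the three kinds of internal maps handled above:
 *
 *	static int pkt_count = 1;		// -> .data map
 *	static const char banner[] = "hi";	// -> .rodata map (frozen read-only)
 *	static int scratch;			// -> .bss map (zero-initialized by kernel)
 *
 * Each such section becomes a single-entry array map whose value holds the
 * whole section image; the bpf_map_update_elem() above writes that image at
 * key 0.
 */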
2142 
2143 static int
2144 bpf_object__create_maps(struct bpf_object *obj)
2145 {
2146 	struct bpf_create_map_attr create_attr = {};
2147 	int nr_cpus = 0;
2148 	unsigned int i;
2149 	int err;
2150 
2151 	for (i = 0; i < obj->nr_maps; i++) {
2152 		struct bpf_map *map = &obj->maps[i];
2153 		struct bpf_map_def *def = &map->def;
2154 		char *cp, errmsg[STRERR_BUFSIZE];
2155 		int *pfd = &map->fd;
2156 
2157 		if (map->fd >= 0) {
2158 			pr_debug("skip map create (preset) %s: fd=%d\n",
2159 				 map->name, map->fd);
2160 			continue;
2161 		}
2162 
2163 		if (obj->caps.name)
2164 			create_attr.name = map->name;
2165 		create_attr.map_ifindex = map->map_ifindex;
2166 		create_attr.map_type = def->type;
2167 		create_attr.map_flags = def->map_flags;
2168 		create_attr.key_size = def->key_size;
2169 		create_attr.value_size = def->value_size;
2170 		if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
2171 		    !def->max_entries) {
2172 			if (!nr_cpus)
2173 				nr_cpus = libbpf_num_possible_cpus();
2174 			if (nr_cpus < 0) {
2175 				pr_warning("failed to determine number of system CPUs: %d\n",
2176 					   nr_cpus);
2177 				err = nr_cpus;
2178 				goto err_out;
2179 			}
2180 			pr_debug("map '%s': setting size to %d\n",
2181 				 map->name, nr_cpus);
2182 			create_attr.max_entries = nr_cpus;
2183 		} else {
2184 			create_attr.max_entries = def->max_entries;
2185 		}
2186 		create_attr.btf_fd = 0;
2187 		create_attr.btf_key_type_id = 0;
2188 		create_attr.btf_value_type_id = 0;
2189 		if (bpf_map_type__is_map_in_map(def->type) &&
2190 		    map->inner_map_fd >= 0)
2191 			create_attr.inner_map_fd = map->inner_map_fd;
2192 
2193 		if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
2194 			create_attr.btf_fd = btf__fd(obj->btf);
2195 			create_attr.btf_key_type_id = map->btf_key_type_id;
2196 			create_attr.btf_value_type_id = map->btf_value_type_id;
2197 		}
2198 
2199 		*pfd = bpf_create_map_xattr(&create_attr);
2200 		if (*pfd < 0 && (create_attr.btf_key_type_id ||
2201 				 create_attr.btf_value_type_id)) {
2202 			err = -errno;
2203 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
2204 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
2205 				   map->name, cp, err);
2206 			create_attr.btf_fd = 0;
2207 			create_attr.btf_key_type_id = 0;
2208 			create_attr.btf_value_type_id = 0;
2209 			map->btf_key_type_id = 0;
2210 			map->btf_value_type_id = 0;
2211 			*pfd = bpf_create_map_xattr(&create_attr);
2212 		}
2213 
2214 		if (*pfd < 0) {
2215 			size_t j;
2216 
2217 			err = -errno;
2218 err_out:
2219 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
2220 			pr_warning("failed to create map (name: '%s'): %s(%d)\n",
2221 				   map->name, cp, err);
2222 			for (j = 0; j < i; j++)
2223 				zclose(obj->maps[j].fd);
2224 			return err;
2225 		}
2226 
2227 		if (bpf_map__is_internal(map)) {
2228 			err = bpf_object__populate_internal_map(obj, map);
2229 			if (err < 0) {
2230 				zclose(*pfd);
2231 				goto err_out;
2232 			}
2233 		}
2234 
2235 		pr_debug("created map %s: fd=%d\n", map->name, *pfd);
2236 	}
2237 
2238 	return 0;
2239 }
2240 
2241 static int
2242 check_btf_ext_reloc_err(struct bpf_program *prog, int err,
2243 			void *btf_prog_info, const char *info_name)
2244 {
2245 	if (err != -ENOENT) {
2246 		pr_warning("Error in loading %s for sec %s.\n",
2247 			   info_name, prog->section_name);
2248 		return err;
2249 	}
2250 
2251 	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
2252 
2253 	if (btf_prog_info) {
2254 		/*
2255 		 * Some info has already been found, but there was a problem
2256 		 * with the last btf_ext reloc, so we must error out.
2257 		 */
2258 		pr_warning("Error in relocating %s for sec %s.\n",
2259 			   info_name, prog->section_name);
2260 		return err;
2261 	}
2262 
2263 	/* We had a problem loading the very first info. Ignore the rest. */
2264 	pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
2265 		   info_name, prog->section_name, info_name);
2266 	return 0;
2267 }
2268 
2269 static int
2270 bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2271 			  const char *section_name,  __u32 insn_offset)
2272 {
2273 	int err;
2274 
2275 	if (!insn_offset || prog->func_info) {
2276 		/*
2277 		 * !insn_offset => main program
2278 		 *
2279 		 * For sub prog, the main program's func_info has to
2280 		 * be loaded first (i.e. prog->func_info != NULL)
2281 		 */
2282 		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
2283 					       section_name, insn_offset,
2284 					       &prog->func_info,
2285 					       &prog->func_info_cnt);
2286 		if (err)
2287 			return check_btf_ext_reloc_err(prog, err,
2288 						       prog->func_info,
2289 						       "bpf_func_info");
2290 
2291 		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
2292 	}
2293 
2294 	if (!insn_offset || prog->line_info) {
2295 		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
2296 					       section_name, insn_offset,
2297 					       &prog->line_info,
2298 					       &prog->line_info_cnt);
2299 		if (err)
2300 			return check_btf_ext_reloc_err(prog, err,
2301 						       prog->line_info,
2302 						       "bpf_line_info");
2303 
2304 		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 #define BPF_CORE_SPEC_MAX_LEN 64
2311 
2312 /* represents BPF CO-RE field or array element accessor */
2313 struct bpf_core_accessor {
2314 	__u32 type_id;		/* struct/union type or array element type */
2315 	__u32 idx;		/* field index or array index */
2316 	const char *name;	/* field name or NULL for array accessor */
2317 };
2318 
2319 struct bpf_core_spec {
2320 	const struct btf *btf;
2321 	/* high-level spec: named fields and array indices only */
2322 	struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
2323 	/* high-level spec length */
2324 	int len;
2325 	/* raw, low-level spec: 1-to-1 with accessor spec string */
2326 	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
2327 	/* raw spec length */
2328 	int raw_len;
2329 	/* field byte offset represented by spec */
2330 	__u32 offset;
2331 };
2332 
2333 static bool str_is_empty(const char *s)
2334 {
2335 	return !s || !s[0];
2336 }
2337 
2338 /*
2339  * Turn bpf_offset_reloc into a low- and high-level spec representation,
2340  * validating correctness along the way, as well as calculating resulting
2341  * field offset (in bytes), specified by accessor string. Low-level spec
2342  * captures every single level of nestedness, including traversing anonymous
2343  * struct/union members. High-level one only captures semantically meaningful
2344  * "turning points": named fields and array indices.
2345  * E.g., for this case:
2346  *
2347  *   struct sample {
2348  *       int __unimportant;
2349  *       struct {
2350  *           int __1;
2351  *           int __2;
2352  *           int a[7];
2353  *       };
2354  *   };
2355  *
2356  *   struct sample *s = ...;
2357  *
2358  *   int x = &s->a[3]; // access string = '0:1:2:3'
2359  *
2360  * Low-level spec has 1:1 mapping with each element of access string (it's
2361  * just a parsed access string representation): [0, 1, 2, 3].
2362  *
2363  * High-level spec will capture only 3 points:
2364  *   - initial zero-index access by pointer (&s->... is the same as &s[0]...);
2365  *   - field 'a' access (corresponds to '2' in low-level spec);
2366  *   - array element #3 access (corresponds to '3' in low-level spec).
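 *
 * For the same example, the resulting byte offset can be computed as follows
 * (an illustrative calculation assuming 4-byte int and no extra padding):
 *   '0' -> 0 * sizeof(struct sample) = 0
 *   '1' -> anonymous struct at byte offset 4
 *   '2' -> field 'a' at offset 4 + 8 = 12
 *   '3' -> a[3] at offset 12 + 3 * 4 = 24
 * so access string '0:1:2:3' resolves to byte offset 24.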
2367  *
2368  */
2369 static int bpf_core_spec_parse(const struct btf *btf,
2370 			       __u32 type_id,
2371 			       const char *spec_str,
2372 			       struct bpf_core_spec *spec)
2373 {
2374 	int access_idx, parsed_len, i;
2375 	const struct btf_type *t;
2376 	const char *name;
2377 	__u32 id;
2378 	__s64 sz;
2379 
2380 	if (str_is_empty(spec_str) || *spec_str == ':')
2381 		return -EINVAL;
2382 
2383 	memset(spec, 0, sizeof(*spec));
2384 	spec->btf = btf;
2385 
2386 	/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
2387 	while (*spec_str) {
2388 		if (*spec_str == ':')
2389 			++spec_str;
2390 		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
2391 			return -EINVAL;
2392 		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2393 			return -E2BIG;
2394 		spec_str += parsed_len;
2395 		spec->raw_spec[spec->raw_len++] = access_idx;
2396 	}
2397 
2398 	if (spec->raw_len == 0)
2399 		return -EINVAL;
2400 
2401 	/* first spec value is always reloc type array index */
2402 	t = skip_mods_and_typedefs(btf, type_id, &id);
2403 	if (!t)
2404 		return -EINVAL;
2405 
2406 	access_idx = spec->raw_spec[0];
2407 	spec->spec[0].type_id = id;
2408 	spec->spec[0].idx = access_idx;
2409 	spec->len++;
2410 
2411 	sz = btf__resolve_size(btf, id);
2412 	if (sz < 0)
2413 		return sz;
2414 	spec->offset = access_idx * sz;
2415 
2416 	for (i = 1; i < spec->raw_len; i++) {
2417 		t = skip_mods_and_typedefs(btf, id, &id);
2418 		if (!t)
2419 			return -EINVAL;
2420 
2421 		access_idx = spec->raw_spec[i];
2422 
2423 		if (btf_is_composite(t)) {
2424 			const struct btf_member *m;
2425 			__u32 offset;
2426 
2427 			if (access_idx >= btf_vlen(t))
2428 				return -EINVAL;
2429 			if (btf_member_bitfield_size(t, access_idx))
2430 				return -EINVAL;
2431 
2432 			offset = btf_member_bit_offset(t, access_idx);
2433 			if (offset % 8)
2434 				return -EINVAL;
2435 			spec->offset += offset / 8;
2436 
2437 			m = btf_members(t) + access_idx;
2438 			if (m->name_off) {
2439 				name = btf__name_by_offset(btf, m->name_off);
2440 				if (str_is_empty(name))
2441 					return -EINVAL;
2442 
2443 				spec->spec[spec->len].type_id = id;
2444 				spec->spec[spec->len].idx = access_idx;
2445 				spec->spec[spec->len].name = name;
2446 				spec->len++;
2447 			}
2448 
2449 			id = m->type;
2450 		} else if (btf_is_array(t)) {
2451 			const struct btf_array *a = btf_array(t);
2452 
2453 			t = skip_mods_and_typedefs(btf, a->type, &id);
2454 			if (!t || access_idx >= a->nelems)
2455 				return -EINVAL;
2456 
2457 			spec->spec[spec->len].type_id = id;
2458 			spec->spec[spec->len].idx = access_idx;
2459 			spec->len++;
2460 
2461 			sz = btf__resolve_size(btf, id);
2462 			if (sz < 0)
2463 				return sz;
2464 			spec->offset += access_idx * sz;
2465 		} else {
2466 			pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
2467 				   type_id, spec_str, i, id, btf_kind(t));
2468 			return -EINVAL;
2469 		}
2470 	}
2471 
2472 	return 0;
2473 }
2474 
2475 static bool bpf_core_is_flavor_sep(const char *s)
2476 {
2477 	/* check X___Y name pattern, where X and Y are not underscores */
2478 	return s[0] != '_' &&				      /* X */
2479 	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
2480 	       s[4] != '_';				      /* Y */
2481 }
2482 
2483 /* Given 'some_struct_name___with_flavor' return the length of a name prefix
2484  * before last triple underscore. Struct name part after last triple
2485  * underscore is ignored by BPF CO-RE relocation during relocation matching.
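 * E.g., for 'some_struct_name___with_flavor' this returns 16, the length of
 * the essential name 'some_struct_name'.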
2486  */
2487 static size_t bpf_core_essential_name_len(const char *name)
2488 {
2489 	size_t n = strlen(name);
2490 	int i;
2491 
2492 	for (i = n - 5; i >= 0; i--) {
2493 		if (bpf_core_is_flavor_sep(name + i))
2494 			return i + 1;
2495 	}
2496 	return n;
2497 }
2498 
2499 /* dynamically sized list of type IDs */
2500 struct ids_vec {
2501 	__u32 *data;
2502 	int len;
2503 };
2504 
2505 static void bpf_core_free_cands(struct ids_vec *cand_ids)
2506 {
2507 	free(cand_ids->data);
2508 	free(cand_ids);
2509 }
2510 
2511 static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
2512 					   __u32 local_type_id,
2513 					   const struct btf *targ_btf)
2514 {
2515 	size_t local_essent_len, targ_essent_len;
2516 	const char *local_name, *targ_name;
2517 	const struct btf_type *t;
2518 	struct ids_vec *cand_ids;
2519 	__u32 *new_ids;
2520 	int i, err, n;
2521 
2522 	t = btf__type_by_id(local_btf, local_type_id);
2523 	if (!t)
2524 		return ERR_PTR(-EINVAL);
2525 
2526 	local_name = btf__name_by_offset(local_btf, t->name_off);
2527 	if (str_is_empty(local_name))
2528 		return ERR_PTR(-EINVAL);
2529 	local_essent_len = bpf_core_essential_name_len(local_name);
2530 
2531 	cand_ids = calloc(1, sizeof(*cand_ids));
2532 	if (!cand_ids)
2533 		return ERR_PTR(-ENOMEM);
2534 
2535 	n = btf__get_nr_types(targ_btf);
2536 	for (i = 1; i <= n; i++) {
2537 		t = btf__type_by_id(targ_btf, i);
2538 		targ_name = btf__name_by_offset(targ_btf, t->name_off);
2539 		if (str_is_empty(targ_name))
2540 			continue;
2541 
2542 		targ_essent_len = bpf_core_essential_name_len(targ_name);
2543 		if (targ_essent_len != local_essent_len)
2544 			continue;
2545 
2546 		if (strncmp(local_name, targ_name, local_essent_len) == 0) {
2547 			pr_debug("[%d] %s: found candidate [%d] %s\n",
2548 				 local_type_id, local_name, i, targ_name);
2549 			new_ids = reallocarray(cand_ids->data,
2550 					       cand_ids->len + 1,
2551 					       sizeof(*cand_ids->data));
2552 			if (!new_ids) {
2553 				err = -ENOMEM;
2554 				goto err_out;
2555 			}
2556 			cand_ids->data = new_ids;
2557 			cand_ids->data[cand_ids->len++] = i;
2558 		}
2559 	}
2560 	return cand_ids;
2561 err_out:
2562 	bpf_core_free_cands(cand_ids);
2563 	return ERR_PTR(err);
2564 }
2565 
2566 /* Check two types for compatibility, skipping const/volatile/restrict and
2567  * typedefs, to ensure we are relocating offset to the compatible entities:
2568  *   - any two STRUCTs/UNIONs are compatible and can be mixed;
2569  *   - any two FWDs are compatible;
2570  *   - any two PTRs are always compatible;
2571  *   - for ENUMs, check sizes, names are ignored;
2572  *   - for INT, size and bitness should match, signedness is ignored;
2573  *   - for ARRAY, dimensionality is ignored, element types are checked for
2574  *     compatibility recursively;
2575  *   - everything else shouldn't be ever a target of relocation.
2576  * These rules are not set in stone and probably will be adjusted as we get
2577  * more experience with using BPF CO-RE relocations.
2578  */
2579 static int bpf_core_fields_are_compat(const struct btf *local_btf,
2580 				      __u32 local_id,
2581 				      const struct btf *targ_btf,
2582 				      __u32 targ_id)
2583 {
2584 	const struct btf_type *local_type, *targ_type;
2585 
2586 recur:
2587 	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
2588 	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2589 	if (!local_type || !targ_type)
2590 		return -EINVAL;
2591 
2592 	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
2593 		return 1;
2594 	if (btf_kind(local_type) != btf_kind(targ_type))
2595 		return 0;
2596 
2597 	switch (btf_kind(local_type)) {
2598 	case BTF_KIND_FWD:
2599 	case BTF_KIND_PTR:
2600 		return 1;
2601 	case BTF_KIND_ENUM:
2602 		return local_type->size == targ_type->size;
2603 	case BTF_KIND_INT:
2604 		return btf_int_offset(local_type) == 0 &&
2605 		       btf_int_offset(targ_type) == 0 &&
2606 		       local_type->size == targ_type->size &&
2607 		       btf_int_bits(local_type) == btf_int_bits(targ_type);
2608 	case BTF_KIND_ARRAY:
2609 		local_id = btf_array(local_type)->type;
2610 		targ_id = btf_array(targ_type)->type;
2611 		goto recur;
2612 	default:
2613 		pr_warning("unexpected kind %d relocated, local [%d], target [%d]\n",
2614 			   btf_kind(local_type), local_id, targ_id);
2615 		return 0;
2616 	}
2617 }
2618 
2619 /*
2620  * Given single high-level named field accessor in local type, find
2621  * corresponding high-level accessor for a target type. Along the way,
2622  * maintain low-level spec for target as well. Also keep updating target
2623  * offset.
2624  *
2625  * Searching is performed through recursive exhaustive enumeration of all
2626  * fields of a struct/union. If there are any anonymous (embedded)
2627  * structs/unions, they are recursively searched as well. If field with
2628  * desired name is found, check compatibility between local and target types,
2629  * before returning result.
2630  *
2631  * 1 is returned, if field is found.
2632  * 0 is returned if no compatible field is found.
2633  * <0 is returned on error.
2634  */
2635 static int bpf_core_match_member(const struct btf *local_btf,
2636 				 const struct bpf_core_accessor *local_acc,
2637 				 const struct btf *targ_btf,
2638 				 __u32 targ_id,
2639 				 struct bpf_core_spec *spec,
2640 				 __u32 *next_targ_id)
2641 {
2642 	const struct btf_type *local_type, *targ_type;
2643 	const struct btf_member *local_member, *m;
2644 	const char *local_name, *targ_name;
2645 	__u32 local_id;
2646 	int i, n, found;
2647 
2648 	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
2649 	if (!targ_type)
2650 		return -EINVAL;
2651 	if (!btf_is_composite(targ_type))
2652 		return 0;
2653 
2654 	local_id = local_acc->type_id;
2655 	local_type = btf__type_by_id(local_btf, local_id);
2656 	local_member = btf_members(local_type) + local_acc->idx;
2657 	local_name = btf__name_by_offset(local_btf, local_member->name_off);
2658 
2659 	n = btf_vlen(targ_type);
2660 	m = btf_members(targ_type);
2661 	for (i = 0; i < n; i++, m++) {
2662 		__u32 offset;
2663 
2664 		/* bitfield relocations not supported */
2665 		if (btf_member_bitfield_size(targ_type, i))
2666 			continue;
2667 		offset = btf_member_bit_offset(targ_type, i);
2668 		if (offset % 8)
2669 			continue;
2670 
2671 		/* too deep struct/union/array nesting */
2672 		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2673 			return -E2BIG;
2674 
2675 		/* speculate this member will be the good one */
2676 		spec->offset += offset / 8;
2677 		spec->raw_spec[spec->raw_len++] = i;
2678 
2679 		targ_name = btf__name_by_offset(targ_btf, m->name_off);
2680 		if (str_is_empty(targ_name)) {
2681 			/* embedded struct/union, we need to go deeper */
2682 			found = bpf_core_match_member(local_btf, local_acc,
2683 						      targ_btf, m->type,
2684 						      spec, next_targ_id);
2685 			if (found) /* either found or error */
2686 				return found;
2687 		} else if (strcmp(local_name, targ_name) == 0) {
2688 			/* matching named field */
2689 			struct bpf_core_accessor *targ_acc;
2690 
2691 			targ_acc = &spec->spec[spec->len++];
2692 			targ_acc->type_id = targ_id;
2693 			targ_acc->idx = i;
2694 			targ_acc->name = targ_name;
2695 
2696 			*next_targ_id = m->type;
2697 			found = bpf_core_fields_are_compat(local_btf,
2698 							   local_member->type,
2699 							   targ_btf, m->type);
2700 			if (!found)
2701 				spec->len--; /* pop accessor */
2702 			return found;
2703 		}
2704 		/* member turned out not to be what we looked for */
2705 		spec->offset -= offset / 8;
2706 		spec->raw_len--;
2707 	}
2708 
2709 	return 0;
2710 }
2711 
2712 /*
2713  * Try to match local spec to a target type and, if successful, produce full
2714  * target spec (high-level, low-level + offset).
2715  */
2716 static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
2717 			       const struct btf *targ_btf, __u32 targ_id,
2718 			       struct bpf_core_spec *targ_spec)
2719 {
2720 	const struct btf_type *targ_type;
2721 	const struct bpf_core_accessor *local_acc;
2722 	struct bpf_core_accessor *targ_acc;
2723 	int i, sz, matched;
2724 
2725 	memset(targ_spec, 0, sizeof(*targ_spec));
2726 	targ_spec->btf = targ_btf;
2727 
2728 	local_acc = &local_spec->spec[0];
2729 	targ_acc = &targ_spec->spec[0];
2730 
2731 	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
2732 		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
2733 						   &targ_id);
2734 		if (!targ_type)
2735 			return -EINVAL;
2736 
2737 		if (local_acc->name) {
2738 			matched = bpf_core_match_member(local_spec->btf,
2739 							local_acc,
2740 							targ_btf, targ_id,
2741 							targ_spec, &targ_id);
2742 			if (matched <= 0)
2743 				return matched;
2744 		} else {
2745 			/* for i=0, targ_id is already treated as array element
2746 			 * type (because it's the original struct), for others
2747 			 * we should find array element type first
2748 			 */
2749 			if (i > 0) {
2750 				const struct btf_array *a;
2751 
2752 				if (!btf_is_array(targ_type))
2753 					return 0;
2754 
2755 				a = btf_array(targ_type);
2756 				if (local_acc->idx >= a->nelems)
2757 					return 0;
2758 				if (!skip_mods_and_typedefs(targ_btf, a->type,
2759 							    &targ_id))
2760 					return -EINVAL;
2761 			}
2762 
2763 			/* too deep struct/union/array nesting */
2764 			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
2765 				return -E2BIG;
2766 
2767 			targ_acc->type_id = targ_id;
2768 			targ_acc->idx = local_acc->idx;
2769 			targ_acc->name = NULL;
2770 			targ_spec->len++;
2771 			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
2772 			targ_spec->raw_len++;
2773 
2774 			sz = btf__resolve_size(targ_btf, targ_id);
2775 			if (sz < 0)
2776 				return sz;
2777 			targ_spec->offset += local_acc->idx * sz;
2778 		}
2779 	}
2780 
2781 	return 1;
2782 }
2783 
2784 /*
2785  * Patch relocatable BPF instruction.
2786  * Expected insn->imm value is provided for validation, as well as the new
2787  * relocated value.
2788  *
2789  * Currently three kinds of BPF instructions are supported:
2790  * 1. rX = <imm> (assignment with immediate operand);
2791  * 2. rX += <imm> (arithmetic operations with immediate operand);
2792  * 3. *(rX) = <imm> (indirect memory assignment with immediate operand).
2793  *
2794  * If actual insn->imm value is wrong, bail out.
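 *
 * For example (illustrative): if the local BTF places a field at byte offset
 * 16 but the target kernel places it at 24, an instruction like 'r1 += 16'
 * (BPF_ALU64 | BPF_ADD | BPF_K, imm = 16) is validated against orig_off = 16
 * and patched in place to 'r1 += 24'.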
2795  */
2796 static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off,
2797 			       __u32 orig_off, __u32 new_off)
2798 {
2799 	struct bpf_insn *insn;
2800 	int insn_idx;
2801 	__u8 class;
2802 
2803 	if (insn_off % sizeof(struct bpf_insn))
2804 		return -EINVAL;
2805 	insn_idx = insn_off / sizeof(struct bpf_insn);
2806 
2807 	insn = &prog->insns[insn_idx];
2808 	class = BPF_CLASS(insn->code);
2809 
2810 	if (class == BPF_ALU || class == BPF_ALU64) {
2811 		if (BPF_SRC(insn->code) != BPF_K)
2812 			return -EINVAL;
2813 		if (insn->imm != orig_off)
2814 			return -EINVAL;
2815 		insn->imm = new_off;
2816 		pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
2817 			 bpf_program__title(prog, false),
2818 			 insn_idx, orig_off, new_off);
2819 	} else {
2820 		pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
2821 			   bpf_program__title(prog, false),
2822 			   insn_idx, insn->code, insn->src_reg, insn->dst_reg,
2823 			   insn->off, insn->imm);
2824 		return -EINVAL;
2825 	}
2826 	return 0;
2827 }
2828 
2829 static struct btf *btf_load_raw(const char *path)
2830 {
2831 	struct btf *btf;
2832 	size_t read_cnt;
2833 	struct stat st;
2834 	void *data;
2835 	FILE *f;
2836 
2837 	if (stat(path, &st))
2838 		return ERR_PTR(-errno);
2839 
2840 	data = malloc(st.st_size);
2841 	if (!data)
2842 		return ERR_PTR(-ENOMEM);
2843 
2844 	f = fopen(path, "rb");
2845 	if (!f) {
2846 		btf = ERR_PTR(-errno);
2847 		goto cleanup;
2848 	}
2849 
2850 	read_cnt = fread(data, 1, st.st_size, f);
2851 	fclose(f);
2852 	if (read_cnt < st.st_size) {
2853 		btf = ERR_PTR(-EBADF);
2854 		goto cleanup;
2855 	}
2856 
2857 	btf = btf__new(data, read_cnt);
2858 
2859 cleanup:
2860 	free(data);
2861 	return btf;
2862 }
2863 
2864 /*
2865  * Probe a few well-known locations for the vmlinux kernel image and try to load BTF
2866  * data out of it to use for target BTF.
2867  */
2868 static struct btf *bpf_core_find_kernel_btf(void)
2869 {
2870 	struct {
2871 		const char *path_fmt;
2872 		bool raw_btf;
2873 	} locations[] = {
2874 		/* try canonical vmlinux BTF through sysfs first */
2875 		{ "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
2876 		/* fall back to trying to find vmlinux ELF on disk otherwise */
2877 		{ "/boot/vmlinux-%1$s" },
2878 		{ "/lib/modules/%1$s/vmlinux-%1$s" },
2879 		{ "/lib/modules/%1$s/build/vmlinux" },
2880 		{ "/usr/lib/modules/%1$s/kernel/vmlinux" },
2881 		{ "/usr/lib/debug/boot/vmlinux-%1$s" },
2882 		{ "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
2883 		{ "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
2884 	};
2885 	char path[PATH_MAX + 1];
2886 	struct utsname buf;
2887 	struct btf *btf;
2888 	int i;
2889 
2890 	uname(&buf);
2891 
2892 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
2893 		snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
2894 
2895 		if (access(path, R_OK))
2896 			continue;
2897 
2898 		if (locations[i].raw_btf)
2899 			btf = btf_load_raw(path);
2900 		else
2901 			btf = btf__parse_elf(path, NULL);
2902 
2903 		pr_debug("loading kernel BTF '%s': %ld\n",
2904 			 path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
2905 		if (IS_ERR(btf))
2906 			continue;
2907 
2908 		return btf;
2909 	}
2910 
2911 	pr_warning("failed to find valid kernel BTF\n");
2912 	return ERR_PTR(-ESRCH);
2913 }
2914 
2915 /* Output spec definition in the format:
2916  * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
2917  * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
2918  */
2919 static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
2920 {
2921 	const struct btf_type *t;
2922 	const char *s;
2923 	__u32 type_id;
2924 	int i;
2925 
2926 	type_id = spec->spec[0].type_id;
2927 	t = btf__type_by_id(spec->btf, type_id);
2928 	s = btf__name_by_offset(spec->btf, t->name_off);
2929 	libbpf_print(level, "[%u] %s + ", type_id, s);
2930 
2931 	for (i = 0; i < spec->raw_len; i++)
2932 		libbpf_print(level, "%d%s", spec->raw_spec[i],
2933 			     i == spec->raw_len - 1 ? " => " : ":");
2934 
2935 	libbpf_print(level, "%u @ &x", spec->offset);
2936 
2937 	for (i = 0; i < spec->len; i++) {
2938 		if (spec->spec[i].name)
2939 			libbpf_print(level, ".%s", spec->spec[i].name);
2940 		else
2941 			libbpf_print(level, "[%u]", spec->spec[i].idx);
2942 	}
2943 
2944 }
2945 
2946 static size_t bpf_core_hash_fn(const void *key, void *ctx)
2947 {
2948 	return (size_t)key;
2949 }
2950 
2951 static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
2952 {
2953 	return k1 == k2;
2954 }
2955 
2956 static void *u32_as_hash_key(__u32 x)
2957 {
2958 	return (void *)(uintptr_t)x;
2959 }
2960 
2961 /*
2962  * CO-RE relocate single instruction.
2963  *
2964  * The outline and important points of the algorithm:
2965  * 1. For given local type, find corresponding candidate target types.
2966  *    Candidate type is a type with the same "essential" name, ignoring
2967  *    everything after last triple underscore (___). E.g., `sample`,
2968  *    `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
2969  *    for each other. Names with triple underscore are referred to as
2970  *    "flavors" and are useful, among other things, to allow to
2971  *    specify/support incompatible variations of the same kernel struct, which
2972  *    might differ between different kernel versions and/or build
2973  *    configurations.
2974  *
2975  *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
2976  *    converter, when deduplicated BTF of a kernel still contains more than
2977  *    one different type with the same name. In that case, ___2, ___3, etc.
2978  *    are appended starting from the second name conflict. But struct flavors
2979  *    are also useful when defined "locally", in a BPF program, to extract the
2980  *    same data from incompatible changes between different kernel
2981  *    versions/configurations. For instance, to handle field renames between
2982  *    kernel versions, one can use two flavors of the struct name with the
2983  *    same common name and use conditional relocations to extract that field,
2984  *    depending on target kernel version.
2985  * 2. For each candidate type, try to match local specification to this
2986  *    candidate target type. Matching involves finding corresponding
2987  *    high-level spec accessors, meaning that all named fields should match,
2988  *    as well as all array accesses should be within the actual bounds. Also,
2989  *    types should be compatible (see bpf_core_fields_are_compat for details).
2990  * 3. It is supported and expected that there might be multiple flavors
2991  *    matching the spec. As long as all the specs resolve to the same set of
2992  *    offsets across all candidates, there is no error. If there is any
2993  *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
2994  *    imperfection of BTF deduplication, which can cause slight duplication of
2995  *    the same BTF type, if some directly or indirectly referenced (by
2996  *    pointer) type gets resolved to different actual types in different
2997  *    object files. If such situation occurs, deduplicated BTF will end up
2998  *    with two (or more) structurally identical types, which differ only in
2999  *    types they refer to through pointer. This should be OK in most cases and
3000  *    is not an error.
3001  * 4. Candidate types search is performed by linearly scanning through all
3002  *    types in target BTF. It is anticipated that this is overall more
3003  *    efficient memory-wise and not significantly worse (if not better)
3004  *    CPU-wise compared to prebuilding a map from all local type names to
3005  *    a list of candidate type names. It's also sped up by caching resolved
3006  *    list of matching candidates per each local "root" type ID, that has at
3007  *    least one bpf_offset_reloc associated with it. This list is shared
3008  *    between multiple relocations for the same type ID and is updated as some
3009  *    of the candidates are pruned due to structural incompatibility.
3010  */
3011 static int bpf_core_reloc_offset(struct bpf_program *prog,
3012 				 const struct bpf_offset_reloc *relo,
3013 				 int relo_idx,
3014 				 const struct btf *local_btf,
3015 				 const struct btf *targ_btf,
3016 				 struct hashmap *cand_cache)
3017 {
3018 	const char *prog_name = bpf_program__title(prog, false);
3019 	struct bpf_core_spec local_spec, cand_spec, targ_spec;
3020 	const void *type_key = u32_as_hash_key(relo->type_id);
3021 	const struct btf_type *local_type, *cand_type;
3022 	const char *local_name, *cand_name;
3023 	struct ids_vec *cand_ids;
3024 	__u32 local_id, cand_id;
3025 	const char *spec_str;
3026 	int i, j, err;
3027 
3028 	local_id = relo->type_id;
3029 	local_type = btf__type_by_id(local_btf, local_id);
3030 	if (!local_type)
3031 		return -EINVAL;
3032 
3033 	local_name = btf__name_by_offset(local_btf, local_type->name_off);
3034 	if (str_is_empty(local_name))
3035 		return -EINVAL;
3036 
3037 	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
3038 	if (str_is_empty(spec_str))
3039 		return -EINVAL;
3040 
3041 	err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
3042 	if (err) {
3043 		pr_warning("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
3044 			   prog_name, relo_idx, local_id, local_name, spec_str,
3045 			   err);
3046 		return -EINVAL;
3047 	}
3048 
3049 	pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
3050 	bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
3051 	libbpf_print(LIBBPF_DEBUG, "\n");
3052 
3053 	if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
3054 		cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
3055 		if (IS_ERR(cand_ids)) {
3056 			pr_warning("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
3057 				   prog_name, relo_idx, local_id, local_name,
3058 				   PTR_ERR(cand_ids));
3059 			return PTR_ERR(cand_ids);
3060 		}
3061 		err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
3062 		if (err) {
3063 			bpf_core_free_cands(cand_ids);
3064 			return err;
3065 		}
3066 	}
3067 
3068 	for (i = 0, j = 0; i < cand_ids->len; i++) {
3069 		cand_id = cand_ids->data[i];
3070 		cand_type = btf__type_by_id(targ_btf, cand_id);
3071 		cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
3072 
3073 		err = bpf_core_spec_match(&local_spec, targ_btf,
3074 					  cand_id, &cand_spec);
3075 		pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
3076 			 prog_name, relo_idx, i, cand_name);
3077 		bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
3078 		libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
3079 		if (err < 0) {
3080 			pr_warning("prog '%s': relo #%d: matching error: %d\n",
3081 				   prog_name, relo_idx, err);
3082 			return err;
3083 		}
3084 		if (err == 0)
3085 			continue;
3086 
3087 		if (j == 0) {
3088 			targ_spec = cand_spec;
3089 		} else if (cand_spec.offset != targ_spec.offset) {
3090 			/* if there are many candidates, they should all
3091 			 * resolve to the same offset
3092 			 */
3093 			pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
3094 				   prog_name, relo_idx, cand_spec.offset,
3095 				   targ_spec.offset);
3096 			return -EINVAL;
3097 		}
3098 
3099 		cand_ids->data[j++] = cand_spec.spec[0].type_id;
3100 	}
3101 
3102 	cand_ids->len = j;
3103 	if (cand_ids->len == 0) {
3104 		pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
3105 			   prog_name, relo_idx, local_id, local_name, spec_str);
3106 		return -ESRCH;
3107 	}
3108 
3109 	err = bpf_core_reloc_insn(prog, relo->insn_off,
3110 				  local_spec.offset, targ_spec.offset);
3111 	if (err) {
3112 		pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
3113 			   prog_name, relo_idx, relo->insn_off, err);
3114 		return -EINVAL;
3115 	}
3116 
3117 	return 0;
3118 }
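
/* Illustrative BPF-program-side sketch of the "flavor" mechanism described
 * above (type and field names are hypothetical, and the exact clang
 * annotation may differ by compiler version):
 *
 *	struct task_struct___local {
 *		int pid;
 *	};
 *
 *	...
 *	struct task_struct___local *t = (void *)task;
 *	int pid;
 *
 *	pid = __builtin_preserve_access_index(t->pid);
 *
 * Clang records an offset relocation with access string "0:0" against the
 * local type; at load time the candidate search matches the essential name
 * 'task_struct' against kernel BTF and the instruction is patched with the
 * running kernel's actual offset of 'pid'.
 */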
3119 
3120 static int
3121 bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
3122 {
3123 	const struct btf_ext_info_sec *sec;
3124 	const struct bpf_offset_reloc *rec;
3125 	const struct btf_ext_info *seg;
3126 	struct hashmap_entry *entry;
3127 	struct hashmap *cand_cache = NULL;
3128 	struct bpf_program *prog;
3129 	struct btf *targ_btf;
3130 	const char *sec_name;
3131 	int i, err = 0;
3132 
3133 	if (targ_btf_path)
3134 		targ_btf = btf__parse_elf(targ_btf_path, NULL);
3135 	else
3136 		targ_btf = bpf_core_find_kernel_btf();
3137 	if (IS_ERR(targ_btf)) {
3138 		pr_warning("failed to get target BTF: %ld\n",
3139 			   PTR_ERR(targ_btf));
3140 		return PTR_ERR(targ_btf);
3141 	}
3142 
3143 	cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
3144 	if (IS_ERR(cand_cache)) {
3145 		err = PTR_ERR(cand_cache);
3146 		goto out;
3147 	}
3148 
3149 	seg = &obj->btf_ext->offset_reloc_info;
3150 	for_each_btf_ext_sec(seg, sec) {
3151 		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3152 		if (str_is_empty(sec_name)) {
3153 			err = -EINVAL;
3154 			goto out;
3155 		}
3156 		prog = bpf_object__find_program_by_title(obj, sec_name);
3157 		if (!prog) {
3158 			pr_warning("failed to find program '%s' for CO-RE offset relocation\n",
3159 				   sec_name);
3160 			err = -EINVAL;
3161 			goto out;
3162 		}
3163 
3164 		pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
3165 			 sec_name, sec->num_info);
3166 
3167 		for_each_btf_ext_rec(seg, sec, i, rec) {
3168 			err = bpf_core_reloc_offset(prog, rec, i, obj->btf,
3169 						    targ_btf, cand_cache);
3170 			if (err) {
3171 				pr_warning("prog '%s': relo #%d: failed to relocate: %d\n",
3172 					   sec_name, i, err);
3173 				goto out;
3174 			}
3175 		}
3176 	}
3177 
3178 out:
3179 	btf__free(targ_btf);
3180 	if (!IS_ERR_OR_NULL(cand_cache)) {
3181 		hashmap__for_each_entry(cand_cache, entry, i) {
3182 			bpf_core_free_cands(entry->value);
3183 		}
3184 		hashmap__free(cand_cache);
3185 	}
3186 	return err;
3187 }
3188 
3189 static int
3190 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
3191 {
3192 	int err = 0;
3193 
3194 	if (obj->btf_ext->offset_reloc_info.len)
3195 		err = bpf_core_reloc_offsets(obj, targ_btf_path);
3196 
3197 	return err;
3198 }
3199 
3200 static int
3201 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
3202 			struct reloc_desc *relo)
3203 {
3204 	struct bpf_insn *insn, *new_insn;
3205 	struct bpf_program *text;
3206 	size_t new_cnt;
3207 	int err;
3208 
3209 	if (relo->type != RELO_CALL)
3210 		return -LIBBPF_ERRNO__RELOC;
3211 
3212 	if (prog->idx == obj->efile.text_shndx) {
3213 		pr_warning("relo in .text insn %d into off %d\n",
3214 			   relo->insn_idx, relo->text_off);
3215 		return -LIBBPF_ERRNO__RELOC;
3216 	}
3217 
3218 	if (prog->main_prog_cnt == 0) {
3219 		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
3220 		if (!text) {
3221 			pr_warning("no .text section found yet relo into text exist\n");
3222 			return -LIBBPF_ERRNO__RELOC;
3223 		}
3224 		new_cnt = prog->insns_cnt + text->insns_cnt;
3225 		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
3226 		if (!new_insn) {
3227 			pr_warning("oom in prog realloc\n");
3228 			return -ENOMEM;
3229 		}
3230 		prog->insns = new_insn;
3231 
3232 		if (obj->btf_ext) {
3233 			err = bpf_program_reloc_btf_ext(prog, obj,
3234 							text->section_name,
3235 							prog->insns_cnt);
3236 			if (err)
3237 				return err;
3238 		}
3239 
3240 		memcpy(new_insn + prog->insns_cnt, text->insns,
3241 		       text->insns_cnt * sizeof(*insn));
3242 		prog->main_prog_cnt = prog->insns_cnt;
3243 		prog->insns_cnt = new_cnt;
3244 		pr_debug("added %zd insn from %s to prog %s\n",
3245 			 text->insns_cnt, text->section_name,
3246 			 prog->section_name);
3247 	}
3248 	insn = &prog->insns[relo->insn_idx];
3249 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
3250 	return 0;
3251 }
3252 
3253 static int
3254 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
3255 {
3256 	int i, err;
3257 
3258 	if (!prog)
3259 		return 0;
3260 
3261 	if (obj->btf_ext) {
3262 		err = bpf_program_reloc_btf_ext(prog, obj,
3263 						prog->section_name, 0);
3264 		if (err)
3265 			return err;
3266 	}
3267 
3268 	if (!prog->reloc_desc)
3269 		return 0;
3270 
3271 	for (i = 0; i < prog->nr_reloc; i++) {
3272 		if (prog->reloc_desc[i].type == RELO_LD64 ||
3273 		    prog->reloc_desc[i].type == RELO_DATA) {
3274 			bool relo_data = prog->reloc_desc[i].type == RELO_DATA;
3275 			struct bpf_insn *insns = prog->insns;
3276 			int insn_idx, map_idx;
3277 
3278 			insn_idx = prog->reloc_desc[i].insn_idx;
3279 			map_idx = prog->reloc_desc[i].map_idx;
3280 
3281 			if (insn_idx + 1 >= (int)prog->insns_cnt) {
3282 				pr_warning("relocation out of range: '%s'\n",
3283 					   prog->section_name);
3284 				return -LIBBPF_ERRNO__RELOC;
3285 			}
3286 
3287 			if (!relo_data) {
3288 				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
3289 			} else {
3290 				insns[insn_idx].src_reg = BPF_PSEUDO_MAP_VALUE;
3291 				insns[insn_idx + 1].imm = insns[insn_idx].imm;
3292 			}
3293 			insns[insn_idx].imm = obj->maps[map_idx].fd;
3294 		} else if (prog->reloc_desc[i].type == RELO_CALL) {
3295 			err = bpf_program__reloc_text(prog, obj,
3296 						      &prog->reloc_desc[i]);
3297 			if (err)
3298 				return err;
3299 		}
3300 	}
3301 
3302 	zfree(&prog->reloc_desc);
3303 	prog->nr_reloc = 0;
3304 	return 0;
3305 }
3306 
3307 static int
3308 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
3309 {
3310 	struct bpf_program *prog;
3311 	size_t i;
3312 	int err;
3313 
3314 	if (obj->btf_ext) {
3315 		err = bpf_object__relocate_core(obj, targ_btf_path);
3316 		if (err) {
3317 			pr_warning("failed to perform CO-RE relocations: %d\n",
3318 				   err);
3319 			return err;
3320 		}
3321 	}
3322 	for (i = 0; i < obj->nr_programs; i++) {
3323 		prog = &obj->programs[i];
3324 
3325 		err = bpf_program__relocate(prog, obj);
3326 		if (err) {
3327 			pr_warning("failed to relocate '%s'\n",
3328 				   prog->section_name);
3329 			return err;
3330 		}
3331 	}
3332 	return 0;
3333 }
3334 
3335 static int bpf_object__collect_reloc(struct bpf_object *obj)
3336 {
3337 	int i, err;
3338 
3339 	if (!obj_elf_valid(obj)) {
3340 		pr_warning("Internal error: elf object is closed\n");
3341 		return -LIBBPF_ERRNO__INTERNAL;
3342 	}
3343 
3344 	for (i = 0; i < obj->efile.nr_reloc; i++) {
3345 		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
3346 		Elf_Data *data = obj->efile.reloc[i].data;
3347 		int idx = shdr->sh_info;
3348 		struct bpf_program *prog;
3349 
3350 		if (shdr->sh_type != SHT_REL) {
3351 			pr_warning("internal error at %d\n", __LINE__);
3352 			return -LIBBPF_ERRNO__INTERNAL;
3353 		}
3354 
3355 		prog = bpf_object__find_prog_by_idx(obj, idx);
3356 		if (!prog) {
3357 			pr_warning("relocation failed: no section(%d)\n", idx);
3358 			return -LIBBPF_ERRNO__RELOC;
3359 		}
3360 
3361 		err = bpf_program__collect_reloc(prog, shdr, data, obj);
3362 		if (err)
3363 			return err;
3364 	}
3365 	return 0;
3366 }
3367 
3368 static int
3369 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
3370 	     char *license, __u32 kern_version, int *pfd)
3371 {
3372 	struct bpf_load_program_attr load_attr;
3373 	char *cp, errmsg[STRERR_BUFSIZE];
3374 	int log_buf_size = BPF_LOG_BUF_SIZE;
3375 	char *log_buf;
3376 	int btf_fd, ret;
3377 
3378 	if (!insns || !insns_cnt)
3379 		return -EINVAL;
3380 
3381 	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
3382 	load_attr.prog_type = prog->type;
3383 	load_attr.expected_attach_type = prog->expected_attach_type;
3384 	if (prog->caps->name)
3385 		load_attr.name = prog->name;
3386 	load_attr.insns = insns;
3387 	load_attr.insns_cnt = insns_cnt;
3388 	load_attr.license = license;
3389 	load_attr.kern_version = kern_version;
3390 	load_attr.prog_ifindex = prog->prog_ifindex;
3391 	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
3392 	if (prog->obj->btf_ext)
3393 		btf_fd = bpf_object__btf_fd(prog->obj);
3394 	else
3395 		btf_fd = -1;
3396 	load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
3397 	load_attr.func_info = prog->func_info;
3398 	load_attr.func_info_rec_size = prog->func_info_rec_size;
3399 	load_attr.func_info_cnt = prog->func_info_cnt;
3400 	load_attr.line_info = prog->line_info;
3401 	load_attr.line_info_rec_size = prog->line_info_rec_size;
3402 	load_attr.line_info_cnt = prog->line_info_cnt;
3403 	load_attr.log_level = prog->log_level;
3404 	load_attr.prog_flags = prog->prog_flags;
3405 
3406 retry_load:
3407 	log_buf = malloc(log_buf_size);
3408 	if (!log_buf)
3409 		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
3410 
3411 	ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
3412 
3413 	if (ret >= 0) {
3414 		if (load_attr.log_level)
3415 			pr_debug("verifier log:\n%s", log_buf);
3416 		*pfd = ret;
3417 		ret = 0;
3418 		goto out;
3419 	}
3420 
3421 	if (errno == ENOSPC) {
3422 		log_buf_size <<= 1;
3423 		free(log_buf);
3424 		goto retry_load;
3425 	}
3426 	ret = -LIBBPF_ERRNO__LOAD;
3427 	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3428 	pr_warning("load bpf program failed: %s\n", cp);
3429 
3430 	if (log_buf && log_buf[0] != '\0') {
3431 		ret = -LIBBPF_ERRNO__VERIFY;
3432 		pr_warning("-- BEGIN DUMP LOG ---\n");
3433 		pr_warning("\n%s\n", log_buf);
3434 		pr_warning("-- END LOG --\n");
3435 	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
3436 		pr_warning("Program too large (%zu insns), at most %d insns\n",
3437 			   load_attr.insns_cnt, BPF_MAXINSNS);
3438 		ret = -LIBBPF_ERRNO__PROG2BIG;
3439 	} else {
3440 		/* Wrong program type? */
3441 		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
3442 			int fd;
3443 
3444 			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
3445 			load_attr.expected_attach_type = 0;
3446 			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
3447 			if (fd >= 0) {
3448 				close(fd);
3449 				ret = -LIBBPF_ERRNO__PROGTYPE;
3450 				goto out;
3451 			}
3452 		}
3453 
3454 		if (log_buf)
3455 			ret = -LIBBPF_ERRNO__KVER;
3456 	}
3457 
3458 out:
3459 	free(log_buf);
3460 	return ret;
3461 }
3462 
3463 int
3464 bpf_program__load(struct bpf_program *prog,
3465 		  char *license, __u32 kern_version)
3466 {
3467 	int err = 0, fd, i;
3468 
3469 	if (prog->instances.nr < 0 || !prog->instances.fds) {
3470 		if (prog->preprocessor) {
3471 			pr_warning("Internal error: can't load program '%s'\n",
3472 				   prog->section_name);
3473 			return -LIBBPF_ERRNO__INTERNAL;
3474 		}
3475 
3476 		prog->instances.fds = malloc(sizeof(int));
3477 		if (!prog->instances.fds) {
3478 			pr_warning("Not enough memory for BPF fds\n");
3479 			return -ENOMEM;
3480 		}
3481 		prog->instances.nr = 1;
3482 		prog->instances.fds[0] = -1;
3483 	}
3484 
3485 	if (!prog->preprocessor) {
3486 		if (prog->instances.nr != 1) {
3487 			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
3488 				   prog->section_name, prog->instances.nr);
3489 		}
3490 		err = load_program(prog, prog->insns, prog->insns_cnt,
3491 				   license, kern_version, &fd);
3492 		if (!err)
3493 			prog->instances.fds[0] = fd;
3494 		goto out;
3495 	}
3496 
3497 	for (i = 0; i < prog->instances.nr; i++) {
3498 		struct bpf_prog_prep_result result;
3499 		bpf_program_prep_t preprocessor = prog->preprocessor;
3500 
3501 		memset(&result, 0, sizeof(result));
3502 		err = preprocessor(prog, i, prog->insns,
3503 				   prog->insns_cnt, &result);
3504 		if (err) {
3505 			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
3506 				   i, prog->section_name);
3507 			goto out;
3508 		}
3509 
3510 		if (!result.new_insn_ptr || !result.new_insn_cnt) {
3511 			pr_debug("Skip loading the %dth instance of program '%s'\n",
3512 				 i, prog->section_name);
3513 			prog->instances.fds[i] = -1;
3514 			if (result.pfd)
3515 				*result.pfd = -1;
3516 			continue;
3517 		}
3518 
3519 		err = load_program(prog, result.new_insn_ptr,
3520 				   result.new_insn_cnt,
3521 				   license, kern_version, &fd);
3522 
3523 		if (err) {
3524 			pr_warning("Loading the %dth instance of program '%s' failed\n",
3525 					i, prog->section_name);
3526 			goto out;
3527 		}
3528 
3529 		if (result.pfd)
3530 			*result.pfd = fd;
3531 		prog->instances.fds[i] = fd;
3532 	}
3533 out:
3534 	if (err)
3535 		pr_warning("failed to load program '%s'\n",
3536 			   prog->section_name);
3537 	zfree(&prog->insns);
3538 	prog->insns_cnt = 0;
3539 	return err;
3540 }
3541 
bpf_program__is_function_storage(const struct bpf_program * prog,const struct bpf_object * obj)3542 static bool bpf_program__is_function_storage(const struct bpf_program *prog,
3543 					     const struct bpf_object *obj)
3544 {
3545 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
3546 }
3547 
3548 static int
bpf_object__load_progs(struct bpf_object * obj,int log_level)3549 bpf_object__load_progs(struct bpf_object *obj, int log_level)
3550 {
3551 	size_t i;
3552 	int err;
3553 
3554 	for (i = 0; i < obj->nr_programs; i++) {
3555 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
3556 			continue;
3557 		obj->programs[i].log_level |= log_level;
3558 		err = bpf_program__load(&obj->programs[i],
3559 					obj->license,
3560 					obj->kern_version);
3561 		if (err)
3562 			return err;
3563 	}
3564 	return 0;
3565 }
3566 
bpf_prog_type__needs_kver(enum bpf_prog_type type)3567 static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
3568 {
3569 	switch (type) {
3570 	case BPF_PROG_TYPE_SOCKET_FILTER:
3571 	case BPF_PROG_TYPE_SCHED_CLS:
3572 	case BPF_PROG_TYPE_SCHED_ACT:
3573 	case BPF_PROG_TYPE_XDP:
3574 	case BPF_PROG_TYPE_CGROUP_SKB:
3575 	case BPF_PROG_TYPE_CGROUP_SOCK:
3576 	case BPF_PROG_TYPE_LWT_IN:
3577 	case BPF_PROG_TYPE_LWT_OUT:
3578 	case BPF_PROG_TYPE_LWT_XMIT:
3579 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
3580 	case BPF_PROG_TYPE_SOCK_OPS:
3581 	case BPF_PROG_TYPE_SK_SKB:
3582 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3583 	case BPF_PROG_TYPE_SK_MSG:
3584 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3585 	case BPF_PROG_TYPE_LIRC_MODE2:
3586 	case BPF_PROG_TYPE_SK_REUSEPORT:
3587 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3588 	case BPF_PROG_TYPE_UNSPEC:
3589 	case BPF_PROG_TYPE_TRACEPOINT:
3590 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
3591 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
3592 	case BPF_PROG_TYPE_PERF_EVENT:
3593 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3594 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3595 		return false;
3596 	case BPF_PROG_TYPE_KPROBE:
3597 	default:
3598 		return true;
3599 	}
3600 }
3601 
bpf_object__validate(struct bpf_object * obj,bool needs_kver)3602 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
3603 {
3604 	if (needs_kver && obj->kern_version == 0) {
3605 		pr_warning("%s doesn't provide kernel version\n",
3606 			   obj->path);
3607 		return -LIBBPF_ERRNO__KVERSION;
3608 	}
3609 	return 0;
3610 }
3611 
3612 static struct bpf_object *
__bpf_object__open(const char * path,void * obj_buf,size_t obj_buf_sz,bool needs_kver,int flags)3613 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
3614 		   bool needs_kver, int flags)
3615 {
3616 	struct bpf_object *obj;
3617 	int err;
3618 
3619 	if (elf_version(EV_CURRENT) == EV_NONE) {
3620 		pr_warning("failed to init libelf for %s\n", path);
3621 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
3622 	}
3623 
3624 	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
3625 	if (IS_ERR(obj))
3626 		return obj;
3627 
3628 	CHECK_ERR(bpf_object__elf_init(obj), err, out);
3629 	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
3630 	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
3631 	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
3632 	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
3633 	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
3634 
3635 	bpf_object__elf_finish(obj);
3636 	return obj;
3637 out:
3638 	bpf_object__close(obj);
3639 	return ERR_PTR(err);
3640 }
3641 
__bpf_object__open_xattr(struct bpf_object_open_attr * attr,int flags)3642 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
3643 					    int flags)
3644 {
3645 	/* param validation */
3646 	if (!attr->file)
3647 		return NULL;
3648 
3649 	pr_debug("loading %s\n", attr->file);
3650 
3651 	return __bpf_object__open(attr->file, NULL, 0,
3652 				  bpf_prog_type__needs_kver(attr->prog_type),
3653 				  flags);
3654 }
3655 
bpf_object__open_xattr(struct bpf_object_open_attr * attr)3656 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
3657 {
3658 	return __bpf_object__open_xattr(attr, 0);
3659 }
3660 
bpf_object__open(const char * path)3661 struct bpf_object *bpf_object__open(const char *path)
3662 {
3663 	struct bpf_object_open_attr attr = {
3664 		.file		= path,
3665 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
3666 	};
3667 
3668 	return bpf_object__open_xattr(&attr);
3669 }
3670 
bpf_object__open_buffer(void * obj_buf,size_t obj_buf_sz,const char * name)3671 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
3672 					   size_t obj_buf_sz,
3673 					   const char *name)
3674 {
3675 	char tmp_name[64];
3676 
3677 	/* param validation */
3678 	if (!obj_buf || obj_buf_sz <= 0)
3679 		return NULL;
3680 
3681 	if (!name) {
3682 		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
3683 			 (unsigned long)obj_buf,
3684 			 (unsigned long)obj_buf_sz);
3685 		name = tmp_name;
3686 	}
3687 	pr_debug("loading object '%s' from buffer\n", name);
3688 
3689 	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
3690 }
3691 
bpf_object__unload(struct bpf_object * obj)3692 int bpf_object__unload(struct bpf_object *obj)
3693 {
3694 	size_t i;
3695 
3696 	if (!obj)
3697 		return -EINVAL;
3698 
3699 	for (i = 0; i < obj->nr_maps; i++)
3700 		zclose(obj->maps[i].fd);
3701 
3702 	for (i = 0; i < obj->nr_programs; i++)
3703 		bpf_program__unload(&obj->programs[i]);
3704 
3705 	return 0;
3706 }
3707 
bpf_object__load_xattr(struct bpf_object_load_attr * attr)3708 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
3709 {
3710 	struct bpf_object *obj;
3711 	int err;
3712 
3713 	if (!attr)
3714 		return -EINVAL;
3715 	obj = attr->obj;
3716 	if (!obj)
3717 		return -EINVAL;
3718 
3719 	if (obj->loaded) {
3720 		pr_warning("object should not be loaded twice\n");
3721 		return -EINVAL;
3722 	}
3723 
3724 	obj->loaded = true;
3725 
3726 	CHECK_ERR(bpf_object__create_maps(obj), err, out);
3727 	CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
3728 	CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
3729 
3730 	return 0;
3731 out:
3732 	bpf_object__unload(obj);
3733 	pr_warning("failed to load object '%s'\n", obj->path);
3734 	return err;
3735 }
3736 
bpf_object__load(struct bpf_object * obj)3737 int bpf_object__load(struct bpf_object *obj)
3738 {
3739 	struct bpf_object_load_attr attr = {
3740 		.obj = obj,
3741 	};
3742 
3743 	return bpf_object__load_xattr(&attr);
3744 }
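
/*
 * Editorial note: a minimal usage sketch for the open/load pair implemented
 * above. The object path "prog.o" and the error-handling style are
 * illustrative assumptions, not part of this file.
 *
 *	struct bpf_object *obj;
 *	int err;
 *
 *	obj = bpf_object__open("prog.o");
 *	if (libbpf_get_error(obj))
 *		return -1;
 *
 *	err = bpf_object__load(obj);
 *	if (err) {
 *		bpf_object__close(obj);
 *		return err;
 *	}
 */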
3745 
check_path(const char * path)3746 static int check_path(const char *path)
3747 {
3748 	char *cp, errmsg[STRERR_BUFSIZE];
3749 	struct statfs st_fs;
3750 	char *dname, *dir;
3751 	int err = 0;
3752 
3753 	if (path == NULL)
3754 		return -EINVAL;
3755 
3756 	dname = strdup(path);
3757 	if (dname == NULL)
3758 		return -ENOMEM;
3759 
3760 	dir = dirname(dname);
3761 	if (statfs(dir, &st_fs)) {
3762 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3763 		pr_warning("failed to statfs %s: %s\n", dir, cp);
3764 		err = -errno;
3765 	}
3766 	free(dname);
3767 
3768 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
3769 		pr_warning("specified path %s is not on BPF FS\n", path);
3770 		err = -EINVAL;
3771 	}
3772 
3773 	return err;
3774 }
3775 
bpf_program__pin_instance(struct bpf_program * prog,const char * path,int instance)3776 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
3777 			      int instance)
3778 {
3779 	char *cp, errmsg[STRERR_BUFSIZE];
3780 	int err;
3781 
3782 	err = check_path(path);
3783 	if (err)
3784 		return err;
3785 
3786 	if (prog == NULL) {
3787 		pr_warning("invalid program pointer\n");
3788 		return -EINVAL;
3789 	}
3790 
3791 	if (instance < 0 || instance >= prog->instances.nr) {
3792 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
3793 			   instance, prog->section_name, prog->instances.nr);
3794 		return -EINVAL;
3795 	}
3796 
3797 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
3798 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3799 		pr_warning("failed to pin program: %s\n", cp);
3800 		return -errno;
3801 	}
3802 	pr_debug("pinned program '%s'\n", path);
3803 
3804 	return 0;
3805 }
3806 
bpf_program__unpin_instance(struct bpf_program * prog,const char * path,int instance)3807 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
3808 				int instance)
3809 {
3810 	int err;
3811 
3812 	err = check_path(path);
3813 	if (err)
3814 		return err;
3815 
3816 	if (prog == NULL) {
3817 		pr_warning("invalid program pointer\n");
3818 		return -EINVAL;
3819 	}
3820 
3821 	if (instance < 0 || instance >= prog->instances.nr) {
3822 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
3823 			   instance, prog->section_name, prog->instances.nr);
3824 		return -EINVAL;
3825 	}
3826 
3827 	err = unlink(path);
3828 	if (err != 0)
3829 		return -errno;
3830 	pr_debug("unpinned program '%s'\n", path);
3831 
3832 	return 0;
3833 }
3834 
make_dir(const char * path)3835 static int make_dir(const char *path)
3836 {
3837 	char *cp, errmsg[STRERR_BUFSIZE];
3838 	int err = 0;
3839 
3840 	if (mkdir(path, 0700) && errno != EEXIST)
3841 		err = -errno;
3842 
3843 	if (err) {
3844 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
3845 		pr_warning("failed to mkdir %s: %s\n", path, cp);
3846 	}
3847 	return err;
3848 }
3849 
bpf_program__pin(struct bpf_program * prog,const char * path)3850 int bpf_program__pin(struct bpf_program *prog, const char *path)
3851 {
3852 	int i, err;
3853 
3854 	err = check_path(path);
3855 	if (err)
3856 		return err;
3857 
3858 	if (prog == NULL) {
3859 		pr_warning("invalid program pointer\n");
3860 		return -EINVAL;
3861 	}
3862 
3863 	if (prog->instances.nr <= 0) {
3864 		pr_warning("no instances of prog %s to pin\n",
3865 			   prog->section_name);
3866 		return -EINVAL;
3867 	}
3868 
3869 	if (prog->instances.nr == 1) {
3870 		/* don't create subdirs when pinning single instance */
3871 		return bpf_program__pin_instance(prog, path, 0);
3872 	}
3873 
3874 	err = make_dir(path);
3875 	if (err)
3876 		return err;
3877 
3878 	for (i = 0; i < prog->instances.nr; i++) {
3879 		char buf[PATH_MAX];
3880 		int len;
3881 
3882 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3883 		if (len < 0) {
3884 			err = -EINVAL;
3885 			goto err_unpin;
3886 		} else if (len >= PATH_MAX) {
3887 			err = -ENAMETOOLONG;
3888 			goto err_unpin;
3889 		}
3890 
3891 		err = bpf_program__pin_instance(prog, buf, i);
3892 		if (err)
3893 			goto err_unpin;
3894 	}
3895 
3896 	return 0;
3897 
3898 err_unpin:
3899 	for (i = i - 1; i >= 0; i--) {
3900 		char buf[PATH_MAX];
3901 		int len;
3902 
3903 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3904 		if (len < 0)
3905 			continue;
3906 		else if (len >= PATH_MAX)
3907 			continue;
3908 
3909 		bpf_program__unpin_instance(prog, buf, i);
3910 	}
3911 
3912 	rmdir(path);
3913 
3914 	return err;
3915 }
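
/*
 * Editorial note: an illustrative single-instance pin/unpin cycle for the
 * helper above; the path is an assumed location under a bpffs mount, which
 * check_path() verifies via the BPF_FS_MAGIC statfs check.
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	if (err)
 *		fprintf(stderr, "failed to pin program: %d\n", err);
 *	...
 *	bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 */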
3916 
bpf_program__unpin(struct bpf_program * prog,const char * path)3917 int bpf_program__unpin(struct bpf_program *prog, const char *path)
3918 {
3919 	int i, err;
3920 
3921 	err = check_path(path);
3922 	if (err)
3923 		return err;
3924 
3925 	if (prog == NULL) {
3926 		pr_warning("invalid program pointer\n");
3927 		return -EINVAL;
3928 	}
3929 
3930 	if (prog->instances.nr <= 0) {
3931 		pr_warning("no instances of prog %s to unpin\n",
3932 			   prog->section_name);
3933 		return -EINVAL;
3934 	}
3935 
3936 	if (prog->instances.nr == 1) {
3937 		/* single instance was pinned directly at path, no subdirs to remove */
3938 		return bpf_program__unpin_instance(prog, path, 0);
3939 	}
3940 
3941 	for (i = 0; i < prog->instances.nr; i++) {
3942 		char buf[PATH_MAX];
3943 		int len;
3944 
3945 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
3946 		if (len < 0)
3947 			return -EINVAL;
3948 		else if (len >= PATH_MAX)
3949 			return -ENAMETOOLONG;
3950 
3951 		err = bpf_program__unpin_instance(prog, buf, i);
3952 		if (err)
3953 			return err;
3954 	}
3955 
3956 	err = rmdir(path);
3957 	if (err)
3958 		return -errno;
3959 
3960 	return 0;
3961 }
3962 
bpf_map__pin(struct bpf_map * map,const char * path)3963 int bpf_map__pin(struct bpf_map *map, const char *path)
3964 {
3965 	char *cp, errmsg[STRERR_BUFSIZE];
3966 	int err;
3967 
3968 	err = check_path(path);
3969 	if (err)
3970 		return err;
3971 
3972 	if (map == NULL) {
3973 		pr_warning("invalid map pointer\n");
3974 		return -EINVAL;
3975 	}
3976 
3977 	if (bpf_obj_pin(map->fd, path)) {
3978 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
3979 		pr_warning("failed to pin map: %s\n", cp);
3980 		return -errno;
3981 	}
3982 
3983 	pr_debug("pinned map '%s'\n", path);
3984 
3985 	return 0;
3986 }
3987 
bpf_map__unpin(struct bpf_map * map,const char * path)3988 int bpf_map__unpin(struct bpf_map *map, const char *path)
3989 {
3990 	int err;
3991 
3992 	err = check_path(path);
3993 	if (err)
3994 		return err;
3995 
3996 	if (map == NULL) {
3997 		pr_warning("invalid map pointer\n");
3998 		return -EINVAL;
3999 	}
4000 
4001 	err = unlink(path);
4002 	if (err != 0)
4003 		return -errno;
4004 	pr_debug("unpinned map '%s'\n", path);
4005 
4006 	return 0;
4007 }
4008 
bpf_object__pin_maps(struct bpf_object * obj,const char * path)4009 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
4010 {
4011 	struct bpf_map *map;
4012 	int err;
4013 
4014 	if (!obj)
4015 		return -ENOENT;
4016 
4017 	if (!obj->loaded) {
4018 		pr_warning("object not yet loaded; load it first\n");
4019 		return -ENOENT;
4020 	}
4021 
4022 	err = make_dir(path);
4023 	if (err)
4024 		return err;
4025 
4026 	bpf_object__for_each_map(map, obj) {
4027 		char buf[PATH_MAX];
4028 		int len;
4029 
4030 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4031 			       bpf_map__name(map));
4032 		if (len < 0) {
4033 			err = -EINVAL;
4034 			goto err_unpin_maps;
4035 		} else if (len >= PATH_MAX) {
4036 			err = -ENAMETOOLONG;
4037 			goto err_unpin_maps;
4038 		}
4039 
4040 		err = bpf_map__pin(map, buf);
4041 		if (err)
4042 			goto err_unpin_maps;
4043 	}
4044 
4045 	return 0;
4046 
4047 err_unpin_maps:
4048 	while ((map = bpf_map__prev(map, obj))) {
4049 		char buf[PATH_MAX];
4050 		int len;
4051 
4052 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4053 			       bpf_map__name(map));
4054 		if (len < 0)
4055 			continue;
4056 		else if (len >= PATH_MAX)
4057 			continue;
4058 
4059 		bpf_map__unpin(map, buf);
4060 	}
4061 
4062 	return err;
4063 }
4064 
bpf_object__unpin_maps(struct bpf_object * obj,const char * path)4065 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
4066 {
4067 	struct bpf_map *map;
4068 	int err;
4069 
4070 	if (!obj)
4071 		return -ENOENT;
4072 
4073 	bpf_object__for_each_map(map, obj) {
4074 		char buf[PATH_MAX];
4075 		int len;
4076 
4077 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4078 			       bpf_map__name(map));
4079 		if (len < 0)
4080 			return -EINVAL;
4081 		else if (len >= PATH_MAX)
4082 			return -ENAMETOOLONG;
4083 
4084 		err = bpf_map__unpin(map, buf);
4085 		if (err)
4086 			return err;
4087 	}
4088 
4089 	return 0;
4090 }
4091 
bpf_object__pin_programs(struct bpf_object * obj,const char * path)4092 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
4093 {
4094 	struct bpf_program *prog;
4095 	int err;
4096 
4097 	if (!obj)
4098 		return -ENOENT;
4099 
4100 	if (!obj->loaded) {
4101 		pr_warning("object not yet loaded; load it first\n");
4102 		return -ENOENT;
4103 	}
4104 
4105 	err = make_dir(path);
4106 	if (err)
4107 		return err;
4108 
4109 	bpf_object__for_each_program(prog, obj) {
4110 		char buf[PATH_MAX];
4111 		int len;
4112 
4113 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4114 			       prog->pin_name);
4115 		if (len < 0) {
4116 			err = -EINVAL;
4117 			goto err_unpin_programs;
4118 		} else if (len >= PATH_MAX) {
4119 			err = -ENAMETOOLONG;
4120 			goto err_unpin_programs;
4121 		}
4122 
4123 		err = bpf_program__pin(prog, buf);
4124 		if (err)
4125 			goto err_unpin_programs;
4126 	}
4127 
4128 	return 0;
4129 
4130 err_unpin_programs:
4131 	while ((prog = bpf_program__prev(prog, obj))) {
4132 		char buf[PATH_MAX];
4133 		int len;
4134 
4135 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4136 			       prog->pin_name);
4137 		if (len < 0)
4138 			continue;
4139 		else if (len >= PATH_MAX)
4140 			continue;
4141 
4142 		bpf_program__unpin(prog, buf);
4143 	}
4144 
4145 	return err;
4146 }
4147 
bpf_object__unpin_programs(struct bpf_object * obj,const char * path)4148 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
4149 {
4150 	struct bpf_program *prog;
4151 	int err;
4152 
4153 	if (!obj)
4154 		return -ENOENT;
4155 
4156 	bpf_object__for_each_program(prog, obj) {
4157 		char buf[PATH_MAX];
4158 		int len;
4159 
4160 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
4161 			       prog->pin_name);
4162 		if (len < 0)
4163 			return -EINVAL;
4164 		else if (len >= PATH_MAX)
4165 			return -ENAMETOOLONG;
4166 
4167 		err = bpf_program__unpin(prog, buf);
4168 		if (err)
4169 			return err;
4170 	}
4171 
4172 	return 0;
4173 }
4174 
bpf_object__pin(struct bpf_object * obj,const char * path)4175 int bpf_object__pin(struct bpf_object *obj, const char *path)
4176 {
4177 	int err;
4178 
4179 	err = bpf_object__pin_maps(obj, path);
4180 	if (err)
4181 		return err;
4182 
4183 	err = bpf_object__pin_programs(obj, path);
4184 	if (err) {
4185 		bpf_object__unpin_maps(obj, path);
4186 		return err;
4187 	}
4188 
4189 	return 0;
4190 }
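
/*
 * Editorial note: bpf_object__pin() above simply composes the map and program
 * pinning helpers; an assumed call site (the directory name is illustrative):
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/myobj");
 *	...
 *	bpf_object__unpin_programs(obj, "/sys/fs/bpf/myobj");
 *	bpf_object__unpin_maps(obj, "/sys/fs/bpf/myobj");
 */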
4191 
bpf_object__close(struct bpf_object * obj)4192 void bpf_object__close(struct bpf_object *obj)
4193 {
4194 	size_t i;
4195 
4196 	if (!obj)
4197 		return;
4198 
4199 	if (obj->clear_priv)
4200 		obj->clear_priv(obj, obj->priv);
4201 
4202 	bpf_object__elf_finish(obj);
4203 	bpf_object__unload(obj);
4204 	btf__free(obj->btf);
4205 	btf_ext__free(obj->btf_ext);
4206 
4207 	for (i = 0; i < obj->nr_maps; i++) {
4208 		zfree(&obj->maps[i].name);
4209 		if (obj->maps[i].clear_priv)
4210 			obj->maps[i].clear_priv(&obj->maps[i],
4211 						obj->maps[i].priv);
4212 		obj->maps[i].priv = NULL;
4213 		obj->maps[i].clear_priv = NULL;
4214 	}
4215 
4216 	zfree(&obj->sections.rodata);
4217 	zfree(&obj->sections.data);
4218 	zfree(&obj->maps);
4219 	obj->nr_maps = 0;
4220 
4221 	if (obj->programs && obj->nr_programs) {
4222 		for (i = 0; i < obj->nr_programs; i++)
4223 			bpf_program__exit(&obj->programs[i]);
4224 	}
4225 	zfree(&obj->programs);
4226 
4227 	list_del(&obj->list);
4228 	free(obj);
4229 }
4230 
4231 struct bpf_object *
bpf_object__next(struct bpf_object * prev)4232 bpf_object__next(struct bpf_object *prev)
4233 {
4234 	struct bpf_object *next;
4235 
4236 	if (!prev)
4237 		next = list_first_entry(&bpf_objects_list,
4238 					struct bpf_object,
4239 					list);
4240 	else
4241 		next = list_next_entry(prev, list);
4242 
4243 	/* An empty list is detected here, so no check is needed on entry. */
4244 	if (&next->list == &bpf_objects_list)
4245 		return NULL;
4246 
4247 	return next;
4248 }
4249 
bpf_object__name(const struct bpf_object * obj)4250 const char *bpf_object__name(const struct bpf_object *obj)
4251 {
4252 	return obj ? obj->path : ERR_PTR(-EINVAL);
4253 }
4254 
bpf_object__kversion(const struct bpf_object * obj)4255 unsigned int bpf_object__kversion(const struct bpf_object *obj)
4256 {
4257 	return obj ? obj->kern_version : 0;
4258 }
4259 
bpf_object__btf(const struct bpf_object * obj)4260 struct btf *bpf_object__btf(const struct bpf_object *obj)
4261 {
4262 	return obj ? obj->btf : NULL;
4263 }
4264 
bpf_object__btf_fd(const struct bpf_object * obj)4265 int bpf_object__btf_fd(const struct bpf_object *obj)
4266 {
4267 	return obj->btf ? btf__fd(obj->btf) : -1;
4268 }
4269 
bpf_object__set_priv(struct bpf_object * obj,void * priv,bpf_object_clear_priv_t clear_priv)4270 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
4271 			 bpf_object_clear_priv_t clear_priv)
4272 {
4273 	if (obj->priv && obj->clear_priv)
4274 		obj->clear_priv(obj, obj->priv);
4275 
4276 	obj->priv = priv;
4277 	obj->clear_priv = clear_priv;
4278 	return 0;
4279 }
4280 
bpf_object__priv(const struct bpf_object * obj)4281 void *bpf_object__priv(const struct bpf_object *obj)
4282 {
4283 	return obj ? obj->priv : ERR_PTR(-EINVAL);
4284 }
4285 
4286 static struct bpf_program *
__bpf_program__iter(const struct bpf_program * p,const struct bpf_object * obj,bool forward)4287 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
4288 		    bool forward)
4289 {
4290 	size_t nr_programs = obj->nr_programs;
4291 	ssize_t idx;
4292 
4293 	if (!nr_programs)
4294 		return NULL;
4295 
4296 	if (!p)
4297 		/* Iter from the beginning */
4298 		return forward ? &obj->programs[0] :
4299 			&obj->programs[nr_programs - 1];
4300 
4301 	if (p->obj != obj) {
4302 		pr_warning("error: program handle doesn't match object\n");
4303 		return NULL;
4304 	}
4305 
4306 	idx = (p - obj->programs) + (forward ? 1 : -1);
4307 	if (idx >= obj->nr_programs || idx < 0)
4308 		return NULL;
4309 	return &obj->programs[idx];
4310 }
4311 
4312 struct bpf_program *
bpf_program__next(struct bpf_program * prev,const struct bpf_object * obj)4313 bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
4314 {
4315 	struct bpf_program *prog = prev;
4316 
4317 	do {
4318 		prog = __bpf_program__iter(prog, obj, true);
4319 	} while (prog && bpf_program__is_function_storage(prog, obj));
4320 
4321 	return prog;
4322 }
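
/*
 * Editorial note: an illustrative iteration over all loadable programs in an
 * object using the iterator above; it transparently skips the .text "function
 * storage" section when the object uses pseudo calls.
 *
 *	struct bpf_program *prog;
 *
 *	for (prog = bpf_program__next(NULL, obj);
 *	     prog;
 *	     prog = bpf_program__next(prog, obj))
 *		printf("%s\n", bpf_program__title(prog, false));
 */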
4323 
4324 struct bpf_program *
bpf_program__prev(struct bpf_program * next,const struct bpf_object * obj)4325 bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
4326 {
4327 	struct bpf_program *prog = next;
4328 
4329 	do {
4330 		prog = __bpf_program__iter(prog, obj, false);
4331 	} while (prog && bpf_program__is_function_storage(prog, obj));
4332 
4333 	return prog;
4334 }
4335 
bpf_program__set_priv(struct bpf_program * prog,void * priv,bpf_program_clear_priv_t clear_priv)4336 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
4337 			  bpf_program_clear_priv_t clear_priv)
4338 {
4339 	if (prog->priv && prog->clear_priv)
4340 		prog->clear_priv(prog, prog->priv);
4341 
4342 	prog->priv = priv;
4343 	prog->clear_priv = clear_priv;
4344 	return 0;
4345 }
4346 
bpf_program__priv(const struct bpf_program * prog)4347 void *bpf_program__priv(const struct bpf_program *prog)
4348 {
4349 	return prog ? prog->priv : ERR_PTR(-EINVAL);
4350 }
4351 
bpf_program__set_ifindex(struct bpf_program * prog,__u32 ifindex)4352 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
4353 {
4354 	prog->prog_ifindex = ifindex;
4355 }
4356 
bpf_program__title(const struct bpf_program * prog,bool needs_copy)4357 const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
4358 {
4359 	const char *title;
4360 
4361 	title = prog->section_name;
4362 	if (needs_copy) {
4363 		title = strdup(title);
4364 		if (!title) {
4365 			pr_warning("failed to strdup program title\n");
4366 			return ERR_PTR(-ENOMEM);
4367 		}
4368 	}
4369 
4370 	return title;
4371 }
4372 
bpf_program__fd(const struct bpf_program * prog)4373 int bpf_program__fd(const struct bpf_program *prog)
4374 {
4375 	return bpf_program__nth_fd(prog, 0);
4376 }
4377 
bpf_program__set_prep(struct bpf_program * prog,int nr_instances,bpf_program_prep_t prep)4378 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
4379 			  bpf_program_prep_t prep)
4380 {
4381 	int *instances_fds;
4382 
4383 	if (nr_instances <= 0 || !prep)
4384 		return -EINVAL;
4385 
4386 	if (prog->instances.nr > 0 || prog->instances.fds) {
4387 		pr_warning("Can't set pre-processor after loading\n");
4388 		return -EINVAL;
4389 	}
4390 
4391 	instances_fds = malloc(sizeof(int) * nr_instances);
4392 	if (!instances_fds) {
4393 		pr_warning("failed to allocate memory for fds\n");
4394 		return -ENOMEM;
4395 	}
4396 
4397 	/* fill all fds with -1 */
4398 	memset(instances_fds, -1, sizeof(int) * nr_instances);
4399 
4400 	prog->instances.nr = nr_instances;
4401 	prog->instances.fds = instances_fds;
4402 	prog->preprocessor = prep;
4403 	return 0;
4404 }
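
/*
 * Editorial note: a sketch of a preprocessor callback for the setter above,
 * assuming the bpf_program_prep_t argument order seen at the call site in
 * bpf_program__load() (the authoritative typedef lives in libbpf.h). Filling
 * res->new_insn_ptr/new_insn_cnt tells libbpf what to load for instance n;
 * leaving them NULL/0 skips that instance.
 *
 *	static int my_prep(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;
 *		res->new_insn_cnt = insns_cnt;
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 4, my_prep);
 */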
4405 
bpf_program__nth_fd(const struct bpf_program * prog,int n)4406 int bpf_program__nth_fd(const struct bpf_program *prog, int n)
4407 {
4408 	int fd;
4409 
4410 	if (!prog)
4411 		return -EINVAL;
4412 
4413 	if (n >= prog->instances.nr || n < 0) {
4414 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
4415 			   n, prog->section_name, prog->instances.nr);
4416 		return -EINVAL;
4417 	}
4418 
4419 	fd = prog->instances.fds[n];
4420 	if (fd < 0) {
4421 		pr_warning("%dth instance of program '%s' is invalid\n",
4422 			   n, prog->section_name);
4423 		return -ENOENT;
4424 	}
4425 
4426 	return fd;
4427 }
4428 
bpf_program__set_type(struct bpf_program * prog,enum bpf_prog_type type)4429 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
4430 {
4431 	prog->type = type;
4432 }
4433 
bpf_program__is_type(const struct bpf_program * prog,enum bpf_prog_type type)4434 static bool bpf_program__is_type(const struct bpf_program *prog,
4435 				 enum bpf_prog_type type)
4436 {
4437 	return prog ? (prog->type == type) : false;
4438 }
4439 
4440 #define BPF_PROG_TYPE_FNS(NAME, TYPE)				\
4441 int bpf_program__set_##NAME(struct bpf_program *prog)		\
4442 {								\
4443 	if (!prog)						\
4444 		return -EINVAL;					\
4445 	bpf_program__set_type(prog, TYPE);			\
4446 	return 0;						\
4447 }								\
4448 								\
4449 bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
4450 {								\
4451 	return bpf_program__is_type(prog, TYPE);		\
4452 }								\
4453 
4454 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
4455 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
4456 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
4457 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
4458 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
4459 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
4460 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
4461 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
4462 
bpf_program__set_expected_attach_type(struct bpf_program * prog,enum bpf_attach_type type)4463 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
4464 					   enum bpf_attach_type type)
4465 {
4466 	prog->expected_attach_type = type;
4467 }
4468 
4469 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
4470 	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
4471 
4472 /* Programs that can NOT be attached. */
4473 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
4474 
4475 /* Programs that can be attached. */
4476 #define BPF_APROG_SEC(string, ptype, atype) \
4477 	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
4478 
4479 /* Programs that must specify expected attach type at load time. */
4480 #define BPF_EAPROG_SEC(string, ptype, eatype) \
4481 	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
4482 
4483 /* Programs that can be attached but attach type can't be identified by section
4484  * name. Kept for backward compatibility.
4485  */
4486 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
4487 
4488 static const struct {
4489 	const char *sec;
4490 	size_t len;
4491 	enum bpf_prog_type prog_type;
4492 	enum bpf_attach_type expected_attach_type;
4493 	int is_attachable;
4494 	enum bpf_attach_type attach_type;
4495 } section_names[] = {
4496 	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
4497 	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
4498 	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
4499 	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
4500 	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
4501 	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
4502 	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
4503 	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
4504 	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
4505 	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
4506 	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
4507 	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
4508 	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
4509 	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
4510 						BPF_CGROUP_INET_INGRESS),
4511 	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
4512 						BPF_CGROUP_INET_EGRESS),
4513 	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
4514 	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
4515 						BPF_CGROUP_INET_SOCK_CREATE),
4516 	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
4517 						BPF_CGROUP_INET4_POST_BIND),
4518 	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
4519 						BPF_CGROUP_INET6_POST_BIND),
4520 	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
4521 						BPF_CGROUP_DEVICE),
4522 	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
4523 						BPF_CGROUP_SOCK_OPS),
4524 	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
4525 						BPF_SK_SKB_STREAM_PARSER),
4526 	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
4527 						BPF_SK_SKB_STREAM_VERDICT),
4528 	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
4529 	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
4530 						BPF_SK_MSG_VERDICT),
4531 	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
4532 						BPF_LIRC_MODE2),
4533 	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
4534 						BPF_FLOW_DISSECTOR),
4535 	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4536 						BPF_CGROUP_INET4_BIND),
4537 	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4538 						BPF_CGROUP_INET6_BIND),
4539 	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4540 						BPF_CGROUP_INET4_CONNECT),
4541 	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4542 						BPF_CGROUP_INET6_CONNECT),
4543 	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4544 						BPF_CGROUP_UDP4_SENDMSG),
4545 	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4546 						BPF_CGROUP_UDP6_SENDMSG),
4547 	BPF_EAPROG_SEC("cgroup/recvmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4548 						BPF_CGROUP_UDP4_RECVMSG),
4549 	BPF_EAPROG_SEC("cgroup/recvmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
4550 						BPF_CGROUP_UDP6_RECVMSG),
4551 	BPF_EAPROG_SEC("cgroup/sysctl",		BPF_PROG_TYPE_CGROUP_SYSCTL,
4552 						BPF_CGROUP_SYSCTL),
4553 	BPF_EAPROG_SEC("cgroup/getsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
4554 						BPF_CGROUP_GETSOCKOPT),
4555 	BPF_EAPROG_SEC("cgroup/setsockopt",	BPF_PROG_TYPE_CGROUP_SOCKOPT,
4556 						BPF_CGROUP_SETSOCKOPT),
4557 };
4558 
4559 #undef BPF_PROG_SEC_IMPL
4560 #undef BPF_PROG_SEC
4561 #undef BPF_APROG_SEC
4562 #undef BPF_EAPROG_SEC
4563 #undef BPF_APROG_COMPAT
4564 
4565 #define MAX_TYPE_NAME_SIZE 32
4566 
libbpf_get_type_names(bool attach_type)4567 static char *libbpf_get_type_names(bool attach_type)
4568 {
4569 	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
4570 	char *buf;
4571 
4572 	buf = malloc(len);
4573 	if (!buf)
4574 		return NULL;
4575 
4576 	buf[0] = '\0';
4577 	/* Build a string containing all available section names */
4578 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4579 		if (attach_type && !section_names[i].is_attachable)
4580 			continue;
4581 
4582 		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
4583 			free(buf);
4584 			return NULL;
4585 		}
4586 		strcat(buf, " ");
4587 		strcat(buf, section_names[i].sec);
4588 	}
4589 
4590 	return buf;
4591 }
4592 
libbpf_prog_type_by_name(const char * name,enum bpf_prog_type * prog_type,enum bpf_attach_type * expected_attach_type)4593 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
4594 			     enum bpf_attach_type *expected_attach_type)
4595 {
4596 	char *type_names;
4597 	int i;
4598 
4599 	if (!name)
4600 		return -EINVAL;
4601 
4602 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4603 		if (strncmp(name, section_names[i].sec, section_names[i].len))
4604 			continue;
4605 		*prog_type = section_names[i].prog_type;
4606 		*expected_attach_type = section_names[i].expected_attach_type;
4607 		return 0;
4608 	}
4609 	pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
4610 	type_names = libbpf_get_type_names(false);
4611 	if (type_names != NULL) {
4612 		pr_info("supported section(type) names are:%s\n", type_names);
4613 		free(type_names);
4614 	}
4615 
4616 	return -EINVAL;
4617 }
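
/*
 * Editorial note: an assumed call mapping an ELF section name to its program
 * and expected attach types via the section_names table above; per that
 * table, "xdp" resolves to BPF_PROG_TYPE_XDP.
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	if (!libbpf_prog_type_by_name("xdp", &prog_type, &attach_type))
 *		bpf_program__set_type(prog, prog_type);
 */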
4618 
libbpf_attach_type_by_name(const char * name,enum bpf_attach_type * attach_type)4619 int libbpf_attach_type_by_name(const char *name,
4620 			       enum bpf_attach_type *attach_type)
4621 {
4622 	char *type_names;
4623 	int i;
4624 
4625 	if (!name)
4626 		return -EINVAL;
4627 
4628 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
4629 		if (strncmp(name, section_names[i].sec, section_names[i].len))
4630 			continue;
4631 		if (!section_names[i].is_attachable)
4632 			return -EINVAL;
4633 		*attach_type = section_names[i].attach_type;
4634 		return 0;
4635 	}
4636 	pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
4637 	type_names = libbpf_get_type_names(true);
4638 	if (type_names != NULL) {
4639 		pr_info("attachable section(type) names are:%s\n", type_names);
4640 		free(type_names);
4641 	}
4642 
4643 	return -EINVAL;
4644 }
4645 
4646 static int
bpf_program__identify_section(struct bpf_program * prog,enum bpf_prog_type * prog_type,enum bpf_attach_type * expected_attach_type)4647 bpf_program__identify_section(struct bpf_program *prog,
4648 			      enum bpf_prog_type *prog_type,
4649 			      enum bpf_attach_type *expected_attach_type)
4650 {
4651 	return libbpf_prog_type_by_name(prog->section_name, prog_type,
4652 					expected_attach_type);
4653 }
4654 
bpf_map__fd(const struct bpf_map * map)4655 int bpf_map__fd(const struct bpf_map *map)
4656 {
4657 	return map ? map->fd : -EINVAL;
4658 }
4659 
bpf_map__def(const struct bpf_map * map)4660 const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
4661 {
4662 	return map ? &map->def : ERR_PTR(-EINVAL);
4663 }
4664 
bpf_map__name(const struct bpf_map * map)4665 const char *bpf_map__name(const struct bpf_map *map)
4666 {
4667 	return map ? map->name : NULL;
4668 }
4669 
bpf_map__btf_key_type_id(const struct bpf_map * map)4670 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
4671 {
4672 	return map ? map->btf_key_type_id : 0;
4673 }
4674 
bpf_map__btf_value_type_id(const struct bpf_map * map)4675 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
4676 {
4677 	return map ? map->btf_value_type_id : 0;
4678 }
4679 
bpf_map__set_priv(struct bpf_map * map,void * priv,bpf_map_clear_priv_t clear_priv)4680 int bpf_map__set_priv(struct bpf_map *map, void *priv,
4681 		     bpf_map_clear_priv_t clear_priv)
4682 {
4683 	if (!map)
4684 		return -EINVAL;
4685 
4686 	if (map->priv) {
4687 		if (map->clear_priv)
4688 			map->clear_priv(map, map->priv);
4689 	}
4690 
4691 	map->priv = priv;
4692 	map->clear_priv = clear_priv;
4693 	return 0;
4694 }
4695 
bpf_map__priv(const struct bpf_map * map)4696 void *bpf_map__priv(const struct bpf_map *map)
4697 {
4698 	return map ? map->priv : ERR_PTR(-EINVAL);
4699 }
4700 
bpf_map__is_offload_neutral(const struct bpf_map * map)4701 bool bpf_map__is_offload_neutral(const struct bpf_map *map)
4702 {
4703 	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
4704 }
4705 
bpf_map__is_internal(const struct bpf_map * map)4706 bool bpf_map__is_internal(const struct bpf_map *map)
4707 {
4708 	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
4709 }
4710 
bpf_map__set_ifindex(struct bpf_map * map,__u32 ifindex)4711 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
4712 {
4713 	map->map_ifindex = ifindex;
4714 }
4715 
bpf_map__set_inner_map_fd(struct bpf_map * map,int fd)4716 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
4717 {
4718 	if (!bpf_map_type__is_map_in_map(map->def.type)) {
4719 		pr_warning("error: unsupported map type\n");
4720 		return -EINVAL;
4721 	}
4722 	if (map->inner_map_fd != -1) {
4723 		pr_warning("error: inner_map_fd already specified\n");
4724 		return -EINVAL;
4725 	}
4726 	map->inner_map_fd = fd;
4727 	return 0;
4728 }
4729 
4730 static struct bpf_map *
__bpf_map__iter(const struct bpf_map * m,const struct bpf_object * obj,int i)4731 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
4732 {
4733 	ssize_t idx;
4734 	struct bpf_map *s, *e;
4735 
4736 	if (!obj || !obj->maps)
4737 		return NULL;
4738 
4739 	s = obj->maps;
4740 	e = obj->maps + obj->nr_maps;
4741 
4742 	if ((m < s) || (m >= e)) {
4743 		pr_warning("error in %s: map handle doesn't belong to object\n",
4744 			   __func__);
4745 		return NULL;
4746 	}
4747 
4748 	idx = (m - obj->maps) + i;
4749 	if (idx >= obj->nr_maps || idx < 0)
4750 		return NULL;
4751 	return &obj->maps[idx];
4752 }
4753 
4754 struct bpf_map *
bpf_map__next(const struct bpf_map * prev,const struct bpf_object * obj)4755 bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
4756 {
4757 	if (prev == NULL)
4758 		return obj->maps;
4759 
4760 	return __bpf_map__iter(prev, obj, 1);
4761 }
4762 
4763 struct bpf_map *
bpf_map__prev(const struct bpf_map * next,const struct bpf_object * obj)4764 bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
4765 {
4766 	if (next == NULL) {
4767 		if (!obj->nr_maps)
4768 			return NULL;
4769 		return obj->maps + obj->nr_maps - 1;
4770 	}
4771 
4772 	return __bpf_map__iter(next, obj, -1);
4773 }
4774 
4775 struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object * obj,const char * name)4776 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
4777 {
4778 	struct bpf_map *pos;
4779 
4780 	bpf_object__for_each_map(pos, obj) {
4781 		if (pos->name && !strcmp(pos->name, name))
4782 			return pos;
4783 	}
4784 	return NULL;
4785 }
4786 
4787 int
bpf_object__find_map_fd_by_name(const struct bpf_object * obj,const char * name)4788 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
4789 {
4790 	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
4791 }
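
/*
 * Editorial note: an illustrative map-FD lookup by name followed by a direct
 * update; the map name "counters" and key/value layout are assumptions, and
 * bpf_map_update_elem() comes from bpf.h.
 *
 *	int map_fd = bpf_object__find_map_fd_by_name(obj, "counters");
 *	__u32 key = 0;
 *	__u64 val = 1;
 *
 *	if (map_fd >= 0)
 *		bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */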
4792 
4793 struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object * obj,size_t offset)4794 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
4795 {
4796 	return ERR_PTR(-ENOTSUP);
4797 }
4798 
libbpf_get_error(const void * ptr)4799 long libbpf_get_error(const void *ptr)
4800 {
4801 	return PTR_ERR_OR_ZERO(ptr);
4802 }
4803 
bpf_prog_load(const char * file,enum bpf_prog_type type,struct bpf_object ** pobj,int * prog_fd)4804 int bpf_prog_load(const char *file, enum bpf_prog_type type,
4805 		  struct bpf_object **pobj, int *prog_fd)
4806 {
4807 	struct bpf_prog_load_attr attr;
4808 
4809 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
4810 	attr.file = file;
4811 	attr.prog_type = type;
4812 	attr.expected_attach_type = 0;
4813 
4814 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
4815 }
4816 
bpf_prog_load_xattr(const struct bpf_prog_load_attr * attr,struct bpf_object ** pobj,int * prog_fd)4817 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
4818 			struct bpf_object **pobj, int *prog_fd)
4819 {
4820 	struct bpf_object_open_attr open_attr = {};
4821 	struct bpf_program *prog, *first_prog = NULL;
4822 	enum bpf_attach_type expected_attach_type;
4823 	enum bpf_prog_type prog_type;
4824 	struct bpf_object *obj;
4825 	struct bpf_map *map;
4826 	int err;
4827 
4828 	if (!attr)
4829 		return -EINVAL;
4830 	if (!attr->file)
4831 		return -EINVAL;
4832 
4833 	open_attr.file = attr->file;
4834 	open_attr.prog_type = attr->prog_type;
4835 
4836 	obj = bpf_object__open_xattr(&open_attr);
4837 	if (IS_ERR_OR_NULL(obj))
4838 		return -ENOENT;
4839 
4840 	bpf_object__for_each_program(prog, obj) {
4841 		/*
4842 		 * If type is not specified, try to guess it based on
4843 		 * section name.
4844 		 */
4845 		prog_type = attr->prog_type;
4846 		prog->prog_ifindex = attr->ifindex;
4847 		expected_attach_type = attr->expected_attach_type;
4848 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
4849 			err = bpf_program__identify_section(prog, &prog_type,
4850 							    &expected_attach_type);
4851 			if (err < 0) {
4852 				bpf_object__close(obj);
4853 				return -EINVAL;
4854 			}
4855 		}
4856 
4857 		bpf_program__set_type(prog, prog_type);
4858 		bpf_program__set_expected_attach_type(prog,
4859 						      expected_attach_type);
4860 
4861 		prog->log_level = attr->log_level;
4862 		prog->prog_flags = attr->prog_flags;
4863 		if (!first_prog)
4864 			first_prog = prog;
4865 	}
4866 
4867 	bpf_object__for_each_map(map, obj) {
4868 		if (!bpf_map__is_offload_neutral(map))
4869 			map->map_ifindex = attr->ifindex;
4870 	}
4871 
4872 	if (!first_prog) {
4873 		pr_warning("object file doesn't contain bpf program\n");
4874 		bpf_object__close(obj);
4875 		return -ENOENT;
4876 	}
4877 
4878 	err = bpf_object__load(obj);
4879 	if (err) {
4880 		bpf_object__close(obj);
4881 		return -EINVAL;
4882 	}
4883 
4884 	*pobj = obj;
4885 	*prog_fd = bpf_program__fd(first_prog);
4886 	return 0;
4887 }
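
/*
 * Editorial note: a minimal sketch using the thin bpf_prog_load() wrapper
 * defined above; the object path is an illustrative assumption and prog_fd
 * receives the FD of the first program found in the object.
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd))
 *		return -1;
 */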
4888 
4889 struct bpf_link {
4890 	int (*destroy)(struct bpf_link *link);
4891 };
4892 
bpf_link__destroy(struct bpf_link * link)4893 int bpf_link__destroy(struct bpf_link *link)
4894 {
4895 	int err;
4896 
4897 	if (!link)
4898 		return 0;
4899 
4900 	err = link->destroy(link);
4901 	free(link);
4902 
4903 	return err;
4904 }
4905 
4906 struct bpf_link_fd {
4907 	struct bpf_link link; /* must be the first member of the struct */
4908 	int fd; /* hook FD */
4909 };
4910 
bpf_link__destroy_perf_event(struct bpf_link * link)4911 static int bpf_link__destroy_perf_event(struct bpf_link *link)
4912 {
4913 	struct bpf_link_fd *l = (void *)link;
4914 	int err;
4915 
4916 	err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0);
4917 	if (err)
4918 		err = -errno;
4919 
4920 	close(l->fd);
4921 	return err;
4922 }
4923 
bpf_program__attach_perf_event(struct bpf_program * prog,int pfd)4924 struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
4925 						int pfd)
4926 {
4927 	char errmsg[STRERR_BUFSIZE];
4928 	struct bpf_link_fd *link;
4929 	int prog_fd, err;
4930 
4931 	if (pfd < 0) {
4932 		pr_warning("program '%s': invalid perf event FD %d\n",
4933 			   bpf_program__title(prog, false), pfd);
4934 		return ERR_PTR(-EINVAL);
4935 	}
4936 	prog_fd = bpf_program__fd(prog);
4937 	if (prog_fd < 0) {
4938 		pr_warning("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
4939 			   bpf_program__title(prog, false));
4940 		return ERR_PTR(-EINVAL);
4941 	}
4942 
4943 	link = malloc(sizeof(*link));
4944 	if (!link)
4945 		return ERR_PTR(-ENOMEM);
4946 	link->link.destroy = &bpf_link__destroy_perf_event;
4947 	link->fd = pfd;
4948 
4949 	if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
4950 		err = -errno;
4951 		free(link);
4952 		pr_warning("program '%s': failed to attach to pfd %d: %s\n",
4953 			   bpf_program__title(prog, false), pfd,
4954 			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
4955 		return ERR_PTR(err);
4956 	}
4957 	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
4958 		err = -errno;
4959 		free(link);
4960 		pr_warning("program '%s': failed to enable pfd %d: %s\n",
4961 			   bpf_program__title(prog, false), pfd,
4962 			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
4963 		return ERR_PTR(err);
4964 	}
4965 	return (struct bpf_link *)link;
4966 }
4967 
4968 /*
4969  * This function is expected to parse an integer in the range [0, 2^31-1]
4970  * from the given file using scanf format string fmt. If the actual parsed
4971  * value is negative, the result may be indistinguishable from an error.
4972  */
parse_uint_from_file(const char * file,const char * fmt)4973 static int parse_uint_from_file(const char *file, const char *fmt)
4974 {
4975 	char buf[STRERR_BUFSIZE];
4976 	int err, ret;
4977 	FILE *f;
4978 
4979 	f = fopen(file, "r");
4980 	if (!f) {
4981 		err = -errno;
4982 		pr_debug("failed to open '%s': %s\n", file,
4983 			 libbpf_strerror_r(err, buf, sizeof(buf)));
4984 		return err;
4985 	}
4986 	err = fscanf(f, fmt, &ret);
4987 	if (err != 1) {
4988 		err = err == EOF ? -EIO : -errno;
4989 		pr_debug("failed to parse '%s': %s\n", file,
4990 			libbpf_strerror_r(err, buf, sizeof(buf)));
4991 		fclose(f);
4992 		return err;
4993 	}
4994 	fclose(f);
4995 	return ret;
4996 }
4997 
determine_kprobe_perf_type(void)4998 static int determine_kprobe_perf_type(void)
4999 {
5000 	const char *file = "/sys/bus/event_source/devices/kprobe/type";
5001 
5002 	return parse_uint_from_file(file, "%d\n");
5003 }
5004 
determine_uprobe_perf_type(void)5005 static int determine_uprobe_perf_type(void)
5006 {
5007 	const char *file = "/sys/bus/event_source/devices/uprobe/type";
5008 
5009 	return parse_uint_from_file(file, "%d\n");
5010 }
5011 
determine_kprobe_retprobe_bit(void)5012 static int determine_kprobe_retprobe_bit(void)
5013 {
5014 	const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
5015 
5016 	return parse_uint_from_file(file, "config:%d\n");
5017 }
5018 
determine_uprobe_retprobe_bit(void)5019 static int determine_uprobe_retprobe_bit(void)
5020 {
5021 	const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
5022 
5023 	return parse_uint_from_file(file, "config:%d\n");
5024 }
5025 
perf_event_open_probe(bool uprobe,bool retprobe,const char * name,uint64_t offset,int pid)5026 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
5027 				 uint64_t offset, int pid)
5028 {
5029 	struct perf_event_attr attr = {};
5030 	char errmsg[STRERR_BUFSIZE];
5031 	int type, pfd, err;
5032 
5033 	type = uprobe ? determine_uprobe_perf_type()
5034 		      : determine_kprobe_perf_type();
5035 	if (type < 0) {
5036 		pr_warning("failed to determine %s perf type: %s\n",
5037 			   uprobe ? "uprobe" : "kprobe",
5038 			   libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
5039 		return type;
5040 	}
5041 	if (retprobe) {
5042 		int bit = uprobe ? determine_uprobe_retprobe_bit()
5043 				 : determine_kprobe_retprobe_bit();
5044 
5045 		if (bit < 0) {
5046 			pr_warning("failed to determine %s retprobe bit: %s\n",
5047 				   uprobe ? "uprobe" : "kprobe",
5048 				   libbpf_strerror_r(bit, errmsg,
5049 						     sizeof(errmsg)));
5050 			return bit;
5051 		}
5052 		attr.config |= 1 << bit;
5053 	}
5054 	attr.size = sizeof(attr);
5055 	attr.type = type;
5056 	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
5057 	attr.config2 = offset;		 /* kprobe_addr or probe_offset */
5058 
5059 	/* pid filter is meaningful only for uprobes */
5060 	pfd = syscall(__NR_perf_event_open, &attr,
5061 		      pid < 0 ? -1 : pid /* pid */,
5062 		      pid == -1 ? 0 : -1 /* cpu */,
5063 		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5064 	if (pfd < 0) {
5065 		err = -errno;
5066 		pr_warning("%s perf_event_open() failed: %s\n",
5067 			   uprobe ? "uprobe" : "kprobe",
5068 			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5069 		return err;
5070 	}
5071 	return pfd;
5072 }
5073 
bpf_program__attach_kprobe(struct bpf_program * prog,bool retprobe,const char * func_name)5074 struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
5075 					    bool retprobe,
5076 					    const char *func_name)
5077 {
5078 	char errmsg[STRERR_BUFSIZE];
5079 	struct bpf_link *link;
5080 	int pfd, err;
5081 
5082 	pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
5083 				    0 /* offset */, -1 /* pid */);
5084 	if (pfd < 0) {
5085 		pr_warning("program '%s': failed to create %s '%s' perf event: %s\n",
5086 			   bpf_program__title(prog, false),
5087 			   retprobe ? "kretprobe" : "kprobe", func_name,
5088 			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5089 		return ERR_PTR(pfd);
5090 	}
5091 	link = bpf_program__attach_perf_event(prog, pfd);
5092 	if (IS_ERR(link)) {
5093 		close(pfd);
5094 		err = PTR_ERR(link);
5095 		pr_warning("program '%s': failed to attach to %s '%s': %s\n",
5096 			   bpf_program__title(prog, false),
5097 			   retprobe ? "kretprobe" : "kprobe", func_name,
5098 			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5099 		return link;
5100 	}
5101 	return link;
5102 }
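
/*
 * Editorial note: an illustrative attach/detach cycle for the kprobe helper
 * above; "do_sys_open" is an assumed kernel function, the second argument
 * selects kprobe (false) vs. kretprobe (true), and the program must already
 * be loaded so that bpf_program__fd() is valid.
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *	if (libbpf_get_error(link))
 *		return -1;
 *	...
 *	bpf_link__destroy(link);
 */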
5103 
bpf_program__attach_uprobe(struct bpf_program * prog,bool retprobe,pid_t pid,const char * binary_path,size_t func_offset)5104 struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
5105 					    bool retprobe, pid_t pid,
5106 					    const char *binary_path,
5107 					    size_t func_offset)
5108 {
5109 	char errmsg[STRERR_BUFSIZE];
5110 	struct bpf_link *link;
5111 	int pfd, err;
5112 
5113 	pfd = perf_event_open_probe(true /* uprobe */, retprobe,
5114 				    binary_path, func_offset, pid);
5115 	if (pfd < 0) {
5116 		pr_warning("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
5117 			   bpf_program__title(prog, false),
5118 			   retprobe ? "uretprobe" : "uprobe",
5119 			   binary_path, func_offset,
5120 			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5121 		return ERR_PTR(pfd);
5122 	}
5123 	link = bpf_program__attach_perf_event(prog, pfd);
5124 	if (IS_ERR(link)) {
5125 		close(pfd);
5126 		err = PTR_ERR(link);
5127 		pr_warning("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
5128 			   bpf_program__title(prog, false),
5129 			   retprobe ? "uretprobe" : "uprobe",
5130 			   binary_path, func_offset,
5131 			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5132 		return link;
5133 	}
5134 	return link;
5135 }
5136 
determine_tracepoint_id(const char * tp_category,const char * tp_name)5137 static int determine_tracepoint_id(const char *tp_category,
5138 				   const char *tp_name)
5139 {
5140 	char file[PATH_MAX];
5141 	int ret;
5142 
5143 	ret = snprintf(file, sizeof(file),
5144 		       "/sys/kernel/debug/tracing/events/%s/%s/id",
5145 		       tp_category, tp_name);
5146 	if (ret < 0)
5147 		return -errno;
5148 	if (ret >= sizeof(file)) {
5149 		pr_debug("tracepoint %s/%s path is too long\n",
5150 			 tp_category, tp_name);
5151 		return -E2BIG;
5152 	}
5153 	return parse_uint_from_file(file, "%d\n");
5154 }
5155 
perf_event_open_tracepoint(const char * tp_category,const char * tp_name)5156 static int perf_event_open_tracepoint(const char *tp_category,
5157 				      const char *tp_name)
5158 {
5159 	struct perf_event_attr attr = {};
5160 	char errmsg[STRERR_BUFSIZE];
5161 	int tp_id, pfd, err;
5162 
5163 	tp_id = determine_tracepoint_id(tp_category, tp_name);
5164 	if (tp_id < 0) {
5165 		pr_warning("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
5166 			   tp_category, tp_name,
5167 			   libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
5168 		return tp_id;
5169 	}
5170 
5171 	attr.type = PERF_TYPE_TRACEPOINT;
5172 	attr.size = sizeof(attr);
5173 	attr.config = tp_id;
5174 
5175 	pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
5176 		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
5177 	if (pfd < 0) {
5178 		err = -errno;
5179 		pr_warning("tracepoint '%s/%s' perf_event_open() failed: %s\n",
5180 			   tp_category, tp_name,
5181 			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5182 		return err;
5183 	}
5184 	return pfd;
5185 }
5186 
bpf_program__attach_tracepoint(struct bpf_program * prog,const char * tp_category,const char * tp_name)5187 struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
5188 						const char *tp_category,
5189 						const char *tp_name)
5190 {
5191 	char errmsg[STRERR_BUFSIZE];
5192 	struct bpf_link *link;
5193 	int pfd, err;
5194 
5195 	pfd = perf_event_open_tracepoint(tp_category, tp_name);
5196 	if (pfd < 0) {
5197 		pr_warning("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
5198 			   bpf_program__title(prog, false),
5199 			   tp_category, tp_name,
5200 			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5201 		return ERR_PTR(pfd);
5202 	}
5203 	link = bpf_program__attach_perf_event(prog, pfd);
5204 	if (IS_ERR(link)) {
5205 		close(pfd);
5206 		err = PTR_ERR(link);
5207 		pr_warning("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
5208 			   bpf_program__title(prog, false),
5209 			   tp_category, tp_name,
5210 			   libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
5211 		return link;
5212 	}
5213 	return link;
5214 }
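
/*
 * Editorial note: the same pattern applies to tracepoints; category and name
 * follow the tracefs layout consulted by determine_tracepoint_id() above, and
 * the concrete tracepoint used here is an assumption.
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_write");
 *	if (libbpf_get_error(link))
 *		return -1;
 */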
5215 
bpf_link__destroy_fd(struct bpf_link * link)5216 static int bpf_link__destroy_fd(struct bpf_link *link)
5217 {
5218 	struct bpf_link_fd *l = (void *)link;
5219 
5220 	return close(l->fd);
5221 }
5222 
bpf_program__attach_raw_tracepoint(struct bpf_program * prog,const char * tp_name)5223 struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
5224 						    const char *tp_name)
5225 {
5226 	char errmsg[STRERR_BUFSIZE];
5227 	struct bpf_link_fd *link;
5228 	int prog_fd, pfd;
5229 
5230 	prog_fd = bpf_program__fd(prog);
5231 	if (prog_fd < 0) {
5232 		pr_warning("program '%s': can't attach before loaded\n",
5233 			   bpf_program__title(prog, false));
5234 		return ERR_PTR(-EINVAL);
5235 	}
5236 
5237 	link = malloc(sizeof(*link));
5238 	if (!link)
5239 		return ERR_PTR(-ENOMEM);
5240 	link->link.destroy = &bpf_link__destroy_fd;
5241 
5242 	pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
5243 	if (pfd < 0) {
5244 		pfd = -errno;
5245 		free(link);
5246 		pr_warning("program '%s': failed to attach to raw tracepoint '%s': %s\n",
5247 			   bpf_program__title(prog, false), tp_name,
5248 			   libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
5249 		return ERR_PTR(pfd);
5250 	}
5251 	link->fd = pfd;
5252 	return (struct bpf_link *)link;
5253 }
5254 
5255 enum bpf_perf_event_ret
bpf_perf_event_read_simple(void * mmap_mem,size_t mmap_size,size_t page_size,void ** copy_mem,size_t * copy_size,bpf_perf_event_print_t fn,void * private_data)5256 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
5257 			   void **copy_mem, size_t *copy_size,
5258 			   bpf_perf_event_print_t fn, void *private_data)
5259 {
5260 	struct perf_event_mmap_page *header = mmap_mem;
5261 	__u64 data_head = ring_buffer_read_head(header);
5262 	__u64 data_tail = header->data_tail;
5263 	void *base = ((__u8 *)header) + page_size;
5264 	int ret = LIBBPF_PERF_EVENT_CONT;
5265 	struct perf_event_header *ehdr;
5266 	size_t ehdr_size;
5267 
5268 	while (data_head != data_tail) {
5269 		ehdr = base + (data_tail & (mmap_size - 1));
5270 		ehdr_size = ehdr->size;
5271 
5272 		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
5273 			void *copy_start = ehdr;
5274 			size_t len_first = base + mmap_size - copy_start;
5275 			size_t len_secnd = ehdr_size - len_first;
5276 
5277 			if (*copy_size < ehdr_size) {
5278 				free(*copy_mem);
5279 				*copy_mem = malloc(ehdr_size);
5280 				if (!*copy_mem) {
5281 					*copy_size = 0;
5282 					ret = LIBBPF_PERF_EVENT_ERROR;
5283 					break;
5284 				}
5285 				*copy_size = ehdr_size;
5286 			}
5287 
5288 			memcpy(*copy_mem, copy_start, len_first);
5289 			memcpy(*copy_mem + len_first, base, len_secnd);
5290 			ehdr = *copy_mem;
5291 		}
5292 
5293 		ret = fn(ehdr, private_data);
5294 		data_tail += ehdr_size;
5295 		if (ret != LIBBPF_PERF_EVENT_CONT)
5296 			break;
5297 	}
5298 
5299 	ring_buffer_write_tail(header, data_tail);
5300 	return ret;
5301 }
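
/*
 * Editorial note: a sketch of the callback consumed by the reader above;
 * handle_sample() is a hypothetical helper, and how the sample payload is
 * laid out depends entirely on the producing BPF program.
 *
 *	static enum bpf_perf_event_ret
 *	print_event(struct perf_event_header *hdr, void *private_data)
 *	{
 *		if (hdr->type == PERF_RECORD_SAMPLE)
 *			handle_sample(hdr);
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 */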
5302 
5303 struct perf_buffer;
5304 
5305 struct perf_buffer_params {
5306 	struct perf_event_attr *attr;
5307 	/* if event_cb is specified, it takes precedence */
5308 	perf_buffer_event_fn event_cb;
5309 	/* sample_cb and lost_cb are higher-level common-case callbacks */
5310 	perf_buffer_sample_fn sample_cb;
5311 	perf_buffer_lost_fn lost_cb;
5312 	void *ctx;
5313 	int cpu_cnt;
5314 	int *cpus;
5315 	int *map_keys;
5316 };
5317 
5318 struct perf_cpu_buf {
5319 	struct perf_buffer *pb;
5320 	void *base; /* mmap()'ed memory */
5321 	void *buf; /* for reconstructing segmented data */
5322 	size_t buf_size;
5323 	int fd;
5324 	int cpu;
5325 	int map_key;
5326 };
5327 
5328 struct perf_buffer {
5329 	perf_buffer_event_fn event_cb;
5330 	perf_buffer_sample_fn sample_cb;
5331 	perf_buffer_lost_fn lost_cb;
5332 	void *ctx; /* passed into callbacks */
5333 
5334 	size_t page_size;
5335 	size_t mmap_size;
5336 	struct perf_cpu_buf **cpu_bufs;
5337 	struct epoll_event *events;
5338 	int cpu_cnt;
5339 	int epoll_fd; /* epoll instance FD */
5340 	int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
5341 };
5342 
5343 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
5344 				      struct perf_cpu_buf *cpu_buf)
5345 {
5346 	if (!cpu_buf)
5347 		return;
5348 	if (cpu_buf->base &&
5349 	    munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
5350 		pr_warning("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
5351 	if (cpu_buf->fd >= 0) {
5352 		ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
5353 		close(cpu_buf->fd);
5354 	}
5355 	free(cpu_buf->buf);
5356 	free(cpu_buf);
5357 }
5358 
5359 void perf_buffer__free(struct perf_buffer *pb)
5360 {
5361 	int i;
5362 
5363 	if (!pb)
5364 		return;
5365 	if (pb->cpu_bufs) {
5366 		for (i = 0; i < pb->cpu_cnt; i++) {
5367 			struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
5368 
5369 			if (!cpu_buf)
5370 				continue;
5371 
5372 			bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
5373 			perf_buffer__free_cpu_buf(pb, cpu_buf);
5374 		}
5375 		free(pb->cpu_bufs);
5376 	}
5377 	if (pb->epoll_fd >= 0)
5378 		close(pb->epoll_fd);
5379 	free(pb->events);
5380 	free(pb);
5381 }
5382 
5383 static struct perf_cpu_buf *
5384 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
5385 			  int cpu, int map_key)
5386 {
5387 	struct perf_cpu_buf *cpu_buf;
5388 	char msg[STRERR_BUFSIZE];
5389 	int err;
5390 
5391 	cpu_buf = calloc(1, sizeof(*cpu_buf));
5392 	if (!cpu_buf)
5393 		return ERR_PTR(-ENOMEM);
5394 
5395 	cpu_buf->pb = pb;
5396 	cpu_buf->cpu = cpu;
5397 	cpu_buf->map_key = map_key;
5398 
5399 	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
5400 			      -1, PERF_FLAG_FD_CLOEXEC);
5401 	if (cpu_buf->fd < 0) {
5402 		err = -errno;
5403 		pr_warning("failed to open perf buffer event on cpu #%d: %s\n",
5404 			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5405 		goto error;
5406 	}
5407 
5408 	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
5409 			     PROT_READ | PROT_WRITE, MAP_SHARED,
5410 			     cpu_buf->fd, 0);
5411 	if (cpu_buf->base == MAP_FAILED) {
5412 		cpu_buf->base = NULL;
5413 		err = -errno;
5414 		pr_warning("failed to mmap perf buffer on cpu #%d: %s\n",
5415 			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5416 		goto error;
5417 	}
5418 
5419 	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
5420 		err = -errno;
5421 		pr_warning("failed to enable perf buffer event on cpu #%d: %s\n",
5422 			   cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
5423 		goto error;
5424 	}
5425 
5426 	return cpu_buf;
5427 
5428 error:
5429 	perf_buffer__free_cpu_buf(pb, cpu_buf);
5430 	return (struct perf_cpu_buf *)ERR_PTR(err);
5431 }
5432 
5433 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
5434 					      struct perf_buffer_params *p);
5435 
5436 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
5437 				     const struct perf_buffer_opts *opts)
5438 {
5439 	struct perf_buffer_params p = {};
5440 	struct perf_event_attr attr = { 0, };
5441 
5442 	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
5443 	attr.type = PERF_TYPE_SOFTWARE;
5444 	attr.sample_type = PERF_SAMPLE_RAW;
5445 	attr.sample_period = 1;
5446 	attr.wakeup_events = 1;
5447 
5448 	p.attr = &attr;
5449 	p.sample_cb = opts ? opts->sample_cb : NULL;
5450 	p.lost_cb = opts ? opts->lost_cb : NULL;
5451 	p.ctx = opts ? opts->ctx : NULL;
5452 
5453 	return __perf_buffer__new(map_fd, page_cnt, &p);
5454 }
5455 
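/*
 * Usage sketch (assumption, not part of the original source): creating a
 * perf buffer over a BPF_MAP_TYPE_PERF_EVENT_ARRAY map with the
 * common-case sample callback. handle_sample() and map_fd are
 * illustrative.
 */
#if 0
static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* data/size describe one record emitted via bpf_perf_event_output() */
}

static struct perf_buffer *example_new_perf_buffer(int map_fd)
{
	struct perf_buffer_opts opts = {};

	opts.sample_cb = handle_sample;
	opts.lost_cb = NULL;	/* optional: notified about dropped records */
	opts.ctx = NULL;

	/* 8 pages per CPU; page_cnt must be a power of two */
	return perf_buffer__new(map_fd, 8, &opts);
}
#endif
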
5456 struct perf_buffer *
5457 perf_buffer__new_raw(int map_fd, size_t page_cnt,
5458 		     const struct perf_buffer_raw_opts *opts)
5459 {
5460 	struct perf_buffer_params p = {};
5461 
5462 	p.attr = opts->attr;
5463 	p.event_cb = opts->event_cb;
5464 	p.ctx = opts->ctx;
5465 	p.cpu_cnt = opts->cpu_cnt;
5466 	p.cpus = opts->cpus;
5467 	p.map_keys = opts->map_keys;
5468 
5469 	return __perf_buffer__new(map_fd, page_cnt, &p);
5470 }
5471 
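/*
 * Usage sketch (assumption, not part of the original source): the "raw"
 * constructor for callers that need their own perf_event_attr and full
 * control over record parsing. handle_event() is illustrative.
 */
#if 0
static enum bpf_perf_event_ret
handle_event(void *ctx, int cpu, struct perf_event_header *event)
{
	/* raw perf_event_header; caller parses PERF_RECORD_* types itself */
	return LIBBPF_PERF_EVENT_CONT;
}

static struct perf_buffer *example_new_raw_perf_buffer(int map_fd)
{
	struct perf_event_attr attr = {};
	struct perf_buffer_raw_opts opts = {};

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	opts.attr = &attr;
	opts.event_cb = handle_event;
	opts.ctx = NULL;
	opts.cpu_cnt = 0; /* 0: one ring per possible CPU, keyed by CPU index */

	return perf_buffer__new_raw(map_fd, 64, &opts);
}
#endif
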
5472 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
5473 					      struct perf_buffer_params *p)
5474 {
5475 	struct bpf_map_info map = {};
5476 	char msg[STRERR_BUFSIZE];
5477 	struct perf_buffer *pb;
5478 	__u32 map_info_len;
5479 	int err, i;
5480 
5481 	if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
5482 		pr_warning("page count should be a non-zero power of two, but is %zu\n",
5483 			   page_cnt);
5484 		return ERR_PTR(-EINVAL);
5485 	}
5486 
5487 	map_info_len = sizeof(map);
5488 	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
5489 	if (err) {
5490 		err = -errno;
5491 		pr_warning("failed to get map info for map FD %d: %s\n",
5492 			   map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
5493 		return ERR_PTR(err);
5494 	}
5495 
5496 	if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
5497 		pr_warning("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
5498 			   map.name);
5499 		return ERR_PTR(-EINVAL);
5500 	}
5501 
5502 	pb = calloc(1, sizeof(*pb));
5503 	if (!pb)
5504 		return ERR_PTR(-ENOMEM);
5505 
5506 	pb->event_cb = p->event_cb;
5507 	pb->sample_cb = p->sample_cb;
5508 	pb->lost_cb = p->lost_cb;
5509 	pb->ctx = p->ctx;
5510 
5511 	pb->page_size = getpagesize();
5512 	pb->mmap_size = pb->page_size * page_cnt;
5513 	pb->map_fd = map_fd;
5514 
5515 	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
5516 	if (pb->epoll_fd < 0) {
5517 		err = -errno;
5518 		pr_warning("failed to create epoll instance: %s\n",
5519 			   libbpf_strerror_r(err, msg, sizeof(msg)));
5520 		goto error;
5521 	}
5522 
5523 	if (p->cpu_cnt > 0) {
5524 		pb->cpu_cnt = p->cpu_cnt;
5525 	} else {
5526 		pb->cpu_cnt = libbpf_num_possible_cpus();
5527 		if (pb->cpu_cnt < 0) {
5528 			err = pb->cpu_cnt;
5529 			goto error;
5530 		}
5531 		if (map.max_entries < pb->cpu_cnt)
5532 			pb->cpu_cnt = map.max_entries;
5533 	}
5534 
5535 	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
5536 	if (!pb->events) {
5537 		err = -ENOMEM;
5538 		pr_warning("failed to allocate events: out of memory\n");
5539 		goto error;
5540 	}
5541 	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
5542 	if (!pb->cpu_bufs) {
5543 		err = -ENOMEM;
5544 		pr_warning("failed to allocate buffers: out of memory\n");
5545 		goto error;
5546 	}
5547 
5548 	for (i = 0; i < pb->cpu_cnt; i++) {
5549 		struct perf_cpu_buf *cpu_buf;
5550 		int cpu, map_key;
5551 
5552 		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
5553 		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
5554 
5555 		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
5556 		if (IS_ERR(cpu_buf)) {
5557 			err = PTR_ERR(cpu_buf);
5558 			goto error;
5559 		}
5560 
5561 		pb->cpu_bufs[i] = cpu_buf;
5562 
5563 		err = bpf_map_update_elem(pb->map_fd, &map_key,
5564 					  &cpu_buf->fd, 0);
5565 		if (err) {
5566 			err = -errno;
5567 			pr_warning("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
5568 				   cpu, map_key, cpu_buf->fd,
5569 				   libbpf_strerror_r(err, msg, sizeof(msg)));
5570 			goto error;
5571 		}
5572 
5573 		pb->events[i].events = EPOLLIN;
5574 		pb->events[i].data.ptr = cpu_buf;
5575 		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
5576 			      &pb->events[i]) < 0) {
5577 			err = -errno;
5578 			pr_warning("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
5579 				   cpu, cpu_buf->fd,
5580 				   libbpf_strerror_r(err, msg, sizeof(msg)));
5581 			goto error;
5582 		}
5583 	}
5584 
5585 	return pb;
5586 
5587 error:
5588 	if (pb)
5589 		perf_buffer__free(pb);
5590 	return ERR_PTR(err);
5591 }
5592 
5593 struct perf_sample_raw {
5594 	struct perf_event_header header;
5595 	uint32_t size;
5596 	char data[0];
5597 };
5598 
5599 struct perf_sample_lost {
5600 	struct perf_event_header header;
5601 	uint64_t id;
5602 	uint64_t lost;
5603 	uint64_t sample_id;
5604 };
5605 
5606 static enum bpf_perf_event_ret
5607 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
5608 {
5609 	struct perf_cpu_buf *cpu_buf = ctx;
5610 	struct perf_buffer *pb = cpu_buf->pb;
5611 	void *data = e;
5612 
5613 	/* user wants full control over parsing perf event */
5614 	if (pb->event_cb)
5615 		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
5616 
5617 	switch (e->type) {
5618 	case PERF_RECORD_SAMPLE: {
5619 		struct perf_sample_raw *s = data;
5620 
5621 		if (pb->sample_cb)
5622 			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
5623 		break;
5624 	}
5625 	case PERF_RECORD_LOST: {
5626 		struct perf_sample_lost *s = data;
5627 
5628 		if (pb->lost_cb)
5629 			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
5630 		break;
5631 	}
5632 	default:
5633 		pr_warning("unknown perf sample type %d\n", e->type);
5634 		return LIBBPF_PERF_EVENT_ERROR;
5635 	}
5636 	return LIBBPF_PERF_EVENT_CONT;
5637 }
5638 
5639 static int perf_buffer__process_records(struct perf_buffer *pb,
5640 					struct perf_cpu_buf *cpu_buf)
5641 {
5642 	enum bpf_perf_event_ret ret;
5643 
5644 	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
5645 					 pb->page_size, &cpu_buf->buf,
5646 					 &cpu_buf->buf_size,
5647 					 perf_buffer__process_record, cpu_buf);
5648 	if (ret != LIBBPF_PERF_EVENT_CONT)
5649 		return ret;
5650 	return 0;
5651 }
5652 
5653 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
5654 {
5655 	int i, cnt, err;
5656 
5657 	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
5658 	for (i = 0; i < cnt; i++) {
5659 		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
5660 
5661 		err = perf_buffer__process_records(pb, cpu_buf);
5662 		if (err) {
5663 			pr_warning("error while processing records: %d\n", err);
5664 			return err;
5665 		}
5666 	}
5667 	return cnt < 0 ? -errno : cnt;
5668 }
5669 
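/*
 * Usage sketch (assumption, not part of the original source): a typical
 * consumer loop. perf_buffer__poll() invokes the registered callbacks for
 * every ready per-CPU ring and returns the number of ready rings, or a
 * negative error.
 */
#if 0
static int example_poll_loop(struct perf_buffer *pb, volatile bool *stop)
{
	int err = 0;

	while (!*stop) {
		err = perf_buffer__poll(pb, 100 /* timeout, ms */);
		if (err < 0 && err != -EINTR)
			break;
	}

	perf_buffer__free(pb);
	return err < 0 ? err : 0;
}
#endif
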
5670 struct bpf_prog_info_array_desc {
5671 	int	array_offset;	/* e.g. offset of jited_prog_insns */
5672 	int	count_offset;	/* e.g. offset of jited_prog_len */
5673 	int	size_offset;	/* > 0: offset of rec size,
5674 				 * < 0: fixed size of -size_offset
5675 				 */
5676 };
5677 
5678 static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
5679 	[BPF_PROG_INFO_JITED_INSNS] = {
5680 		offsetof(struct bpf_prog_info, jited_prog_insns),
5681 		offsetof(struct bpf_prog_info, jited_prog_len),
5682 		-1,
5683 	},
5684 	[BPF_PROG_INFO_XLATED_INSNS] = {
5685 		offsetof(struct bpf_prog_info, xlated_prog_insns),
5686 		offsetof(struct bpf_prog_info, xlated_prog_len),
5687 		-1,
5688 	},
5689 	[BPF_PROG_INFO_MAP_IDS] = {
5690 		offsetof(struct bpf_prog_info, map_ids),
5691 		offsetof(struct bpf_prog_info, nr_map_ids),
5692 		-(int)sizeof(__u32),
5693 	},
5694 	[BPF_PROG_INFO_JITED_KSYMS] = {
5695 		offsetof(struct bpf_prog_info, jited_ksyms),
5696 		offsetof(struct bpf_prog_info, nr_jited_ksyms),
5697 		-(int)sizeof(__u64),
5698 	},
5699 	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
5700 		offsetof(struct bpf_prog_info, jited_func_lens),
5701 		offsetof(struct bpf_prog_info, nr_jited_func_lens),
5702 		-(int)sizeof(__u32),
5703 	},
5704 	[BPF_PROG_INFO_FUNC_INFO] = {
5705 		offsetof(struct bpf_prog_info, func_info),
5706 		offsetof(struct bpf_prog_info, nr_func_info),
5707 		offsetof(struct bpf_prog_info, func_info_rec_size),
5708 	},
5709 	[BPF_PROG_INFO_LINE_INFO] = {
5710 		offsetof(struct bpf_prog_info, line_info),
5711 		offsetof(struct bpf_prog_info, nr_line_info),
5712 		offsetof(struct bpf_prog_info, line_info_rec_size),
5713 	},
5714 	[BPF_PROG_INFO_JITED_LINE_INFO] = {
5715 		offsetof(struct bpf_prog_info, jited_line_info),
5716 		offsetof(struct bpf_prog_info, nr_jited_line_info),
5717 		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
5718 	},
5719 	[BPF_PROG_INFO_PROG_TAGS] = {
5720 		offsetof(struct bpf_prog_info, prog_tags),
5721 		offsetof(struct bpf_prog_info, nr_prog_tags),
5722 		-(int)sizeof(__u8) * BPF_TAG_SIZE,
5723 	},
5724 
5725 };
5726 
5727 static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
5728 {
5729 	__u32 *array = (__u32 *)info;
5730 
5731 	if (offset >= 0)
5732 		return array[offset / sizeof(__u32)];
5733 	return -(int)offset;
5734 }
5735 
5736 static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
5737 {
5738 	__u64 *array = (__u64 *)info;
5739 
5740 	if (offset >= 0)
5741 		return array[offset / sizeof(__u64)];
5742 	return -(int)offset;
5743 }
5744 
5745 static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
5746 					 __u32 val)
5747 {
5748 	__u32 *array = (__u32 *)info;
5749 
5750 	if (offset >= 0)
5751 		array[offset / sizeof(__u32)] = val;
5752 }
5753 
5754 static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
5755 					 __u64 val)
5756 {
5757 	__u64 *array = (__u64 *)info;
5758 
5759 	if (offset >= 0)
5760 		array[offset / sizeof(__u64)] = val;
5761 }
5762 
5763 struct bpf_prog_info_linear *
5764 bpf_program__get_prog_info_linear(int fd, __u64 arrays)
5765 {
5766 	struct bpf_prog_info_linear *info_linear;
5767 	struct bpf_prog_info info = {};
5768 	__u32 info_len = sizeof(info);
5769 	__u32 data_len = 0;
5770 	int i, err;
5771 	void *ptr;
5772 
5773 	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
5774 		return ERR_PTR(-EINVAL);
5775 
5776 	/* step 1: get array dimensions */
5777 	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
5778 	if (err) {
5779 		pr_debug("can't get prog info: %s", strerror(errno));
5780 		return ERR_PTR(-EFAULT);
5781 	}
5782 
5783 	/* step 2: calculate total size of all arrays */
5784 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5785 		bool include_array = (arrays & (1UL << i)) > 0;
5786 		struct bpf_prog_info_array_desc *desc;
5787 		__u32 count, size;
5788 
5789 		desc = bpf_prog_info_array_desc + i;
5790 
5791 		/* kernel is too old to support this field */
5792 		if (info_len < desc->array_offset + sizeof(__u32) ||
5793 		    info_len < desc->count_offset + sizeof(__u32) ||
5794 		    (desc->size_offset > 0 && info_len < desc->size_offset))
5795 			include_array = false;
5796 
5797 		if (!include_array) {
5798 			arrays &= ~(1UL << i);	/* clear the bit */
5799 			continue;
5800 		}
5801 
5802 		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5803 		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5804 
5805 		data_len += count * size;
5806 	}
5807 
5808 	/* step 3: allocate contiguous memory */
5809 	data_len = roundup(data_len, sizeof(__u64));
5810 	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
5811 	if (!info_linear)
5812 		return ERR_PTR(-ENOMEM);
5813 
5814 	/* step 4: fill array pointers and counts into info_linear->info */
5815 	info_linear->arrays = arrays;
5816 	memset(&info_linear->info, 0, sizeof(info));
5817 	ptr = info_linear->data;
5818 
5819 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5820 		struct bpf_prog_info_array_desc *desc;
5821 		__u32 count, size;
5822 
5823 		if ((arrays & (1UL << i)) == 0)
5824 			continue;
5825 
5826 		desc  = bpf_prog_info_array_desc + i;
5827 		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5828 		size  = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5829 		bpf_prog_info_set_offset_u32(&info_linear->info,
5830 					     desc->count_offset, count);
5831 		bpf_prog_info_set_offset_u32(&info_linear->info,
5832 					     desc->size_offset, size);
5833 		bpf_prog_info_set_offset_u64(&info_linear->info,
5834 					     desc->array_offset,
5835 					     ptr_to_u64(ptr));
5836 		ptr += count * size;
5837 	}
5838 
5839 	/* step 5: call syscall again to get required arrays */
5840 	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
5841 	if (err) {
5842 		pr_debug("can't get prog info: %s", strerror(errno));
5843 		free(info_linear);
5844 		return ERR_PTR(-EFAULT);
5845 	}
5846 
5847 	/* step 6: verify the data */
5848 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5849 		struct bpf_prog_info_array_desc *desc;
5850 		__u32 v1, v2;
5851 
5852 		if ((arrays & (1UL << i)) == 0)
5853 			continue;
5854 
5855 		desc = bpf_prog_info_array_desc + i;
5856 		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
5857 		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
5858 						   desc->count_offset);
5859 		if (v1 != v2)
5860 			pr_warning("%s: mismatch in element count\n", __func__);
5861 
5862 		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
5863 		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
5864 						   desc->size_offset);
5865 		if (v1 != v2)
5866 			pr_warning("%s: mismatch in rec size\n", __func__);
5867 	}
5868 
5869 	/* step 7: update info_len and data_len */
5870 	info_linear->info_len = sizeof(struct bpf_prog_info);
5871 	info_linear->data_len = data_len;
5872 
5873 	return info_linear;
5874 }
5875 
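/*
 * Usage sketch (assumption, not part of the original source): fetching
 * prog info with only the map-IDs array materialized. The arrays argument
 * is a bitmask of BPF_PROG_INFO_* enum values; prog_fd is illustrative.
 */
#if 0
static void example_dump_map_ids(int prog_fd)
{
	struct bpf_prog_info_linear *info_linear;
	__u32 *map_ids, i;

	info_linear = bpf_program__get_prog_info_linear(prog_fd,
					1UL << BPF_PROG_INFO_MAP_IDS);
	if (libbpf_get_error(info_linear))
		return;

	/* map_ids holds a pointer into info_linear->data */
	map_ids = (__u32 *)(unsigned long)info_linear->info.map_ids;
	for (i = 0; i < info_linear->info.nr_map_ids; i++)
		printf("map id: %u\n", map_ids[i]);

	free(info_linear);
}
#endif
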
5876 void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
5877 {
5878 	int i;
5879 
5880 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5881 		struct bpf_prog_info_array_desc *desc;
5882 		__u64 addr, offs;
5883 
5884 		if ((info_linear->arrays & (1UL << i)) == 0)
5885 			continue;
5886 
5887 		desc = bpf_prog_info_array_desc + i;
5888 		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
5889 						     desc->array_offset);
5890 		offs = addr - ptr_to_u64(info_linear->data);
5891 		bpf_prog_info_set_offset_u64(&info_linear->info,
5892 					     desc->array_offset, offs);
5893 	}
5894 }
5895 
5896 void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
5897 {
5898 	int i;
5899 
5900 	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
5901 		struct bpf_prog_info_array_desc *desc;
5902 		__u64 addr, offs;
5903 
5904 		if ((info_linear->arrays & (1UL << i)) == 0)
5905 			continue;
5906 
5907 		desc = bpf_prog_info_array_desc + i;
5908 		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
5909 						     desc->array_offset);
5910 		addr = offs + ptr_to_u64(info_linear->data);
5911 		bpf_prog_info_set_offset_u64(&info_linear->info,
5912 					     desc->array_offset, addr);
5913 	}
5914 }
5915 
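/*
 * Sketch (assumption, not part of the original source): the two helpers
 * above are meant to be used as a pair when an info_linear blob is
 * persisted and later reloaded at a different address, e.g. by perf.
 */
#if 0
	/* before writing info_linear to disk: embedded pointers -> offsets */
	bpf_program__bpil_addr_to_offs(info_linear);
	/* after reading the blob back into memory: offsets -> pointers */
	bpf_program__bpil_offs_to_addr(info_linear);
#endif
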
5916 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
5917 {
5918 	int err = 0, n, len, start, end = -1;
5919 	bool *tmp;
5920 
5921 	*mask = NULL;
5922 	*mask_sz = 0;
5923 
5924 	/* Each substring separated by ',' has the format \d+-\d+ or \d+ */
5925 	while (*s) {
5926 		if (*s == ',' || *s == '\n') {
5927 			s++;
5928 			continue;
5929 		}
5930 		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
5931 		if (n <= 0 || n > 2) {
5932 			pr_warning("Failed to get CPU range %s: %d\n", s, n);
5933 			err = -EINVAL;
5934 			goto cleanup;
5935 		} else if (n == 1) {
5936 			end = start;
5937 		}
5938 		if (start < 0 || start > end) {
5939 			pr_warning("Invalid CPU range [%d,%d] in %s\n",
5940 				   start, end, s);
5941 			err = -EINVAL;
5942 			goto cleanup;
5943 		}
5944 		tmp = realloc(*mask, end + 1);
5945 		if (!tmp) {
5946 			err = -ENOMEM;
5947 			goto cleanup;
5948 		}
5949 		*mask = tmp;
5950 		memset(tmp + *mask_sz, 0, start - *mask_sz);
5951 		memset(tmp + start, 1, end - start + 1);
5952 		*mask_sz = end + 1;
5953 		s += len;
5954 	}
5955 	if (!*mask_sz) {
5956 		pr_warning("Empty CPU range\n");
5957 		return -EINVAL;
5958 	}
5959 	return 0;
5960 cleanup:
5961 	free(*mask);
5962 	*mask = NULL;
5963 	return err;
5964 }
5965 
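/*
 * Usage sketch (assumption, not part of the original source): parsing a
 * kernel-style CPU list such as "0-3,5" into a bool mask indexed by CPU.
 */
#if 0
static int example_parse_cpu_list(void)
{
	bool *mask = NULL;
	int i, sz, err;

	/* "0-3,5" -> mask[0..3] = true, mask[4] = false, mask[5] = true */
	err = parse_cpu_mask_str("0-3,5", &mask, &sz);
	if (err)
		return err;

	for (i = 0; i < sz; i++)
		printf("cpu%d: %s\n", i, mask[i] ? "set" : "unset");

	free(mask);
	return 0;
}
#endif
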
5966 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
5967 {
5968 	int fd, err = 0, len;
5969 	char buf[128];
5970 
5971 	fd = open(fcpu, O_RDONLY);
5972 	if (fd < 0) {
5973 		err = -errno;
5974 		pr_warning("Failed to open cpu mask file %s: %d\n", fcpu, err);
5975 		return err;
5976 	}
5977 	len = read(fd, buf, sizeof(buf));
5978 	close(fd);
5979 	if (len <= 0) {
5980 		err = len ? -errno : -EINVAL;
5981 		pr_warning("Failed to read cpu mask from %s: %d\n", fcpu, err);
5982 		return err;
5983 	}
5984 	if (len >= sizeof(buf)) {
5985 		pr_warning("CPU mask is too big in file %s\n", fcpu);
5986 		return -E2BIG;
5987 	}
5988 	buf[len] = '\0';
5989 
5990 	return parse_cpu_mask_str(buf, mask, mask_sz);
5991 }
5992 
5993 int libbpf_num_possible_cpus(void)
5994 {
5995 	static const char *fcpu = "/sys/devices/system/cpu/possible";
5996 	static int cpus;
5997 	int err, n, i, tmp_cpus;
5998 	bool *mask;
5999 
6000 	tmp_cpus = READ_ONCE(cpus);
6001 	if (tmp_cpus > 0)
6002 		return tmp_cpus;
6003 
6004 	err = parse_cpu_mask_file(fcpu, &mask, &n);
6005 	if (err)
6006 		return err;
6007 
6008 	tmp_cpus = 0;
6009 	for (i = 0; i < n; i++) {
6010 		if (mask[i])
6011 			tmp_cpus++;
6012 	}
6013 	free(mask);
6014 
6015 	WRITE_ONCE(cpus, tmp_cpus);
6016 	return tmp_cpus;
6017 }
6018
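/*
 * Usage sketch (assumption, not part of the original source): sizing a
 * value buffer for a per-CPU map lookup, which needs one slot per
 * possible CPU.
 */
#if 0
static int example_alloc_percpu_values(__u64 **values)
{
	int n = libbpf_num_possible_cpus();

	if (n < 0)
		return n; /* negative errno on failure */

	*values = calloc(n, sizeof(**values));
	return *values ? 0 : -ENOMEM;
}
#endif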