1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2
3 /*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
9 * Copyright (C) 2017 Nicira, Inc.
10 * Copyright (C) 2019 Isovalent, Inc.
11 */
12
13 #ifndef _GNU_SOURCE
14 #define _GNU_SOURCE
15 #endif
16 #include <stdlib.h>
17 #include <stdio.h>
18 #include <stdarg.h>
19 #include <libgen.h>
20 #include <inttypes.h>
21 #include <limits.h>
22 #include <string.h>
23 #include <unistd.h>
24 #include <endian.h>
25 #include <fcntl.h>
26 #include <errno.h>
27 #include <ctype.h>
28 #include <asm/unistd.h>
29 #include <linux/err.h>
30 #include <linux/kernel.h>
31 #include <linux/bpf.h>
32 #include <linux/btf.h>
33 #include <linux/filter.h>
34 #include <linux/limits.h>
35 #include <linux/perf_event.h>
36 #include <linux/ring_buffer.h>
37 #include <linux/version.h>
38 #include <sys/epoll.h>
39 #include <sys/ioctl.h>
40 #include <sys/mman.h>
41 #include <sys/stat.h>
42 #include <sys/types.h>
43 #include <sys/vfs.h>
44 #include <sys/utsname.h>
45 #include <sys/resource.h>
46 #ifdef HAVE_LIBELF
47 #include <libelf.h>
48 #include <gelf.h>
49 #endif
50
51 #include <zlib.h>
52
53 #include "libbpf.h"
54 #include "bpf.h"
55 #include "btf.h"
56 #include "str_error.h"
57 #include "libbpf_internal.h"
58 #include "hashmap.h"
59 #include "bpf_gen_internal.h"
60 #include "zip.h"
61
62 #ifndef BPF_FS_MAGIC
63 #define BPF_FS_MAGIC 0xcafe4a11
64 #endif
65
66 #define BPF_INSN_SZ (sizeof(struct bpf_insn))
67
68 /* vfprintf() in __base_pr() uses a nonliteral format string. It may break
69 * compilation if user enables corresponding warning. Disable it explicitly.
70 */
71 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
72
73 #define __printf(a, b) __attribute__((format(printf, a, b)))
74
75 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
76 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
77
78 static const char * const attach_type_name[] = {
79 [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
80 [BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
81 [BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
82 [BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
83 [BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
84 [BPF_CGROUP_DEVICE] = "cgroup_device",
85 [BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
86 [BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
87 [BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
88 [BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
89 [BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect",
90 [BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
91 [BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
92 [BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
93 [BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
94 [BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername",
95 [BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
96 [BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
97 [BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname",
98 [BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
99 [BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
100 [BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg",
101 [BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
102 [BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
103 [BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
104 [BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg",
105 [BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
106 [BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
107 [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
108 [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
109 [BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
110 [BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
111 [BPF_LIRC_MODE2] = "lirc_mode2",
112 [BPF_FLOW_DISSECTOR] = "flow_dissector",
113 [BPF_TRACE_RAW_TP] = "trace_raw_tp",
114 [BPF_TRACE_FENTRY] = "trace_fentry",
115 [BPF_TRACE_FEXIT] = "trace_fexit",
116 [BPF_MODIFY_RETURN] = "modify_return",
117 [BPF_LSM_MAC] = "lsm_mac",
118 [BPF_LSM_CGROUP] = "lsm_cgroup",
119 [BPF_SK_LOOKUP] = "sk_lookup",
120 [BPF_TRACE_ITER] = "trace_iter",
121 [BPF_XDP_DEVMAP] = "xdp_devmap",
122 [BPF_XDP_CPUMAP] = "xdp_cpumap",
123 [BPF_XDP] = "xdp",
124 [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
125 [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
126 [BPF_PERF_EVENT] = "perf_event",
127 [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
128 [BPF_STRUCT_OPS] = "struct_ops",
129 [BPF_NETFILTER] = "netfilter",
130 [BPF_TCX_INGRESS] = "tcx_ingress",
131 [BPF_TCX_EGRESS] = "tcx_egress",
132 [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi",
133 [BPF_NETKIT_PRIMARY] = "netkit_primary",
134 [BPF_NETKIT_PEER] = "netkit_peer",
135 };
136
137 static const char * const link_type_name[] = {
138 [BPF_LINK_TYPE_UNSPEC] = "unspec",
139 [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
140 [BPF_LINK_TYPE_TRACING] = "tracing",
141 [BPF_LINK_TYPE_CGROUP] = "cgroup",
142 [BPF_LINK_TYPE_ITER] = "iter",
143 [BPF_LINK_TYPE_NETNS] = "netns",
144 [BPF_LINK_TYPE_XDP] = "xdp",
145 [BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
146 [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
147 [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
148 [BPF_LINK_TYPE_NETFILTER] = "netfilter",
149 [BPF_LINK_TYPE_TCX] = "tcx",
150 [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi",
151 [BPF_LINK_TYPE_NETKIT] = "netkit",
152 };
153
154 static const char * const map_type_name[] = {
155 [BPF_MAP_TYPE_UNSPEC] = "unspec",
156 [BPF_MAP_TYPE_HASH] = "hash",
157 [BPF_MAP_TYPE_ARRAY] = "array",
158 [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
159 [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
160 [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
161 [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
162 [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
163 [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
164 [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
165 [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
166 [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
167 [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
168 [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
169 [BPF_MAP_TYPE_DEVMAP] = "devmap",
170 [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
171 [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
172 [BPF_MAP_TYPE_CPUMAP] = "cpumap",
173 [BPF_MAP_TYPE_XSKMAP] = "xskmap",
174 [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
175 [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
176 [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
177 [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
178 [BPF_MAP_TYPE_QUEUE] = "queue",
179 [BPF_MAP_TYPE_STACK] = "stack",
180 [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
181 [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
182 [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
183 [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
184 [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
185 [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
186 [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
187 [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
188 };
189
190 static const char * const prog_type_name[] = {
191 [BPF_PROG_TYPE_UNSPEC] = "unspec",
192 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
193 [BPF_PROG_TYPE_KPROBE] = "kprobe",
194 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
195 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
196 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
197 [BPF_PROG_TYPE_XDP] = "xdp",
198 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
199 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
200 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
201 [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
202 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
203 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
204 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
205 [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
206 [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
207 [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
208 [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
209 [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
210 [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
211 [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
212 [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
213 [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
214 [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
215 [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
216 [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
217 [BPF_PROG_TYPE_TRACING] = "tracing",
218 [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
219 [BPF_PROG_TYPE_EXT] = "ext",
220 [BPF_PROG_TYPE_LSM] = "lsm",
221 [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
222 [BPF_PROG_TYPE_SYSCALL] = "syscall",
223 [BPF_PROG_TYPE_NETFILTER] = "netfilter",
224 };
225
226 static int __base_pr(enum libbpf_print_level level, const char *format,
227 va_list args)
228 {
229 if (level == LIBBPF_DEBUG)
230 return 0;
231
232 return vfprintf(stderr, format, args);
233 }
234
235 static libbpf_print_fn_t __libbpf_pr = __base_pr;
236
237 libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
238 {
239 libbpf_print_fn_t old_print_fn;
240
241 old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);
242
243 return old_print_fn;
244 }
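/* Example (illustrative, not part of libbpf): install a custom print
 * callback that also emits debug-level messages, and restore the old
 * one when done; my_pr is a hypothetical user-defined callback:
 *
 *	static int my_pr(enum libbpf_print_level level, const char *format,
 *			 va_list args)
 *	{
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_pr);
 *	... do libbpf work ...
 *	libbpf_set_print(old_fn);
 */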
245
246 __printf(2, 3)
247 void libbpf_print(enum libbpf_print_level level, const char *format, ...)
248 {
249 va_list args;
250 int old_errno;
251 libbpf_print_fn_t print_fn;
252
253 print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
254 if (!print_fn)
255 return;
256
257 old_errno = errno;
258
259 va_start(args, format);
260 print_fn(level, format, args);
261 va_end(args);
262
263 errno = old_errno;
264 }
265
266 static void pr_perm_msg(int err)
267 {
268 struct rlimit limit;
269 char buf[100];
270
271 if (err != -EPERM || geteuid() != 0)
272 return;
273
274 err = getrlimit(RLIMIT_MEMLOCK, &limit);
275 if (err)
276 return;
277
278 if (limit.rlim_cur == RLIM_INFINITY)
279 return;
280
281 if (limit.rlim_cur < 1024)
282 snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
283 else if (limit.rlim_cur < 1024*1024)
284 snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
285 else
286 snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
287
288 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
289 buf);
290 }
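/* A minimal sketch of the remedy pr_perm_msg() hints at: on pre-5.11
 * kernels (before memcg-based accounting) BPF memory is charged against
 * RLIMIT_MEMLOCK, so callers may raise the limit before loading:
 *
 *	struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
 *
 *	if (setrlimit(RLIMIT_MEMLOCK, &rl))
 *		perror("setrlimit(RLIMIT_MEMLOCK)");
 */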
291
292 #define STRERR_BUFSIZE 128
293
294 /* Copied from tools/perf/util/util.h */
295 #ifndef zfree
296 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
297 #endif
298
299 #ifndef zclose
300 # define zclose(fd) ({ \
301 int ___err = 0; \
302 if ((fd) >= 0) \
303 ___err = close((fd)); \
304 fd = -1; \
305 ___err; })
306 #endif
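/* Usage sketch for the helpers above; both reset their argument, so
 * error paths can run them unconditionally and repeatedly:
 *
 *	char *name = strdup("example");		// hypothetical resources
 *	int fd = open("/dev/null", O_RDONLY);
 *	...
 *	zfree(&name);	// free(name), then name = NULL
 *	zclose(fd);	// close(fd) if fd >= 0, then fd = -1
 */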
307
308 static inline __u64 ptr_to_u64(const void *ptr)
309 {
310 return (__u64) (unsigned long) ptr;
311 }
312
313 int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
314 {
315 /* as of v1.0 libbpf_set_strict_mode() is a no-op */
316 return 0;
317 }
318
319 __u32 libbpf_major_version(void)
320 {
321 return LIBBPF_MAJOR_VERSION;
322 }
323
324 __u32 libbpf_minor_version(void)
325 {
326 return LIBBPF_MINOR_VERSION;
327 }
328
329 const char *libbpf_version_string(void)
330 {
331 #define __S(X) #X
332 #define _S(X) __S(X)
333 return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
334 #undef _S
335 #undef __S
336 }
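/* Example (illustrative): sanity-check at runtime that the linked
 * libbpf matches the headers the caller was compiled against:
 *
 *	if (libbpf_major_version() != LIBBPF_MAJOR_VERSION)
 *		fprintf(stderr, "libbpf mismatch: built %d.%d, running %s\n",
 *			LIBBPF_MAJOR_VERSION, LIBBPF_MINOR_VERSION,
 *			libbpf_version_string());
 */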
337
338 enum reloc_type {
339 RELO_LD64,
340 RELO_CALL,
341 RELO_DATA,
342 RELO_EXTERN_LD64,
343 RELO_EXTERN_CALL,
344 RELO_SUBPROG_ADDR,
345 RELO_CORE,
346 };
347
348 struct reloc_desc {
349 enum reloc_type type;
350 int insn_idx;
351 union {
352 const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
353 struct {
354 int map_idx;
355 int sym_off;
356 int ext_idx;
357 };
358 };
359 };
360
361 /* stored as sec_def->cookie for all libbpf-supported SEC()s */
362 enum sec_def_flags {
363 SEC_NONE = 0,
364 /* expected_attach_type is optional, if kernel doesn't support that */
365 SEC_EXP_ATTACH_OPT = 1,
366 /* legacy, only used by libbpf_get_type_names() and
367 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
368 * This used to be associated with cgroup (and few other) BPF programs
369 * that were attachable through BPF_PROG_ATTACH command. Pretty
370 * meaningless nowadays, though.
371 */
372 SEC_ATTACHABLE = 2,
373 SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
374 /* attachment target is specified through BTF ID in either kernel or
375 * other BPF program's BTF object
376 */
377 SEC_ATTACH_BTF = 4,
378 /* BPF program type allows sleeping/blocking in kernel */
379 SEC_SLEEPABLE = 8,
380 /* BPF program supports non-linear XDP buffers */
381 SEC_XDP_FRAGS = 16,
382 /* Setup proper attach type for usdt probes. */
383 SEC_USDT = 32,
384 };
385
386 struct bpf_sec_def {
387 char *sec;
388 enum bpf_prog_type prog_type;
389 enum bpf_attach_type expected_attach_type;
390 long cookie;
391 int handler_id;
392
393 libbpf_prog_setup_fn_t prog_setup_fn;
394 libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
395 libbpf_prog_attach_fn_t prog_attach_fn;
396 };
397
398 /*
399 * bpf_prog should be a better name but it has been used in
400 * linux/filter.h.
401 */
402 struct bpf_program {
403 char *name;
404 char *sec_name;
405 size_t sec_idx;
406 const struct bpf_sec_def *sec_def;
407 /* this program's instruction offset (in number of instructions)
408 * within its containing ELF section
409 */
410 size_t sec_insn_off;
411 /* number of original instructions in ELF section belonging to this
412 * program, not taking into account subprogram instructions possible
413 * appended later during relocation
414 */
415 size_t sec_insn_cnt;
416 /* Offset (in number of instructions) of the start of instructions
417 * belonging to this BPF program within its containing main BPF
418 * program. For the entry-point (main) BPF program, this is always
419 * zero. For a sub-program, this gets reset before each of the main BPF
420 * programs is processed and relocated and is used to determine
421 * whether the sub-program was already appended to the main program, and
422 * if yes, at which instruction offset.
423 */
424 size_t sub_insn_off;
425
426 /* instructions that belong to BPF program; insns[0] is located at
427 * sec_insn_off instruction within its ELF section in ELF file, so
428 * when mapping ELF file instruction index to the local instruction,
429 * one needs to subtract sec_insn_off; and vice versa.
430 */
431 struct bpf_insn *insns;
432 /* actual number of instructions in this BPF program's image; for
433 * entry-point BPF programs this includes the size of main program
434 * itself plus all the used sub-programs, appended at the end
435 */
436 size_t insns_cnt;
437
438 struct reloc_desc *reloc_desc;
439 int nr_reloc;
440
441 /* BPF verifier log settings */
442 char *log_buf;
443 size_t log_size;
444 __u32 log_level;
445
446 struct bpf_object *obj;
447
448 int fd;
449 bool autoload;
450 bool autoattach;
451 bool sym_global;
452 bool mark_btf_static;
453 enum bpf_prog_type type;
454 enum bpf_attach_type expected_attach_type;
455 int exception_cb_idx;
456
457 int prog_ifindex;
458 __u32 attach_btf_obj_fd;
459 __u32 attach_btf_id;
460 __u32 attach_prog_fd;
461
462 void *func_info;
463 __u32 func_info_rec_size;
464 __u32 func_info_cnt;
465
466 void *line_info;
467 __u32 line_info_rec_size;
468 __u32 line_info_cnt;
469 __u32 prog_flags;
470 };
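/* Illustrative helpers (assumed, not part of libbpf) making the index
 * mapping described above concrete: sec_insn_off translates between
 * ELF-section-relative and program-local instruction indices:
 *
 *	static size_t elf_to_prog_idx(const struct bpf_program *p, size_t elf_idx)
 *	{
 *		return elf_idx - p->sec_insn_off;
 *	}
 *
 *	static size_t prog_to_elf_idx(const struct bpf_program *p, size_t loc_idx)
 *	{
 *		return loc_idx + p->sec_insn_off;
 *	}
 */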
471
472 struct bpf_struct_ops {
473 const char *tname;
474 const struct btf_type *type;
475 struct bpf_program **progs;
476 __u32 *kern_func_off;
477 /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
478 void *data;
479 /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
480 * btf_vmlinux's format.
481 * struct bpf_struct_ops_tcp_congestion_ops {
482 * [... some other kernel fields ...]
483 * struct tcp_congestion_ops data;
484 * }
485 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
486 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
487 * from "data".
488 */
489 void *kern_vdata;
490 __u32 type_id;
491 };
492
493 #define DATA_SEC ".data"
494 #define BSS_SEC ".bss"
495 #define RODATA_SEC ".rodata"
496 #define KCONFIG_SEC ".kconfig"
497 #define KSYMS_SEC ".ksyms"
498 #define STRUCT_OPS_SEC ".struct_ops"
499 #define STRUCT_OPS_LINK_SEC ".struct_ops.link"
500
501 enum libbpf_map_type {
502 LIBBPF_MAP_UNSPEC,
503 LIBBPF_MAP_DATA,
504 LIBBPF_MAP_BSS,
505 LIBBPF_MAP_RODATA,
506 LIBBPF_MAP_KCONFIG,
507 };
508
509 struct bpf_map_def {
510 unsigned int type;
511 unsigned int key_size;
512 unsigned int value_size;
513 unsigned int max_entries;
514 unsigned int map_flags;
515 };
516
517 struct bpf_map {
518 struct bpf_object *obj;
519 char *name;
520 /* real_name is defined for special internal maps (.rodata*,
521 * .data*, .bss, .kconfig) and preserves their original ELF section
522 * name. This is important to be able to find corresponding BTF
523 * DATASEC information.
524 */
525 char *real_name;
526 int fd;
527 int sec_idx;
528 size_t sec_offset;
529 int map_ifindex;
530 int inner_map_fd;
531 struct bpf_map_def def;
532 __u32 numa_node;
533 __u32 btf_var_idx;
534 __u32 btf_key_type_id;
535 __u32 btf_value_type_id;
536 __u32 btf_vmlinux_value_type_id;
537 enum libbpf_map_type libbpf_type;
538 void *mmaped;
539 struct bpf_struct_ops *st_ops;
540 struct bpf_map *inner_map;
541 void **init_slots;
542 int init_slots_sz;
543 char *pin_path;
544 bool pinned;
545 bool reused;
546 bool autocreate;
547 __u64 map_extra;
548 };
549
550 enum extern_type {
551 EXT_UNKNOWN,
552 EXT_KCFG,
553 EXT_KSYM,
554 };
555
556 enum kcfg_type {
557 KCFG_UNKNOWN,
558 KCFG_CHAR,
559 KCFG_BOOL,
560 KCFG_INT,
561 KCFG_TRISTATE,
562 KCFG_CHAR_ARR,
563 };
564
565 struct extern_desc {
566 enum extern_type type;
567 int sym_idx;
568 int btf_id;
569 int sec_btf_id;
570 const char *name;
571 char *essent_name;
572 bool is_set;
573 bool is_weak;
574 union {
575 struct {
576 enum kcfg_type type;
577 int sz;
578 int align;
579 int data_off;
580 bool is_signed;
581 } kcfg;
582 struct {
583 unsigned long long addr;
584
585 /* target btf_id of the corresponding kernel var. */
586 int kernel_btf_obj_fd;
587 int kernel_btf_id;
588
589 /* local btf_id of the ksym extern's type. */
590 __u32 type_id;
591 /* BTF fd index to be patched in for insn->off, this is
592 * 0 for vmlinux BTF, index in obj->fd_array for module
593 * BTF
594 */
595 __s16 btf_fd_idx;
596 } ksym;
597 };
598 };
599
600 struct module_btf {
601 struct btf *btf;
602 char *name;
603 __u32 id;
604 int fd;
605 int fd_array_idx;
606 };
607
608 enum sec_type {
609 SEC_UNUSED = 0,
610 SEC_RELO,
611 SEC_BSS,
612 SEC_DATA,
613 SEC_RODATA,
614 };
615
616 struct elf_sec_desc {
617 enum sec_type sec_type;
618 #if defined HAVE_LIBELF
619 Elf64_Shdr *shdr;
620 #elif defined HAVE_ELFIO
621 psection_t psection;
622 Elf_Data realdata;
623 #endif
624 Elf_Data *data;
625 };
626
627 struct elf_state {
628 int fd;
629 const void *obj_buf;
630 size_t obj_buf_sz;
631 #if defined HAVE_LIBELF
632 Elf *elf;
633 #elif defined HAVE_ELFIO
634 pelfio_t elf;
635 Elf64_Ehdr eheader;
636 pstring_t shstring;
637 pstring_t strstring;
638 Elf_Data realsymbols;
639 Elf_Data realst_ops_data;
640 Elf_Data realst_ops_link_data;
641 #endif
642 Elf64_Ehdr *ehdr;
643 Elf_Data *symbols;
644 Elf_Data *st_ops_data;
645 Elf_Data *st_ops_link_data;
646 size_t shstrndx; /* section index for section name strings */
647 size_t strtabidx;
648 struct elf_sec_desc *secs;
649 size_t sec_cnt;
650 int btf_maps_shndx;
651 __u32 btf_maps_sec_btf_id;
652 int text_shndx;
653 int symbols_shndx;
654 int st_ops_shndx;
655 int st_ops_link_shndx;
656 };
657
658 struct usdt_manager;
659
660 struct bpf_object {
661 char name[BPF_OBJ_NAME_LEN];
662 char license[64];
663 __u32 kern_version;
664
665 struct bpf_program *programs;
666 size_t nr_programs;
667 struct bpf_map *maps;
668 size_t nr_maps;
669 size_t maps_cap;
670
671 char *kconfig;
672 struct extern_desc *externs;
673 int nr_extern;
674 int kconfig_map_idx;
675
676 bool loaded;
677 bool has_subcalls;
678 bool has_rodata;
679
680 struct bpf_gen *gen_loader;
681
682 /* Information when doing ELF related work. Only valid if efile.elf is not NULL */
683 struct elf_state efile;
684
685 struct btf *btf;
686 struct btf_ext *btf_ext;
687
688 /* Parse and load BTF vmlinux if any of the programs in the object need
689 * it at load time.
690 */
691 struct btf *btf_vmlinux;
692 /* Path to the custom BTF to be used for BPF CO-RE relocations as an
693 * override for vmlinux BTF.
694 */
695 char *btf_custom_path;
696 /* vmlinux BTF override for CO-RE relocations */
697 struct btf *btf_vmlinux_override;
698 /* Lazily initialized kernel module BTFs */
699 struct module_btf *btf_modules;
700 bool btf_modules_loaded;
701 size_t btf_module_cnt;
702 size_t btf_module_cap;
703
704 /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
705 char *log_buf;
706 size_t log_size;
707 __u32 log_level;
708
709 int *fd_array;
710 size_t fd_array_cap;
711 size_t fd_array_cnt;
712
713 struct usdt_manager *usdt_man;
714
715 char path[];
716 };
717
718 static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
719 static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
720 #ifdef HAVE_LIBELF
721 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
722 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
723 #endif
724 #if defined HAVE_LIBELF
725 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
726 #elif defined HAVE_ELFIO
727 static Elf64_Shdr *elf_sec_hdr_by_idx(const struct bpf_object *obj, size_t idx, Elf64_Shdr *sheader);
728 #endif
729 #if defined HAVE_LIBELF
730 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
731 #elif defined HAVE_ELFIO
732 static const char *elf_sec_name_by_idx(const struct bpf_object *obj, size_t idx);
733 #endif
734 #if defined HAVE_LIBELF
735 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
736 #elif defined HAVE_ELFIO
737 static Elf_Data *elf_sec_data_by_name(const struct bpf_object *obj, const char *name, Elf_Data *data);
738 static Elf_Data *elf_sec_data_by_idx(const struct bpf_object *obj, size_t idx, Elf_Data *data);
739 #endif
740 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
741 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
742
743 void bpf_program__unload(struct bpf_program *prog)
744 {
745 if (!prog)
746 return;
747
748 zclose(prog->fd);
749
750 zfree(&prog->func_info);
751 zfree(&prog->line_info);
752 }
753
754 static void bpf_program__exit(struct bpf_program *prog)
755 {
756 if (!prog)
757 return;
758
759 bpf_program__unload(prog);
760 zfree(&prog->name);
761 zfree(&prog->sec_name);
762 zfree(&prog->insns);
763 zfree(&prog->reloc_desc);
764
765 prog->nr_reloc = 0;
766 prog->insns_cnt = 0;
767 prog->sec_idx = -1;
768 }
769
770 static bool insn_is_subprog_call(const struct bpf_insn *insn)
771 {
772 return BPF_CLASS(insn->code) == BPF_JMP &&
773 BPF_OP(insn->code) == BPF_CALL &&
774 BPF_SRC(insn->code) == BPF_K &&
775 insn->src_reg == BPF_PSEUDO_CALL &&
776 insn->dst_reg == 0 &&
777 insn->off == 0;
778 }
779
780 static bool is_call_insn(const struct bpf_insn *insn)
781 {
782 return insn->code == (BPF_JMP | BPF_CALL);
783 }
784
785 static bool insn_is_pseudo_func(struct bpf_insn *insn)
786 {
787 return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
788 }
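/* For reference, the subprogram call matched by insn_is_subprog_call()
 * can be built by hand like this (imm holds the callee instruction
 * delta, patched during relocation):
 *
 *	struct bpf_insn call = {
 *		.code    = BPF_JMP | BPF_CALL,
 *		.src_reg = BPF_PSEUDO_CALL,
 *		.imm     = 0,	// filled in later
 *	};
 *
 * All other fields stay zero, satisfying the dst_reg/off checks above.
 */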
789
790 static int
791 bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
792 const char *name, size_t sec_idx, const char *sec_name,
793 size_t sec_off, void *insn_data, size_t insn_data_sz)
794 {
795 if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
796 pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
797 sec_name, name, sec_off, insn_data_sz);
798 return -EINVAL;
799 }
800
801 memset(prog, 0, sizeof(*prog));
802 prog->obj = obj;
803
804 prog->sec_idx = sec_idx;
805 prog->sec_insn_off = sec_off / BPF_INSN_SZ;
806 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
807 /* insns_cnt can later be increased by appending used subprograms */
808 prog->insns_cnt = prog->sec_insn_cnt;
809
810 prog->type = BPF_PROG_TYPE_UNSPEC;
811 prog->fd = -1;
812 prog->exception_cb_idx = -1;
813
814 /* libbpf's convention for SEC("?abc...") is that it's just like
815 * SEC("abc...") but the corresponding bpf_program starts out with
816 * autoload set to false.
817 */
818 if (sec_name[0] == '?') {
819 prog->autoload = false;
820 /* from now on forget there was ? in section name */
821 sec_name++;
822 } else {
823 prog->autoload = true;
824 }
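/* Example of the convention (illustrative): a program defined as
 *
 *	SEC("?kprobe/do_sys_openat2")
 *	int BPF_KPROBE(opt_probe) { return 0; }
 *
 * starts out with autoload == false; the user can opt in later with
 * bpf_program__set_autoload(prog, true) before bpf_object__load().
 */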
825
826 prog->autoattach = true;
827
828 /* inherit object's log_level */
829 prog->log_level = obj->log_level;
830
831 prog->sec_name = strdup(sec_name);
832 if (!prog->sec_name)
833 goto errout;
834
835 prog->name = strdup(name);
836 if (!prog->name)
837 goto errout;
838
839 prog->insns = malloc(insn_data_sz);
840 if (!prog->insns)
841 goto errout;
842 memcpy(prog->insns, insn_data, insn_data_sz);
843
844 return 0;
845 errout:
846 pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
847 bpf_program__exit(prog);
848 return -ENOMEM;
849 }
850
851 static int
852 bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
853 const char *sec_name, int sec_idx)
854 {
855 Elf_Data *symbols = obj->efile.symbols;
856 struct bpf_program *prog, *progs;
857 void *data = sec_data->d_buf;
858 size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
859 int nr_progs, err, i;
860 const char *name;
861 Elf64_Sym *sym;
862
863 progs = obj->programs;
864 nr_progs = obj->nr_programs;
865 nr_syms = symbols->d_size / sizeof(Elf64_Sym);
866
867 for (i = 0; i < nr_syms; i++) {
868 sym = elf_sym_by_idx(obj, i);
869
870 if (sym->st_shndx != sec_idx)
871 continue;
872 if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
873 continue;
874
875 prog_sz = sym->st_size;
876 sec_off = sym->st_value;
877
878 name = elf_sym_str(obj, sym->st_name);
879 if (!name) {
880 pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
881 sec_name, sec_off);
882 return -LIBBPF_ERRNO__FORMAT;
883 }
884
885 if (sec_off + prog_sz > sec_sz) {
886 pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
887 sec_name, sec_off);
888 return -LIBBPF_ERRNO__FORMAT;
889 }
890
891 if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
892 pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
893 return -ENOTSUP;
894 }
895
896 pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
897 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
898
899 progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
900 if (!progs) {
901 /*
902 * In this case the original obj->programs
903 * is still valid, so no special treatment is needed in
904 * bpf_object__close().
905 */
906 pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
907 sec_name, name);
908 return -ENOMEM;
909 }
910 obj->programs = progs;
911
912 prog = &progs[nr_progs];
913
914 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
915 sec_off, data + sec_off, prog_sz);
916 if (err)
917 return err;
918
919 if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
920 prog->sym_global = true;
921
922 /* if function is a global/weak symbol, but has restricted
923 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
924 * as static to enable more permissive BPF verification mode
925 * with more outside context available to BPF verifier
926 */
927 if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
928 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
929 prog->mark_btf_static = true;
930
931 nr_progs++;
932 obj->nr_programs = nr_progs;
933 }
934
935 return 0;
936 }
937
938 static const struct btf_member *
939 find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
940 {
941 struct btf_member *m;
942 int i;
943
944 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
945 if (btf_member_bit_offset(t, i) == bit_offset)
946 return m;
947 }
948
949 return NULL;
950 }
951
952 static const struct btf_member *
953 find_member_by_name(const struct btf *btf, const struct btf_type *t,
954 const char *name)
955 {
956 struct btf_member *m;
957 int i;
958
959 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
960 if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
961 return m;
962 }
963
964 return NULL;
965 }
966
967 #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
968 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
969 const char *name, __u32 kind);
970
971 static int
972 find_struct_ops_kern_types(const struct btf *btf, const char *tname,
973 const struct btf_type **type, __u32 *type_id,
974 const struct btf_type **vtype, __u32 *vtype_id,
975 const struct btf_member **data_member)
976 {
977 const struct btf_type *kern_type, *kern_vtype;
978 const struct btf_member *kern_data_member;
979 __s32 kern_vtype_id, kern_type_id;
980 __u32 i;
981
982 kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
983 if (kern_type_id < 0) {
984 pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
985 tname);
986 return kern_type_id;
987 }
988 kern_type = btf__type_by_id(btf, kern_type_id);
989
990 /* Find the corresponding "map_value" type that will be used
991 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
992 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
993 * btf_vmlinux.
994 */
995 kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
996 tname, BTF_KIND_STRUCT);
997 if (kern_vtype_id < 0) {
998 pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
999 STRUCT_OPS_VALUE_PREFIX, tname);
1000 return kern_vtype_id;
1001 }
1002 kern_vtype = btf__type_by_id(btf, kern_vtype_id);
1003
1004 /* Find "struct tcp_congestion_ops" from
1005 * struct bpf_struct_ops_tcp_congestion_ops {
1006 * [ ... ]
1007 * struct tcp_congestion_ops data;
1008 * }
1009 */
1010 kern_data_member = btf_members(kern_vtype);
1011 for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
1012 if (kern_data_member->type == kern_type_id)
1013 break;
1014 }
1015 if (i == btf_vlen(kern_vtype)) {
1016 pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
1017 tname, STRUCT_OPS_VALUE_PREFIX, tname);
1018 return -EINVAL;
1019 }
1020
1021 *type = kern_type;
1022 *type_id = kern_type_id;
1023 *vtype = kern_vtype;
1024 *vtype_id = kern_vtype_id;
1025 *data_member = kern_data_member;
1026
1027 return 0;
1028 }
1029
1030 static bool bpf_map__is_struct_ops(const struct bpf_map *map)
1031 {
1032 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
1033 }
1034
1035 /* Init the map's fields that depend on kern_btf */
1036 static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
1037 const struct btf *btf,
1038 const struct btf *kern_btf)
1039 {
1040 const struct btf_member *member, *kern_member, *kern_data_member;
1041 const struct btf_type *type, *kern_type, *kern_vtype;
1042 __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
1043 struct bpf_struct_ops *st_ops;
1044 void *data, *kern_data;
1045 const char *tname;
1046 int err;
1047
1048 st_ops = map->st_ops;
1049 type = st_ops->type;
1050 tname = st_ops->tname;
1051 err = find_struct_ops_kern_types(kern_btf, tname,
1052 &kern_type, &kern_type_id,
1053 &kern_vtype, &kern_vtype_id,
1054 &kern_data_member);
1055 if (err)
1056 return err;
1057
1058 pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
1059 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
1060
1061 map->def.value_size = kern_vtype->size;
1062 map->btf_vmlinux_value_type_id = kern_vtype_id;
1063
1064 st_ops->kern_vdata = calloc(1, kern_vtype->size);
1065 if (!st_ops->kern_vdata)
1066 return -ENOMEM;
1067
1068 data = st_ops->data;
1069 kern_data_off = kern_data_member->offset / 8;
1070 kern_data = st_ops->kern_vdata + kern_data_off;
1071
1072 member = btf_members(type);
1073 for (i = 0; i < btf_vlen(type); i++, member++) {
1074 const struct btf_type *mtype, *kern_mtype;
1075 __u32 mtype_id, kern_mtype_id;
1076 void *mdata, *kern_mdata;
1077 __s64 msize, kern_msize;
1078 __u32 moff, kern_moff;
1079 __u32 kern_member_idx;
1080 const char *mname;
1081
1082 mname = btf__name_by_offset(btf, member->name_off);
1083 kern_member = find_member_by_name(kern_btf, kern_type, mname);
1084 if (!kern_member) {
1085 pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
1086 map->name, mname);
1087 return -ENOTSUP;
1088 }
1089
1090 kern_member_idx = kern_member - btf_members(kern_type);
1091 if (btf_member_bitfield_size(type, i) ||
1092 btf_member_bitfield_size(kern_type, kern_member_idx)) {
1093 pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
1094 map->name, mname);
1095 return -ENOTSUP;
1096 }
1097
1098 moff = member->offset / 8;
1099 kern_moff = kern_member->offset / 8;
1100
1101 mdata = data + moff;
1102 kern_mdata = kern_data + kern_moff;
1103
1104 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
1105 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
1106 &kern_mtype_id);
1107 if (BTF_INFO_KIND(mtype->info) !=
1108 BTF_INFO_KIND(kern_mtype->info)) {
1109 pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
1110 map->name, mname, BTF_INFO_KIND(mtype->info),
1111 BTF_INFO_KIND(kern_mtype->info));
1112 return -ENOTSUP;
1113 }
1114
1115 if (btf_is_ptr(mtype)) {
1116 struct bpf_program *prog;
1117
1118 prog = st_ops->progs[i];
1119 if (!prog)
1120 continue;
1121
1122 kern_mtype = skip_mods_and_typedefs(kern_btf,
1123 kern_mtype->type,
1124 &kern_mtype_id);
1125
1126 /* mtype->type must be a func_proto which was
1127 * guaranteed in bpf_object__collect_st_ops_relos(),
1128 * so only check kern_mtype for func_proto here.
1129 */
1130 if (!btf_is_func_proto(kern_mtype)) {
1131 pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
1132 map->name, mname);
1133 return -ENOTSUP;
1134 }
1135
1136 prog->attach_btf_id = kern_type_id;
1137 prog->expected_attach_type = kern_member_idx;
1138
1139 st_ops->kern_func_off[i] = kern_data_off + kern_moff;
1140
1141 pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
1142 map->name, mname, prog->name, moff,
1143 kern_moff);
1144
1145 continue;
1146 }
1147
1148 msize = btf__resolve_size(btf, mtype_id);
1149 kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
1150 if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
1151 pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
1152 map->name, mname, (ssize_t)msize,
1153 (ssize_t)kern_msize);
1154 return -ENOTSUP;
1155 }
1156
1157 pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
1158 map->name, mname, (unsigned int)msize,
1159 moff, kern_moff);
1160 memcpy(kern_mdata, mdata, msize);
1161 }
1162
1163 return 0;
1164 }
1165
1166 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
1167 {
1168 struct bpf_map *map;
1169 size_t i;
1170 int err;
1171
1172 for (i = 0; i < obj->nr_maps; i++) {
1173 map = &obj->maps[i];
1174
1175 if (!bpf_map__is_struct_ops(map))
1176 continue;
1177
1178 err = bpf_map__init_kern_struct_ops(map, obj->btf,
1179 obj->btf_vmlinux);
1180 if (err)
1181 return err;
1182 }
1183
1184 return 0;
1185 }
1186
1187 static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
1188 int shndx, Elf_Data *data, __u32 map_flags)
1189 {
1190 const struct btf_type *type, *datasec;
1191 const struct btf_var_secinfo *vsi;
1192 struct bpf_struct_ops *st_ops;
1193 const char *tname, *var_name;
1194 __s32 type_id, datasec_id;
1195 const struct btf *btf;
1196 struct bpf_map *map;
1197 __u32 i;
1198
1199 if (shndx == -1)
1200 return 0;
1201
1202 btf = obj->btf;
1203 datasec_id = btf__find_by_name_kind(btf, sec_name,
1204 BTF_KIND_DATASEC);
1205 if (datasec_id < 0) {
1206 pr_warn("struct_ops init: DATASEC %s not found\n",
1207 sec_name);
1208 return -EINVAL;
1209 }
1210
1211 datasec = btf__type_by_id(btf, datasec_id);
1212 vsi = btf_var_secinfos(datasec);
1213 for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
1214 type = btf__type_by_id(obj->btf, vsi->type);
1215 var_name = btf__name_by_offset(obj->btf, type->name_off);
1216
1217 type_id = btf__resolve_type(obj->btf, vsi->type);
1218 if (type_id < 0) {
1219 pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
1220 vsi->type, sec_name);
1221 return -EINVAL;
1222 }
1223
1224 type = btf__type_by_id(obj->btf, type_id);
1225 tname = btf__name_by_offset(obj->btf, type->name_off);
1226 if (!tname[0]) {
1227 pr_warn("struct_ops init: anonymous type is not supported\n");
1228 return -ENOTSUP;
1229 }
1230 if (!btf_is_struct(type)) {
1231 pr_warn("struct_ops init: %s is not a struct\n", tname);
1232 return -EINVAL;
1233 }
1234
1235 map = bpf_object__add_map(obj);
1236 if (IS_ERR(map))
1237 return PTR_ERR(map);
1238
1239 map->sec_idx = shndx;
1240 map->sec_offset = vsi->offset;
1241 map->name = strdup(var_name);
1242 if (!map->name)
1243 return -ENOMEM;
1244
1245 map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1246 map->def.key_size = sizeof(int);
1247 map->def.value_size = type->size;
1248 map->def.max_entries = 1;
1249 map->def.map_flags = map_flags;
1250
1251 map->st_ops = calloc(1, sizeof(*map->st_ops));
1252 if (!map->st_ops)
1253 return -ENOMEM;
1254 st_ops = map->st_ops;
1255 st_ops->data = malloc(type->size);
1256 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1257 st_ops->kern_func_off = malloc(btf_vlen(type) *
1258 sizeof(*st_ops->kern_func_off));
1259 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1260 return -ENOMEM;
1261
1262 if (vsi->offset + type->size > data->d_size) {
1263 pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
1264 var_name, sec_name);
1265 return -EINVAL;
1266 }
1267
1268 memcpy(st_ops->data,
1269 data->d_buf + vsi->offset,
1270 type->size);
1271 st_ops->tname = tname;
1272 st_ops->type = type;
1273 st_ops->type_id = type_id;
1274
1275 pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
1276 tname, type_id, var_name, vsi->offset);
1277 }
1278
1279 return 0;
1280 }
1281
1282 static int bpf_object_init_struct_ops(struct bpf_object *obj)
1283 {
1284 int err;
1285
1286 err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
1287 obj->efile.st_ops_data, 0);
1288 err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
1289 obj->efile.st_ops_link_shndx,
1290 obj->efile.st_ops_link_data,
1291 BPF_F_LINK);
1292 return err;
1293 }
1294
1295 static struct bpf_object *bpf_object__new(const char *path,
1296 const void *obj_buf,
1297 size_t obj_buf_sz,
1298 const char *obj_name)
1299 {
1300 struct bpf_object *obj;
1301 char *end;
1302
1303 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1304 if (!obj) {
1305 pr_warn("alloc memory failed for %s\n", path);
1306 return ERR_PTR(-ENOMEM);
1307 }
1308
1309 strcpy(obj->path, path);
1310 if (obj_name) {
1311 libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
1312 } else {
1313 /* Using basename() GNU version which doesn't modify arg. */
1314 libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
1315 end = strchr(obj->name, '.');
1316 if (end)
1317 *end = 0;
1318 }
1319
1320 obj->efile.fd = -1;
1321 /*
1322 * The caller of this function should also call
1323 * bpf_object__elf_finish() after data collection to return
1324 * obj_buf to the user. If not, we would have to duplicate the
1325 * buffer to avoid the user freeing it before ELF processing finishes.
1326 */
1327 obj->efile.obj_buf = obj_buf;
1328 obj->efile.obj_buf_sz = obj_buf_sz;
1329 obj->efile.btf_maps_shndx = -1;
1330 obj->efile.st_ops_shndx = -1;
1331 obj->efile.st_ops_link_shndx = -1;
1332 obj->kconfig_map_idx = -1;
1333
1334 obj->kern_version = get_kernel_version();
1335 obj->loaded = false;
1336
1337 return obj;
1338 }
1339
1340 static void bpf_object__elf_finish(struct bpf_object *obj)
1341 {
1342 if (!obj->efile.elf)
1343 return;
1344 #if defined HAVE_LIBELF
1345 elf_end(obj->efile.elf);
1346 #elif defined HAVE_ELFIO
1347 if (obj->efile.shstring) {
1348 elfio_string_section_accessor_delete(obj->efile.shstring);
1349 }
1350 if (obj->efile.strstring) {
1351 elfio_string_section_accessor_delete(obj->efile.strstring);
1352 }
1353 elfio_delete(obj->efile.elf);
1354 #endif
1355 obj->efile.elf = NULL;
1356 obj->efile.symbols = NULL;
1357 obj->efile.st_ops_data = NULL;
1358 obj->efile.st_ops_link_data = NULL;
1359
1360 zfree(&obj->efile.secs);
1361 obj->efile.sec_cnt = 0;
1362 zclose(obj->efile.fd);
1363 obj->efile.obj_buf = NULL;
1364 obj->efile.obj_buf_sz = 0;
1365 }
1366
1367 static int bpf_object__elf_init(struct bpf_object *obj)
1368 {
1369 Elf64_Ehdr *ehdr;
1370 int err = 0;
1371 #ifdef HAVE_LIBELF
1372 Elf *elf;
1373 #elif defined HAVE_ELFIO
1374 pelfio_t elf;
1375 #endif
1376
1377 if (obj->efile.elf) {
1378 pr_warn("elf: init internal error\n");
1379 return -LIBBPF_ERRNO__LIBELF;
1380 }
1381
1382 if (obj->efile.obj_buf_sz > 0) {
1383 /* obj_buf should have been validated by bpf_object__open_mem(). */
1384 #ifdef HAVE_LIBELF
1385 elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1386 #elif defined HAVE_ELFIO
1387 char memfd_path[PATH_MAX] = {0};
1388 elf = elfio_new();
1389 int fdm = syscall(__NR_memfd_create, "bpfelf", MFD_CLOEXEC);
1390 ftruncate(fdm, obj->efile.obj_buf_sz);
1391 write(fdm, (char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1392 snprintf(memfd_path, PATH_MAX, "/proc/self/fd/%d", fdm);
1393 elfio_load(elf, memfd_path);
1394 #endif
1395 } else {
1396 obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
1397 if (obj->efile.fd < 0) {
1398 char errmsg[STRERR_BUFSIZE], *cp;
1399
1400 err = -errno;
1401 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
1402 pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
1403 return err;
1404 }
1405 #ifdef HAVE_LIBELF
1406 elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
#elif defined HAVE_ELFIO
/* assumed fix: mirror the memfd path above and load via ELFIO from the
 * object's path, so 'elf' is never left uninitialized on this branch
 */
elf = elfio_new();
elfio_load(elf, obj->path);
1407 #endif
1408 }
1409
1410 if (!elf) {
1411 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1412 err = -LIBBPF_ERRNO__LIBELF;
1413 goto errout;
1414 }
1415
1416 obj->efile.elf = elf;
1417 #ifdef HAVE_LIBELF
1418 if (elf_kind(elf) != ELF_K_ELF) {
1419 err = -LIBBPF_ERRNO__FORMAT;
1420 pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
1421 goto errout;
1422 }
1423
1424 if (gelf_getclass(elf) != ELFCLASS64) {
1425 #elif defined HAVE_ELFIO
1426 if (elfio_get_class(elf) != ELFCLASS64) {
1427 #endif
1428 err = -LIBBPF_ERRNO__FORMAT;
1429 pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
1430 goto errout;
1431 }
1432 #ifdef HAVE_LIBELF
1433 obj->efile.ehdr = ehdr = elf64_getehdr(elf);
1434 #elif defined HAVE_ELFIO
1435 obj->efile.ehdr = ehdr = (Elf64_Ehdr*)obj->efile.obj_buf;
1436 #endif
1437 if (!obj->efile.ehdr) {
1438 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1439 err = -LIBBPF_ERRNO__FORMAT;
1440 goto errout;
1441 }
1442
1443 #ifdef HAVE_LIBELF
1444 if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
1445 pr_warn("elf: failed to get section names section index for %s: %s\n",
1446 obj->path, elf_errmsg(-1));
1447 err = -LIBBPF_ERRNO__FORMAT;
1448 goto errout;
1449 }
1450
1451 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
1452 if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
1453 pr_warn("elf: failed to get section names strings from %s: %s\n",
1454 obj->path, elf_errmsg(-1));
1455 err = -LIBBPF_ERRNO__FORMAT;
1456 goto errout;
1457 }
1458 #elif defined HAVE_ELFIO
1459 obj->efile.shstrndx = elfio_get_section_name_str_index(elf);
1460 #endif
1461 /* Old LLVM set e_machine to EM_NONE */
1462 if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
1463 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1464 err = -LIBBPF_ERRNO__FORMAT;
1465 goto errout;
1466 }
1467
1468 return 0;
1469 errout:
1470 bpf_object__elf_finish(obj);
1471 return err;
1472 }
1473
1474 static int bpf_object__check_endianness(struct bpf_object *obj)
1475 {
1476 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1477 if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
1478 return 0;
1479 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1480 if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
1481 return 0;
1482 #else
1483 # error "Unrecognized __BYTE_ORDER__"
1484 #endif
1485 pr_warn("elf: endianness mismatch in %s.\n", obj->path);
1486 return -LIBBPF_ERRNO__ENDIAN;
1487 }
1488
1489 static int
1490 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1491 {
1492 if (!data) {
1493 pr_warn("invalid license section in %s\n", obj->path);
1494 return -LIBBPF_ERRNO__FORMAT;
1495 }
1496 /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
1497 * go over allowed ELF data section buffer
1498 */
1499 libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
1500 pr_debug("license of %s is %s\n", obj->path, obj->license);
1501 return 0;
1502 }
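/* The data parsed above comes from the BPF-side convention of declaring
 *
 *	char LICENSE[] SEC("license") = "Dual BSD/GPL";
 *
 * which places the string into the "license" ELF section that ends up
 * in obj->license here.
 */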
1503
1504 static int
1505 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1506 {
1507 __u32 kver;
1508
1509 if (!data || size != sizeof(kver)) {
1510 pr_warn("invalid kver section in %s\n", obj->path);
1511 return -LIBBPF_ERRNO__FORMAT;
1512 }
1513 memcpy(&kver, data, sizeof(kver));
1514 obj->kern_version = kver;
1515 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1516 return 0;
1517 }
1518
1519 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1520 {
1521 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1522 type == BPF_MAP_TYPE_HASH_OF_MAPS)
1523 return true;
1524 return false;
1525 }
1526
1527 static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
1528 {
1529 Elf_Data *data;
1530 #ifdef HAVE_LIBELF
1531 Elf_Scn *scn;
1532 #endif
1533
1534 if (!name)
1535 return -EINVAL;
1536 #if defined HAVE_LIBELF
1537 scn = elf_sec_by_name(obj, name);
1538 data = elf_sec_data(obj, scn);
1539 #elif defined HAVE_ELFIO
1540 Elf_Data realdata;
1541 data = &realdata;
1542 data = elf_sec_data_by_name(obj, name, data);
1543 #endif
1544 if (data) {
1545 *size = data->d_size;
1546 return 0; /* found it */
1547 }
1548
1549 return -ENOENT;
1550 }
1551
1552 static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
1553 {
1554 Elf_Data *symbols = obj->efile.symbols;
1555 const char *sname;
1556 size_t si;
1557
1558 for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
1559 Elf64_Sym *sym = elf_sym_by_idx(obj, si);
1560
1561 if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
1562 continue;
1563
1564 if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
1565 ELF64_ST_BIND(sym->st_info) != STB_WEAK)
1566 continue;
1567
1568 sname = elf_sym_str(obj, sym->st_name);
1569 if (!sname) {
1570 pr_warn("failed to get sym name string for var %s\n", name);
1571 return ERR_PTR(-EIO);
1572 }
1573 if (strcmp(name, sname) == 0)
1574 return sym;
1575 }
1576
1577 return ERR_PTR(-ENOENT);
1578 }
1579
1580 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1581 {
1582 struct bpf_map *map;
1583 int err;
1584
1585 err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1586 sizeof(*obj->maps), obj->nr_maps + 1);
1587 if (err)
1588 return ERR_PTR(err);
1589
1590 map = &obj->maps[obj->nr_maps++];
1591 map->obj = obj;
1592 map->fd = -1;
1593 map->inner_map_fd = -1;
1594 map->autocreate = true;
1595
1596 return map;
1597 }
1598
1599 static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
1600 {
1601 const long page_sz = sysconf(_SC_PAGE_SIZE);
1602 size_t map_sz;
1603
1604 map_sz = (size_t)roundup(value_sz, 8) * max_entries;
1605 map_sz = roundup(map_sz, page_sz);
1606 return map_sz;
1607 }
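/* Worked example: value_sz = 1000 and max_entries = 3 on a 4096-byte
 * page system gives roundup(1000, 8) * 3 = 3000 bytes, which rounds up
 * to a single page, i.e. 4096 bytes.
 */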
1608
1609 static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
1610 {
1611 void *mmaped;
1612
1613 if (!map->mmaped)
1614 return -EINVAL;
1615
1616 if (old_sz == new_sz)
1617 return 0;
1618
1619 mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1620 if (mmaped == MAP_FAILED)
1621 return -errno;
1622
1623 memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
1624 munmap(map->mmaped, old_sz);
1625 map->mmaped = mmaped;
1626 return 0;
1627 }
1628
1629 static char *internal_map_name(struct bpf_object *obj, const char *real_name)
1630 {
1631 char map_name[BPF_OBJ_NAME_LEN], *p;
1632 int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
1633
1634 /* This is one of the more confusing parts of libbpf for various
1635 * reasons, some of which are historical. The original idea for naming
1636 * internal names was to include as much of BPF object name prefix as
1637 * possible, so that it can be distinguished from similar internal
1638 * maps of a different BPF object.
1639 * As an example, let's say we have bpf_object named 'my_object_name'
1640 * and internal map corresponding to '.rodata' ELF section. The final
1641 * map name advertised to user and to the kernel will be
1642 * 'my_objec.rodata', taking first 8 characters of object name and
1643 * entire 7 characters of '.rodata'.
1644 * Somewhat confusingly, if internal map ELF section name is shorter
1645 * than 7 characters, e.g., '.bss', we still reserve 7 characters
1646 * for the suffix, even though we only have 4 actual characters, and
1647 * resulting map will be called 'my_objec.bss', not even using all 15
1648 * characters allowed by the kernel. Oh well, at least the truncated
1649 * object name is somewhat consistent in this case. But if the map
1650 * name is '.kconfig', we'll still have entirety of '.kconfig' added
1651 * (8 chars) and thus will be left with only first 7 characters of the
1652 * object name ('my_obje'). Happy guessing, user, that the final map
1653 * name will be "my_obje.kconfig".
1654 * Now, with libbpf starting to support arbitrarily named .rodata.*
1655 * and .data.* data sections, it's possible that ELF section name is
1656 * longer than allowed 15 chars, so we now need to be careful to take
1657 * only up to 15 first characters of ELF name, taking no BPF object
1658 * name characters at all. So '.rodata.abracadabra' will result in
1659 * '.rodata.abracad' kernel and user-visible name.
1660 * We need to keep this convoluted logic intact for .data, .bss and
1661 * .rodata maps, but for new custom .data.custom and .rodata.custom
1662 * maps we use their ELF names as is, not prepending bpf_object name
1663 * in front. We still need to truncate them to 15 characters for the
1664 * kernel. Full name can be recovered for such maps by using DATASEC
1665 * BTF type associated with such map's value type, though.
1666 */
1667 if (sfx_len >= BPF_OBJ_NAME_LEN)
1668 sfx_len = BPF_OBJ_NAME_LEN - 1;
1669
1670 /* if there are two or more dots in map name, it's a custom dot map */
1671 if (strchr(real_name + 1, '.') != NULL)
1672 pfx_len = 0;
1673 else
1674 pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
1675
1676 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1677 sfx_len, real_name);
1678
1679 /* sanitise map name to characters allowed by kernel */
1680 for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1681 if (!isalnum(*p) && *p != '_' && *p != '.')
1682 *p = '_';
1683
1684 return strdup(map_name);
1685 }
1686
1687 static int
1688 map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
1689
1690 /* An internal BPF map is mmap()'able only if at least one of the corresponding
1691 * DATASEC's VARs is to be exposed through the BPF skeleton. I.e., it's a GLOBAL
1692 * variable and it's not marked as __hidden (which turns it into, effectively,
1693 * a STATIC variable).
1694 */
1695 static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1696 {
1697 const struct btf_type *t, *vt;
1698 struct btf_var_secinfo *vsi;
1699 int i, n;
1700
1701 if (!map->btf_value_type_id)
1702 return false;
1703
1704 t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1705 if (!btf_is_datasec(t))
1706 return false;
1707
1708 vsi = btf_var_secinfos(t);
1709 for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
1710 vt = btf__type_by_id(obj->btf, vsi->type);
1711 if (!btf_is_var(vt))
1712 continue;
1713
1714 if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1715 return true;
1716 }
1717
1718 return false;
1719 }
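/* BPF-side declarations and how the check above treats them (sketch;
 * __hidden is bpf_helpers.h's __attribute__((visibility("hidden")))):
 *
 *	int exposed_cnt;		// GLOBAL var -> map is mmap()'able
 *	static int private_cnt;		// STATIC var -> not exposed
 *	__hidden int internal_cnt;	// becomes effectively STATIC
 */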
1720
1721 static int
1722 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1723 const char *real_name, int sec_idx, void *data, size_t data_sz)
1724 {
1725 struct bpf_map_def *def;
1726 struct bpf_map *map;
1727 size_t mmap_sz;
1728 int err;
1729
1730 map = bpf_object__add_map(obj);
1731 if (IS_ERR(map))
1732 return PTR_ERR(map);
1733
1734 map->libbpf_type = type;
1735 map->sec_idx = sec_idx;
1736 map->sec_offset = 0;
1737 map->real_name = strdup(real_name);
1738 map->name = internal_map_name(obj, real_name);
1739 if (!map->real_name || !map->name) {
1740 zfree(&map->real_name);
1741 zfree(&map->name);
1742 return -ENOMEM;
1743 }
1744
1745 def = &map->def;
1746 def->type = BPF_MAP_TYPE_ARRAY;
1747 def->key_size = sizeof(int);
1748 def->value_size = data_sz;
1749 def->max_entries = 1;
1750 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1751 ? BPF_F_RDONLY_PROG : 0;
1752
1753 /* failures are fine because of maps like .rodata.str1.1 */
1754 (void) map_fill_btf_type_info(obj, map);
1755
1756 if (map_is_mmapable(obj, map))
1757 def->map_flags |= BPF_F_MMAPABLE;
1758
1759 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1760 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1761
1762 mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
1763 map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1764 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1765 if (map->mmaped == MAP_FAILED) {
1766 err = -errno;
1767 map->mmaped = NULL;
1768 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1769 map->name, err);
1770 zfree(&map->real_name);
1771 zfree(&map->name);
1772 return err;
1773 }
1774
1775 if (data)
1776 memcpy(map->mmaped, data, data_sz);
1777
1778 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1779 return 0;
1780 }
1781
1782 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1783 {
1784 struct elf_sec_desc *sec_desc;
1785 const char *sec_name;
1786 int err = 0, sec_idx;
1787
1788 /*
1789 * Populate obj->maps with libbpf internal maps.
1790 */
1791 for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1792 sec_desc = &obj->efile.secs[sec_idx];
1793
1794 /* Skip recognized sections with size 0. */
1795 if (!sec_desc->data || sec_desc->data->d_size == 0)
1796 continue;
1797
1798 switch (sec_desc->sec_type) {
1799 case SEC_DATA:
1800 #if defined HAVE_LIBELF
1801 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1802 #elif defined HAVE_ELFIO
1803 sec_name = elf_sec_name_by_idx(obj, sec_idx);
1804 #endif
1805 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1806 sec_name, sec_idx,
1807 sec_desc->data->d_buf,
1808 sec_desc->data->d_size);
1809 break;
1810 case SEC_RODATA:
1811 obj->has_rodata = true;
1812 #if defined HAVE_LIBELF
1813 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1814 #elif defined HAVE_ELFIO
1815 sec_name = elf_sec_name_by_idx(obj, sec_idx);
1816 #endif
1817 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1818 sec_name, sec_idx,
1819 sec_desc->data->d_buf,
1820 sec_desc->data->d_size);
1821 break;
1822 case SEC_BSS:
1823 #if defined HAVE_LIBELF
1824 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1825 #elif defined HAVE_ELFIO
1826 sec_name = elf_sec_name_by_idx(obj, sec_idx);
1827 #endif
1828 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1829 sec_name, sec_idx,
1830 NULL,
1831 sec_desc->data->d_size);
1832 break;
1833 default:
1834 /* skip */
1835 break;
1836 }
1837 if (err)
1838 return err;
1839 }
1840 return 0;
1841 }
1842
1844 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1845 const void *name)
1846 {
1847 int i;
1848
1849 for (i = 0; i < obj->nr_extern; i++) {
1850 if (strcmp(obj->externs[i].name, name) == 0)
1851 return &obj->externs[i];
1852 }
1853 return NULL;
1854 }
1855
1856 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1857 char value)
1858 {
1859 switch (ext->kcfg.type) {
1860 case KCFG_BOOL:
1861 if (value == 'm') {
1862 pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
1863 ext->name, value);
1864 return -EINVAL;
1865 }
1866 *(bool *)ext_val = value == 'y' ? true : false;
1867 break;
1868 case KCFG_TRISTATE:
1869 if (value == 'y')
1870 *(enum libbpf_tristate *)ext_val = TRI_YES;
1871 else if (value == 'm')
1872 *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1873 else /* value == 'n' */
1874 *(enum libbpf_tristate *)ext_val = TRI_NO;
1875 break;
1876 case KCFG_CHAR:
1877 *(char *)ext_val = value;
1878 break;
1879 case KCFG_UNKNOWN:
1880 case KCFG_INT:
1881 case KCFG_CHAR_ARR:
1882 default:
1883 pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
1884 ext->name, value);
1885 return -EINVAL;
1886 }
1887 ext->is_set = true;
1888 return 0;
1889 }
1890
1891 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1892 const char *value)
1893 {
1894 size_t len;
1895
1896 if (ext->kcfg.type != KCFG_CHAR_ARR) {
1897 pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
1898 ext->name, value);
1899 return -EINVAL;
1900 }
1901
1902 len = strlen(value);
1903 if (len < 2 || value[len - 1] != '"') {
1904 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
1905 ext->name, value);
1906 return -EINVAL;
1907 }
1908
1909 /* strip quotes */
1910 len -= 2;
1911 if (len >= ext->kcfg.sz) {
1912 pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
1913 ext->name, value, len, ext->kcfg.sz - 1);
1914 len = ext->kcfg.sz - 1;
1915 }
1916 memcpy(ext_val, value + 1, len);
1917 ext_val[len] = '\0';
1918 ext->is_set = true;
1919 return 0;
1920 }
1921
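/* Usage sketch: strtoull() is called with base 0, so it auto-detects the
 * radix of Kconfig values:
 *
 *   __u64 v;
 *   parse_u64("250", &v);   // v == 250
 *   parse_u64("0x10", &v);  // v == 16
 *   parse_u64("16K", &v);   // -EINVAL: trailing 'K' fails the *value_end check
 */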
1922 static int parse_u64(const char *value, __u64 *res)
1923 {
1924 char *value_end;
1925 int err;
1926
1927 errno = 0;
1928 *res = strtoull(value, &value_end, 0);
1929 if (errno) {
1930 err = -errno;
1931 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
1932 return err;
1933 }
1934 if (*value_end) {
1935 pr_warn("failed to parse '%s' as integer completely\n", value);
1936 return -EINVAL;
1937 }
1938 return 0;
1939 }
1940
1941 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
1942 {
1943 int bit_sz = ext->kcfg.sz * 8;
1944
1945 if (ext->kcfg.sz == 8)
1946 return true;
1947
1948 /* Validate that a value stored in u64 fits in an integer of
1949 * `ext->kcfg.sz` bytes without any loss of information. If the target
1950 * integer is signed, we rely on the following limits of an integer type
1951 * of Y bits and the subsequent transformation:
1952 *
1953 * -2^(Y-1) <= X <= 2^(Y-1) - 1
1954 * 0 <= X + 2^(Y-1) <= 2^Y - 1
1955 * 0 <= X + 2^(Y-1) < 2^Y
1956 *
1957 * For an unsigned target integer, check that the upper (64 - Y) bits
1958 * are all zero.
1959 */
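/* Worked example: for a signed 1-byte target (bit_sz == 8) exactly
 * [-128, 127] is accepted: v == 0xffffffffffffff80 (-128) gives
 * v + 0x80 == 0 < 0x100, while v == 128 gives 0x100 and is rejected.
 * For an unsigned 1-byte target, any v with bits set above bit 7 fails
 * the (v >> 8) == 0 check.
 */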
1960 if (ext->kcfg.is_signed)
1961 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
1962 else
1963 return (v >> bit_sz) == 0;
1964 }
1965
1966 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
1967 __u64 value)
1968 {
1969 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
1970 ext->kcfg.type != KCFG_BOOL) {
1971 pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
1972 ext->name, (unsigned long long)value);
1973 return -EINVAL;
1974 }
1975 if (ext->kcfg.type == KCFG_BOOL && value > 1) {
1976 pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
1977 ext->name, (unsigned long long)value);
1978 return -EINVAL;
1980 }
1981 if (!is_kcfg_value_in_range(ext, value)) {
1982 pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
1983 ext->name, (unsigned long long)value, ext->kcfg.sz);
1984 return -ERANGE;
1985 }
1986 switch (ext->kcfg.sz) {
1987 case 1:
1988 *(__u8 *)ext_val = value;
1989 break;
1990 case 2:
1991 *(__u16 *)ext_val = value;
1992 break;
1993 case 4:
1994 *(__u32 *)ext_val = value;
1995 break;
1996 case 8:
1997 *(__u64 *)ext_val = value;
1998 break;
1999 default:
2000 return -EINVAL;
2001 }
2002 ext->is_set = true;
2003 return 0;
2004 }
2005
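/* Examples of Kconfig lines handled below (a sketch of typical values):
 *
 *   CONFIG_BPF=y                -> set_kcfg_value_tri()
 *   CONFIG_HZ=250               -> parse_u64() + set_kcfg_value_num()
 *   CONFIG_LOCALVERSION="-foo"  -> set_kcfg_value_str()
 *
 * Anything not starting with "CONFIG_" (comments, blank lines) is skipped.
 */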
2006 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
2007 char *buf, void *data)
2008 {
2009 struct extern_desc *ext;
2010 char *sep, *value;
2011 int len, err = 0;
2012 void *ext_val;
2013 __u64 num;
2014
2015 if (!str_has_pfx(buf, "CONFIG_"))
2016 return 0;
2017
2018 sep = strchr(buf, '=');
2019 if (!sep) {
2020 pr_warn("failed to parse '%s': no separator\n", buf);
2021 return -EINVAL;
2022 }
2023
2024 /* Trim ending '\n' */
2025 len = strlen(buf);
2026 if (buf[len - 1] == '\n')
2027 buf[len - 1] = '\0';
2028 /* Split on '=' and ensure that a value is present. */
2029 *sep = '\0';
2030 if (!sep[1]) {
2031 *sep = '=';
2032 pr_warn("failed to parse '%s': no value\n", buf);
2033 return -EINVAL;
2034 }
2035
2036 ext = find_extern_by_name(obj, buf);
2037 if (!ext || ext->is_set)
2038 return 0;
2039
2040 ext_val = data + ext->kcfg.data_off;
2041 value = sep + 1;
2042
2043 switch (*value) {
2044 case 'y': case 'n': case 'm':
2045 err = set_kcfg_value_tri(ext, ext_val, *value);
2046 break;
2047 case '"':
2048 err = set_kcfg_value_str(ext, ext_val, value);
2049 break;
2050 default:
2051 /* assume integer */
2052 err = parse_u64(value, &num);
2053 if (err) {
2054 pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
2055 return err;
2056 }
2057 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
2058 pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
2059 return -EINVAL;
2060 }
2061 err = set_kcfg_value_num(ext, ext_val, num);
2062 break;
2063 }
2064 if (err)
2065 return err;
2066 pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
2067 return 0;
2068 }
2069
2070 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
2071 {
2072 char buf[PATH_MAX];
2073 struct utsname uts;
2074 int len, err = 0;
2075 gzFile file;
2076
2077 uname(&uts);
2078 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
2079 if (len < 0)
2080 return -EINVAL;
2081 else if (len >= PATH_MAX)
2082 return -ENAMETOOLONG;
2083
2084 /* gzopen also accepts uncompressed files. */
2085 file = gzopen(buf, "re");
2086 if (!file)
2087 file = gzopen("/proc/config.gz", "re");
2088
2089 if (!file) {
2090 pr_warn("failed to open system Kconfig\n");
2091 return -ENOENT;
2092 }
2093
2094 while (gzgets(file, buf, sizeof(buf))) {
2095 err = bpf_object__process_kconfig_line(obj, buf, data);
2096 if (err) {
2097 pr_warn("error parsing system Kconfig line '%s': %d\n",
2098 buf, err);
2099 goto out;
2100 }
2101 }
2102
2103 out:
2104 gzclose(file);
2105 return err;
2106 }
2107
2108 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
2109 const char *config, void *data)
2110 {
2111 char buf[PATH_MAX];
2112 int err = 0;
2113 FILE *file;
2114
2115 file = fmemopen((void *)config, strlen(config), "r");
2116 if (!file) {
2117 err = -errno;
2118 pr_warn("failed to open in-memory Kconfig: %d\n", err);
2119 return err;
2120 }
2121
2122 while (fgets(buf, sizeof(buf), file)) {
2123 err = bpf_object__process_kconfig_line(obj, buf, data);
2124 if (err) {
2125 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
2126 buf, err);
2127 break;
2128 }
2129 }
2130
2131 fclose(file);
2132 return err;
2133 }
2134
2135 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
2136 {
2137 struct extern_desc *last_ext = NULL, *ext;
2138 size_t map_sz;
2139 int i, err;
2140
2141 for (i = 0; i < obj->nr_extern; i++) {
2142 ext = &obj->externs[i];
2143 if (ext->type == EXT_KCFG)
2144 last_ext = ext;
2145 }
2146
2147 if (!last_ext)
2148 return 0;
2149
2150 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
2151 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
2152 ".kconfig", obj->efile.symbols_shndx,
2153 NULL, map_sz);
2154 if (err)
2155 return err;
2156
2157 obj->kconfig_map_idx = obj->nr_maps - 1;
2158
2159 return 0;
2160 }
2161
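/* E.g., given 'typedef const volatile int cvi_t;', resolving a cvi_t type ID
 * walks TYPEDEF -> CONST -> VOLATILE and returns the underlying INT type,
 * with *res_id (if provided) set to that INT's type ID.
 */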
2162 const struct btf_type *
2163 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
2164 {
2165 const struct btf_type *t = btf__type_by_id(btf, id);
2166
2167 if (res_id)
2168 *res_id = id;
2169
2170 while (btf_is_mod(t) || btf_is_typedef(t)) {
2171 if (res_id)
2172 *res_id = t->type;
2173 t = btf__type_by_id(btf, t->type);
2174 }
2175
2176 return t;
2177 }
2178
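/* E.g., for an 'int (*handler)(int);' variable, resolve_func_ptr() on the
 * pointer's type ID skips mods/typedefs, confirms the PTR, and returns the
 * 'int (int)' FUNC_PROTO; for any non-function pointer it returns NULL.
 */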
2179 static const struct btf_type *
2180 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2181 {
2182 const struct btf_type *t;
2183
2184 t = skip_mods_and_typedefs(btf, id, NULL);
2185 if (!btf_is_ptr(t))
2186 return NULL;
2187
2188 t = skip_mods_and_typedefs(btf, t->type, res_id);
2189
2190 return btf_is_func_proto(t) ? t : NULL;
2191 }
2192
2193 static const char *__btf_kind_str(__u16 kind)
2194 {
2195 switch (kind) {
2196 case BTF_KIND_UNKN: return "void";
2197 case BTF_KIND_INT: return "int";
2198 case BTF_KIND_PTR: return "ptr";
2199 case BTF_KIND_ARRAY: return "array";
2200 case BTF_KIND_STRUCT: return "struct";
2201 case BTF_KIND_UNION: return "union";
2202 case BTF_KIND_ENUM: return "enum";
2203 case BTF_KIND_FWD: return "fwd";
2204 case BTF_KIND_TYPEDEF: return "typedef";
2205 case BTF_KIND_VOLATILE: return "volatile";
2206 case BTF_KIND_CONST: return "const";
2207 case BTF_KIND_RESTRICT: return "restrict";
2208 case BTF_KIND_FUNC: return "func";
2209 case BTF_KIND_FUNC_PROTO: return "func_proto";
2210 case BTF_KIND_VAR: return "var";
2211 case BTF_KIND_DATASEC: return "datasec";
2212 case BTF_KIND_FLOAT: return "float";
2213 case BTF_KIND_DECL_TAG: return "decl_tag";
2214 case BTF_KIND_TYPE_TAG: return "type_tag";
2215 case BTF_KIND_ENUM64: return "enum64";
2216 default: return "unknown";
2217 }
2218 }
2219
2220 const char *btf_kind_str(const struct btf_type *t)
2221 {
2222 return __btf_kind_str(btf_kind(t));
2223 }
2224
2225 /*
2226 * Fetch an integer attribute of a BTF map definition. Such attributes are
2227 * represented using a pointer to an array, in which the dimensionality of the
2228 * array encodes the specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2229 * encodes the `type => BPF_MAP_TYPE_ARRAY` key/value pair completely within the
2230 * BTF type definition, while using only sizeof(void *) of space in the ELF data section.
2231 */
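/* For reference, a typical BTF-defined map that relies on this encoding
 * (a sketch; 'my_map' and 'struct my_val' are hypothetical):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_HASH);  // int (*type)[BPF_MAP_TYPE_HASH]
 *           __uint(max_entries, 1024);        // nelems == 1024 recovered below
 *           __type(key, __u32);
 *           __type(value, struct my_val);
 *   } my_map SEC(".maps");
 *
 * __uint()/__type() are the bpf_helpers.h wrappers around this encoding.
 */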
2232 static bool get_map_field_int(const char *map_name, const struct btf *btf,
2233 const struct btf_member *m, __u32 *res)
2234 {
2235 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2236 const char *name = btf__name_by_offset(btf, m->name_off);
2237 const struct btf_array *arr_info;
2238 const struct btf_type *arr_t;
2239
2240 if (!btf_is_ptr(t)) {
2241 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2242 map_name, name, btf_kind_str(t));
2243 return false;
2244 }
2245
2246 arr_t = btf__type_by_id(btf, t->type);
2247 if (!arr_t) {
2248 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2249 map_name, name, t->type);
2250 return false;
2251 }
2252 if (!btf_is_array(arr_t)) {
2253 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2254 map_name, name, btf_kind_str(arr_t));
2255 return false;
2256 }
2257 arr_info = btf_array(arr_t);
2258 *res = arr_info->nelems;
2259 return true;
2260 }
2261
2262 static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
2263 {
2264 int len;
2265
2266 len = snprintf(buf, buf_sz, "%s/%s", path, name);
2267 if (len < 0)
2268 return -EINVAL;
2269 if (len >= buf_sz)
2270 return -ENAMETOOLONG;
2271
2272 return 0;
2273 }
2274
2275 static int build_map_pin_path(struct bpf_map *map, const char *path)
2276 {
2277 char buf[PATH_MAX];
2278 int err;
2279
2280 if (!path)
2281 path = "/sys/fs/bpf";
2282
2283 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
2284 if (err)
2285 return err;
2286
2287 return bpf_map__set_pin_path(map, buf);
2288 }
2289
2290 /* should match definition in bpf_helpers.h */
2291 enum libbpf_pin_type {
2292 LIBBPF_PIN_NONE,
2293 /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2294 LIBBPF_PIN_BY_NAME,
2295 };
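/* A map opts into pinning from BPF C code like so (a sketch; 'settings' is
 * a hypothetical map name):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_HASH);
 *           __uint(max_entries, 16);
 *           __uint(pinning, LIBBPF_PIN_BY_NAME);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *   } settings SEC(".maps");
 *
 * With no pin_root_path override, build_map_pin_path() above yields
 * /sys/fs/bpf/settings.
 */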
2296
2297 int parse_btf_map_def(const char *map_name, struct btf *btf,
2298 const struct btf_type *def_t, bool strict,
2299 struct btf_map_def *map_def, struct btf_map_def *inner_def)
2300 {
2301 const struct btf_type *t;
2302 const struct btf_member *m;
2303 bool is_inner = inner_def == NULL;
2304 int vlen, i;
2305
2306 vlen = btf_vlen(def_t);
2307 m = btf_members(def_t);
2308 for (i = 0; i < vlen; i++, m++) {
2309 const char *name = btf__name_by_offset(btf, m->name_off);
2310
2311 if (!name) {
2312 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2313 return -EINVAL;
2314 }
2315 if (strcmp(name, "type") == 0) {
2316 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2317 return -EINVAL;
2318 map_def->parts |= MAP_DEF_MAP_TYPE;
2319 } else if (strcmp(name, "max_entries") == 0) {
2320 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2321 return -EINVAL;
2322 map_def->parts |= MAP_DEF_MAX_ENTRIES;
2323 } else if (strcmp(name, "map_flags") == 0) {
2324 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2325 return -EINVAL;
2326 map_def->parts |= MAP_DEF_MAP_FLAGS;
2327 } else if (strcmp(name, "numa_node") == 0) {
2328 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2329 return -EINVAL;
2330 map_def->parts |= MAP_DEF_NUMA_NODE;
2331 } else if (strcmp(name, "key_size") == 0) {
2332 __u32 sz;
2333
2334 if (!get_map_field_int(map_name, btf, m, &sz))
2335 return -EINVAL;
2336 if (map_def->key_size && map_def->key_size != sz) {
2337 pr_warn("map '%s': conflicting key size %u != %u.\n",
2338 map_name, map_def->key_size, sz);
2339 return -EINVAL;
2340 }
2341 map_def->key_size = sz;
2342 map_def->parts |= MAP_DEF_KEY_SIZE;
2343 } else if (strcmp(name, "key") == 0) {
2344 __s64 sz;
2345
2346 t = btf__type_by_id(btf, m->type);
2347 if (!t) {
2348 pr_warn("map '%s': key type [%d] not found.\n",
2349 map_name, m->type);
2350 return -EINVAL;
2351 }
2352 if (!btf_is_ptr(t)) {
2353 pr_warn("map '%s': key spec is not PTR: %s.\n",
2354 map_name, btf_kind_str(t));
2355 return -EINVAL;
2356 }
2357 sz = btf__resolve_size(btf, t->type);
2358 if (sz < 0) {
2359 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2360 map_name, t->type, (ssize_t)sz);
2361 return sz;
2362 }
2363 if (map_def->key_size && map_def->key_size != sz) {
2364 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2365 map_name, map_def->key_size, (ssize_t)sz);
2366 return -EINVAL;
2367 }
2368 map_def->key_size = sz;
2369 map_def->key_type_id = t->type;
2370 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2371 } else if (strcmp(name, "value_size") == 0) {
2372 __u32 sz;
2373
2374 if (!get_map_field_int(map_name, btf, m, &sz))
2375 return -EINVAL;
2376 if (map_def->value_size && map_def->value_size != sz) {
2377 pr_warn("map '%s': conflicting value size %u != %u.\n",
2378 map_name, map_def->value_size, sz);
2379 return -EINVAL;
2380 }
2381 map_def->value_size = sz;
2382 map_def->parts |= MAP_DEF_VALUE_SIZE;
2383 } else if (strcmp(name, "value") == 0) {
2384 __s64 sz;
2385
2386 t = btf__type_by_id(btf, m->type);
2387 if (!t) {
2388 pr_warn("map '%s': value type [%d] not found.\n",
2389 map_name, m->type);
2390 return -EINVAL;
2391 }
2392 if (!btf_is_ptr(t)) {
2393 pr_warn("map '%s': value spec is not PTR: %s.\n",
2394 map_name, btf_kind_str(t));
2395 return -EINVAL;
2396 }
2397 sz = btf__resolve_size(btf, t->type);
2398 if (sz < 0) {
2399 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2400 map_name, t->type, (ssize_t)sz);
2401 return sz;
2402 }
2403 if (map_def->value_size && map_def->value_size != sz) {
2404 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2405 map_name, map_def->value_size, (ssize_t)sz);
2406 return -EINVAL;
2407 }
2408 map_def->value_size = sz;
2409 map_def->value_type_id = t->type;
2410 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2411 } else if (strcmp(name, "values") == 0) {
2413 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2414 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2415 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2416 char inner_map_name[128];
2417 int err;
2418
2419 if (is_inner) {
2420 pr_warn("map '%s': multi-level inner maps not supported.\n",
2421 map_name);
2422 return -ENOTSUP;
2423 }
2424 if (i != vlen - 1) {
2425 pr_warn("map '%s': '%s' member should be last.\n",
2426 map_name, name);
2427 return -EINVAL;
2428 }
2429 if (!is_map_in_map && !is_prog_array) {
2430 pr_warn("map '%s': should be map-in-map or prog-array.\n",
2431 map_name);
2432 return -ENOTSUP;
2433 }
2434 if (map_def->value_size && map_def->value_size != 4) {
2435 pr_warn("map '%s': conflicting value size %u != 4.\n",
2436 map_name, map_def->value_size);
2437 return -EINVAL;
2438 }
2439 map_def->value_size = 4;
2440 t = btf__type_by_id(btf, m->type);
2441 if (!t) {
2442 pr_warn("map '%s': %s type [%d] not found.\n",
2443 map_name, desc, m->type);
2444 return -EINVAL;
2445 }
2446 if (!btf_is_array(t) || btf_array(t)->nelems) {
2447 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2448 map_name, desc);
2449 return -EINVAL;
2450 }
2451 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2452 if (!btf_is_ptr(t)) {
2453 pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2454 map_name, desc, btf_kind_str(t));
2455 return -EINVAL;
2456 }
2457 t = skip_mods_and_typedefs(btf, t->type, NULL);
2458 if (is_prog_array) {
2459 if (!btf_is_func_proto(t)) {
2460 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2461 map_name, btf_kind_str(t));
2462 return -EINVAL;
2463 }
2464 continue;
2465 }
2466 if (!btf_is_struct(t)) {
2467 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2468 map_name, btf_kind_str(t));
2469 return -EINVAL;
2470 }
2471
2472 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2473 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2474 if (err)
2475 return err;
2476
2477 map_def->parts |= MAP_DEF_INNER_MAP;
2478 } else if (strcmp(name, "pinning") == 0) {
2479 __u32 val;
2480
2481 if (is_inner) {
2482 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2483 return -EINVAL;
2484 }
2485 if (!get_map_field_int(map_name, btf, m, &val))
2486 return -EINVAL;
2487 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2488 pr_warn("map '%s': invalid pinning value %u.\n",
2489 map_name, val);
2490 return -EINVAL;
2491 }
2492 map_def->pinning = val;
2493 map_def->parts |= MAP_DEF_PINNING;
2494 } else if (strcmp(name, "map_extra") == 0) {
2495 __u32 map_extra;
2496
2497 if (!get_map_field_int(map_name, btf, m, &map_extra))
2498 return -EINVAL;
2499 map_def->map_extra = map_extra;
2500 map_def->parts |= MAP_DEF_MAP_EXTRA;
2501 } else {
2502 if (strict) {
2503 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2504 return -ENOTSUP;
2505 }
2506 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2507 }
2508 }
2509
2510 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2511 pr_warn("map '%s': map type isn't specified.\n", map_name);
2512 return -EINVAL;
2513 }
2514
2515 return 0;
2516 }
2517
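/* Worked example, assuming 4096-byte pages: a requested size of 5000 is
 * rounded up to 8192 (page_sz * 2); 4096 and 16384 already satisfy the
 * constraints and pass through unchanged; 0 is passed through so that the
 * kernel reports the error to the user.
 */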
2518 static size_t adjust_ringbuf_sz(size_t sz)
2519 {
2520 __u32 page_sz = sysconf(_SC_PAGE_SIZE);
2521 __u32 mul;
2522
2523 /* if user forgot to set any size, make sure they see an error */
2524 if (sz == 0)
2525 return 0;
2526 /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2527 * a power-of-2 multiple of the kernel's page size. If the user already
2528 * satisfied these conditions, pass the size through.
2529 */
2530 if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2531 return sz;
2532
2533 /* Otherwise find closest (page_sz * power_of_2) product bigger than
2534 * user-set size to satisfy both user size request and kernel
2535 * requirements and substitute correct max_entries for map creation.
2536 */
2537 for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2538 if (mul * page_sz > sz)
2539 return mul * page_sz;
2540 }
2541
2542 /* if it's impossible to satisfy the conditions (i.e., user size is
2543 * very close to UINT_MAX but is not a power-of-2 multiple of
2544 * page_size) then just return original size and let kernel reject it
2545 */
2546 return sz;
2547 }
2548
2549 static bool map_is_ringbuf(const struct bpf_map *map)
2550 {
2551 return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2552 map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2553 }
2554
2555 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2556 {
2557 map->def.type = def->map_type;
2558 map->def.key_size = def->key_size;
2559 map->def.value_size = def->value_size;
2560 map->def.max_entries = def->max_entries;
2561 map->def.map_flags = def->map_flags;
2562 map->map_extra = def->map_extra;
2563
2564 map->numa_node = def->numa_node;
2565 map->btf_key_type_id = def->key_type_id;
2566 map->btf_value_type_id = def->value_type_id;
2567
2568 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2569 if (map_is_ringbuf(map))
2570 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2571
2572 if (def->parts & MAP_DEF_MAP_TYPE)
2573 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2574
2575 if (def->parts & MAP_DEF_KEY_TYPE)
2576 pr_debug("map '%s': found key [%u], sz = %u.\n",
2577 map->name, def->key_type_id, def->key_size);
2578 else if (def->parts & MAP_DEF_KEY_SIZE)
2579 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2580
2581 if (def->parts & MAP_DEF_VALUE_TYPE)
2582 pr_debug("map '%s': found value [%u], sz = %u.\n",
2583 map->name, def->value_type_id, def->value_size);
2584 else if (def->parts & MAP_DEF_VALUE_SIZE)
2585 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2586
2587 if (def->parts & MAP_DEF_MAX_ENTRIES)
2588 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2589 if (def->parts & MAP_DEF_MAP_FLAGS)
2590 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2591 if (def->parts & MAP_DEF_MAP_EXTRA)
2592 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2593 (unsigned long long)def->map_extra);
2594 if (def->parts & MAP_DEF_PINNING)
2595 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2596 if (def->parts & MAP_DEF_NUMA_NODE)
2597 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2598
2599 if (def->parts & MAP_DEF_INNER_MAP)
2600 pr_debug("map '%s': found inner map definition.\n", map->name);
2601 }
2602
2603 static const char *btf_var_linkage_str(__u32 linkage)
2604 {
2605 switch (linkage) {
2606 case BTF_VAR_STATIC: return "static";
2607 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2608 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2609 default: return "unknown";
2610 }
2611 }
2612
2613 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2614 const struct btf_type *sec,
2615 int var_idx, int sec_idx,
2616 const Elf_Data *data, bool strict,
2617 const char *pin_root_path)
2618 {
2619 struct btf_map_def map_def = {}, inner_def = {};
2620 const struct btf_type *var, *def;
2621 const struct btf_var_secinfo *vi;
2622 const struct btf_var *var_extra;
2623 const char *map_name;
2624 struct bpf_map *map;
2625 int err;
2626
2627 vi = btf_var_secinfos(sec) + var_idx;
2628 var = btf__type_by_id(obj->btf, vi->type);
2629 var_extra = btf_var(var);
2630 map_name = btf__name_by_offset(obj->btf, var->name_off);
2631
2632 if (map_name == NULL || map_name[0] == '\0') {
2633 pr_warn("map #%d: empty name.\n", var_idx);
2634 return -EINVAL;
2635 }
2636 if ((__u64)vi->offset + vi->size > data->d_size) {
2637 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2638 return -EINVAL;
2639 }
2640 if (!btf_is_var(var)) {
2641 pr_warn("map '%s': unexpected var kind %s.\n",
2642 map_name, btf_kind_str(var));
2643 return -EINVAL;
2644 }
2645 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2646 pr_warn("map '%s': unsupported map linkage %s.\n",
2647 map_name, btf_var_linkage_str(var_extra->linkage));
2648 return -EOPNOTSUPP;
2649 }
2650
2651 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2652 if (!btf_is_struct(def)) {
2653 pr_warn("map '%s': unexpected def kind %s.\n",
2654 map_name, btf_kind_str(def));
2655 return -EINVAL;
2656 }
2657 if (def->size > vi->size) {
2658 pr_warn("map '%s': invalid def size.\n", map_name);
2659 return -EINVAL;
2660 }
2661
2662 map = bpf_object__add_map(obj);
2663 if (IS_ERR(map))
2664 return PTR_ERR(map);
2665 map->name = strdup(map_name);
2666 if (!map->name) {
2667 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2668 return -ENOMEM;
2669 }
2670 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2671 map->def.type = BPF_MAP_TYPE_UNSPEC;
2672 map->sec_idx = sec_idx;
2673 map->sec_offset = vi->offset;
2674 map->btf_var_idx = var_idx;
2675 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2676 map_name, map->sec_idx, map->sec_offset);
2677
2678 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2679 if (err)
2680 return err;
2681
2682 fill_map_from_def(map, &map_def);
2683
2684 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2685 err = build_map_pin_path(map, pin_root_path);
2686 if (err) {
2687 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2688 return err;
2689 }
2690 }
2691
2692 if (map_def.parts & MAP_DEF_INNER_MAP) {
2693 map->inner_map = calloc(1, sizeof(*map->inner_map));
2694 if (!map->inner_map)
2695 return -ENOMEM;
2696 map->inner_map->fd = -1;
2697 map->inner_map->sec_idx = sec_idx;
2698 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2699 if (!map->inner_map->name)
2700 return -ENOMEM;
2701 sprintf(map->inner_map->name, "%s.inner", map_name);
2702
2703 fill_map_from_def(map->inner_map, &inner_def);
2704 }
2705
2706 err = map_fill_btf_type_info(obj, map);
2707 if (err)
2708 return err;
2709
2710 return 0;
2711 }
2712
2713 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2714 const char *pin_root_path)
2715 {
2716 const struct btf_type *sec = NULL;
2717 int nr_types, i, vlen, err;
2718 const struct btf_type *t;
2719 const char *name;
2720 Elf_Data *data;
2721 #ifdef HAVE_LIBELF
2722 Elf_Scn *scn;
2723 #endif
2724
2725 if (obj->efile.btf_maps_shndx < 0)
2726 return 0;
2727 #if defined HAVE_LIBELF
2728 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2729 data = elf_sec_data(obj, scn);
2730 if (!scn || !data) {
2731 #elif defined HAVE_ELFIO
2732 Elf_Data realdata;
2733 data = elf_sec_data_by_idx(obj, obj->efile.btf_maps_shndx, &realdata);
2734 if (!data) {
2735 #endif
2736 pr_warn("elf: failed to get %s map definitions for %s\n",
2737 MAPS_ELF_SEC, obj->path);
2738 return -EINVAL;
2739 }
2740
2741 nr_types = btf__type_cnt(obj->btf);
2742 for (i = 1; i < nr_types; i++) {
2743 t = btf__type_by_id(obj->btf, i);
2744 if (!btf_is_datasec(t))
2745 continue;
2746 name = btf__name_by_offset(obj->btf, t->name_off);
2747 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2748 sec = t;
2749 obj->efile.btf_maps_sec_btf_id = i;
2750 break;
2751 }
2752 }
2753
2754 if (!sec) {
2755 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2756 return -ENOENT;
2757 }
2758
2759 vlen = btf_vlen(sec);
2760 for (i = 0; i < vlen; i++) {
2761 err = bpf_object__init_user_btf_map(obj, sec, i,
2762 obj->efile.btf_maps_shndx,
2763 data, strict,
2764 pin_root_path);
2765 if (err)
2766 return err;
2767 }
2768
2769 return 0;
2770 }
2771
2772 static int bpf_object__init_maps(struct bpf_object *obj,
2773 const struct bpf_object_open_opts *opts)
2774 {
2775 const char *pin_root_path;
2776 bool strict;
2777 int err = 0;
2778
2779 strict = !OPTS_GET(opts, relaxed_maps, false);
2780 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2781
2782 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2783 err = err ?: bpf_object__init_global_data_maps(obj);
2784 err = err ?: bpf_object__init_kconfig_map(obj);
2785 err = err ?: bpf_object_init_struct_ops(obj);
2786
2787 return err;
2788 }
2789
2790 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2791 {
2792 Elf64_Shdr *sh;
2793 #if defined HAVE_LIBELF
2794 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2795 #elif defined HAVE_ELFIO
2796 Elf64_Shdr header;
2797 sh = elf_sec_hdr_by_idx(obj, idx, &header);
2798 #endif
2799 if (!sh)
2800 return false;
2801
2802 return sh->sh_flags & SHF_EXECINSTR;
2803 }
2804
2805 static bool btf_needs_sanitization(struct bpf_object *obj)
2806 {
2807 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2808 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2809 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2810 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2811 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2812 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2813 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2814
2815 return !has_func || !has_datasec || !has_func_global || !has_float ||
2816 !has_decl_tag || !has_type_tag || !has_enum64;
2817 }
2818
2819 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2820 {
2821 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2822 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2823 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2824 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2825 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2826 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2827 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2828 int enum64_placeholder_id = 0;
2829 struct btf_type *t;
2830 int i, j, vlen;
2831
2832 for (i = 1; i < btf__type_cnt(btf); i++) {
2833 t = (struct btf_type *)btf__type_by_id(btf, i);
2834
2835 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
2836 /* replace VAR/DECL_TAG with INT */
2837 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2838 /*
2839 * using size = 1 is the safest choice; 4 would be too
2840 * big and cause a kernel BTF validation failure if the
2841 * original variable took fewer than 4 bytes
2842 */
2843 t->size = 1;
2844 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2845 } else if (!has_datasec && btf_is_datasec(t)) {
2846 /* replace DATASEC with STRUCT */
2847 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2848 struct btf_member *m = btf_members(t);
2849 struct btf_type *vt;
2850 char *name;
2851
2852 name = (char *)btf__name_by_offset(btf, t->name_off);
2853 while (*name) {
2854 if (*name == '.')
2855 *name = '_';
2856 name++;
2857 }
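/* e.g., DATASEC ".rodata" is emitted below as STRUCT "_rodata" */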
2858
2859 vlen = btf_vlen(t);
2860 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2861 for (j = 0; j < vlen; j++, v++, m++) {
2862 /* order of field assignments is important */
2863 m->offset = v->offset * 8;
2864 m->type = v->type;
2865 /* preserve variable name as member name */
2866 vt = (void *)btf__type_by_id(btf, v->type);
2867 m->name_off = vt->name_off;
2868 }
2869 } else if (!has_func && btf_is_func_proto(t)) {
2870 /* replace FUNC_PROTO with ENUM */
2871 vlen = btf_vlen(t);
2872 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2873 t->size = sizeof(__u32); /* kernel enforced */
2874 } else if (!has_func && btf_is_func(t)) {
2875 /* replace FUNC with TYPEDEF */
2876 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2877 } else if (!has_func_global && btf_is_func(t)) {
2878 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2879 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
2880 } else if (!has_float && btf_is_float(t)) {
2881 /* replace FLOAT with an equally-sized empty STRUCT;
2882 * since C compilers do not accept e.g. "float" as a
2883 * valid struct name, make it anonymous
2884 */
2885 t->name_off = 0;
2886 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2887 } else if (!has_type_tag && btf_is_type_tag(t)) {
2888 /* replace TYPE_TAG with a CONST */
2889 t->name_off = 0;
2890 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
2891 } else if (!has_enum64 && btf_is_enum(t)) {
2892 /* clear the kflag */
2893 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
2894 } else if (!has_enum64 && btf_is_enum64(t)) {
2895 /* replace ENUM64 with a union */
2896 struct btf_member *m;
2897
2898 if (enum64_placeholder_id == 0) {
2899 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
2900 if (enum64_placeholder_id < 0)
2901 return enum64_placeholder_id;
2902
2903 t = (struct btf_type *)btf__type_by_id(btf, i);
2904 }
2905
2906 m = btf_members(t);
2907 vlen = btf_vlen(t);
2908 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
2909 for (j = 0; j < vlen; j++, m++) {
2910 m->type = enum64_placeholder_id;
2911 m->offset = 0;
2912 }
2913 }
2914 }
2915
2916 return 0;
2917 }
2918
2919 static bool libbpf_needs_btf(const struct bpf_object *obj)
2920 {
2921 return obj->efile.btf_maps_shndx >= 0 ||
2922 obj->efile.st_ops_shndx >= 0 ||
2923 obj->efile.st_ops_link_shndx >= 0 ||
2924 obj->nr_extern > 0;
2925 }
2926
2927 static bool kernel_needs_btf(const struct bpf_object *obj)
2928 {
2929 return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
2930 }
2931
2932 static int bpf_object__init_btf(struct bpf_object *obj,
2933 Elf_Data *btf_data,
2934 Elf_Data *btf_ext_data)
2935 {
2936 int err = -ENOENT;
2937
2938 if (btf_data) {
2939 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2940 err = libbpf_get_error(obj->btf);
2941 if (err) {
2942 obj->btf = NULL;
2943 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
2944 goto out;
2945 }
2946 /* enforce 8-byte pointers for BPF-targeted BTFs */
2947 btf__set_pointer_size(obj->btf, 8);
2948 }
2949 if (btf_ext_data) {
2950 struct btf_ext_info *ext_segs[3];
2951 int seg_num, sec_num;
2952
2953 if (!obj->btf) {
2954 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
2955 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2956 goto out;
2957 }
2958 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
2959 err = libbpf_get_error(obj->btf_ext);
2960 if (err) {
2961 pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
2962 BTF_EXT_ELF_SEC, err);
2963 obj->btf_ext = NULL;
2964 goto out;
2965 }
2966
2967 /* setup .BTF.ext to ELF section mapping */
2968 ext_segs[0] = &obj->btf_ext->func_info;
2969 ext_segs[1] = &obj->btf_ext->line_info;
2970 ext_segs[2] = &obj->btf_ext->core_relo_info;
2971 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
2972 struct btf_ext_info *seg = ext_segs[seg_num];
2973 const struct btf_ext_info_sec *sec;
2974 const char *sec_name;
2975 #ifdef HAVE_LIBELF
2976 Elf_Scn *scn;
2977 #elif defined HAVE_ELFIO
2978 psection_t sec_obj;
2979 #endif
2980
2981 if (seg->sec_cnt == 0)
2982 continue;
2983
2984 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
2985 if (!seg->sec_idxs) {
2986 err = -ENOMEM;
2987 goto out;
2988 }
2989
2990 sec_num = 0;
2991 for_each_btf_ext_sec(seg, sec) {
2992 /* preventively increment index to avoid doing
2993 * this before every continue below
2994 */
2995 sec_num++;
2996
2997 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
2998 if (str_is_empty(sec_name))
2999 continue;
3000 #ifdef HAVE_LIBELF
3001 scn = elf_sec_by_name(obj, sec_name);
3002 if (!scn)
3003 continue;
3004 #elif defined HAVE_ELFIO
3005 pelfio_t elf = obj->efile.elf;
3006 sec_obj = elfio_get_section_by_name(elf, sec_name);
3007 if (!sec_obj)
3008 continue;
3009 #endif
3010 #ifdef HAVE_LIBELF
3011 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3012 #elif defined HAVE_ELFIO
3013 seg->sec_idxs[sec_num - 1] = elfio_section_get_index(sec_obj);
3014 #endif
3015 }
3016 }
3017 }
3018 out:
3019 if (err && libbpf_needs_btf(obj)) {
3020 pr_warn("BTF is required, but is missing or corrupted.\n");
3021 return err;
3022 }
3023 return 0;
3024 }
3025
3026 static int compare_vsi_off(const void *_a, const void *_b)
3027 {
3028 const struct btf_var_secinfo *a = _a;
3029 const struct btf_var_secinfo *b = _b;
3030
3031 return a->offset - b->offset;
3032 }
3033
3034 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3035 struct btf_type *t)
3036 {
3037 __u32 size = 0, i, vars = btf_vlen(t);
3038 const char *sec_name = btf__name_by_offset(btf, t->name_off);
3039 struct btf_var_secinfo *vsi;
3040 bool fixup_offsets = false;
3041 int err;
3042
3043 if (!sec_name) {
3044 pr_debug("No name found in string section for DATASEC kind.\n");
3045 return -ENOENT;
3046 }
3047
3048 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3049 * variable offsets set at the previous step. Further, not every
3050 * extern BTF VAR has a corresponding ELF symbol preserved, so we skip
3051 * all fixups altogether for such sections and go straight to sorting
3052 * VARs within their DATASEC.
3053 */
3054 if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
3055 goto sort_vars;
3056
3057 /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
3058 * fix this up. But the BPF static linker already fills in all the
3059 * sizes and offsets during static linking, so this step has to be
3060 * optional. The STV_HIDDEN handling, however, is non-optional for any
3061 * non-extern DATASEC, so the variable fixup loop below handles both
3062 * tasks at the same time, paying the cost of BTF VAR <-> ELF
3063 * symbol matching just once.
3064 */
3065 if (t->size == 0) {
3066 err = find_elf_sec_sz(obj, sec_name, &size);
3067 if (err || !size) {
3068 pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
3069 sec_name, size, err);
3070 return -ENOENT;
3071 }
3072
3073 t->size = size;
3074 fixup_offsets = true;
3075 }
3076
3077 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
3078 const struct btf_type *t_var;
3079 struct btf_var *var;
3080 const char *var_name;
3081 Elf64_Sym *sym;
3082
3083 t_var = btf__type_by_id(btf, vsi->type);
3084 if (!t_var || !btf_is_var(t_var)) {
3085 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
3086 return -EINVAL;
3087 }
3088
3089 var = btf_var(t_var);
3090 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
3091 continue;
3092
3093 var_name = btf__name_by_offset(btf, t_var->name_off);
3094 if (!var_name) {
3095 pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
3096 sec_name, i);
3097 return -ENOENT;
3098 }
3099
3100 sym = find_elf_var_sym(obj, var_name);
3101 if (IS_ERR(sym)) {
3102 pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
3103 sec_name, var_name);
3104 return -ENOENT;
3105 }
3106
3107 if (fixup_offsets)
3108 vsi->offset = sym->st_value;
3109
3110 /* if variable is a global/weak symbol, but has restricted
3111 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
3112 * as static. This follows similar logic for functions (BPF
3113 * subprogs) and influences libbpf's further decisions about
3114 * whether to make global data BPF array maps as
3115 * BPF_F_MMAPABLE.
3116 */
3117 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
3118 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3119 var->linkage = BTF_VAR_STATIC;
3120 }
3121
3122 sort_vars:
3123 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
3124 return 0;
3125 }
3126
3127 static int bpf_object_fixup_btf(struct bpf_object *obj)
3128 {
3129 int i, n, err = 0;
3130
3131 if (!obj->btf)
3132 return 0;
3133
3134 n = btf__type_cnt(obj->btf);
3135 for (i = 1; i < n; i++) {
3136 struct btf_type *t = btf_type_by_id(obj->btf, i);
3137
3138 /* The loader needs to fix up some of the things the compiler
3139 * couldn't get its hands on while emitting BTF: section
3140 * sizes and global variable offsets. We use
3141 * the info from the ELF itself for this purpose.
3142 */
3143 if (btf_is_datasec(t)) {
3144 err = btf_fixup_datasec(obj, obj->btf, t);
3145 if (err)
3146 return err;
3147 }
3148 }
3149
3150 return 0;
3151 }
3152
3153 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
3154 {
3155 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3156 prog->type == BPF_PROG_TYPE_LSM)
3157 return true;
3158
3159 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
3160 * also need vmlinux BTF
3161 */
3162 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3163 return true;
3164
3165 return false;
3166 }
3167
3168 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
3169 {
3170 struct bpf_program *prog;
3171 int i;
3172
3173 /* CO-RE relocations need kernel BTF, only when btf_custom_path
3174 * is not specified
3175 */
3176 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
3177 return true;
3178
3179 /* Support for typed ksyms needs kernel BTF */
3180 for (i = 0; i < obj->nr_extern; i++) {
3181 const struct extern_desc *ext;
3182
3183 ext = &obj->externs[i];
3184 if (ext->type == EXT_KSYM && ext->ksym.type_id)
3185 return true;
3186 }
3187
3188 bpf_object__for_each_program(prog, obj) {
3189 if (!prog->autoload)
3190 continue;
3191 if (prog_needs_vmlinux_btf(prog))
3192 return true;
3193 }
3194
3195 return false;
3196 }
3197
3198 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3199 {
3200 int err;
3201
3202 /* btf_vmlinux could be loaded earlier */
3203 if (obj->btf_vmlinux || obj->gen_loader)
3204 return 0;
3205
3206 if (!force && !obj_needs_vmlinux_btf(obj))
3207 return 0;
3208
3209 obj->btf_vmlinux = btf__load_vmlinux_btf();
3210 err = libbpf_get_error(obj->btf_vmlinux);
3211 if (err) {
3212 pr_warn("Error loading vmlinux BTF: %d\n", err);
3213 obj->btf_vmlinux = NULL;
3214 return err;
3215 }
3216 return 0;
3217 }
3218
3219 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3220 {
3221 struct btf *kern_btf = obj->btf;
3222 bool btf_mandatory, sanitize;
3223 int i, err = 0;
3224
3225 if (!obj->btf)
3226 return 0;
3227
3228 if (!kernel_supports(obj, FEAT_BTF)) {
3229 if (kernel_needs_btf(obj)) {
3230 err = -EOPNOTSUPP;
3231 goto report;
3232 }
3233 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
3234 return 0;
3235 }
3236
3237 /* Even though some subprogs are global/weak, the user might prefer the
3238 * more permissive BPF verification process that the BPF verifier performs
3239 * for static functions, taking into account more context from the caller
3240 * functions. In such a case, they need to mark such subprogs with
3241 * __attribute__((visibility("hidden"))) and libbpf will adjust the
3242 * corresponding FUNC BTF type to be marked as static, triggering the more
3243 * involved BPF verification process.
3244 */
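/* For example (a sketch; 'my_subprog' is a hypothetical name):
 *
 *   __attribute__((visibility("hidden")))
 *   int my_subprog(int x) { return x + 1; }
 *
 * compiles to a GLOBAL FUNC in BTF; the loop below rewrites its linkage
 * to BTF_FUNC_STATIC before the BTF is loaded into the kernel.
 */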
3245 for (i = 0; i < obj->nr_programs; i++) {
3246 struct bpf_program *prog = &obj->programs[i];
3247 struct btf_type *t;
3248 const char *name;
3249 int j, n;
3250
3251 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3252 continue;
3253
3254 n = btf__type_cnt(obj->btf);
3255 for (j = 1; j < n; j++) {
3256 t = btf_type_by_id(obj->btf, j);
3257 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3258 continue;
3259
3260 name = btf__str_by_offset(obj->btf, t->name_off);
3261 if (strcmp(name, prog->name) != 0)
3262 continue;
3263
3264 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3265 break;
3266 }
3267 }
3268
3269 if (!kernel_supports(obj, FEAT_BTF_DECL_TAG))
3270 goto skip_exception_cb;
3271 for (i = 0; i < obj->nr_programs; i++) {
3272 struct bpf_program *prog = &obj->programs[i];
3273 int j, k, n;
3274
3275 if (prog_is_subprog(obj, prog))
3276 continue;
3277 n = btf__type_cnt(obj->btf);
3278 for (j = 1; j < n; j++) {
3279 const char *str = "exception_callback:", *name;
3280 size_t len = strlen(str);
3281 struct btf_type *t;
3282
3283 t = btf_type_by_id(obj->btf, j);
3284 if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
3285 continue;
3286
3287 name = btf__str_by_offset(obj->btf, t->name_off);
3288 if (strncmp(name, str, len))
3289 continue;
3290
3291 t = btf_type_by_id(obj->btf, t->type);
3292 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
3293 pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
3294 prog->name);
3295 return -EINVAL;
3296 }
3297 if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)))
3298 continue;
3299 /* If multiple callbacks are specified for the same prog,
3300 * the verifier will eventually return an error for this
3301 * case; hence simply skip appending a subprog.
3302 */
3303 if (prog->exception_cb_idx >= 0) {
3304 prog->exception_cb_idx = -1;
3305 break;
3306 }
3307
3308 name += len;
3309 if (str_is_empty(name)) {
3310 pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
3311 prog->name);
3312 return -EINVAL;
3313 }
3314
3315 for (k = 0; k < obj->nr_programs; k++) {
3316 struct bpf_program *subprog = &obj->programs[k];
3317
3318 if (!prog_is_subprog(obj, subprog))
3319 continue;
3320 if (strcmp(name, subprog->name))
3321 continue;
3322 /* Enforce non-hidden, as from the verifier's point of
3323 * view it expects global functions, whereas
3324 * mark_btf_static fixes up the linkage to static.
3325 */
3326 if (!subprog->sym_global || subprog->mark_btf_static) {
3327 pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
3328 prog->name, subprog->name);
3329 return -EINVAL;
3330 }
3331 /* Let's see if we already saw a static exception callback with the same name */
3332 if (prog->exception_cb_idx >= 0) {
3333 pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
3334 prog->name, subprog->name);
3335 return -EINVAL;
3336 }
3337 prog->exception_cb_idx = k;
3338 break;
3339 }
3340
3341 if (prog->exception_cb_idx >= 0)
3342 continue;
3343 pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
3344 return -ENOENT;
3345 }
3346 }
3347 skip_exception_cb:
3348
3349 sanitize = btf_needs_sanitization(obj);
3350 if (sanitize) {
3351 const void *raw_data;
3352 __u32 sz;
3353
3354 /* clone BTF to sanitize a copy and leave the original intact */
3355 raw_data = btf__raw_data(obj->btf, &sz);
3356 kern_btf = btf__new(raw_data, sz);
3357 err = libbpf_get_error(kern_btf);
3358 if (err)
3359 return err;
3360
3361 /* enforce 8-byte pointers for BPF-targeted BTFs */
3362 btf__set_pointer_size(obj->btf, 8);
3363 err = bpf_object__sanitize_btf(obj, kern_btf);
3364 if (err)
3365 return err;
3366 }
3367
3368 if (obj->gen_loader) {
3369 __u32 raw_size = 0;
3370 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3371
3372 if (!raw_data)
3373 return -ENOMEM;
3374 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3375 /* Pretend to have valid FD to pass various fd >= 0 checks.
3376 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3377 */
3378 btf__set_fd(kern_btf, 0);
3379 } else {
3380 /* currently BPF_BTF_LOAD only supports log_level 1 */
3381 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3382 obj->log_level ? 1 : 0);
3383 }
3384 if (sanitize) {
3385 if (!err) {
3386 /* move fd to libbpf's BTF */
3387 btf__set_fd(obj->btf, btf__fd(kern_btf));
3388 btf__set_fd(kern_btf, -1);
3389 }
3390 btf__free(kern_btf);
3391 }
3392 report:
3393 if (err) {
3394 btf_mandatory = kernel_needs_btf(obj);
3395 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3396 btf_mandatory ? "BTF is mandatory, can't proceed."
3397 : "BTF is optional, ignoring.");
3398 if (!btf_mandatory)
3399 err = 0;
3400 }
3401 return err;
3402 }
3403
3404 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3405 {
3406 const char *name;
3407 #if defined HAVE_LIBELF
3408 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3409 #elif defined HAVE_ELFIO
3410 name = elfio_string_get_string(obj->efile.strstring, off);
3411 #endif
3412 if (!name) {
3413 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3414 off, obj->path, elf_errmsg(-1));
3415 return NULL;
3416 }
3417
3418 return name;
3419 }
3420
3421 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3422 {
3423 const char *name;
3424 #if defined HAVE_LIBELF
3425 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3426 #elif defined HAVE_ELFIO
3427 name = elfio_string_get_string(obj->efile.shstring, off);
3428 #endif
3429
3430 if (!name) {
3431 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3432 off, obj->path, elf_errmsg(-1));
3433 return NULL;
3434 }
3435
3436 return name;
3437 }
3438
3439 #ifdef HAVE_LIBELF
3440 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3441 {
3442 Elf_Scn *scn;
3443
3444 scn = elf_getscn(obj->efile.elf, idx);
3445 if (!scn) {
3446 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3447 idx, obj->path, elf_errmsg(-1));
3448 return NULL;
3449 }
3450 return scn;
3451 }
3452
3453 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3454 {
3455 Elf_Scn *scn = NULL;
3456 Elf *elf = obj->efile.elf;
3457 const char *sec_name;
3458
3459 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3460 sec_name = elf_sec_name(obj, scn);
3461 if (!sec_name)
3462 return NULL;
3463
3464 if (strcmp(sec_name, name) != 0)
3465 continue;
3466
3467 return scn;
3468 }
3469 return NULL;
3470 }
3471
3472 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3473 {
3474 Elf64_Shdr *shdr;
3475
3476 if (!scn)
3477 return NULL;
3478
3479 shdr = elf64_getshdr(scn);
3480 if (!shdr) {
3481 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3482 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3483 return NULL;
3484 }
3485
3486 return shdr;
3487 }
3488
3489 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3490 {
3491 const char *name;
3492 Elf64_Shdr *sh;
3493
3494 if (!scn)
3495 return NULL;
3496
3497 sh = elf_sec_hdr(obj, scn);
3498 if (!sh)
3499 return NULL;
3500
3501 name = elf_sec_str(obj, sh->sh_name);
3502 if (!name) {
3503 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3504 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3505 return NULL;
3506 }
3507
3508 return name;
3509 }
3510 #elif defined HAVE_ELFIO
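/* The ELFIO backend has no Elf64_Shdr objects to hand out, so this helper
 * fills a caller-provided header via ELFIO accessors, letting callers share
 * the same header-inspection code as the libelf path.
 */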
3511 static Elf64_Shdr *elf_sec_hdr_by_idx(const struct bpf_object *obj, size_t idx, Elf64_Shdr *sheader)
3512 {
3513 psection_t psection = elfio_get_section_by_index(obj->efile.elf, idx);
3514
3515 sheader->sh_name = elfio_section_get_name_string_offset(psection);
3516 sheader->sh_type = elfio_section_get_type(psection);
3517 sheader->sh_flags = elfio_section_get_flags(psection);
3518 sheader->sh_addr = elfio_section_get_address(psection);
3519 sheader->sh_offset = elfio_section_get_offset(psection);
3520 sheader->sh_size = elfio_section_get_size(psection);
3521 sheader->sh_link = elfio_section_get_link(psection);
3522 sheader->sh_info = elfio_section_get_info(psection);
3523 sheader->sh_addralign = elfio_section_get_addr_align(psection);
3524 sheader->sh_entsize = elfio_section_get_entry_size(psection);
3525
3526 return sheader;
3527 }
3528
3529 static const char *elf_sec_name_by_idx(const struct bpf_object *obj, size_t idx)
3530 {
3531 const char *name;
3532 Elf64_Shdr sh;
3533
3534 elf_sec_hdr_by_idx(obj, idx, &sh);
3535
3536 name = elf_sec_str(obj, sh.sh_name);
3537 if (!name) {
3538 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3539 idx, obj->path, elf_errmsg(-1));
3540 return NULL;
3541 }
3542
3543 return name;
3544 }
3545 #endif
3546
3547 #if defined HAVE_LIBELF
3548 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3549 {
3550 Elf_Data *data;
3551
3552 if (!scn)
3553 return NULL;
3554
3555 data = elf_getdata(scn, 0);
3556 if (!data) {
3557 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3558 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3559 obj->path, elf_errmsg(-1));
3560 return NULL;
3561 }
3562
3563 return data;
3564 }
3565 #elif defined HAVE_ELFIO
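/* ELFIO exposes section contents through accessors rather than Elf_Data
 * objects, so these helpers synthesize a caller-provided Elf_Data from the
 * section's data pointer and size.
 */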
3566 static Elf_Data *elf_sec_data_by_name(const struct bpf_object *obj, const char *name, Elf_Data *data)
3567 {
3568 pelfio_t elf = obj->efile.elf;
3569 psection_t psection_name = elfio_get_section_by_name(elf, name);
3570 data->d_buf = (void*)elfio_section_get_data(psection_name);
3571 data->d_size = elfio_section_get_size(psection_name);
3572
3573 return data;
3574 }
3575
3576 static Elf_Data *elf_sec_data_by_idx(const struct bpf_object *obj, size_t idx, Elf_Data *data)
3577 {
3578 pelfio_t elf = obj->efile.elf;
3579 psection_t psection_index = elfio_get_section_by_index(elf, idx);
3580 data->d_buf = (void*)elfio_section_get_data(psection_index);
3581 data->d_size = elfio_section_get_size(psection_index);
3582
3583 return data;
3584 }
3585 #endif
3586
3587 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3588 {
3589 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3590 return NULL;
3591
3592 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3593 }
3594
3595 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3596 {
3597 if (idx >= data->d_size / sizeof(Elf64_Rel))
3598 return NULL;
3599
3600 return (Elf64_Rel *)data->d_buf + idx;
3601 }
3602
3603 static bool is_sec_name_dwarf(const char *name)
3604 {
3605 /* approximation, but the actual list is too long */
3606 return str_has_pfx(name, ".debug_");
3607 }
3608
3609 static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3610 {
3611 /* no special handling of .strtab */
3612 if (hdr->sh_type == SHT_STRTAB)
3613 return true;
3614
3615 /* ignore .llvm_addrsig section as well */
3616 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3617 return true;
3618
3619 /* no subprograms will lead to an empty .text section, ignore it */
3620 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3621 strcmp(name, ".text") == 0)
3622 return true;
3623
3624 /* DWARF sections */
3625 if (is_sec_name_dwarf(name))
3626 return true;
3627
3628 if (str_has_pfx(name, ".rel")) {
3629 name += sizeof(".rel") - 1;
3630 /* DWARF section relocations */
3631 if (is_sec_name_dwarf(name))
3632 return true;
3633
3634 /* .BTF and .BTF.ext don't need relocations */
3635 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3636 strcmp(name, BTF_EXT_ELF_SEC) == 0)
3637 return true;
3638 }
3639
3640 return false;
3641 }
3642
3643 static int cmp_progs(const void *_a, const void *_b)
3644 {
3645 const struct bpf_program *a = _a;
3646 const struct bpf_program *b = _b;
3647
3648 if (a->sec_idx != b->sec_idx)
3649 return a->sec_idx < b->sec_idx ? -1 : 1;
3650
3651 /* sec_insn_off can't be the same within the section */
3652 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3653 }
3654
3655 static int bpf_object__elf_collect(struct bpf_object *obj)
3656 {
3657 struct elf_sec_desc *sec_desc;
3658 #if defined HAVE_LIBELF
3659 Elf *elf = obj->efile.elf;
3660 #elif defined HAVE_ELFIO
3661 pelfio_t elf = obj->efile.elf;
3662 #endif
3663 Elf_Data *btf_ext_data = NULL;
3664 Elf_Data *btf_data = NULL;
3665 int idx = 0, err = 0;
3666 const char *name;
3667 Elf_Data *data;
3668 #ifdef HAVE_LIBELF
3669 Elf_Scn *scn;
3670 #endif
3671 Elf64_Shdr *sh;
3672 #ifdef HAVE_ELFIO
3673 Elf64_Shdr secHeader = {0};
3674 sh = &secHeader;
3675 #endif
3676
3677 /* ELF section indices are 0-based, but sec #0 is a special "invalid"
3678 * section. Since the section count retrieved by elf_getshdrnum()
3679 * includes sec #0, it is already the necessary array size to hold
3680 * all the sections.
3681 */
3682 #ifdef HAVE_LIBELF
3683 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3684 pr_warn("elf: failed to get the number of sections for %s: %s\n",
3685 obj->path, elf_errmsg(-1));
3686 return -LIBBPF_ERRNO__FORMAT;
3687 }
3688 #elif defined HAVE_ELFIO
3689 obj->efile.sec_cnt = elfio_get_sections_num(elf);
3690 #endif
3691 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3692 if (!obj->efile.secs)
3693 return -ENOMEM;
3694
3695 /* a bunch of ELF parsing functionality depends on processing symbols,
3696 * so do the first pass and find the symbol table
3697 */
3698 #if defined HAVE_LIBELF
3699 scn = NULL;
3700 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3701 sh = elf_sec_hdr(obj, scn);
3702 #elif defined HAVE_ELFIO
3703 int secno = elfio_get_sections_num(elf);
	for (int i = 0; i < secno; i++) {
3705 Elf_Data realdata;
3706 sh = elf_sec_hdr_by_idx(obj, i, sh);
3707 #endif
3708 if (!sh)
3709 return -LIBBPF_ERRNO__FORMAT;
3710
3711 if (sh->sh_type == SHT_SYMTAB) {
3712 if (obj->efile.symbols) {
3713 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3714 return -LIBBPF_ERRNO__FORMAT;
3715 }
3716 #if defined HAVE_LIBELF
3717 data = elf_sec_data(obj, scn);
3718 #elif defined HAVE_ELFIO
3719 data = elf_sec_data_by_idx(obj, i, &realdata);
3720 #endif
3721 if (!data)
3722 return -LIBBPF_ERRNO__FORMAT;
3723 #ifdef HAVE_LIBELF
3724 idx = elf_ndxscn(scn);
3725 #endif
3726
3727 #if defined HAVE_LIBELF
3728 obj->efile.symbols = data;
3729 #elif defined HAVE_ELFIO
3730 obj->efile.realsymbols.d_buf = data->d_buf;
3731 obj->efile.realsymbols.d_size = data->d_size;
3732 obj->efile.symbols = &(obj->efile.realsymbols);
3733 #endif
3734
3735 #if defined HAVE_LIBELF
3736 obj->efile.symbols_shndx = idx;
3737 #elif defined HAVE_ELFIO
3738 obj->efile.symbols_shndx = i;
3739 #endif
3740 obj->efile.strtabidx = sh->sh_link;
3741 }
3742 }
3743
3744 #ifdef HAVE_ELFIO
3745 pstring_t shstring;
3746 pstring_t strstring;
3747
3748 psection_t psection = elfio_get_section_by_index(elf, obj->efile.strtabidx);
3749 if (!psection)
3750 return -LIBBPF_ERRNO__FORMAT;
3751 strstring = elfio_string_section_accessor_new(psection);
3752
3753 psection = elfio_get_section_by_index(elf, obj->efile.shstrndx);
3754 if (!psection)
3755 return -LIBBPF_ERRNO__FORMAT;
3756 shstring = elfio_string_section_accessor_new(psection);
3757
3758 if (!strstring || !shstring)
3759 return -LIBBPF_ERRNO__FORMAT;
3760 obj->efile.strstring = strstring;
3761 obj->efile.shstring = shstring;
3762 #endif
3763
3764 if (!obj->efile.symbols) {
3765 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3766 obj->path);
3767 return -ENOENT;
3768 }
3769
3770 #ifdef HAVE_LIBELF
3771 scn = NULL;
3772 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3773 #elif defined HAVE_ELFIO
	for (int i = 0; i < secno; i++) {
3775 psection_t ptmpsection = elfio_get_section_by_index(elf, i);
3776 elf_sec_hdr_by_idx(obj, i, sh);
3777 #endif
3778
3779 #if defined HAVE_LIBELF
3780 idx = elf_ndxscn(scn);
3781 #elif defined HAVE_ELFIO
3782 idx = i;
3783 #endif
3784 sec_desc = &obj->efile.secs[idx];
3785
3786 #if defined HAVE_LIBELF
3787 sh = elf_sec_hdr(obj, scn);
3788 #elif defined HAVE_ELFIO
3789 sh = elf_sec_hdr_by_idx(obj, i, sh);
3790 #endif
3791
3792 if (!sh)
3793 return -LIBBPF_ERRNO__FORMAT;
3794
3795 name = elf_sec_str(obj, sh->sh_name);
3796 if (!name)
3797 return -LIBBPF_ERRNO__FORMAT;
3798
3799 if (ignore_elf_section(sh, name))
3800 continue;
3801
3802 #if defined HAVE_LIBELF
3803 data = elf_sec_data(obj, scn);
3804 #elif defined HAVE_ELFIO
3805 data = elf_sec_data_by_idx(obj, i, &sec_desc->realdata);
3806 #endif
3807 if (!data)
3808 return -LIBBPF_ERRNO__FORMAT;
3809
3810 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3811 idx, name, (unsigned long)data->d_size,
3812 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3813 (int)sh->sh_type);
3814
3815 if (strcmp(name, "license") == 0) {
3816 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3817 if (err)
3818 return err;
3819 } else if (strcmp(name, "version") == 0) {
3820 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3821 if (err)
3822 return err;
3823 } else if (strcmp(name, "maps") == 0) {
3824 pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3825 return -ENOTSUP;
3826 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3827 obj->efile.btf_maps_shndx = idx;
3828 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3829 if (sh->sh_type != SHT_PROGBITS)
3830 return -LIBBPF_ERRNO__FORMAT;
3831 btf_data = data;
3832 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3833 if (sh->sh_type != SHT_PROGBITS)
3834 return -LIBBPF_ERRNO__FORMAT;
3835 btf_ext_data = data;
3836 } else if (sh->sh_type == SHT_SYMTAB) {
3837 /* already processed during the first pass above */
3838 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3839 if (sh->sh_flags & SHF_EXECINSTR) {
3840 if (strcmp(name, ".text") == 0)
3841 obj->efile.text_shndx = idx;
3842 err = bpf_object__add_programs(obj, data, name, idx);
3843 if (err)
3844 return err;
3845 } else if (strcmp(name, DATA_SEC) == 0 ||
3846 str_has_pfx(name, DATA_SEC ".")) {
3847 sec_desc->sec_type = SEC_DATA;
3848 #if defined HAVE_LIBELF
3849 sec_desc->shdr = sh;
3850 sec_desc->data = data;
3851 #elif defined HAVE_ELFIO
3852 sec_desc->psection = ptmpsection;
3853 sec_desc->realdata.d_buf = data->d_buf;
3854 sec_desc->realdata.d_size = data->d_size;
3855 sec_desc->data = &(sec_desc->realdata);
3856 #endif
3857 } else if (strcmp(name, RODATA_SEC) == 0 ||
3858 str_has_pfx(name, RODATA_SEC ".")) {
3859 sec_desc->sec_type = SEC_RODATA;
3860 #if defined HAVE_LIBELF
3861 sec_desc->shdr = sh;
3862 sec_desc->data = data;
3863 #elif defined HAVE_ELFIO
3864 sec_desc->psection = ptmpsection;
3865 sec_desc->realdata.d_buf = data->d_buf;
3866 sec_desc->realdata.d_size = data->d_size;
3867 sec_desc->data = &(sec_desc->realdata);
3868 #endif
3869
3870 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
3871 #if defined HAVE_LIBELF
3872 obj->efile.st_ops_data = data;
3873 #elif defined HAVE_ELFIO
3874 obj->efile.realst_ops_data.d_buf = data->d_buf;
3875 obj->efile.realst_ops_data.d_size = data->d_size;
3876 obj->efile.st_ops_data = &(obj->efile.realst_ops_data);
3877 #endif
3878 obj->efile.st_ops_shndx = idx;
3879 } else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) {
3880 #if defined HAVE_LIBELF
3881 obj->efile.st_ops_link_data = data;
3882 #elif defined HAVE_ELFIO
3883 obj->efile.realst_ops_link_data.d_buf = data->d_buf;
3884 obj->efile.realst_ops_link_data.d_size = data->d_size;
3885 obj->efile.st_ops_link_data = &(obj->efile.realst_ops_link_data);
3886 #endif
3887 obj->efile.st_ops_link_shndx = idx;
3888 } else {
3889 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3890 idx, name);
3891 }
3892 } else if (sh->sh_type == SHT_REL) {
3893 int targ_sec_idx = sh->sh_info; /* points to other section */
3894
3895 if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3896 targ_sec_idx >= obj->efile.sec_cnt)
3897 return -LIBBPF_ERRNO__FORMAT;
3898
3899 /* Only do relo for section with exec instructions */
3900 if (!section_have_execinstr(obj, targ_sec_idx) &&
3901 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3902 strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
3903 strcmp(name, ".rel" MAPS_ELF_SEC)) {
3904 #if defined HAVE_LIBELF
3905 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3906 idx, name, targ_sec_idx,
3907 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3908 #elif defined HAVE_ELFIO
3909 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3910 idx, name, targ_sec_idx,
3911 elf_sec_name_by_idx(obj, targ_sec_idx) ?: "<?>");
3912 #endif
3913 continue;
3914 }
3915
3916 sec_desc->sec_type = SEC_RELO;
3917 #if defined HAVE_LIBELF
3918 sec_desc->shdr = sh;
3919 #elif defined HAVE_ELFIO
3920 sec_desc->psection = ptmpsection;
3921 #endif
3922 sec_desc->data = data;
3923 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3924 str_has_pfx(name, BSS_SEC "."))) {
3925 sec_desc->sec_type = SEC_BSS;
3926 #if defined HAVE_LIBELF
3927 sec_desc->shdr = sh;
3928 #elif defined HAVE_ELFIO
3929 sec_desc->psection = ptmpsection;
3930 #endif
3931 sec_desc->data = data;
3932 } else {
3933 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3934 (size_t)sh->sh_size);
3935 }
3936 }
3937
3938 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3939 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3940 return -LIBBPF_ERRNO__FORMAT;
3941 }
3942
	/* sort BPF programs by section index and in-section instruction offset
	 * for faster search
	 */
3946 if (obj->nr_programs)
3947 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3948
3949 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3950 }
3951
3952 static bool sym_is_extern(const Elf64_Sym *sym)
3953 {
3954 int bind = ELF64_ST_BIND(sym->st_info);
3955 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3956 return sym->st_shndx == SHN_UNDEF &&
3957 (bind == STB_GLOBAL || bind == STB_WEAK) &&
3958 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
3959 }
3960
3961 static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3962 {
3963 int bind = ELF64_ST_BIND(sym->st_info);
3964 int type = ELF64_ST_TYPE(sym->st_info);
3965
3966 /* in .text section */
3967 if (sym->st_shndx != text_shndx)
3968 return false;
3969
3970 /* local function */
3971 if (bind == STB_LOCAL && type == STT_SECTION)
3972 return true;
3973
3974 /* global function */
3975 return bind == STB_GLOBAL && type == STT_FUNC;
3976 }
3977
3978 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3979 {
3980 const struct btf_type *t;
3981 const char *tname;
3982 int i, n;
3983
3984 if (!btf)
3985 return -ESRCH;
3986
3987 n = btf__type_cnt(btf);
3988 for (i = 1; i < n; i++) {
3989 t = btf__type_by_id(btf, i);
3990
3991 if (!btf_is_var(t) && !btf_is_func(t))
3992 continue;
3993
3994 tname = btf__name_by_offset(btf, t->name_off);
3995 if (strcmp(tname, ext_name))
3996 continue;
3997
3998 if (btf_is_var(t) &&
3999 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
4000 return -EINVAL;
4001
4002 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
4003 return -EINVAL;
4004
4005 return i;
4006 }
4007
4008 return -ENOENT;
4009 }
4010
static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
4012 const struct btf_var_secinfo *vs;
4013 const struct btf_type *t;
4014 int i, j, n;
4015
4016 if (!btf)
4017 return -ESRCH;
4018
4019 n = btf__type_cnt(btf);
4020 for (i = 1; i < n; i++) {
4021 t = btf__type_by_id(btf, i);
4022
4023 if (!btf_is_datasec(t))
4024 continue;
4025
4026 vs = btf_var_secinfos(t);
4027 for (j = 0; j < btf_vlen(t); j++, vs++) {
4028 if (vs->type == ext_btf_id)
4029 return i;
4030 }
4031 }
4032
4033 return -ENOENT;
4034 }
4035
4036 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
4037 bool *is_signed)
4038 {
4039 const struct btf_type *t;
4040 const char *name;
4041
4042 t = skip_mods_and_typedefs(btf, id, NULL);
4043 name = btf__name_by_offset(btf, t->name_off);
4044
4045 if (is_signed)
4046 *is_signed = false;
4047 switch (btf_kind(t)) {
4048 case BTF_KIND_INT: {
4049 int enc = btf_int_encoding(t);
4050
4051 if (enc & BTF_INT_BOOL)
4052 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
4053 if (is_signed)
4054 *is_signed = enc & BTF_INT_SIGNED;
4055 if (t->size == 1)
4056 return KCFG_CHAR;
4057 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
4058 return KCFG_UNKNOWN;
4059 return KCFG_INT;
4060 }
4061 case BTF_KIND_ENUM:
4062 if (t->size != 4)
4063 return KCFG_UNKNOWN;
4064 if (strcmp(name, "libbpf_tristate"))
4065 return KCFG_UNKNOWN;
4066 return KCFG_TRISTATE;
4067 case BTF_KIND_ENUM64:
4068 if (strcmp(name, "libbpf_tristate"))
4069 return KCFG_UNKNOWN;
4070 return KCFG_TRISTATE;
4071 case BTF_KIND_ARRAY:
4072 if (btf_array(t)->nelems == 0)
4073 return KCFG_UNKNOWN;
4074 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
4075 return KCFG_UNKNOWN;
4076 return KCFG_CHAR_ARR;
4077 default:
4078 return KCFG_UNKNOWN;
4079 }
4080 }
4081
4082 static int cmp_externs(const void *_a, const void *_b)
4083 {
4084 const struct extern_desc *a = _a;
4085 const struct extern_desc *b = _b;
4086
4087 if (a->type != b->type)
4088 return a->type < b->type ? -1 : 1;
4089
4090 if (a->type == EXT_KCFG) {
4091 /* descending order by alignment requirements */
4092 if (a->kcfg.align != b->kcfg.align)
4093 return a->kcfg.align > b->kcfg.align ? -1 : 1;
4094 /* ascending order by size, within same alignment class */
4095 if (a->kcfg.sz != b->kcfg.sz)
4096 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
4097 }
4098
4099 /* resolve ties by name */
4100 return strcmp(a->name, b->name);
4101 }
4102
4103 static int find_int_btf_id(const struct btf *btf)
4104 {
4105 const struct btf_type *t;
4106 int i, n;
4107
4108 n = btf__type_cnt(btf);
4109 for (i = 1; i < n; i++) {
4110 t = btf__type_by_id(btf, i);
4111
4112 if (btf_is_int(t) && btf_int_bits(t) == 32)
4113 return i;
4114 }
4115
4116 return 0;
4117 }
4118
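/* A DATASEC can't reference BTF_KIND_FUNC entries directly, so if the
 * .ksyms section contains extern functions, add a dummy int variable
 * whose BTF ID will later be substituted for them (see
 * bpf_object__collect_externs()).
 */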
4119 static int add_dummy_ksym_var(struct btf *btf)
4120 {
4121 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
4122 const struct btf_var_secinfo *vs;
4123 const struct btf_type *sec;
4124
4125 if (!btf)
4126 return 0;
4127
4128 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
4129 BTF_KIND_DATASEC);
4130 if (sec_btf_id < 0)
4131 return 0;
4132
4133 sec = btf__type_by_id(btf, sec_btf_id);
4134 vs = btf_var_secinfos(sec);
4135 for (i = 0; i < btf_vlen(sec); i++, vs++) {
4136 const struct btf_type *vt;
4137
4138 vt = btf__type_by_id(btf, vs->type);
4139 if (btf_is_func(vt))
4140 break;
4141 }
4142
4143 /* No func in ksyms sec. No need to add dummy var. */
4144 if (i == btf_vlen(sec))
4145 return 0;
4146
4147 int_btf_id = find_int_btf_id(btf);
4148 dummy_var_btf_id = btf__add_var(btf,
4149 "dummy_ksym",
4150 BTF_VAR_GLOBAL_ALLOCATED,
4151 int_btf_id);
4152 if (dummy_var_btf_id < 0)
4153 pr_warn("cannot create a dummy_ksym var\n");
4154
4155 return dummy_var_btf_id;
4156 }
4157
4158 static int bpf_object__collect_externs(struct bpf_object *obj)
4159 {
4160 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
4161 const struct btf_type *t;
4162 struct extern_desc *ext;
4163 int i, n, off, dummy_var_btf_id;
4164 const char *ext_name, *sec_name;
4165 size_t ext_essent_len;
4166 #ifdef HAVE_LIBELF
4167 Elf_Scn *scn;
4168 #endif
	Elf64_Shdr *sh;
#ifdef HAVE_ELFIO
	Elf64_Shdr shheader;
#endif
4171
4172 if (!obj->efile.symbols)
4173 return 0;
4174
4175 #if defined HAVE_LIBELF
4176 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
4177 sh = elf_sec_hdr(obj, scn);
4178 #elif defined HAVE_ELFIO
4179 sh = &shheader;
4180 sh = elf_sec_hdr_by_idx(obj, obj->efile.symbols_shndx, sh);
4181 #endif
4182
4183 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
4184 return -LIBBPF_ERRNO__FORMAT;
4185
4186 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4187 if (dummy_var_btf_id < 0)
4188 return dummy_var_btf_id;
4189
4190 n = sh->sh_size / sh->sh_entsize;
4191 pr_debug("looking for externs among %d symbols...\n", n);
4192
4193 for (i = 0; i < n; i++) {
4194 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
4195
4196 if (!sym)
4197 return -LIBBPF_ERRNO__FORMAT;
4198 if (!sym_is_extern(sym))
4199 continue;
4200 ext_name = elf_sym_str(obj, sym->st_name);
4201 if (!ext_name || !ext_name[0])
4202 continue;
4203
4204 ext = obj->externs;
4205 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
4206 if (!ext)
4207 return -ENOMEM;
4208 obj->externs = ext;
4209 ext = &ext[obj->nr_extern];
4210 memset(ext, 0, sizeof(*ext));
4211 obj->nr_extern++;
4212
4213 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4214 if (ext->btf_id <= 0) {
4215 pr_warn("failed to find BTF for extern '%s': %d\n",
4216 ext_name, ext->btf_id);
4217 return ext->btf_id;
4218 }
4219 t = btf__type_by_id(obj->btf, ext->btf_id);
4220 ext->name = btf__name_by_offset(obj->btf, t->name_off);
4221 ext->sym_idx = i;
4222 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
4223
4224 ext_essent_len = bpf_core_essential_name_len(ext->name);
4225 ext->essent_name = NULL;
4226 if (ext_essent_len != strlen(ext->name)) {
4227 ext->essent_name = strndup(ext->name, ext_essent_len);
4228 if (!ext->essent_name)
4229 return -ENOMEM;
4230 }
4231
4232 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4233 if (ext->sec_btf_id <= 0) {
4234 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
4235 ext_name, ext->btf_id, ext->sec_btf_id);
4236 return ext->sec_btf_id;
4237 }
4238 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4239 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4240
4241 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
4242 if (btf_is_func(t)) {
4243 pr_warn("extern function %s is unsupported under %s section\n",
4244 ext->name, KCONFIG_SEC);
4245 return -ENOTSUP;
4246 }
4247 kcfg_sec = sec;
4248 ext->type = EXT_KCFG;
4249 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4250 if (ext->kcfg.sz <= 0) {
4251 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
4252 ext_name, ext->kcfg.sz);
4253 return ext->kcfg.sz;
4254 }
4255 ext->kcfg.align = btf__align_of(obj->btf, t->type);
4256 if (ext->kcfg.align <= 0) {
4257 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
4258 ext_name, ext->kcfg.align);
4259 return -EINVAL;
4260 }
4261 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
4262 &ext->kcfg.is_signed);
4263 if (ext->kcfg.type == KCFG_UNKNOWN) {
4264 pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
4265 return -ENOTSUP;
4266 }
4267 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
4268 ksym_sec = sec;
4269 ext->type = EXT_KSYM;
4270 skip_mods_and_typedefs(obj->btf, t->type,
4271 &ext->ksym.type_id);
4272 } else {
4273 pr_warn("unrecognized extern section '%s'\n", sec_name);
4274 return -ENOTSUP;
4275 }
4276 }
4277 pr_debug("collected %d externs total\n", obj->nr_extern);
4278
4279 if (!obj->nr_extern)
4280 return 0;
4281
4282 /* sort externs by type, for kcfg ones also by (align, size, name) */
4283 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
4284
	/* for .ksyms section, we need to turn all externs into allocated
	 * variables in BTF to pass kernel verification; we do this by
	 * pretending that each extern is an int-sized (4-byte) variable
	 */
4289 if (ksym_sec) {
4290 /* find existing 4-byte integer type in BTF to use for fake
4291 * extern variables in DATASEC
4292 */
4293 int int_btf_id = find_int_btf_id(obj->btf);
		/* For extern functions, the dummy_var added earlier
		 * replaces vs->type, and its name string is reused
		 * to fill in missing param names.
		 */
4299 const struct btf_type *dummy_var;
4300
4301 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
4302 for (i = 0; i < obj->nr_extern; i++) {
4303 ext = &obj->externs[i];
4304 if (ext->type != EXT_KSYM)
4305 continue;
4306 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
4307 i, ext->sym_idx, ext->name);
4308 }
4309
4310 sec = ksym_sec;
4311 n = btf_vlen(sec);
4312 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
4313 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4314 struct btf_type *vt;
4315
4316 vt = (void *)btf__type_by_id(obj->btf, vs->type);
4317 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4318 ext = find_extern_by_name(obj, ext_name);
4319 if (!ext) {
4320 pr_warn("failed to find extern definition for BTF %s '%s'\n",
4321 btf_kind_str(vt), ext_name);
4322 return -ESRCH;
4323 }
4324 if (btf_is_func(vt)) {
4325 const struct btf_type *func_proto;
4326 struct btf_param *param;
4327 int j;
4328
4329 func_proto = btf__type_by_id(obj->btf,
4330 vt->type);
4331 param = btf_params(func_proto);
				/* Reuse the dummy_var string if the
				 * func proto does not have a param name.
				 */
4335 for (j = 0; j < btf_vlen(func_proto); j++)
4336 if (param[j].type && !param[j].name_off)
4337 param[j].name_off =
4338 dummy_var->name_off;
4339 vs->type = dummy_var_btf_id;
4340 vt->info &= ~0xffff;
4341 vt->info |= BTF_FUNC_GLOBAL;
4342 } else {
4343 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4344 vt->type = int_btf_id;
4345 }
4346 vs->offset = off;
4347 vs->size = sizeof(int);
4348 }
4349 sec->size = off;
4350 }
4351
4352 if (kcfg_sec) {
4353 sec = kcfg_sec;
4354 /* for kcfg externs calculate their offsets within a .kconfig map */
4355 off = 0;
4356 for (i = 0; i < obj->nr_extern; i++) {
4357 ext = &obj->externs[i];
4358 if (ext->type != EXT_KCFG)
4359 continue;
4360
4361 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4362 off = ext->kcfg.data_off + ext->kcfg.sz;
4363 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
4364 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
4365 }
4366 sec->size = off;
4367 n = btf_vlen(sec);
4368 for (i = 0; i < n; i++) {
4369 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4370
4371 t = btf__type_by_id(obj->btf, vs->type);
4372 ext_name = btf__name_by_offset(obj->btf, t->name_off);
4373 ext = find_extern_by_name(obj, ext_name);
4374 if (!ext) {
4375 pr_warn("failed to find extern definition for BTF var '%s'\n",
4376 ext_name);
4377 return -ESRCH;
4378 }
4379 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4380 vs->offset = ext->kcfg.data_off;
4381 }
4382 }
4383 return 0;
4384 }
4385
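/* Programs in .text are considered subprograms, unless the object
 * contains just one program overall, in which case the .text program is
 * the main program.
 */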
4386 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4387 {
4388 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
4389 }
4390
4391 struct bpf_program *
4392 bpf_object__find_program_by_name(const struct bpf_object *obj,
4393 const char *name)
4394 {
4395 struct bpf_program *prog;
4396
4397 bpf_object__for_each_program(prog, obj) {
4398 if (prog_is_subprog(obj, prog))
4399 continue;
4400 if (!strcmp(prog->name, name))
4401 return prog;
4402 }
4403 return errno = ENOENT, NULL;
4404 }
4405
4406 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4407 int shndx)
4408 {
4409 switch (obj->efile.secs[shndx].sec_type) {
4410 case SEC_BSS:
4411 case SEC_DATA:
4412 case SEC_RODATA:
4413 return true;
4414 default:
4415 return false;
4416 }
4417 }
4418
4419 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4420 int shndx)
4421 {
4422 return shndx == obj->efile.btf_maps_shndx;
4423 }
4424
4425 static enum libbpf_map_type
4426 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4427 {
4428 if (shndx == obj->efile.symbols_shndx)
4429 return LIBBPF_MAP_KCONFIG;
4430
4431 switch (obj->efile.secs[shndx].sec_type) {
4432 case SEC_BSS:
4433 return LIBBPF_MAP_BSS;
4434 case SEC_DATA:
4435 return LIBBPF_MAP_DATA;
4436 case SEC_RODATA:
4437 return LIBBPF_MAP_RODATA;
4438 default:
4439 return LIBBPF_MAP_UNSPEC;
4440 }
4441 }
4442
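/* Classify a single ELF relocation against a program instruction and
 * record it as one of: an extern (ld64 or call), a subprogram call, a
 * subprogram address load, a generic map reference, or a global data
 * access.
 */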
4443 static int bpf_program__record_reloc(struct bpf_program *prog,
4444 struct reloc_desc *reloc_desc,
4445 __u32 insn_idx, const char *sym_name,
4446 const Elf64_Sym *sym, const Elf64_Rel *rel)
4447 {
4448 struct bpf_insn *insn = &prog->insns[insn_idx];
4449 size_t map_idx, nr_maps = prog->obj->nr_maps;
4450 struct bpf_object *obj = prog->obj;
4451 __u32 shdr_idx = sym->st_shndx;
4452 enum libbpf_map_type type;
4453 const char *sym_sec_name;
4454 struct bpf_map *map;
4455
4456 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
4457 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
4458 prog->name, sym_name, insn_idx, insn->code);
4459 return -LIBBPF_ERRNO__RELOC;
4460 }
4461
4462 if (sym_is_extern(sym)) {
4463 int sym_idx = ELF64_R_SYM(rel->r_info);
4464 int i, n = obj->nr_extern;
4465 struct extern_desc *ext;
4466
4467 for (i = 0; i < n; i++) {
4468 ext = &obj->externs[i];
4469 if (ext->sym_idx == sym_idx)
4470 break;
4471 }
4472 if (i >= n) {
4473 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4474 prog->name, sym_name, sym_idx);
4475 return -LIBBPF_ERRNO__RELOC;
4476 }
4477 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4478 prog->name, i, ext->name, ext->sym_idx, insn_idx);
4479 if (insn->code == (BPF_JMP | BPF_CALL))
4480 reloc_desc->type = RELO_EXTERN_CALL;
4481 else
4482 reloc_desc->type = RELO_EXTERN_LD64;
4483 reloc_desc->insn_idx = insn_idx;
4484 reloc_desc->ext_idx = i;
4485 return 0;
4486 }
4487
4488 /* sub-program call relocation */
4489 if (is_call_insn(insn)) {
4490 if (insn->src_reg != BPF_PSEUDO_CALL) {
4491 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4492 return -LIBBPF_ERRNO__RELOC;
4493 }
4494 /* text_shndx can be 0, if no default "main" program exists */
4495 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4496 #if defined HAVE_LIBELF
4497 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4498 #elif defined HAVE_ELFIO
4499 sym_sec_name = elf_sec_name_by_idx(obj, shdr_idx);
4500 #endif
4501 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4502 prog->name, sym_name, sym_sec_name);
4503 return -LIBBPF_ERRNO__RELOC;
4504 }
4505 if (sym->st_value % BPF_INSN_SZ) {
4506 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4507 prog->name, sym_name, (size_t)sym->st_value);
4508 return -LIBBPF_ERRNO__RELOC;
4509 }
4510 reloc_desc->type = RELO_CALL;
4511 reloc_desc->insn_idx = insn_idx;
4512 reloc_desc->sym_off = sym->st_value;
4513 return 0;
4514 }
4515
4516 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
4517 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4518 prog->name, sym_name, shdr_idx);
4519 return -LIBBPF_ERRNO__RELOC;
4520 }
4521
4522 /* loading subprog addresses */
4523 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4524 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4525 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4526 */
4527 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4528 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4529 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4530 return -LIBBPF_ERRNO__RELOC;
4531 }
4532
4533 reloc_desc->type = RELO_SUBPROG_ADDR;
4534 reloc_desc->insn_idx = insn_idx;
4535 reloc_desc->sym_off = sym->st_value;
4536 return 0;
4537 }
4538
4539 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4540 #if defined HAVE_LIBELF
4541 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4542 #elif defined HAVE_ELFIO
4543 sym_sec_name = elf_sec_name_by_idx(obj, shdr_idx);
4544 #endif
4545 /* generic map reference relocation */
4546 if (type == LIBBPF_MAP_UNSPEC) {
4547 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4548 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4549 prog->name, sym_name, sym_sec_name);
4550 return -LIBBPF_ERRNO__RELOC;
4551 }
4552 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4553 map = &obj->maps[map_idx];
4554 if (map->libbpf_type != type ||
4555 map->sec_idx != sym->st_shndx ||
4556 map->sec_offset != sym->st_value)
4557 continue;
4558 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4559 prog->name, map_idx, map->name, map->sec_idx,
4560 map->sec_offset, insn_idx);
4561 break;
4562 }
4563 if (map_idx >= nr_maps) {
4564 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4565 prog->name, sym_sec_name, (size_t)sym->st_value);
4566 return -LIBBPF_ERRNO__RELOC;
4567 }
4568 reloc_desc->type = RELO_LD64;
4569 reloc_desc->insn_idx = insn_idx;
4570 reloc_desc->map_idx = map_idx;
4571 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4572 return 0;
4573 }
4574
4575 /* global data map relocation */
4576 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4577 pr_warn("prog '%s': bad data relo against section '%s'\n",
4578 prog->name, sym_sec_name);
4579 return -LIBBPF_ERRNO__RELOC;
4580 }
4581 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4582 map = &obj->maps[map_idx];
4583 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4584 continue;
4585 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4586 prog->name, map_idx, map->name, map->sec_idx,
4587 map->sec_offset, insn_idx);
4588 break;
4589 }
4590 if (map_idx >= nr_maps) {
4591 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4592 prog->name, sym_sec_name);
4593 return -LIBBPF_ERRNO__RELOC;
4594 }
4595
4596 reloc_desc->type = RELO_DATA;
4597 reloc_desc->insn_idx = insn_idx;
4598 reloc_desc->map_idx = map_idx;
4599 reloc_desc->sym_off = sym->st_value;
4600 return 0;
4601 }
4602
4603 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4604 {
4605 return insn_idx >= prog->sec_insn_off &&
4606 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4607 }
4608
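/* Binary-search obj->programs (sorted by cmp_progs()) for the program
 * that contains the given instruction offset within the given section.
 */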
4609 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4610 size_t sec_idx, size_t insn_idx)
4611 {
4612 int l = 0, r = obj->nr_programs - 1, m;
4613 struct bpf_program *prog;
4614
4615 if (!obj->nr_programs)
4616 return NULL;
4617
4618 while (l < r) {
4619 m = l + (r - l + 1) / 2;
4620 prog = &obj->programs[m];
4621
4622 if (prog->sec_idx < sec_idx ||
4623 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4624 l = m;
4625 else
4626 r = m - 1;
4627 }
4628 /* matching program could be at index l, but it still might be the
4629 * wrong one, so we need to double check conditions for the last time
4630 */
4631 prog = &obj->programs[l];
4632 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4633 return prog;
4634 return NULL;
4635 }
4636
4637 static int
4638 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4639 {
4640 const char *relo_sec_name, *sec_name;
4641 size_t sec_idx = shdr->sh_info, sym_idx;
4642 struct bpf_program *prog;
4643 struct reloc_desc *relos;
4644 int err, i, nrels;
4645 const char *sym_name;
4646 __u32 insn_idx;
4647 #ifdef HAVE_LIBELF
4648 Elf_Scn *scn;
4649 #endif
4650 Elf_Data *scn_data;
4651 Elf64_Sym *sym;
4652 Elf64_Rel *rel;
4653
4654 if (sec_idx >= obj->efile.sec_cnt)
4655 return -EINVAL;
4656
4657 #if defined HAVE_LIBELF
4658 scn = elf_sec_by_idx(obj, sec_idx);
4659 scn_data = elf_sec_data(obj, scn);
4660
4661 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4662 sec_name = elf_sec_name(obj, scn);
4663 if (!relo_sec_name || !sec_name)
4664 return -EINVAL;
4665 #elif defined HAVE_ELFIO
4666 Elf_Data realdata;
4667 scn_data = elf_sec_data_by_idx(obj, sec_idx, &realdata);
4668
4669 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4670 sec_name = elf_sec_name_by_idx(obj, sec_idx);
4671 if (!relo_sec_name || !sec_name)
4672 return -EINVAL;
4673 #endif
4674
4675 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4676 relo_sec_name, sec_idx, sec_name);
4677 nrels = shdr->sh_size / shdr->sh_entsize;
4678
4679 for (i = 0; i < nrels; i++) {
4680 rel = elf_rel_by_idx(data, i);
4681 if (!rel) {
4682 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4683 return -LIBBPF_ERRNO__FORMAT;
4684 }
4685
4686 sym_idx = ELF64_R_SYM(rel->r_info);
4687 sym = elf_sym_by_idx(obj, sym_idx);
4688 if (!sym) {
4689 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4690 relo_sec_name, sym_idx, i);
4691 return -LIBBPF_ERRNO__FORMAT;
4692 }
4693
4694 if (sym->st_shndx >= obj->efile.sec_cnt) {
4695 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4696 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4697 return -LIBBPF_ERRNO__FORMAT;
4698 }
4699
4700 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4701 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4702 relo_sec_name, (size_t)rel->r_offset, i);
4703 return -LIBBPF_ERRNO__FORMAT;
4704 }
4705
4706 insn_idx = rel->r_offset / BPF_INSN_SZ;
		/* relocations against static functions are recorded as
		 * relocations against the section that contains a function;
		 * in such a case, the symbol will be STT_SECTION and
		 * sym.st_name will point to an empty string (0), so fetch
		 * the section name instead
		 */
4713 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4714 #if defined HAVE_LIBELF
4715 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4716 #elif defined HAVE_ELFIO
4717 sym_name = elf_sec_name_by_idx(obj, sym->st_shndx);
4718 #endif
4719 else
4720 sym_name = elf_sym_str(obj, sym->st_name);
		sym_name = sym_name ?: "<?>";
4722
4723 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4724 relo_sec_name, i, insn_idx, sym_name);
4725
4726 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4727 if (!prog) {
4728 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4729 relo_sec_name, i, sec_name, insn_idx);
4730 continue;
4731 }
4732
4733 relos = libbpf_reallocarray(prog->reloc_desc,
4734 prog->nr_reloc + 1, sizeof(*relos));
4735 if (!relos)
4736 return -ENOMEM;
4737 prog->reloc_desc = relos;
4738
4739 /* adjust insn_idx to local BPF program frame of reference */
4740 insn_idx -= prog->sec_insn_off;
4741 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4742 insn_idx, sym_name, sym, rel);
4743 if (err)
4744 return err;
4745
4746 prog->nr_reloc++;
4747 }
4748 return 0;
4749 }
4750
4751 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
4752 {
4753 int id;
4754
4755 if (!obj->btf)
4756 return -ENOENT;
4757
4758 /* if it's BTF-defined map, we don't need to search for type IDs.
4759 * For struct_ops map, it does not need btf_key_type_id and
4760 * btf_value_type_id.
4761 */
4762 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4763 return 0;
4764
4765 /*
4766 * LLVM annotates global data differently in BTF, that is,
4767 * only as '.data', '.bss' or '.rodata'.
4768 */
4769 if (!bpf_map__is_internal(map))
4770 return -ENOENT;
4771
4772 id = btf__find_by_name(obj->btf, map->real_name);
4773 if (id < 0)
4774 return id;
4775
4776 map->btf_key_type_id = 0;
4777 map->btf_value_type_id = id;
4778 return 0;
4779 }
4780
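/* Fallback for kernels that don't support BPF_OBJ_GET_INFO_BY_FD for
 * maps: recover the basic map parameters from /proc/<pid>/fdinfo/<fd>
 * instead.
 */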
4781 static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4782 {
4783 char file[PATH_MAX], buff[4096];
4784 FILE *fp;
4785 __u32 val;
4786 int err;
4787
4788 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4789 memset(info, 0, sizeof(*info));
4790
4791 fp = fopen(file, "re");
4792 if (!fp) {
4793 err = -errno;
4794 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4795 err);
4796 return err;
4797 }
4798
4799 while (fgets(buff, sizeof(buff), fp)) {
4800 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4801 info->type = val;
4802 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4803 info->key_size = val;
4804 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4805 info->value_size = val;
4806 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4807 info->max_entries = val;
4808 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4809 info->map_flags = val;
4810 }
4811
4812 fclose(fp);
4813
4814 return 0;
4815 }
4816
4817 bool bpf_map__autocreate(const struct bpf_map *map)
4818 {
4819 return map->autocreate;
4820 }
4821
4822 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4823 {
4824 if (map->obj->loaded)
4825 return libbpf_err(-EBUSY);
4826
4827 map->autocreate = autocreate;
4828 return 0;
4829 }
4830
4831 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4832 {
4833 struct bpf_map_info info;
4834 __u32 len = sizeof(info), name_len;
4835 int new_fd, err;
4836 char *new_name;
4837
4838 memset(&info, 0, len);
4839 err = bpf_map_get_info_by_fd(fd, &info, &len);
4840 if (err && errno == EINVAL)
4841 err = bpf_get_map_info_from_fdinfo(fd, &info);
4842 if (err)
4843 return libbpf_err(err);
4844
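	/* The kernel truncates map names to BPF_OBJ_NAME_LEN - 1 chars; if
	 * the reported name is a max-length prefix of our name, keep the
	 * untruncated original instead.
	 */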
4845 name_len = strlen(info.name);
4846 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4847 new_name = strdup(map->name);
4848 else
4849 new_name = strdup(info.name);
4850
4851 if (!new_name)
4852 return libbpf_err(-errno);
4853
	/*
	 * Like dup(), but make sure the new FD is >= 3 and has O_CLOEXEC set.
	 * This is similar to what we do in ensure_good_fd(), but without
	 * closing the original FD.
	 */
4859 new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
4860 if (new_fd < 0) {
4861 err = -errno;
4862 goto err_free_new_name;
4863 }
4864
4865 err = zclose(map->fd);
4866 if (err) {
4867 err = -errno;
4868 goto err_close_new_fd;
4869 }
4870 free(map->name);
4871
4872 map->fd = new_fd;
4873 map->name = new_name;
4874 map->def.type = info.type;
4875 map->def.key_size = info.key_size;
4876 map->def.value_size = info.value_size;
4877 map->def.max_entries = info.max_entries;
4878 map->def.map_flags = info.map_flags;
4879 map->btf_key_type_id = info.btf_key_type_id;
4880 map->btf_value_type_id = info.btf_value_type_id;
4881 map->reused = true;
4882 map->map_extra = info.map_extra;
4883
4884 return 0;
4885
4886 err_close_new_fd:
4887 close(new_fd);
4888 err_free_new_name:
4889 free(new_name);
4890 return libbpf_err(err);
4891 }
4892
4893 __u32 bpf_map__max_entries(const struct bpf_map *map)
4894 {
4895 return map->def.max_entries;
4896 }
4897
4898 struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4899 {
4900 if (!bpf_map_type__is_map_in_map(map->def.type))
4901 return errno = EINVAL, NULL;
4902
4903 return map->inner_map;
4904 }
4905
4906 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4907 {
4908 if (map->obj->loaded)
4909 return libbpf_err(-EBUSY);
4910
4911 map->def.max_entries = max_entries;
4912
4913 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
4914 if (map_is_ringbuf(map))
4915 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
4916
4917 return 0;
4918 }
4919
4920 static int
4921 bpf_object__probe_loading(struct bpf_object *obj)
4922 {
4923 char *cp, errmsg[STRERR_BUFSIZE];
4924 struct bpf_insn insns[] = {
4925 BPF_MOV64_IMM(BPF_REG_0, 0),
4926 BPF_EXIT_INSN(),
4927 };
4928 int ret, insn_cnt = ARRAY_SIZE(insns);
4929
4930 if (obj->gen_loader)
4931 return 0;
4932
4933 ret = bump_rlimit_memlock();
4934 if (ret)
4935 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4936
4937 /* make sure basic loading works */
4938 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
4939 if (ret < 0)
4940 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
4941 if (ret < 0) {
4942 ret = errno;
4943 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
		pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
			"program. Make sure your kernel supports BPF "
			"(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
			"set to a big enough value.\n", __func__, cp, ret);
4948 return -ret;
4949 }
4950 close(ret);
4951
4952 return 0;
4953 }
4954
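/* Common tail for feature probes: a non-negative FD means the probed
 * feature worked, so close it and collapse the result to 0/1.
 */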
4955 static int probe_fd(int fd)
4956 {
4957 if (fd >= 0)
4958 close(fd);
4959 return fd >= 0;
4960 }
4961
4962 static int probe_kern_prog_name(void)
4963 {
4964 const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
4965 struct bpf_insn insns[] = {
4966 BPF_MOV64_IMM(BPF_REG_0, 0),
4967 BPF_EXIT_INSN(),
4968 };
4969 union bpf_attr attr;
4970 int ret;
4971
4972 memset(&attr, 0, attr_sz);
4973 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4974 attr.license = ptr_to_u64("GPL");
4975 attr.insns = ptr_to_u64(insns);
4976 attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
4977 libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
4978
4979 /* make sure loading with name works */
4980 ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
4981 return probe_fd(ret);
4982 }
4983
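/* Probe for global data support by creating a small ARRAY map and
 * loading a program that writes directly into its value via a
 * BPF_LD_MAP_VALUE (BPF_PSEUDO_MAP_VALUE) instruction; kernels without
 * global data support reject such direct value access.
 */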
4984 static int probe_kern_global_data(void)
4985 {
4986 char *cp, errmsg[STRERR_BUFSIZE];
4987 struct bpf_insn insns[] = {
4988 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
4989 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
4990 BPF_MOV64_IMM(BPF_REG_0, 0),
4991 BPF_EXIT_INSN(),
4992 };
4993 int ret, map, insn_cnt = ARRAY_SIZE(insns);
4994
4995 map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
4996 if (map < 0) {
4997 ret = -errno;
4998 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4999 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
5000 __func__, cp, -ret);
5001 return ret;
5002 }
5003
5004 insns[0].imm = map;
5005
5006 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
5007 close(map);
5008 return probe_fd(ret);
5009 }
5010
5011 static int probe_kern_btf(void)
5012 {
5013 static const char strs[] = "\0int";
5014 __u32 types[] = {
5015 /* int */
5016 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
5017 };
5018
5019 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
5020 strs, sizeof(strs)));
5021 }
5022
5023 static int probe_kern_btf_func(void)
5024 {
5025 static const char strs[] = "\0int\0x\0a";
5026 /* void x(int a) {} */
5027 __u32 types[] = {
5028 /* int */
5029 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
5030 /* FUNC_PROTO */ /* [2] */
5031 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
5032 BTF_PARAM_ENC(7, 1),
5033 /* FUNC x */ /* [3] */
5034 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
5035 };
5036
5037 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
5038 strs, sizeof(strs)));
5039 }
5040
5041 static int probe_kern_btf_func_global(void)
5042 {
5043 static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
5045 __u32 types[] = {
5046 /* int */
5047 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
5048 /* FUNC_PROTO */ /* [2] */
5049 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
5050 BTF_PARAM_ENC(7, 1),
5051 /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
5052 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
5053 };
5054
5055 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
5056 strs, sizeof(strs)));
5057 }
5058
5059 static int probe_kern_btf_datasec(void)
5060 {
5061 static const char strs[] = "\0x\0.data";
	/* static int x; */
5063 __u32 types[] = {
5064 /* int */
5065 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
5066 /* VAR x */ /* [2] */
5067 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
5068 BTF_VAR_STATIC,
5069 /* DATASEC val */ /* [3] */
5070 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
5071 BTF_VAR_SECINFO_ENC(2, 0, 4),
5072 };
5073
5074 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
5075 strs, sizeof(strs)));
5076 }
5077
5078 static int probe_kern_btf_float(void)
5079 {
5080 static const char strs[] = "\0float";
5081 __u32 types[] = {
5082 /* float */
5083 BTF_TYPE_FLOAT_ENC(1, 4),
5084 };
5085
5086 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
5087 strs, sizeof(strs)));
5088 }
5089
5090 static int probe_kern_btf_decl_tag(void)
5091 {
5092 static const char strs[] = "\0tag";
5093 __u32 types[] = {
5094 /* int */
5095 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
5096 /* VAR x */ /* [2] */
5097 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
5098 BTF_VAR_STATIC,
5099 /* attr */
5100 BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
5101 };
5102
5103 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
5104 strs, sizeof(strs)));
5105 }
5106
5107 static int probe_kern_btf_type_tag(void)
5108 {
5109 static const char strs[] = "\0tag";
5110 __u32 types[] = {
5111 /* int */
5112 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
5113 /* attr */
5114 BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
5115 /* ptr */
5116 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
5117 };
5118
5119 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
5120 strs, sizeof(strs)));
5121 }
5122
5123 static int probe_kern_array_mmap(void)
5124 {
5125 LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
5126 int fd;
5127
5128 fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
5129 return probe_fd(fd);
5130 }
5131
5132 static int probe_kern_exp_attach_type(void)
5133 {
5134 LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
5135 struct bpf_insn insns[] = {
5136 BPF_MOV64_IMM(BPF_REG_0, 0),
5137 BPF_EXIT_INSN(),
5138 };
5139 int fd, insn_cnt = ARRAY_SIZE(insns);
5140
	/* use any valid combination of program type and (optional)
	 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS,
	 * which is 0) to see if kernel supports expected_attach_type field
	 * for BPF_PROG_LOAD command
	 */
5146 fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
5147 return probe_fd(fd);
5148 }
5149
5150 static int probe_kern_probe_read_kernel(void)
5151 {
5152 struct bpf_insn insns[] = {
5153 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
5154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
5155 BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
5156 BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
5157 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
5158 BPF_EXIT_INSN(),
5159 };
5160 int fd, insn_cnt = ARRAY_SIZE(insns);
5161
5162 fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
5163 return probe_fd(fd);
5164 }
5165
5166 static int probe_prog_bind_map(void)
5167 {
5168 char *cp, errmsg[STRERR_BUFSIZE];
5169 struct bpf_insn insns[] = {
5170 BPF_MOV64_IMM(BPF_REG_0, 0),
5171 BPF_EXIT_INSN(),
5172 };
5173 int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
5174
5175 map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL);
5176 if (map < 0) {
5177 ret = -errno;
5178 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
5179 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
5180 __func__, cp, -ret);
5181 return ret;
5182 }
5183
5184 prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
5185 if (prog < 0) {
5186 close(map);
5187 return 0;
5188 }
5189
5190 ret = bpf_prog_bind_map(prog, map, NULL);
5191
5192 close(map);
5193 close(prog);
5194
5195 return ret >= 0;
5196 }
5197
5198 static int probe_module_btf(void)
5199 {
5200 static const char strs[] = "\0int";
5201 __u32 types[] = {
5202 /* int */
5203 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
5204 };
5205 struct bpf_btf_info info;
5206 __u32 len = sizeof(info);
5207 char name[16];
5208 int fd, err;
5209
5210 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
5211 if (fd < 0)
5212 return 0; /* BTF not supported at all */
5213
5214 memset(&info, 0, sizeof(info));
5215 info.name = ptr_to_u64(name);
5216 info.name_len = sizeof(name);
5217
5218 /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
5219 * kernel's module BTF support coincides with support for
5220 * name/name_len fields in struct bpf_btf_info.
5221 */
5222 err = bpf_btf_get_info_by_fd(fd, &info, &len);
5223 close(fd);
5224 return !err;
5225 }
5226
5227 static int probe_perf_link(void)
5228 {
5229 struct bpf_insn insns[] = {
5230 BPF_MOV64_IMM(BPF_REG_0, 0),
5231 BPF_EXIT_INSN(),
5232 };
5233 int prog_fd, link_fd, err;
5234
5235 prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
5236 insns, ARRAY_SIZE(insns), NULL);
5237 if (prog_fd < 0)
5238 return -errno;
5239
5240 /* use invalid perf_event FD to get EBADF, if link is supported;
5241 * otherwise EINVAL should be returned
5242 */
5243 link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
5244 err = -errno; /* close() can clobber errno */
5245
5246 if (link_fd >= 0)
5247 close(link_fd);
5248 close(prog_fd);
5249
5250 return link_fd < 0 && err == -EBADF;
5251 }
5252
5253 static int probe_uprobe_multi_link(void)
5254 {
5255 LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
5256 .expected_attach_type = BPF_TRACE_UPROBE_MULTI,
5257 );
5258 LIBBPF_OPTS(bpf_link_create_opts, link_opts);
5259 struct bpf_insn insns[] = {
5260 BPF_MOV64_IMM(BPF_REG_0, 0),
5261 BPF_EXIT_INSN(),
5262 };
5263 int prog_fd, link_fd, err;
5264 unsigned long offset = 0;
5265
5266 prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
5267 insns, ARRAY_SIZE(insns), &load_opts);
5268 if (prog_fd < 0)
5269 return -errno;
5270
5271 /* Creating uprobe in '/' binary should fail with -EBADF. */
5272 link_opts.uprobe_multi.path = "/";
5273 link_opts.uprobe_multi.offsets = &offset;
5274 link_opts.uprobe_multi.cnt = 1;
5275
5276 link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
5277 err = -errno; /* close() can clobber errno */
5278
5279 if (link_fd >= 0 || err != -EBADF) {
5280 close(link_fd);
5281 close(prog_fd);
5282 return 0;
5283 }
5284
	/* Initial multi-uprobe support in kernel didn't handle PID filtering
	 * correctly (it was doing thread filtering, not process filtering).
	 * So now we'll detect if PID filtering logic was fixed, and, if not,
	 * we'll pretend multi-uprobes are not supported.
	 * Multi-uprobes are used in USDT attachment logic, and we need to be
	 * conservative here, because multi-uprobe selection happens early at
	 * load time, while the use of PID filtering is known late at
	 * attachment time, at which point it's too late to undo multi-uprobe
	 * selection.
	 *
	 * Creating a uprobe with pid == -1 for the (invalid) '/' binary will
	 * fail early with -EINVAL on kernels with fixed PID filtering logic;
	 * otherwise -ESRCH would be returned if we passed a correct binary
	 * path (but we'll just get -EBADF, of course).
	 */
5300 link_opts.uprobe_multi.pid = -1; /* invalid PID */
5301 link_opts.uprobe_multi.path = "/"; /* invalid path */
5302 link_opts.uprobe_multi.offsets = &offset;
5303 link_opts.uprobe_multi.cnt = 1;
5304
5305 link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
5306 err = -errno; /* close() can clobber errno */
5307
5308 if (link_fd >= 0)
5309 close(link_fd);
5310 close(prog_fd);
5311
5312 return link_fd < 0 && err == -EINVAL;
5313 }
5314
5315 static int probe_kern_bpf_cookie(void)
5316 {
5317 struct bpf_insn insns[] = {
5318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
5319 BPF_EXIT_INSN(),
5320 };
5321 int ret, insn_cnt = ARRAY_SIZE(insns);
5322
5323 ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
5324 return probe_fd(ret);
5325 }
5326
5327 static int probe_kern_btf_enum64(void)
5328 {
5329 static const char strs[] = "\0enum64";
5330 __u32 types[] = {
5331 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
5332 };
5333
5334 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
5335 strs, sizeof(strs)));
5336 }
5337
5338 static int probe_kern_syscall_wrapper(void);
5339
5340 enum kern_feature_result {
5341 FEAT_UNKNOWN = 0,
5342 FEAT_SUPPORTED = 1,
5343 FEAT_MISSING = 2,
5344 };
5345
5346 typedef int (*feature_probe_fn)(void);
5347
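/* Feature probes run lazily on the first kernel_supports() lookup and
 * cache their verdict in res, so each detection syscall sequence happens
 * at most once per process.
 */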
5348 static struct kern_feature_desc {
5349 const char *desc;
5350 feature_probe_fn probe;
5351 enum kern_feature_result res;
5352 } feature_probes[__FEAT_CNT] = {
5353 [FEAT_PROG_NAME] = {
5354 "BPF program name", probe_kern_prog_name,
5355 },
5356 [FEAT_GLOBAL_DATA] = {
5357 "global variables", probe_kern_global_data,
5358 },
5359 [FEAT_BTF] = {
5360 "minimal BTF", probe_kern_btf,
5361 },
5362 [FEAT_BTF_FUNC] = {
5363 "BTF functions", probe_kern_btf_func,
5364 },
5365 [FEAT_BTF_GLOBAL_FUNC] = {
5366 "BTF global function", probe_kern_btf_func_global,
5367 },
5368 [FEAT_BTF_DATASEC] = {
5369 "BTF data section and variable", probe_kern_btf_datasec,
5370 },
5371 [FEAT_ARRAY_MMAP] = {
5372 "ARRAY map mmap()", probe_kern_array_mmap,
5373 },
5374 [FEAT_EXP_ATTACH_TYPE] = {
5375 "BPF_PROG_LOAD expected_attach_type attribute",
5376 probe_kern_exp_attach_type,
5377 },
5378 [FEAT_PROBE_READ_KERN] = {
5379 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
5380 },
5381 [FEAT_PROG_BIND_MAP] = {
5382 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
5383 },
5384 [FEAT_MODULE_BTF] = {
5385 "module BTF support", probe_module_btf,
5386 },
5387 [FEAT_BTF_FLOAT] = {
5388 "BTF_KIND_FLOAT support", probe_kern_btf_float,
5389 },
5390 [FEAT_PERF_LINK] = {
5391 "BPF perf link support", probe_perf_link,
5392 },
5393 [FEAT_BTF_DECL_TAG] = {
5394 "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
5395 },
5396 [FEAT_BTF_TYPE_TAG] = {
5397 "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
5398 },
5399 [FEAT_MEMCG_ACCOUNT] = {
5400 "memcg-based memory accounting", probe_memcg_account,
5401 },
5402 [FEAT_BPF_COOKIE] = {
5403 "BPF cookie support", probe_kern_bpf_cookie,
5404 },
5405 [FEAT_BTF_ENUM64] = {
5406 "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
5407 },
5408 [FEAT_SYSCALL_WRAPPER] = {
5409 "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
5410 },
5411 [FEAT_UPROBE_MULTI_LINK] = {
5412 "BPF multi-uprobe link support", probe_uprobe_multi_link,
5413 },
5414 };
5415
5416 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
5417 {
5418 struct kern_feature_desc *feat = &feature_probes[feat_id];
5419 int ret;
5420
5421 if (obj && obj->gen_loader)
		/* To generate the loader program, assume the latest kernel
		 * to avoid doing extra prog_load and map_create syscalls.
		 */
5425 return true;
5426
5427 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
5428 ret = feat->probe();
5429 if (ret > 0) {
5430 WRITE_ONCE(feat->res, FEAT_SUPPORTED);
5431 } else if (ret == 0) {
5432 WRITE_ONCE(feat->res, FEAT_MISSING);
5433 } else {
5434 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
5435 WRITE_ONCE(feat->res, FEAT_MISSING);
5436 }
5437 }
5438
5439 return READ_ONCE(feat->res) == FEAT_SUPPORTED;
5440 }
5441
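/* A pinned map can only be reused if its type, key/value sizes,
 * max_entries, flags, and map_extra all match the requested definition
 * exactly.
 */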
5442 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
5443 {
5444 struct bpf_map_info map_info;
5445 char msg[STRERR_BUFSIZE];
5446 __u32 map_info_len = sizeof(map_info);
5447 int err;
5448
5449 memset(&map_info, 0, map_info_len);
5450 err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
5451 if (err && errno == EINVAL)
5452 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
5453 if (err) {
5454 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
5455 libbpf_strerror_r(errno, msg, sizeof(msg)));
5456 return false;
5457 }
5458
5459 return (map_info.type == map->def.type &&
5460 map_info.key_size == map->def.key_size &&
5461 map_info.value_size == map->def.value_size &&
5462 map_info.max_entries == map->def.max_entries &&
5463 map_info.map_flags == map->def.map_flags &&
5464 map_info.map_extra == map->map_extra);
5465 }
5466
5467 static int
5468 bpf_object__reuse_map(struct bpf_map *map)
5469 {
5470 char *cp, errmsg[STRERR_BUFSIZE];
5471 int err, pin_fd;
5472
5473 pin_fd = bpf_obj_get(map->pin_path);
5474 if (pin_fd < 0) {
5475 err = -errno;
5476 if (err == -ENOENT) {
5477 pr_debug("found no pinned map to reuse at '%s'\n",
5478 map->pin_path);
5479 return 0;
5480 }
5481
5482 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
5483 pr_warn("couldn't retrieve pinned map '%s': %s\n",
5484 map->pin_path, cp);
5485 return err;
5486 }
5487
5488 if (!map_is_reuse_compat(map, pin_fd)) {
5489 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
5490 map->pin_path);
5491 close(pin_fd);
5492 return -EINVAL;
5493 }
5494
5495 err = bpf_map__reuse_fd(map, pin_fd);
5496 close(pin_fd);
5497 if (err)
5498 return err;
5499
5500 map->pinned = true;
5501 pr_debug("reused pinned map at '%s'\n", map->pin_path);
5502
5503 return 0;
5504 }
5505
5506 static int
5507 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
5508 {
5509 enum libbpf_map_type map_type = map->libbpf_type;
5510 char *cp, errmsg[STRERR_BUFSIZE];
5511 int err, zero = 0;
5512
5513 if (obj->gen_loader) {
5514 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
5515 map->mmaped, map->def.value_size);
5516 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
5517 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
5518 return 0;
5519 }
5520 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
5521 if (err) {
5522 err = -errno;
5523 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5524 pr_warn("Error setting initial map(%s) contents: %s\n",
5525 map->name, cp);
5526 return err;
5527 }
5528
5529 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
5530 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
5531 err = bpf_map_freeze(map->fd);
5532 if (err) {
5533 err = -errno;
5534 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5535 pr_warn("Error freezing map(%s) as read-only: %s\n",
5536 map->name, cp);
5537 return err;
5538 }
5539 }
5540 return 0;
5541 }
5542
5543 static void bpf_map__destroy(struct bpf_map *map);
5544
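/* Create a single map in the kernel (or record the creation in the loader
 * program when gen_loader is set). If creation fails with BTF key/value info
 * attached, retry once without BTF, as older kernels may reject per-map BTF.
 */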
5545 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
5546 {
5547 LIBBPF_OPTS(bpf_map_create_opts, create_attr);
5548 struct bpf_map_def *def = &map->def;
5549 const char *map_name = NULL;
5550 int err = 0;
5551
5552 if (kernel_supports(obj, FEAT_PROG_NAME))
5553 map_name = map->name;
5554 create_attr.map_ifindex = map->map_ifindex;
5555 create_attr.map_flags = def->map_flags;
5556 create_attr.numa_node = map->numa_node;
5557 create_attr.map_extra = map->map_extra;
5558
5559 if (bpf_map__is_struct_ops(map))
5560 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5561
5562 if (obj->btf && btf__fd(obj->btf) >= 0) {
5563 create_attr.btf_fd = btf__fd(obj->btf);
5564 create_attr.btf_key_type_id = map->btf_key_type_id;
5565 create_attr.btf_value_type_id = map->btf_value_type_id;
5566 }
5567
5568 if (bpf_map_type__is_map_in_map(def->type)) {
5569 if (map->inner_map) {
5570 err = bpf_object__create_map(obj, map->inner_map, true);
5571 if (err) {
5572 pr_warn("map '%s': failed to create inner map: %d\n",
5573 map->name, err);
5574 return err;
5575 }
5576 map->inner_map_fd = bpf_map__fd(map->inner_map);
5577 }
5578 if (map->inner_map_fd >= 0)
5579 create_attr.inner_map_fd = map->inner_map_fd;
5580 }
5581
5582 switch (def->type) {
5583 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5584 case BPF_MAP_TYPE_CGROUP_ARRAY:
5585 case BPF_MAP_TYPE_STACK_TRACE:
5586 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5587 case BPF_MAP_TYPE_HASH_OF_MAPS:
5588 case BPF_MAP_TYPE_DEVMAP:
5589 case BPF_MAP_TYPE_DEVMAP_HASH:
5590 case BPF_MAP_TYPE_CPUMAP:
5591 case BPF_MAP_TYPE_XSKMAP:
5592 case BPF_MAP_TYPE_SOCKMAP:
5593 case BPF_MAP_TYPE_SOCKHASH:
5594 case BPF_MAP_TYPE_QUEUE:
5595 case BPF_MAP_TYPE_STACK:
5596 create_attr.btf_fd = 0;
5597 create_attr.btf_key_type_id = 0;
5598 create_attr.btf_value_type_id = 0;
5599 map->btf_key_type_id = 0;
5600 map->btf_value_type_id = 0;
5601 break;
5602 default:
5603 break;
5604 }
5605
5606 if (obj->gen_loader) {
5607 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5608 def->key_size, def->value_size, def->max_entries,
5609 &create_attr, is_inner ? -1 : map - obj->maps);
		/* Pretend to have a valid FD to pass various "fd >= 0" checks.
		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
		 */
5613 map->fd = 0;
5614 } else {
5615 map->fd = bpf_map_create(def->type, map_name,
5616 def->key_size, def->value_size,
5617 def->max_entries, &create_attr);
5618 }
5619 if (map->fd < 0 && (create_attr.btf_key_type_id ||
5620 create_attr.btf_value_type_id)) {
5621 char *cp, errmsg[STRERR_BUFSIZE];
5622
5623 err = -errno;
5624 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("Error in bpf_map_create(%s): %s(%d). Retrying without BTF.\n",
5626 map->name, cp, err);
5627 create_attr.btf_fd = 0;
5628 create_attr.btf_key_type_id = 0;
5629 create_attr.btf_value_type_id = 0;
5630 map->btf_key_type_id = 0;
5631 map->btf_value_type_id = 0;
5632 map->fd = bpf_map_create(def->type, map_name,
5633 def->key_size, def->value_size,
5634 def->max_entries, &create_attr);
5635 }
5636
5637 err = map->fd < 0 ? -errno : 0;
5638
5639 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5640 if (obj->gen_loader)
5641 map->inner_map->fd = -1;
5642 bpf_map__destroy(map->inner_map);
5643 zfree(&map->inner_map);
5644 }
5645
5646 return err;
5647 }
5648
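/* Populate declaratively initialized slots of an outer map-in-map with FDs
 * of already-created inner maps; slot indices and targets were recorded from
 * .maps section relocations in bpf_object__collect_map_relos().
 */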
5649 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5650 {
5651 const struct bpf_map *targ_map;
5652 unsigned int i;
5653 int fd, err = 0;
5654
5655 for (i = 0; i < map->init_slots_sz; i++) {
5656 if (!map->init_slots[i])
5657 continue;
5658
5659 targ_map = map->init_slots[i];
5660 fd = bpf_map__fd(targ_map);
5661
5662 if (obj->gen_loader) {
5663 bpf_gen__populate_outer_map(obj->gen_loader,
5664 map - obj->maps, i,
5665 targ_map - obj->maps);
5666 } else {
5667 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5668 }
5669 if (err) {
5670 err = -errno;
5671 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5672 map->name, i, targ_map->name, fd, err);
5673 return err;
5674 }
5675 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5676 map->name, i, targ_map->name, fd);
5677 }
5678
5679 zfree(&map->init_slots);
5680 map->init_slots_sz = 0;
5681
5682 return 0;
5683 }
5684
5685 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5686 {
5687 const struct bpf_program *targ_prog;
5688 unsigned int i;
5689 int fd, err;
5690
5691 if (obj->gen_loader)
5692 return -ENOTSUP;
5693
5694 for (i = 0; i < map->init_slots_sz; i++) {
5695 if (!map->init_slots[i])
5696 continue;
5697
5698 targ_prog = map->init_slots[i];
5699 fd = bpf_program__fd(targ_prog);
5700
5701 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5702 if (err) {
5703 err = -errno;
5704 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5705 map->name, i, targ_prog->name, fd, err);
5706 return err;
5707 }
5708 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5709 map->name, i, targ_prog->name, fd);
5710 }
5711
5712 zfree(&map->init_slots);
5713 map->init_slots_sz = 0;
5714
5715 return 0;
5716 }
5717
5718 static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5719 {
5720 struct bpf_map *map;
5721 int i, err;
5722
5723 for (i = 0; i < obj->nr_maps; i++) {
5724 map = &obj->maps[i];
5725
5726 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5727 continue;
5728
5729 err = init_prog_array_slots(obj, map);
5730 if (err < 0) {
5731 zclose(map->fd);
5732 return err;
5733 }
5734 }
5735 return 0;
5736 }
5737
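/* BPF_MAP_TYPE_PERF_EVENT_ARRAY maps declared with max_entries == 0 are
 * sized to the number of possible CPUs, i.e., one ring buffer slot per CPU.
 */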
5738 static int map_set_def_max_entries(struct bpf_map *map)
5739 {
5740 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5741 int nr_cpus;
5742
5743 nr_cpus = libbpf_num_possible_cpus();
5744 if (nr_cpus < 0) {
5745 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5746 map->name, nr_cpus);
5747 return nr_cpus;
5748 }
5749 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5750 map->def.max_entries = nr_cpus;
5751 }
5752
5753 return 0;
5754 }
5755
5756 static int
5757 bpf_object__create_maps(struct bpf_object *obj)
5758 {
5759 struct bpf_map *map;
5760 char *cp, errmsg[STRERR_BUFSIZE];
5761 unsigned int i, j;
5762 int err;
5763 bool retried;
5764
5765 for (i = 0; i < obj->nr_maps; i++) {
5766 map = &obj->maps[i];
5767
5768 /* To support old kernels, we skip creating global data maps
5769 * (.rodata, .data, .kconfig, etc); later on, during program
5770 * loading, if we detect that at least one of the to-be-loaded
5771 * programs is referencing any global data map, we'll error
5772 * out with program name and relocation index logged.
		 * This approach accommodates Clang emitting unnecessary
		 * .rodata.str1.1 sections for string literals, and also
		 * allows CO-RE applications to use global variables in
		 * some BPF programs but not others.
5777 * If those global variable-using programs are not loaded at
5778 * runtime due to bpf_program__set_autoload(prog, false),
5779 * bpf_object loading will succeed just fine even on old
5780 * kernels.
5781 */
5782 if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5783 map->autocreate = false;
5784
5785 if (!map->autocreate) {
5786 pr_debug("map '%s': skipped auto-creating...\n", map->name);
5787 continue;
5788 }
5789
5790 err = map_set_def_max_entries(map);
5791 if (err)
5792 goto err_out;
5793
5794 retried = false;
5795 retry:
5796 if (map->pin_path) {
5797 err = bpf_object__reuse_map(map);
5798 if (err) {
5799 pr_warn("map '%s': error reusing pinned map\n",
5800 map->name);
5801 goto err_out;
5802 }
5803 if (retried && map->fd < 0) {
5804 pr_warn("map '%s': cannot find pinned map\n",
5805 map->name);
5806 err = -ENOENT;
5807 goto err_out;
5808 }
5809 }
5810
5811 if (map->fd >= 0) {
5812 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5813 map->name, map->fd);
5814 } else {
5815 err = bpf_object__create_map(obj, map, false);
5816 if (err)
5817 goto err_out;
5818
5819 pr_debug("map '%s': created successfully, fd=%d\n",
5820 map->name, map->fd);
5821
5822 if (bpf_map__is_internal(map)) {
5823 err = bpf_object__populate_internal_map(obj, map);
5824 if (err < 0) {
5825 zclose(map->fd);
5826 goto err_out;
5827 }
5828 }
5829
5830 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5831 err = init_map_in_map_slots(obj, map);
5832 if (err < 0) {
5833 zclose(map->fd);
5834 goto err_out;
5835 }
5836 }
5837 }
5838
5839 if (map->pin_path && !map->pinned) {
5840 err = bpf_map__pin(map, NULL);
5841 if (err) {
5842 zclose(map->fd);
5843 if (!retried && err == -EEXIST) {
5844 retried = true;
5845 goto retry;
5846 }
5847 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5848 map->name, map->pin_path, err);
5849 goto err_out;
5850 }
5851 }
5852 }
5853
5854 return 0;
5855
5856 err_out:
5857 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5858 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5859 pr_perm_msg(err);
5860 for (j = 0; j < i; j++)
5861 zclose(obj->maps[j].fd);
5862 return err;
5863 }
5864
5865 static bool bpf_core_is_flavor_sep(const char *s)
5866 {
5867 /* check X___Y name pattern, where X and Y are not underscores */
5868 return s[0] != '_' && /* X */
5869 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
5870 s[4] != '_'; /* Y */
5871 }
5872
5873 /* Given 'some_struct_name___with_flavor' return the length of a name prefix
5874 * before last triple underscore. Struct name part after last triple
5875 * underscore is ignored by BPF CO-RE relocation during relocation matching.
5876 */
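/* E.g., for "task_struct___flavor" this returns strlen("task_struct"). */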
5877 size_t bpf_core_essential_name_len(const char *name)
5878 {
5879 size_t n = strlen(name);
5880 int i;
5881
5882 for (i = n - 5; i >= 0; i--) {
5883 if (bpf_core_is_flavor_sep(name + i))
5884 return i + 1;
5885 }
5886 return n;
5887 }
5888
5889 void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5890 {
5891 if (!cands)
5892 return;
5893
5894 free(cands->cands);
5895 free(cands);
5896 }
5897
5898 int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5899 size_t local_essent_len,
5900 const struct btf *targ_btf,
5901 const char *targ_btf_name,
5902 int targ_start_id,
5903 struct bpf_core_cand_list *cands)
5904 {
5905 struct bpf_core_cand *new_cands, *cand;
5906 const struct btf_type *t, *local_t;
5907 const char *targ_name, *local_name;
5908 size_t targ_essent_len;
5909 int n, i;
5910
5911 local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5912 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5913
5914 n = btf__type_cnt(targ_btf);
5915 for (i = targ_start_id; i < n; i++) {
5916 t = btf__type_by_id(targ_btf, i);
5917 if (!btf_kind_core_compat(t, local_t))
5918 continue;
5919
5920 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5921 if (str_is_empty(targ_name))
5922 continue;
5923
5924 targ_essent_len = bpf_core_essential_name_len(targ_name);
5925 if (targ_essent_len != local_essent_len)
5926 continue;
5927
5928 if (strncmp(local_name, targ_name, local_essent_len) != 0)
5929 continue;
5930
5931 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5932 local_cand->id, btf_kind_str(local_t),
5933 local_name, i, btf_kind_str(t), targ_name,
5934 targ_btf_name);
5935 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5936 sizeof(*cands->cands));
5937 if (!new_cands)
5938 return -ENOMEM;
5939
5940 cand = &new_cands[cands->len];
5941 cand->btf = targ_btf;
5942 cand->id = i;
5943
5944 cands->cands = new_cands;
5945 cands->len++;
5946 }
5947 return 0;
5948 }
5949
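/* Lazily enumerate all loaded kernel module BTFs through BTF object ID
 * iteration and cache them on the object. This runs at most once and is
 * skipped on kernels without module BTF support or when privileges are
 * insufficient to iterate BTF objects.
 */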
5950 static int load_module_btfs(struct bpf_object *obj)
5951 {
5952 struct bpf_btf_info info;
5953 struct module_btf *mod_btf;
5954 struct btf *btf;
5955 char name[64];
5956 __u32 id = 0, len;
5957 int err, fd;
5958
5959 if (obj->btf_modules_loaded)
5960 return 0;
5961
5962 if (obj->gen_loader)
5963 return 0;
5964
5965 /* don't do this again, even if we find no module BTFs */
5966 obj->btf_modules_loaded = true;
5967
5968 /* kernel too old to support module BTFs */
5969 if (!kernel_supports(obj, FEAT_MODULE_BTF))
5970 return 0;
5971
5972 while (true) {
5973 err = bpf_btf_get_next_id(id, &id);
5974 if (err && errno == ENOENT)
5975 return 0;
5976 if (err && errno == EPERM) {
5977 pr_debug("skipping module BTFs loading, missing privileges\n");
5978 return 0;
5979 }
5980 if (err) {
5981 err = -errno;
5982 pr_warn("failed to iterate BTF objects: %d\n", err);
5983 return err;
5984 }
5985
5986 fd = bpf_btf_get_fd_by_id(id);
5987 if (fd < 0) {
5988 if (errno == ENOENT)
5989 continue; /* expected race: BTF was unloaded */
5990 err = -errno;
5991 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5992 return err;
5993 }
5994
5995 len = sizeof(info);
5996 memset(&info, 0, sizeof(info));
5997 info.name = ptr_to_u64(name);
5998 info.name_len = sizeof(name);
5999
6000 err = bpf_btf_get_info_by_fd(fd, &info, &len);
6001 if (err) {
6002 err = -errno;
6003 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
6004 goto err_out;
6005 }
6006
6007 /* ignore non-module BTFs */
6008 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
6009 close(fd);
6010 continue;
6011 }
6012
6013 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
6014 err = libbpf_get_error(btf);
6015 if (err) {
6016 pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
6017 name, id, err);
6018 goto err_out;
6019 }
6020
6021 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
6022 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
6023 if (err)
6024 goto err_out;
6025
6026 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
6027
6028 mod_btf->btf = btf;
6029 mod_btf->id = id;
6030 mod_btf->fd = fd;
6031 mod_btf->name = strdup(name);
6032 if (!mod_btf->name) {
6033 err = -ENOMEM;
6034 goto err_out;
6035 }
6036 continue;
6037
6038 err_out:
6039 close(fd);
6040 return err;
6041 }
6042
6043 return 0;
6044 }
6045
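/* Find all target BTF types whose essential (flavor-less) name matches the
 * local type's. vmlinux BTF is searched first; module BTFs are consulted
 * only if vmlinux yields no candidates and no custom BTF override is set.
 */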
6046 static struct bpf_core_cand_list *
6047 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
6048 {
6049 struct bpf_core_cand local_cand = {};
6050 struct bpf_core_cand_list *cands;
6051 const struct btf *main_btf;
6052 const struct btf_type *local_t;
6053 const char *local_name;
6054 size_t local_essent_len;
6055 int err, i;
6056
6057 local_cand.btf = local_btf;
6058 local_cand.id = local_type_id;
6059 local_t = btf__type_by_id(local_btf, local_type_id);
6060 if (!local_t)
6061 return ERR_PTR(-EINVAL);
6062
6063 local_name = btf__name_by_offset(local_btf, local_t->name_off);
6064 if (str_is_empty(local_name))
6065 return ERR_PTR(-EINVAL);
6066 local_essent_len = bpf_core_essential_name_len(local_name);
6067
6068 cands = calloc(1, sizeof(*cands));
6069 if (!cands)
6070 return ERR_PTR(-ENOMEM);
6071
6072 /* Attempt to find target candidates in vmlinux BTF first */
6073 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
6074 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
6075 if (err)
6076 goto err_out;
6077
	/* if vmlinux BTF has any candidate, don't go for module BTFs */
6079 if (cands->len)
6080 return cands;
6081
6082 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
6083 if (obj->btf_vmlinux_override)
6084 return cands;
6085
6086 /* now look through module BTFs, trying to still find candidates */
6087 err = load_module_btfs(obj);
6088 if (err)
6089 goto err_out;
6090
6091 for (i = 0; i < obj->btf_module_cnt; i++) {
6092 err = bpf_core_add_cands(&local_cand, local_essent_len,
6093 obj->btf_modules[i].btf,
6094 obj->btf_modules[i].name,
6095 btf__type_cnt(obj->btf_vmlinux),
6096 cands);
6097 if (err)
6098 goto err_out;
6099 }
6100
6101 return cands;
6102 err_out:
6103 bpf_core_free_cands(cands);
6104 return ERR_PTR(err);
6105 }
6106
6107 /* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
6109 * field-based relocations. This function assumes that root types were already
6110 * checked for name match. Beyond that initial root-level name check, names
6111 * are completely ignored. Compatibility rules are as follows:
6112 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
6113 * kind should match for local and target types (i.e., STRUCT is not
6114 * compatible with UNION);
6115 * - for ENUMs, the size is ignored;
6116 * - for INT, size and signedness are ignored;
6117 * - for ARRAY, dimensionality is ignored, element types are checked for
6118 * compatibility recursively;
6119 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
6121 * - FUNC_PROTOs are compatible if they have compatible signature: same
6122 * number of input args and compatible return and argument types.
6123 * These rules are not set in stone and probably will be adjusted as we get
6124 * more experience with using BPF CO-RE relocations.
6125 */
6126 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
6127 const struct btf *targ_btf, __u32 targ_id)
6128 {
6129 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
6130 }
6131
6132 int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
6133 const struct btf *targ_btf, __u32 targ_id)
6134 {
6135 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
6136 }
6137
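/* cand_cache is keyed by the local BTF type ID, so an identity hash function
 * and plain key equality are sufficient
 */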
6138 static size_t bpf_core_hash_fn(const long key, void *ctx)
6139 {
6140 return key;
6141 }
6142
6143 static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
6144 {
6145 return k1 == k2;
6146 }
6147
6148 static int record_relo_core(struct bpf_program *prog,
6149 const struct bpf_core_relo *core_relo, int insn_idx)
6150 {
6151 struct reloc_desc *relos, *relo;
6152
6153 relos = libbpf_reallocarray(prog->reloc_desc,
6154 prog->nr_reloc + 1, sizeof(*relos));
6155 if (!relos)
6156 return -ENOMEM;
6157 relo = &relos[prog->nr_reloc];
6158 relo->type = RELO_CORE;
6159 relo->insn_idx = insn_idx;
6160 relo->core_relo = core_relo;
6161 prog->reloc_desc = relos;
6162 prog->nr_reloc++;
6163 return 0;
6164 }
6165
6166 static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
6167 {
6168 struct reloc_desc *relo;
6169 int i;
6170
6171 for (i = 0; i < prog->nr_reloc; i++) {
6172 relo = &prog->reloc_desc[i];
6173 if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
6174 continue;
6175
6176 return relo->core_relo;
6177 }
6178
6179 return NULL;
6180 }
6181
6182 static int bpf_core_resolve_relo(struct bpf_program *prog,
6183 const struct bpf_core_relo *relo,
6184 int relo_idx,
6185 const struct btf *local_btf,
6186 struct hashmap *cand_cache,
6187 struct bpf_core_relo_res *targ_res)
6188 {
6189 struct bpf_core_spec specs_scratch[3] = {};
6190 struct bpf_core_cand_list *cands = NULL;
6191 const char *prog_name = prog->name;
6192 const struct btf_type *local_type;
6193 const char *local_name;
6194 __u32 local_id = relo->type_id;
6195 int err;
6196
6197 local_type = btf__type_by_id(local_btf, local_id);
6198 if (!local_type)
6199 return -EINVAL;
6200
6201 local_name = btf__name_by_offset(local_btf, local_type->name_off);
6202 if (!local_name)
6203 return -EINVAL;
6204
6205 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
6206 !hashmap__find(cand_cache, local_id, &cands)) {
6207 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
6208 if (IS_ERR(cands)) {
6209 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
6210 prog_name, relo_idx, local_id, btf_kind_str(local_type),
6211 local_name, PTR_ERR(cands));
6212 return PTR_ERR(cands);
6213 }
6214 err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
6215 if (err) {
6216 bpf_core_free_cands(cands);
6217 return err;
6218 }
6219 }
6220
6221 return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
6222 targ_res);
6223 }
6224
6225 static int
6226 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
6227 {
6228 const struct btf_ext_info_sec *sec;
6229 struct bpf_core_relo_res targ_res;
6230 const struct bpf_core_relo *rec;
6231 const struct btf_ext_info *seg;
6232 struct hashmap_entry *entry;
6233 struct hashmap *cand_cache = NULL;
6234 struct bpf_program *prog;
6235 struct bpf_insn *insn;
6236 const char *sec_name;
6237 int i, err = 0, insn_idx, sec_idx, sec_num;
6238
6239 if (obj->btf_ext->core_relo_info.len == 0)
6240 return 0;
6241
6242 if (targ_btf_path) {
6243 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6244 err = libbpf_get_error(obj->btf_vmlinux_override);
6245 if (err) {
6246 pr_warn("failed to parse target BTF: %d\n", err);
6247 return err;
6248 }
6249 }
6250
6251 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6252 if (IS_ERR(cand_cache)) {
6253 err = PTR_ERR(cand_cache);
6254 goto out;
6255 }
6256
6257 seg = &obj->btf_ext->core_relo_info;
6258 sec_num = 0;
6259 for_each_btf_ext_sec(seg, sec) {
6260 sec_idx = seg->sec_idxs[sec_num];
6261 sec_num++;
6262
6263 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6264 if (str_is_empty(sec_name)) {
6265 err = -EINVAL;
6266 goto out;
6267 }
6268
6269 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
6270
6271 for_each_btf_ext_rec(seg, sec, i, rec) {
			if (rec->insn_off % BPF_INSN_SZ) {
				err = -EINVAL;
				goto out;
			}
6274 insn_idx = rec->insn_off / BPF_INSN_SZ;
6275 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6276 if (!prog) {
6277 /* When __weak subprog is "overridden" by another instance
6278 * of the subprog from a different object file, linker still
6279 * appends all the .BTF.ext info that used to belong to that
6280 * eliminated subprogram.
6281 * This is similar to what x86-64 linker does for relocations.
				 * So just ignore such relocations, the same way we
				 * ignore subprog instructions when discovering subprograms.
6284 */
6285 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
6286 sec_name, i, insn_idx);
6287 continue;
6288 }
6289 /* no need to apply CO-RE relocation if the program is
6290 * not going to be loaded
6291 */
6292 if (!prog->autoload)
6293 continue;
6294
6295 /* adjust insn_idx from section frame of reference to the local
6296 * program's frame of reference; (sub-)program code is not yet
6297 * relocated, so it's enough to just subtract in-section offset
6298 */
6299 insn_idx = insn_idx - prog->sec_insn_off;
			if (insn_idx >= prog->insns_cnt) {
				err = -EINVAL;
				goto out;
			}
6302 insn = &prog->insns[insn_idx];
6303
6304 err = record_relo_core(prog, rec, insn_idx);
6305 if (err) {
6306 pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
6307 prog->name, i, err);
6308 goto out;
6309 }
6310
6311 if (prog->obj->gen_loader)
6312 continue;
6313
6314 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
6315 if (err) {
6316 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
6317 prog->name, i, err);
6318 goto out;
6319 }
6320
6321 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
6322 if (err) {
6323 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
6324 prog->name, i, insn_idx, err);
6325 goto out;
6326 }
6327 }
6328 }
6329
6330 out:
6331 /* obj->btf_vmlinux and module BTFs are freed after object load */
6332 btf__free(obj->btf_vmlinux_override);
6333 obj->btf_vmlinux_override = NULL;
6334
6335 if (!IS_ERR_OR_NULL(cand_cache)) {
6336 hashmap__for_each_entry(cand_cache, entry, i) {
6337 bpf_core_free_cands(entry->pvalue);
6338 }
6339 hashmap__free(cand_cache);
6340 }
6341 return err;
6342 }
6343
6344 /* base map load ldimm64 special constant, used also for log fixup logic */
6345 #define POISON_LDIMM64_MAP_BASE 2001000000
6346 #define POISON_LDIMM64_MAP_PFX "200100"
6347
6348 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
6349 int insn_idx, struct bpf_insn *insn,
6350 int map_idx, const struct bpf_map *map)
6351 {
6352 int i;
6353
6354 pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
6355 prog->name, relo_idx, insn_idx, map_idx, map->name);
6356
6357 /* we turn single ldimm64 into two identical invalid calls */
6358 for (i = 0; i < 2; i++) {
6359 insn->code = BPF_JMP | BPF_CALL;
6360 insn->dst_reg = 0;
6361 insn->src_reg = 0;
6362 insn->off = 0;
		/* if this instruction is reachable (not dead code),
6364 * verifier will complain with something like:
6365 * invalid func unknown#2001000123
6366 * where lower 123 is map index into obj->maps[] array
6367 */
6368 insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
6369
6370 insn++;
6371 }
6372 }
6373
6374 /* unresolved kfunc call special constant, used also for log fixup logic */
6375 #define POISON_CALL_KFUNC_BASE 2002000000
6376 #define POISON_CALL_KFUNC_PFX "2002"
6377
6378 static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
6379 int insn_idx, struct bpf_insn *insn,
6380 int ext_idx, const struct extern_desc *ext)
6381 {
6382 pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
6383 prog->name, relo_idx, insn_idx, ext->name);
6384
6385 /* we turn kfunc call into invalid helper call with identifiable constant */
6386 insn->code = BPF_JMP | BPF_CALL;
6387 insn->dst_reg = 0;
6388 insn->src_reg = 0;
6389 insn->off = 0;
	/* if this instruction is reachable (not dead code),
	 * verifier will complain with something like:
	 * invalid func unknown#2002000123
6393 * where lower 123 is extern index into obj->externs[] array
6394 */
6395 insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
6396 }
6397
6398 /* Relocate data references within program code:
6399 * - map references;
6400 * - global variable references;
6401 * - extern references.
6402 */
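/* E.g., a RELO_LD64 against an auto-created map rewrites the ldimm64
 * instruction to src_reg = BPF_PSEUDO_MAP_FD with imm set to the map's FD;
 * with gen_loader, the map's index is recorded instead (BPF_PSEUDO_MAP_IDX)
 * and is resolved into an actual FD by the loader program at load time.
 */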
6403 static int
6404 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
6405 {
6406 int i;
6407
6408 for (i = 0; i < prog->nr_reloc; i++) {
6409 struct reloc_desc *relo = &prog->reloc_desc[i];
6410 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6411 const struct bpf_map *map;
6412 struct extern_desc *ext;
6413
6414 switch (relo->type) {
6415 case RELO_LD64:
6416 map = &obj->maps[relo->map_idx];
6417 if (obj->gen_loader) {
6418 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
6419 insn[0].imm = relo->map_idx;
6420 } else if (map->autocreate) {
6421 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6422 insn[0].imm = map->fd;
6423 } else {
6424 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6425 relo->map_idx, map);
6426 }
6427 break;
6428 case RELO_DATA:
6429 map = &obj->maps[relo->map_idx];
6430 insn[1].imm = insn[0].imm + relo->sym_off;
6431 if (obj->gen_loader) {
6432 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6433 insn[0].imm = relo->map_idx;
6434 } else if (map->autocreate) {
6435 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6436 insn[0].imm = map->fd;
6437 } else {
6438 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6439 relo->map_idx, map);
6440 }
6441 break;
6442 case RELO_EXTERN_LD64:
6443 ext = &obj->externs[relo->ext_idx];
6444 if (ext->type == EXT_KCFG) {
6445 if (obj->gen_loader) {
6446 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6447 insn[0].imm = obj->kconfig_map_idx;
6448 } else {
6449 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6450 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6451 }
6452 insn[1].imm = ext->kcfg.data_off;
6453 } else /* EXT_KSYM */ {
6454 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
6455 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
6456 insn[0].imm = ext->ksym.kernel_btf_id;
6457 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
6458 } else { /* typeless ksyms or unresolved typed ksyms */
6459 insn[0].imm = (__u32)ext->ksym.addr;
6460 insn[1].imm = ext->ksym.addr >> 32;
6461 }
6462 }
6463 break;
6464 case RELO_EXTERN_CALL:
6465 ext = &obj->externs[relo->ext_idx];
6466 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
6467 if (ext->is_set) {
6468 insn[0].imm = ext->ksym.kernel_btf_id;
6469 insn[0].off = ext->ksym.btf_fd_idx;
6470 } else { /* unresolved weak kfunc call */
6471 poison_kfunc_call(prog, i, relo->insn_idx, insn,
6472 relo->ext_idx, ext);
6473 }
6474 break;
6475 case RELO_SUBPROG_ADDR:
6476 if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
6477 pr_warn("prog '%s': relo #%d: bad insn\n",
6478 prog->name, i);
6479 return -EINVAL;
6480 }
6481 /* handled already */
6482 break;
6483 case RELO_CALL:
6484 /* handled already */
6485 break;
6486 case RELO_CORE:
6487 /* will be handled by bpf_program_record_relos() */
6488 break;
6489 default:
6490 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6491 prog->name, i, relo->type);
6492 return -EINVAL;
6493 }
6494 }
6495
6496 return 0;
6497 }
6498
6499 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6500 const struct bpf_program *prog,
6501 const struct btf_ext_info *ext_info,
6502 void **prog_info, __u32 *prog_rec_cnt,
6503 __u32 *prog_rec_sz)
6504 {
6505 void *copy_start = NULL, *copy_end = NULL;
6506 void *rec, *rec_end, *new_prog_info;
6507 const struct btf_ext_info_sec *sec;
6508 size_t old_sz, new_sz;
6509 int i, sec_num, sec_idx, off_adj;
6510
6511 sec_num = 0;
6512 for_each_btf_ext_sec(ext_info, sec) {
6513 sec_idx = ext_info->sec_idxs[sec_num];
6514 sec_num++;
6515 if (prog->sec_idx != sec_idx)
6516 continue;
6517
6518 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6519 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6520
6521 if (insn_off < prog->sec_insn_off)
6522 continue;
6523 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6524 break;
6525
6526 if (!copy_start)
6527 copy_start = rec;
6528 copy_end = rec + ext_info->rec_size;
6529 }
6530
6531 if (!copy_start)
6532 return -ENOENT;
6533
		/* append func/line info of a given (sub-)program to the main
		 * program's func/line info
		 */
6537 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6538 new_sz = old_sz + (copy_end - copy_start);
6539 new_prog_info = realloc(*prog_info, new_sz);
6540 if (!new_prog_info)
6541 return -ENOMEM;
6542 *prog_info = new_prog_info;
6543 *prog_rec_cnt = new_sz / ext_info->rec_size;
6544 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6545
6546 /* Kernel instruction offsets are in units of 8-byte
6547 * instructions, while .BTF.ext instruction offsets generated
6548 * by Clang are in units of bytes. So convert Clang offsets
6549 * into kernel offsets and adjust offset according to program
6550 * relocated position.
6551 */
6552 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6553 rec = new_prog_info + old_sz;
6554 rec_end = new_prog_info + new_sz;
6555 for (; rec < rec_end; rec += ext_info->rec_size) {
6556 __u32 *insn_off = rec;
6557
6558 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6559 }
6560 *prog_rec_sz = ext_info->rec_size;
6561 return 0;
6562 }
6563
6564 return -ENOENT;
6565 }
6566
6567 static int
6568 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6569 struct bpf_program *main_prog,
6570 const struct bpf_program *prog)
6571 {
6572 int err;
6573
	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
	 * support func/line info
	 */
6577 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6578 return 0;
6579
6580 /* only attempt func info relocation if main program's func_info
6581 * relocation was successful
6582 */
6583 if (main_prog != prog && !main_prog->func_info)
6584 goto line_info;
6585
6586 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6587 &main_prog->func_info,
6588 &main_prog->func_info_cnt,
6589 &main_prog->func_info_rec_size);
6590 if (err) {
6591 if (err != -ENOENT) {
6592 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6593 prog->name, err);
6594 return err;
6595 }
6596 if (main_prog->func_info) {
			/*
			 * Some info was already found, but the last
			 * .BTF.ext relocation failed, so we must error out.
			 */
6601 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6602 return err;
6603 }
		/* Failed to load the very first info; ignore the rest. */
6605 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6606 prog->name);
6607 }
6608
6609 line_info:
6610 /* don't relocate line info if main program's relocation failed */
6611 if (main_prog != prog && !main_prog->line_info)
6612 return 0;
6613
6614 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6615 &main_prog->line_info,
6616 &main_prog->line_info_cnt,
6617 &main_prog->line_info_rec_size);
6618 if (err) {
6619 if (err != -ENOENT) {
6620 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6621 prog->name, err);
6622 return err;
6623 }
6624 if (main_prog->line_info) {
			/*
			 * Some info was already found, but the last
			 * .BTF.ext relocation failed, so we must error out.
			 */
6629 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6630 return err;
6631 }
		/* Failed to load the very first info; ignore the rest. */
6633 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6634 prog->name);
6635 }
6636 return 0;
6637 }
6638
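/* reloc_desc arrays are kept sorted by insn_idx (see bpf_object__sort_relos()
 * and append_subprog_relos()), so binary search by insn_idx is valid
 */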
6639 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6640 {
6641 size_t insn_idx = *(const size_t *)key;
6642 const struct reloc_desc *relo = elem;
6643
6644 if (insn_idx == relo->insn_idx)
6645 return 0;
6646 return insn_idx < relo->insn_idx ? -1 : 1;
6647 }
6648
6649 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6650 {
6651 if (!prog->nr_reloc)
6652 return NULL;
6653 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6654 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6655 }
6656
6657 static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6658 {
6659 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6660 struct reloc_desc *relos;
6661 int i;
6662
6663 if (main_prog == subprog)
6664 return 0;
6665 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6666 /* if new count is zero, reallocarray can return a valid NULL result;
6667 * in this case the previous pointer will be freed, so we *have to*
6668 * reassign old pointer to the new value (even if it's NULL)
6669 */
6670 if (!relos && new_cnt)
6671 return -ENOMEM;
6672 if (subprog->nr_reloc)
6673 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6674 sizeof(*relos) * subprog->nr_reloc);
6675
6676 for (i = main_prog->nr_reloc; i < new_cnt; i++)
6677 relos[i].insn_idx += subprog->sub_insn_off;
6678 /* After insn_idx adjustment the 'relos' array is still sorted
6679 * by insn_idx and doesn't break bsearch.
6680 */
6681 main_prog->reloc_desc = relos;
6682 main_prog->nr_reloc = new_cnt;
6683 return 0;
6684 }
6685
6686 static int
6687 bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
6688 struct bpf_program *subprog)
6689 {
6690 struct bpf_insn *insns;
6691 size_t new_cnt;
6692 int err;
6693
6694 subprog->sub_insn_off = main_prog->insns_cnt;
6695
6696 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6697 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6698 if (!insns) {
6699 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6700 return -ENOMEM;
6701 }
6702 main_prog->insns = insns;
6703 main_prog->insns_cnt = new_cnt;
6704
6705 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6706 subprog->insns_cnt * sizeof(*insns));
6707
6708 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6709 main_prog->name, subprog->insns_cnt, subprog->name);
6710
6711 /* The subprog insns are now appended. Append its relos too. */
6712 err = append_subprog_relos(main_prog, subprog);
6713 if (err)
6714 return err;
6715 return 0;
6716 }
6717
6718 static int
6719 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6720 struct bpf_program *prog)
6721 {
6722 size_t sub_insn_idx, insn_idx;
6723 struct bpf_program *subprog;
6724 struct reloc_desc *relo;
6725 struct bpf_insn *insn;
6726 int err;
6727
6728 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6729 if (err)
6730 return err;
6731
6732 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6733 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6734 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6735 continue;
6736
6737 relo = find_prog_insn_relo(prog, insn_idx);
6738 if (relo && relo->type == RELO_EXTERN_CALL)
6739 /* kfunc relocations will be handled later
6740 * in bpf_object__relocate_data()
6741 */
6742 continue;
6743 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6744 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6745 prog->name, insn_idx, relo->type);
6746 return -LIBBPF_ERRNO__RELOC;
6747 }
6748 if (relo) {
6749 /* sub-program instruction index is a combination of
6750 * an offset of a symbol pointed to by relocation and
6751 * call instruction's imm field; for global functions,
6752 * call always has imm = -1, but for static functions
6753 * relocation is against STT_SECTION and insn->imm
6754 * points to a start of a static function
6755 *
6756 * for subprog addr relocation, the relo->sym_off + insn->imm is
6757 * the byte offset in the corresponding section.
6758 */
6759 if (relo->type == RELO_CALL)
6760 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6761 else
6762 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6763 } else if (insn_is_pseudo_func(insn)) {
6764 /*
6765 * RELO_SUBPROG_ADDR relo is always emitted even if both
6766 * functions are in the same section, so it shouldn't reach here.
6767 */
6768 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6769 prog->name, insn_idx);
6770 return -LIBBPF_ERRNO__RELOC;
6771 } else {
6772 /* if subprogram call is to a static function within
6773 * the same ELF section, there won't be any relocation
6774 * emitted, but it also means there is no additional
6775 * offset necessary, insns->imm is relative to
6776 * instruction's original position within the section
6777 */
6778 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6779 }
6780
6781 /* we enforce that sub-programs should be in .text section */
6782 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6783 if (!subprog) {
6784 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6785 prog->name);
6786 return -LIBBPF_ERRNO__RELOC;
6787 }
6788
		/* if it's the first call instruction calling into this
		 * subprogram (meaning this subprog hasn't been processed
		 * yet) within the context of current main program:
		 *   - append it at the end of main program's instructions block;
		 *   - process it recursively, while current program is put on hold;
		 *   - if that subprogram calls some other not yet processed
		 *     subprogram, the same thing happens recursively until
		 *     there are no more unprocessed subprograms left to append
		 *     and relocate.
		 */
6799 if (subprog->sub_insn_off == 0) {
6800 err = bpf_object__append_subprog_code(obj, main_prog, subprog);
6801 if (err)
6802 return err;
6803 err = bpf_object__reloc_code(obj, main_prog, subprog);
6804 if (err)
6805 return err;
6806 }
6807
6808 /* main_prog->insns memory could have been re-allocated, so
6809 * calculate pointer again
6810 */
6811 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6812 /* calculate correct instruction position within current main
6813 * prog; each main prog can have a different set of
6814 * subprograms appended (potentially in different order as
6815 * well), so position of any subprog can be different for
6816 * different main programs
6817 */
6818 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6819
6820 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6821 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6822 }
6823
6824 return 0;
6825 }
6826
6827 /*
6828 * Relocate sub-program calls.
6829 *
 * The algorithm operates as follows. Each entry-point BPF program (referred
 * to as a main prog) is processed separately. Each subprog (a non-entry
 * function that can be called from either entry progs or other subprogs)
 * gets its sub_insn_off reset to zero. This serves as an indicator that this
 * subprogram hasn't yet been appended and relocated within the current main
 * prog. Once it's relocated, sub_insn_off will point at the position within
 * the current main prog where the given subprog was appended, and is further
 * used to relocate all the call instructions jumping into this subprog.
6838 *
6839 * We start with main program and process all call instructions. If the call
6840 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6841 * is zero), subprog instructions are appended at the end of main program's
6842 * instruction array. Then main program is "put on hold" while we recursively
6843 * process newly appended subprogram. If that subprogram calls into another
6844 * subprogram that hasn't been appended, new subprogram is appended again to
 * the *main* prog's instructions (subprog's instructions are always left
 * untouched, as they need to be in an unmodified state for subsequent main
 * progs, and subprog instructions are only ever sent to the kernel as part
 * of a main prog) and
6848 * the process continues recursively. Once all the subprogs called from a main
6849 * prog or any of its subprogs are appended (and relocated), all their
6850 * positions within finalized instructions array are known, so it's easy to
6851 * rewrite call instructions with correct relative offsets, corresponding to
6852 * desired target subprog.
6853 *
6854 * Its important to realize that some subprogs might not be called from some
6855 * main prog and any of its called/used subprogs. Those will keep their
6856 * subprog->sub_insn_off as zero at all times and won't be appended to current
6857 * main prog and won't be relocated within the context of current main prog.
6858 * They might still be used from other main progs later.
6859 *
6860 * Visually this process can be shown as below. Suppose we have two main
6861 * programs mainA and mainB and BPF object contains three subprogs: subA,
6862 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6863 * subC both call subB:
6864 *
6865 * +--------+ +-------+
6866 * | v v |
6867 * +--+---+ +--+-+-+ +---+--+
6868 * | subA | | subB | | subC |
6869 * +--+---+ +------+ +---+--+
6870 * ^ ^
6871 * | |
6872 * +---+-------+ +------+----+
6873 * | mainA | | mainB |
6874 * +-----------+ +-----------+
6875 *
6876 * We'll start relocating mainA, will find subA, append it and start
6877 * processing sub A recursively:
6878 *
6879 * +-----------+------+
6880 * | mainA | subA |
6881 * +-----------+------+
6882 *
6883 * At this point we notice that subB is used from subA, so we append it and
6884 * relocate (there are no further subcalls from subB):
6885 *
6886 * +-----------+------+------+
6887 * | mainA | subA | subB |
6888 * +-----------+------+------+
6889 *
6890 * At this point, we relocate subA calls, then go one level up and finish with
 * relocating mainA calls. mainA is done.
6892 *
 * For mainB the process is similar but results in a different order. We start with
6894 * mainB and skip subA and subB, as mainB never calls them (at least
6895 * directly), but we see subC is needed, so we append and start processing it:
6896 *
6897 * +-----------+------+
6898 * | mainB | subC |
6899 * +-----------+------+
6900 * Now we see subC needs subB, so we go back to it, append and relocate it:
6901 *
6902 * +-----------+------+------+
6903 * | mainB | subC | subB |
6904 * +-----------+------+------+
6905 *
6906 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6907 */
6908 static int
6909 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6910 {
6911 struct bpf_program *subprog;
6912 int i, err;
6913
6914 /* mark all subprogs as not relocated (yet) within the context of
6915 * current main program
6916 */
6917 for (i = 0; i < obj->nr_programs; i++) {
6918 subprog = &obj->programs[i];
6919 if (!prog_is_subprog(obj, subprog))
6920 continue;
6921
6922 subprog->sub_insn_off = 0;
6923 }
6924
6925 err = bpf_object__reloc_code(obj, prog, prog);
6926 if (err)
6927 return err;
6928
6929 return 0;
6930 }
6931
6932 static void
6933 bpf_object__free_relocs(struct bpf_object *obj)
6934 {
6935 struct bpf_program *prog;
6936 int i;
6937
6938 /* free up relocation descriptors */
6939 for (i = 0; i < obj->nr_programs; i++) {
6940 prog = &obj->programs[i];
6941 zfree(&prog->reloc_desc);
6942 prog->nr_reloc = 0;
6943 }
6944 }
6945
6946 static int cmp_relocs(const void *_a, const void *_b)
6947 {
6948 const struct reloc_desc *a = _a;
6949 const struct reloc_desc *b = _b;
6950
6951 if (a->insn_idx != b->insn_idx)
6952 return a->insn_idx < b->insn_idx ? -1 : 1;
6953
6954 /* no two relocations should have the same insn_idx, but ... */
6955 if (a->type != b->type)
6956 return a->type < b->type ? -1 : 1;
6957
6958 return 0;
6959 }
6960
6961 static void bpf_object__sort_relos(struct bpf_object *obj)
6962 {
6963 int i;
6964
6965 for (i = 0; i < obj->nr_programs; i++) {
6966 struct bpf_program *p = &obj->programs[i];
6967
6968 if (!p->nr_reloc)
6969 continue;
6970
6971 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6972 }
6973 }
6974
6975 static int
6976 bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6977 {
6978 struct bpf_program *prog;
6979 size_t i, j;
6980 int err;
6981
6982 if (obj->btf_ext) {
6983 err = bpf_object__relocate_core(obj, targ_btf_path);
6984 if (err) {
6985 pr_warn("failed to perform CO-RE relocations: %d\n",
6986 err);
6987 return err;
6988 }
6989 bpf_object__sort_relos(obj);
6990 }
6991
	/* Before relocating calls, pre-process relocations and mark the
	 * few ld_imm64 instructions that point to subprogs.
	 * Otherwise bpf_object__reloc_code() later would have to consider
	 * all ld_imm64 insns as relocation candidates. That would slow
	 * down relocation, since the number of find_prog_insn_relo() calls
	 * would increase and most of them would fail to find a relo.
6998 */
6999 for (i = 0; i < obj->nr_programs; i++) {
7000 prog = &obj->programs[i];
7001 for (j = 0; j < prog->nr_reloc; j++) {
7002 struct reloc_desc *relo = &prog->reloc_desc[j];
7003 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
7004
7005 /* mark the insn, so it's recognized by insn_is_pseudo_func() */
7006 if (relo->type == RELO_SUBPROG_ADDR)
7007 insn[0].src_reg = BPF_PSEUDO_FUNC;
7008 }
7009 }
7010
7011 /* relocate subprogram calls and append used subprograms to main
7012 * programs; each copy of subprogram code needs to be relocated
7013 * differently for each main program, because its code location might
7014 * have changed.
7015 * Append subprog relos to main programs to allow data relos to be
7016 * processed after text is completely relocated.
7017 */
7018 for (i = 0; i < obj->nr_programs; i++) {
7019 prog = &obj->programs[i];
7020 /* sub-program's sub-calls are relocated within the context of
7021 * its main program only
7022 */
7023 if (prog_is_subprog(obj, prog))
7024 continue;
7025 if (!prog->autoload)
7026 continue;
7027
7028 err = bpf_object__relocate_calls(obj, prog);
7029 if (err) {
7030 pr_warn("prog '%s': failed to relocate calls: %d\n",
7031 prog->name, err);
7032 return err;
7033 }
7034
7035 /* Now, also append exception callback if it has not been done already. */
7036 if (prog->exception_cb_idx >= 0) {
7037 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
7038
			/* Calling the exception callback directly is disallowed;
			 * the verifier will reject that later. If it was already
			 * processed, we can skip this step; otherwise, for all
			 * other valid cases, we have to append the exception
			 * callback now.
			 */
7044 if (subprog->sub_insn_off == 0) {
7045 err = bpf_object__append_subprog_code(obj, prog, subprog);
7046 if (err)
7047 return err;
7048 err = bpf_object__reloc_code(obj, prog, subprog);
7049 if (err)
7050 return err;
7051 }
7052 }
7053 }
7054 /* Process data relos for main programs */
7055 for (i = 0; i < obj->nr_programs; i++) {
7056 prog = &obj->programs[i];
7057 if (prog_is_subprog(obj, prog))
7058 continue;
7059 if (!prog->autoload)
7060 continue;
7061 err = bpf_object__relocate_data(obj, prog);
7062 if (err) {
7063 pr_warn("prog '%s': failed to relocate data references: %d\n",
7064 prog->name, err);
7065 return err;
7066 }
7067 }
7068
7069 return 0;
7070 }
7071
7072 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
7073 Elf64_Shdr *shdr, Elf_Data *data);
7074
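/* Collect relocations against the .maps section: each one records either an
 * inner map (for map-in-map types) or an entry-point program (for prog
 * arrays) that should be placed into a "values" slot of a BTF-defined map.
 */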
7075 static int bpf_object__collect_map_relos(struct bpf_object *obj,
7076 Elf64_Shdr *shdr, Elf_Data *data)
7077 {
7078 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
7079 int i, j, nrels, new_sz;
7080 const struct btf_var_secinfo *vi = NULL;
7081 const struct btf_type *sec, *var, *def;
7082 struct bpf_map *map = NULL, *targ_map = NULL;
7083 struct bpf_program *targ_prog = NULL;
7084 bool is_prog_array, is_map_in_map;
7085 const struct btf_member *member;
7086 const char *name, *mname, *type;
7087 unsigned int moff;
7088 Elf64_Sym *sym;
7089 Elf64_Rel *rel;
7090 void *tmp;
7091
7092 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7093 return -EINVAL;
7094 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7095 if (!sec)
7096 return -EINVAL;
7097
7098 nrels = shdr->sh_size / shdr->sh_entsize;
7099 for (i = 0; i < nrels; i++) {
7100 rel = elf_rel_by_idx(data, i);
7101 if (!rel) {
7102 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
7103 return -LIBBPF_ERRNO__FORMAT;
7104 }
7105
7106 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7107 if (!sym) {
7108 pr_warn(".maps relo #%d: symbol %zx not found\n",
7109 i, (size_t)ELF64_R_SYM(rel->r_info));
7110 return -LIBBPF_ERRNO__FORMAT;
7111 }
7112 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
7113
7114 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
7115 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
7116 (size_t)rel->r_offset, sym->st_name, name);
7117
7118 for (j = 0; j < obj->nr_maps; j++) {
7119 map = &obj->maps[j];
7120 if (map->sec_idx != obj->efile.btf_maps_shndx)
7121 continue;
7122
7123 vi = btf_var_secinfos(sec) + map->btf_var_idx;
7124 if (vi->offset <= rel->r_offset &&
7125 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
7126 break;
7127 }
7128 if (j == obj->nr_maps) {
7129 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7130 i, name, (size_t)rel->r_offset);
7131 return -EINVAL;
7132 }
7133
7134 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7135 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7136 type = is_map_in_map ? "map" : "prog";
7137 if (is_map_in_map) {
7138 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7139 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7140 i, name);
7141 return -LIBBPF_ERRNO__RELOC;
7142 }
7143 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7144 map->def.key_size != sizeof(int)) {
7145 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7146 i, map->name, sizeof(int));
7147 return -EINVAL;
7148 }
7149 targ_map = bpf_object__find_map_by_name(obj, name);
7150 if (!targ_map) {
7151 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
7152 i, name);
7153 return -ESRCH;
7154 }
7155 } else if (is_prog_array) {
7156 targ_prog = bpf_object__find_program_by_name(obj, name);
7157 if (!targ_prog) {
7158 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
7159 i, name);
7160 return -ESRCH;
7161 }
7162 if (targ_prog->sec_idx != sym->st_shndx ||
7163 targ_prog->sec_insn_off * 8 != sym->st_value ||
7164 prog_is_subprog(obj, targ_prog)) {
7165 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
7166 i, name);
7167 return -LIBBPF_ERRNO__RELOC;
7168 }
7169 } else {
7170 return -EINVAL;
7171 }
7172
7173 var = btf__type_by_id(obj->btf, vi->type);
7174 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7175 if (btf_vlen(def) == 0)
7176 return -EINVAL;
7177 member = btf_members(def) + btf_vlen(def) - 1;
7178 mname = btf__name_by_offset(obj->btf, member->name_off);
7179 if (strcmp(mname, "values"))
7180 return -EINVAL;
7181
7182 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
7183 if (rel->r_offset - vi->offset < moff)
7184 return -EINVAL;
7185
7186 moff = rel->r_offset - vi->offset - moff;
7187 /* here we use BPF pointer size, which is always 64 bit, as we
7188 * are parsing ELF that was built for BPF target
7189 */
7190 if (moff % bpf_ptr_sz)
7191 return -EINVAL;
7192 moff /= bpf_ptr_sz;
7193 if (moff >= map->init_slots_sz) {
7194 new_sz = moff + 1;
7195 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
7196 if (!tmp)
7197 return -ENOMEM;
7198 map->init_slots = tmp;
7199 memset(map->init_slots + map->init_slots_sz, 0,
7200 (new_sz - map->init_slots_sz) * host_ptr_sz);
7201 map->init_slots_sz = new_sz;
7202 }
7203 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
7204
7205 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
7206 i, map->name, moff, type, name);
7207 }
7208
7209 return 0;
7210 }
7211
7212 static int bpf_object__collect_relos(struct bpf_object *obj)
7213 {
7214 int i, err;
7215
7216 for (i = 0; i < obj->efile.sec_cnt; i++) {
7217 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7218 Elf64_Shdr *shdr;
7219 Elf_Data *data;
7220 int idx;
7221 Elf64_Shdr shdrelf;
7222
7223 if (sec_desc->sec_type != SEC_RELO)
7224 continue;
7225
7226 #if defined HAVE_LIBELF
7227 shdr = sec_desc->shdr;
7228 #elif defined HAVE_ELFIO
7229 shdr = elf_sec_hdr_by_idx(obj, i, &shdrelf);
7230 #endif
7231 data = sec_desc->data;
7232 idx = shdr->sh_info;
7233
7234 if (shdr->sh_type != SHT_REL) {
7235 pr_warn("internal error at %d\n", __LINE__);
7236 return -LIBBPF_ERRNO__INTERNAL;
7237 }
7238
7239 if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
7240 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7241 else if (idx == obj->efile.btf_maps_shndx)
7242 err = bpf_object__collect_map_relos(obj, shdr, data);
7243 else
7244 err = bpf_object__collect_prog_relos(obj, shdr, data);
7245 if (err)
7246 return err;
7247 }
7248
7249 bpf_object__sort_relos(obj);
7250 return 0;
7251 }
7252
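/* A helper call is BPF_JMP|BPF_CALL with a BPF_K source and zeroed src/dst
 * registers. kfunc calls don't match because relocation sets their src_reg
 * to BPF_PSEUDO_KFUNC_CALL.
 */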
7253 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7254 {
7255 if (BPF_CLASS(insn->code) == BPF_JMP &&
7256 BPF_OP(insn->code) == BPF_CALL &&
7257 BPF_SRC(insn->code) == BPF_K &&
7258 insn->src_reg == 0 &&
7259 insn->dst_reg == 0) {
7260 *func_id = insn->imm;
7261 return true;
7262 }
7263 return false;
7264 }
7265
7266 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7267 {
7268 struct bpf_insn *insn = prog->insns;
7269 enum bpf_func_id func_id;
7270 int i;
7271
7272 if (obj->gen_loader)
7273 return 0;
7274
7275 for (i = 0; i < prog->insns_cnt; i++, insn++) {
7276 if (!insn_is_helper_call(insn, &func_id))
7277 continue;
7278
7279 /* on kernels that don't yet support
7280 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7281 * to bpf_probe_read() which works well for old kernels
7282 */
7283 switch (func_id) {
7284 case BPF_FUNC_probe_read_kernel:
7285 case BPF_FUNC_probe_read_user:
7286 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7287 insn->imm = BPF_FUNC_probe_read;
7288 break;
7289 case BPF_FUNC_probe_read_kernel_str:
7290 case BPF_FUNC_probe_read_user_str:
7291 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7292 insn->imm = BPF_FUNC_probe_read_str;
7293 break;
7294 default:
7295 break;
7296 }
7297 }
7298 return 0;
7299 }
7300
7301 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
7302 int *btf_obj_fd, int *btf_type_id);
7303
7304 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7305 static int libbpf_prepare_prog_load(struct bpf_program *prog,
7306 struct bpf_prog_load_opts *opts, long cookie)
7307 {
7308 enum sec_def_flags def = cookie;
7309
7310 /* old kernels might not support specifying expected_attach_type */
7311 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
7312 opts->expected_attach_type = 0;
7313
7314 if (def & SEC_SLEEPABLE)
7315 opts->prog_flags |= BPF_F_SLEEPABLE;
7316
7317 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7318 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7319
7320 /* special check for usdt to use uprobe_multi link */
7321 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
7322 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7323
7324 if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
7325 int btf_obj_fd = 0, btf_type_id = 0, err;
7326 const char *attach_name;
7327
7328 attach_name = strchr(prog->sec_name, '/');
7329 if (!attach_name) {
			/* If a BPF program is annotated with just SEC("fentry")
			 * (or similar) without declaratively specifying a
			 * target, the target is expected to be set with
			 * bpf_program__set_attach_target() at runtime, before
			 * the BPF object load step. Without that, there is
			 * nothing to load into the kernel, as the BPF verifier
			 * won't be able to validate program correctness anyway.
			 */
7339 pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7340 prog->name);
7341 return -EINVAL;
7342 }
7343 attach_name++; /* skip over / */
7344
7345 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
7346 if (err)
7347 return err;
7348
7349 /* cache resolved BTF FD and BTF type ID in the prog */
7350 prog->attach_btf_obj_fd = btf_obj_fd;
7351 prog->attach_btf_id = btf_type_id;
7352
		/* By now libbpf's common logic no longer uses
		 * prog->attach_btf_obj_fd/prog->attach_btf_id, because this
		 * callback is invoked after opts were already populated by
		 * libbpf, so the callback has to update opts explicitly here.
		 */
7358 opts->attach_btf_obj_fd = btf_obj_fd;
7359 opts->attach_btf_id = btf_type_id;
7360 }
7361 return 0;
7362 }
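
/* Usage sketch for the SEC_ATTACH_BTF case above (illustrative only;
 * object/program/target names are hypothetical): a program declared with a
 * bare SEC("fentry") must get its attach target before load:
 */
#if 0
static int example_set_attach_target(void)
{
	struct bpf_object *obj;
	struct bpf_program *p;

	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (!obj)
		return -errno;

	p = bpf_object__find_program_by_name(obj, "handler");
	if (!p)
		return -ESRCH;

	/* attach_prog_fd == 0 resolves the name against kernel (vmlinux) BTF */
	bpf_program__set_attach_target(p, 0, "tcp_v4_connect");
	return bpf_object__load(obj);
}
#endif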
7363
7364 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
7365
7366 static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7367 struct bpf_insn *insns, int insns_cnt,
7368 const char *license, __u32 kern_version, int *prog_fd)
7369 {
7370 LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
7371 const char *prog_name = NULL;
7372 char *cp, errmsg[STRERR_BUFSIZE];
7373 size_t log_buf_size = 0;
7374 char *log_buf = NULL, *tmp;
7375 int btf_fd, ret, err;
7376 bool own_log_buf = true;
7377 __u32 log_level = prog->log_level;
7378
7379 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
7380 /*
7381 * The program type must be set. Most likely we couldn't find a proper
7382 * section definition at load time, and thus we didn't infer the type.
7383 */
7384 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7385 prog->name, prog->sec_name);
7386 return -EINVAL;
7387 }
7388
7389 if (!insns || !insns_cnt)
7390 return -EINVAL;
7391
7392 if (kernel_supports(obj, FEAT_PROG_NAME))
7393 prog_name = prog->name;
7394 load_attr.attach_prog_fd = prog->attach_prog_fd;
7395 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7396 load_attr.attach_btf_id = prog->attach_btf_id;
7397 load_attr.kern_version = kern_version;
7398 load_attr.prog_ifindex = prog->prog_ifindex;
7399
7400 /* specify func_info/line_info only if kernel supports them */
7401 btf_fd = bpf_object__btf_fd(obj);
7402 if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7403 load_attr.prog_btf_fd = btf_fd;
7404 load_attr.func_info = prog->func_info;
7405 load_attr.func_info_rec_size = prog->func_info_rec_size;
7406 load_attr.func_info_cnt = prog->func_info_cnt;
7407 load_attr.line_info = prog->line_info;
7408 load_attr.line_info_rec_size = prog->line_info_rec_size;
7409 load_attr.line_info_cnt = prog->line_info_cnt;
7410 }
7411 load_attr.log_level = log_level;
7412 load_attr.prog_flags = prog->prog_flags;
7413 load_attr.fd_array = obj->fd_array;
7414
7415 /* adjust load_attr if sec_def provides custom preload callback */
7416 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7417 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
7418 if (err < 0) {
7419 pr_warn("prog '%s': failed to prepare load attributes: %d\n",
7420 prog->name, err);
7421 return err;
7422 }
7423 insns = prog->insns;
7424 insns_cnt = prog->insns_cnt;
7425 }
7426
7427 /* allow prog_prepare_load_fn to change expected_attach_type */
7428 load_attr.expected_attach_type = prog->expected_attach_type;
7429
7430 if (obj->gen_loader) {
7431 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7432 license, insns, insns_cnt, &load_attr,
7433 prog - obj->programs);
7434 *prog_fd = -1;
7435 return 0;
7436 }
7437
7438 retry_load:
7439 /* if log_level is zero, we don't request logs initially even if
7440 * custom log_buf is specified; if the program load fails, then we'll
7441 * bump log_level to 1 and use either custom log_buf or we'll allocate
7442 * our own and retry the load to get details on what failed
7443 */
7444 if (log_level) {
7445 if (prog->log_buf) {
7446 log_buf = prog->log_buf;
7447 log_buf_size = prog->log_size;
7448 own_log_buf = false;
7449 } else if (obj->log_buf) {
7450 log_buf = obj->log_buf;
7451 log_buf_size = obj->log_size;
7452 own_log_buf = false;
7453 } else {
7454 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
7455 tmp = realloc(log_buf, log_buf_size);
7456 if (!tmp) {
7457 ret = -ENOMEM;
7458 goto out;
7459 }
7460 log_buf = tmp;
7461 log_buf[0] = '\0';
7462 own_log_buf = true;
7463 }
7464 }
7465
7466 load_attr.log_buf = log_buf;
7467 load_attr.log_size = log_buf_size;
7468 load_attr.log_level = log_level;
7469
7470 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
7471 if (ret >= 0) {
7472 if (log_level && own_log_buf) {
7473 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7474 prog->name, log_buf);
7475 }
7476
7477 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7478 struct bpf_map *map;
7479 int i;
7480
7481 for (i = 0; i < obj->nr_maps; i++) {
			map = &obj->maps[i];
7483 if (map->libbpf_type != LIBBPF_MAP_RODATA)
7484 continue;
7485
7486 if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
7487 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7488 pr_warn("prog '%s': failed to bind map '%s': %s\n",
7489 prog->name, map->real_name, cp);
7490 /* Don't fail hard if can't bind rodata. */
7491 }
7492 }
7493 }
7494
7495 *prog_fd = ret;
7496 ret = 0;
7497 goto out;
7498 }
7499
7500 if (log_level == 0) {
7501 log_level = 1;
7502 goto retry_load;
7503 }
7504 /* On ENOSPC, increase log buffer size and retry, unless custom
7505 * log_buf is specified.
7506 * Be careful to not overflow u32, though. Kernel's log buf size limit
7507 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
7508 * multiply by 2 unless we are sure we'll fit within 32 bits.
7509 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7510 */
7511 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
7512 goto retry_load;
7513
7514 ret = -errno;
7515
7516 /* post-process verifier log to improve error descriptions */
7517 fixup_verifier_log(prog, log_buf, log_buf_size);
7518
	/* use saved -ret: fixup_verifier_log() above may clobber errno */
	cp = libbpf_strerror_r(-ret, errmsg, sizeof(errmsg));
7520 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
7521 pr_perm_msg(ret);
7522
7523 if (own_log_buf && log_buf && log_buf[0] != '\0') {
7524 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7525 prog->name, log_buf);
7526 }
7527
7528 out:
7529 if (own_log_buf)
7530 free(log_buf);
7531 return ret;
7532 }
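
/* Usage sketch (illustrative): a caller that wants to keep the verifier
 * log even on success can supply its own buffer and a non-zero log level,
 * instead of relying on the on-demand allocation and retry logic above:
 */
#if 0
static char verifier_log[1024 * 1024];

static void example_request_log(struct bpf_program *prog)
{
	bpf_program__set_log_buf(prog, verifier_log, sizeof(verifier_log));
	bpf_program__set_log_level(prog, 1); /* emit log even on success */
}
#endif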
7533
7534 static char *find_prev_line(char *buf, char *cur)
7535 {
7536 char *p;
7537
	if (cur == buf) /* start of the log buf, no previous line */
7539 return NULL;
7540
7541 p = cur - 1;
7542 while (p - 1 >= buf && *(p - 1) != '\n')
7543 p--;
7544
7545 return p;
7546 }
7547
7548 static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
7549 char *orig, size_t orig_sz, const char *patch)
7550 {
7551 /* size of the remaining log content to the right from the to-be-replaced part */
7552 size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7553 size_t patch_sz = strlen(patch);
7554
7555 if (patch_sz != orig_sz) {
7556 /* If patch line(s) are longer than original piece of verifier log,
7557 * shift log contents by (patch_sz - orig_sz) bytes to the right
7558 * starting from after to-be-replaced part of the log.
7559 *
7560 * If patch line(s) are shorter than original piece of verifier log,
7561 * shift log contents by (orig_sz - patch_sz) bytes to the left
		 * starting from after the to-be-replaced part of the log.
7563 *
7564 * We need to be careful about not overflowing available
7565 * buf_sz capacity. If that's the case, we'll truncate the end
7566 * of the original log, as necessary.
7567 */
7568 if (patch_sz > orig_sz) {
7569 if (orig + patch_sz >= buf + buf_sz) {
7570 /* patch is big enough to cover remaining space completely */
7571 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7572 rem_sz = 0;
7573 } else if (patch_sz - orig_sz > buf_sz - log_sz) {
7574 /* patch causes part of remaining log to be truncated */
7575 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7576 }
7577 }
7578 /* shift remaining log to the right by calculated amount */
7579 memmove(orig + patch_sz, orig + orig_sz, rem_sz);
7580 }
7581
7582 memcpy(orig, patch, patch_sz);
7583 }
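
/* Worked example for patch_log(): with log_sz == 16 and orig_sz == 4,
 * a 7-byte patch shifts the tail of the log right by 3 bytes (truncating
 * it if buf_sz would be exceeded), while a 2-byte patch shifts the tail
 * left by 2 bytes; in both cases the patch is then copied over the
 * original piece.
 */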
7584
7585 static void fixup_log_failed_core_relo(struct bpf_program *prog,
7586 char *buf, size_t buf_sz, size_t log_sz,
7587 char *line1, char *line2, char *line3)
7588 {
7589 /* Expected log for failed and not properly guarded CO-RE relocation:
7590 * line1 -> 123: (85) call unknown#195896080
7591 * line2 -> invalid func unknown#195896080
7592 * line3 -> <anything else or end of buffer>
7593 *
7594 * "123" is the index of the instruction that was poisoned. We extract
7595 * instruction index to find corresponding CO-RE relocation and
7596 * replace this part of the log with more relevant information about
7597 * failed CO-RE relocation.
7598 */
7599 const struct bpf_core_relo *relo;
7600 struct bpf_core_spec spec;
7601 char patch[512], spec_buf[256];
7602 int insn_idx, err, spec_len;
7603
7604 if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
7605 return;
7606
7607 relo = find_relo_core(prog, insn_idx);
7608 if (!relo)
7609 return;
7610
7611 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7612 if (err)
7613 return;
7614
7615 spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
7616 snprintf(patch, sizeof(patch),
7617 "%d: <invalid CO-RE relocation>\n"
7618 "failed to resolve CO-RE relocation %s%s\n",
7619 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
7620
7621 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7622 }
7623
7624 static void fixup_log_missing_map_load(struct bpf_program *prog,
7625 char *buf, size_t buf_sz, size_t log_sz,
7626 char *line1, char *line2, char *line3)
7627 {
7628 /* Expected log for failed and not properly guarded map reference:
7629 * line1 -> 123: (85) call unknown#2001000345
7630 * line2 -> invalid func unknown#2001000345
7631 * line3 -> <anything else or end of buffer>
7632 *
7633 * "123" is the index of the instruction that was poisoned.
7634 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
7635 */
7636 struct bpf_object *obj = prog->obj;
7637 const struct bpf_map *map;
7638 int insn_idx, map_idx;
7639 char patch[128];
7640
7641 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7642 return;
7643
7644 map_idx -= POISON_LDIMM64_MAP_BASE;
7645 if (map_idx < 0 || map_idx >= obj->nr_maps)
7646 return;
7647 map = &obj->maps[map_idx];
7648
7649 snprintf(patch, sizeof(patch),
7650 "%d: <invalid BPF map reference>\n"
7651 "BPF map '%s' is referenced but wasn't created\n",
7652 insn_idx, map->name);
7653
7654 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7655 }
7656
7657 static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
7658 char *buf, size_t buf_sz, size_t log_sz,
7659 char *line1, char *line2, char *line3)
7660 {
7661 /* Expected log for failed and not properly guarded kfunc call:
7662 * line1 -> 123: (85) call unknown#2002000345
7663 * line2 -> invalid func unknown#2002000345
7664 * line3 -> <anything else or end of buffer>
7665 *
7666 * "123" is the index of the instruction that was poisoned.
7667 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7668 */
7669 struct bpf_object *obj = prog->obj;
7670 const struct extern_desc *ext;
7671 int insn_idx, ext_idx;
7672 char patch[128];
7673
7674 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
7675 return;
7676
7677 ext_idx -= POISON_CALL_KFUNC_BASE;
7678 if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7679 return;
7680 ext = &obj->externs[ext_idx];
7681
7682 snprintf(patch, sizeof(patch),
7683 "%d: <invalid kfunc call>\n"
7684 "kfunc '%s' is referenced but wasn't resolved\n",
7685 insn_idx, ext->name);
7686
7687 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7688 }
7689
7690 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7691 {
7692 /* look for familiar error patterns in last N lines of the log */
7693 const size_t max_last_line_cnt = 10;
7694 char *prev_line, *cur_line, *next_line;
7695 size_t log_sz;
7696 int i;
7697
7698 if (!buf)
7699 return;
7700
7701 log_sz = strlen(buf) + 1;
7702 next_line = buf + log_sz - 1;
7703
7704 for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7705 cur_line = find_prev_line(buf, next_line);
7706 if (!cur_line)
7707 return;
7708
7709 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7710 prev_line = find_prev_line(buf, cur_line);
7711 if (!prev_line)
7712 continue;
7713
7714 /* failed CO-RE relocation case */
7715 fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7716 prev_line, cur_line, next_line);
7717 return;
7718 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
7719 prev_line = find_prev_line(buf, cur_line);
7720 if (!prev_line)
7721 continue;
7722
7723 /* reference to uncreated BPF map */
7724 fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7725 prev_line, cur_line, next_line);
7726 return;
7727 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
7728 prev_line = find_prev_line(buf, cur_line);
7729 if (!prev_line)
7730 continue;
7731
7732 /* reference to unresolved kfunc */
7733 fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
7734 prev_line, cur_line, next_line);
7735 return;
7736 }
7737 }
7738 }
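
/* Example of the CO-RE rewrite performed above (a sketch; the exact spec
 * string produced by bpf_core_format_spec() will differ):
 *
 *   5: (85) call unknown#195896080
 *   invalid func unknown#195896080
 *
 * becomes
 *
 *   5: <invalid CO-RE relocation>
 *   failed to resolve CO-RE relocation <spec...>
 */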
7739
7740 static int bpf_program_record_relos(struct bpf_program *prog)
7741 {
7742 struct bpf_object *obj = prog->obj;
7743 int i;
7744
7745 for (i = 0; i < prog->nr_reloc; i++) {
7746 struct reloc_desc *relo = &prog->reloc_desc[i];
7747 struct extern_desc *ext = &obj->externs[relo->ext_idx];
7748 int kind;
7749
7750 switch (relo->type) {
7751 case RELO_EXTERN_LD64:
7752 if (ext->type != EXT_KSYM)
7753 continue;
7754 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7755 BTF_KIND_VAR : BTF_KIND_FUNC;
7756 bpf_gen__record_extern(obj->gen_loader, ext->name,
7757 ext->is_weak, !ext->ksym.type_id,
7758 true, kind, relo->insn_idx);
7759 break;
7760 case RELO_EXTERN_CALL:
7761 bpf_gen__record_extern(obj->gen_loader, ext->name,
7762 ext->is_weak, false, false, BTF_KIND_FUNC,
7763 relo->insn_idx);
7764 break;
7765 case RELO_CORE: {
7766 struct bpf_core_relo cr = {
7767 .insn_off = relo->insn_idx * 8,
7768 .type_id = relo->core_relo->type_id,
7769 .access_str_off = relo->core_relo->access_str_off,
7770 .kind = relo->core_relo->kind,
7771 };
7772
7773 bpf_gen__record_relo_core(obj->gen_loader, &cr);
7774 break;
7775 }
7776 default:
7777 continue;
7778 }
7779 }
7780 return 0;
7781 }
7782
7783 static int
7784 bpf_object__load_progs(struct bpf_object *obj, int log_level)
7785 {
7786 struct bpf_program *prog;
7787 size_t i;
7788 int err;
7789
7790 for (i = 0; i < obj->nr_programs; i++) {
7791 prog = &obj->programs[i];
7792 err = bpf_object__sanitize_prog(obj, prog);
7793 if (err)
7794 return err;
7795 }
7796
7797 for (i = 0; i < obj->nr_programs; i++) {
7798 prog = &obj->programs[i];
7799 if (prog_is_subprog(obj, prog))
7800 continue;
7801 if (!prog->autoload) {
7802 pr_debug("prog '%s': skipped loading\n", prog->name);
7803 continue;
7804 }
7805 prog->log_level |= log_level;
7806
7807 if (obj->gen_loader)
7808 bpf_program_record_relos(prog);
7809
7810 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7811 obj->license, obj->kern_version, &prog->fd);
7812 if (err) {
7813 pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
7814 return err;
7815 }
7816 }
7817
7818 bpf_object__free_relocs(obj);
7819 return 0;
7820 }
7821
7822 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7823
7824 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7825 {
7826 struct bpf_program *prog;
7827 int err;
7828
7829 bpf_object__for_each_program(prog, obj) {
7830 prog->sec_def = find_sec_def(prog->sec_name);
7831 if (!prog->sec_def) {
7832 /* couldn't guess, but user might manually specify */
7833 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7834 prog->name, prog->sec_name);
7835 continue;
7836 }
7837
7838 prog->type = prog->sec_def->prog_type;
7839 prog->expected_attach_type = prog->sec_def->expected_attach_type;
7840
7841 /* sec_def can have custom callback which should be called
7842 * after bpf_program is initialized to adjust its properties
7843 */
7844 if (prog->sec_def->prog_setup_fn) {
7845 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
7846 if (err < 0) {
7847 pr_warn("prog '%s': failed to initialize: %d\n",
7848 prog->name, err);
7849 return err;
7850 }
7851 }
7852 }
7853
7854 return 0;
7855 }
7856
7857 static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7858 const char *obj_name,
7859 const struct bpf_object_open_opts *opts)
7860 {
7861 const char *kconfig, *btf_tmp_path;
7862 struct bpf_object *obj;
7863 int err;
7864 char *log_buf;
7865 size_t log_size;
7866 __u32 log_level;
7867
7868 if (obj_buf && !obj_name)
7869 return ERR_PTR(-EINVAL);
7870 #ifdef HAVE_LIBELF
7871 if (elf_version(EV_CURRENT) == EV_NONE) {
7872 pr_warn("failed to init libelf for %s\n",
7873 path ? : "(mem buf)");
7874 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7875 }
7876 #endif
7877
7878 if (!OPTS_VALID(opts, bpf_object_open_opts))
7879 return ERR_PTR(-EINVAL);
7880
7881 obj_name = OPTS_GET(opts, object_name, NULL) ?: obj_name;
7882 if (obj_buf) {
7883 path = obj_name;
7884 pr_debug("loading object '%s' from buffer\n", obj_name);
7885 } else {
7886 pr_debug("loading object from %s\n", path);
7887 }
7888
7889 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7890 log_size = OPTS_GET(opts, kernel_log_size, 0);
7891 log_level = OPTS_GET(opts, kernel_log_level, 0);
7892 if (log_size > UINT_MAX)
7893 return ERR_PTR(-EINVAL);
7894 if (log_size && !log_buf)
7895 return ERR_PTR(-EINVAL);
7896
7897 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7898 if (IS_ERR(obj))
7899 return obj;
7900
7901 obj->log_buf = log_buf;
7902 obj->log_size = log_size;
7903 obj->log_level = log_level;
7904
7905 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7906 if (btf_tmp_path) {
7907 if (strlen(btf_tmp_path) >= PATH_MAX) {
7908 err = -ENAMETOOLONG;
7909 goto out;
7910 }
7911 obj->btf_custom_path = strdup(btf_tmp_path);
7912 if (!obj->btf_custom_path) {
7913 err = -ENOMEM;
7914 goto out;
7915 }
7916 }
7917
7918 kconfig = OPTS_GET(opts, kconfig, NULL);
7919 if (kconfig) {
7920 obj->kconfig = strdup(kconfig);
7921 if (!obj->kconfig) {
7922 err = -ENOMEM;
7923 goto out;
7924 }
7925 }
7926
7927 err = bpf_object__elf_init(obj);
7928 err = err ? : bpf_object__check_endianness(obj);
7929 err = err ? : bpf_object__elf_collect(obj);
7930 err = err ? : bpf_object__collect_externs(obj);
7931 err = err ? : bpf_object_fixup_btf(obj);
7932 err = err ? : bpf_object__init_maps(obj, opts);
7933 err = err ? : bpf_object_init_progs(obj, opts);
7934 err = err ? : bpf_object__collect_relos(obj);
7935 if (err)
7936 goto out;
7937
7938 bpf_object__elf_finish(obj);
7939
7940 return obj;
7941 out:
7942 bpf_object__close(obj);
7943 return ERR_PTR(err);
7944 }
7945
7946 struct bpf_object *
7947 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7948 {
7949 if (!path)
7950 return libbpf_err_ptr(-EINVAL);
7951
7952 return libbpf_ptr(bpf_object_open(path, NULL, 0, NULL, opts));
7953 }
7954
7955 struct bpf_object *bpf_object__open(const char *path)
7956 {
7957 return bpf_object__open_file(path, NULL);
7958 }
7959
7960 struct bpf_object *
7961 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7962 const struct bpf_object_open_opts *opts)
7963 {
7964 char tmp_name[64];
7965
7966 if (!obj_buf || obj_buf_sz == 0)
7967 return libbpf_err_ptr(-EINVAL);
7968
7969 /* create a (quite useless) default "name" for this memory buffer object */
7970 snprintf(tmp_name, sizeof(tmp_name), "%lx-%zx", (unsigned long)obj_buf, obj_buf_sz);
7971
7972 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, tmp_name, opts));
7973 }
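
/* Usage sketch for the open APIs above (illustrative; the file name is
 * hypothetical): open-time knobs are passed via bpf_object_open_opts:
 */
#if 0
static struct bpf_object *example_open(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = "my_obj",
		.kernel_log_level = 1,
	);

	return bpf_object__open_file("prog.bpf.o", &opts);
}
#endif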
7974
7975 static int bpf_object_unload(struct bpf_object *obj)
7976 {
7977 size_t i;
7978
7979 if (!obj)
7980 return libbpf_err(-EINVAL);
7981
7982 for (i = 0; i < obj->nr_maps; i++) {
7983 zclose(obj->maps[i].fd);
7984 if (obj->maps[i].st_ops)
7985 zfree(&obj->maps[i].st_ops->kern_vdata);
7986 }
7987
7988 for (i = 0; i < obj->nr_programs; i++)
7989 bpf_program__unload(&obj->programs[i]);
7990
7991 return 0;
7992 }
7993
7994 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7995 {
7996 struct bpf_map *m;
7997
7998 bpf_object__for_each_map(m, obj) {
7999 if (!bpf_map__is_internal(m))
8000 continue;
8001 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
8002 m->def.map_flags &= ~BPF_F_MMAPABLE;
8003 }
8004
8005 return 0;
8006 }
8007
8008 int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
8009 {
8010 char sym_type, sym_name[500];
8011 unsigned long long sym_addr;
8012 int ret, err = 0;
8013 FILE *f;
8014
8015 f = fopen("/proc/kallsyms", "re");
8016 if (!f) {
8017 err = -errno;
8018 pr_warn("failed to open /proc/kallsyms: %d\n", err);
8019 return err;
8020 }
8021
8022 while (true) {
8023 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
8024 &sym_addr, &sym_type, sym_name);
8025 if (ret == EOF && feof(f))
8026 break;
8027 if (ret != 3) {
8028 pr_warn("failed to read kallsyms entry: %d\n", ret);
8029 err = -EINVAL;
8030 break;
8031 }
8032
8033 err = cb(sym_addr, sym_type, sym_name, ctx);
8034 if (err)
8035 break;
8036 }
8037
8038 fclose(f);
8039 return err;
8040 }
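
/* Usage sketch (illustrative; count_ksyms is hypothetical): the callback
 * receives each symbol's address, type character, and name; a non-zero
 * return value stops the iteration:
 */
#if 0
static int count_ksyms(unsigned long long sym_addr, char sym_type,
		       const char *sym_name, void *ctx)
{
	size_t *cnt = ctx;

	(*cnt)++;
	return 0; /* keep iterating */
}

static int example_count_ksyms(void)
{
	size_t cnt = 0;
	int err;

	err = libbpf_kallsyms_parse(count_ksyms, &cnt);
	return err ?: (int)cnt;
}
#endif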
8041
8042 static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
8043 const char *sym_name, void *ctx)
8044 {
8045 struct bpf_object *obj = ctx;
8046 const struct btf_type *t;
8047 struct extern_desc *ext;
8048
8049 ext = find_extern_by_name(obj, sym_name);
8050 if (!ext || ext->type != EXT_KSYM)
8051 return 0;
8052
8053 t = btf__type_by_id(obj->btf, ext->btf_id);
8054 if (!btf_is_var(t))
8055 return 0;
8056
8057 if (ext->is_set && ext->ksym.addr != sym_addr) {
8058 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
8059 sym_name, ext->ksym.addr, sym_addr);
8060 return -EINVAL;
8061 }
8062 if (!ext->is_set) {
8063 ext->is_set = true;
8064 ext->ksym.addr = sym_addr;
8065 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
8066 }
8067 return 0;
8068 }
8069
8070 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
8071 {
8072 return libbpf_kallsyms_parse(kallsyms_cb, obj);
8073 }
8074
8075 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
8076 __u16 kind, struct btf **res_btf,
8077 struct module_btf **res_mod_btf)
8078 {
8079 struct module_btf *mod_btf;
8080 struct btf *btf;
8081 int i, id, err;
8082
8083 btf = obj->btf_vmlinux;
8084 mod_btf = NULL;
8085 id = btf__find_by_name_kind(btf, ksym_name, kind);
8086
8087 if (id == -ENOENT) {
8088 err = load_module_btfs(obj);
8089 if (err)
8090 return err;
8091
8092 for (i = 0; i < obj->btf_module_cnt; i++) {
8093 /* we assume module_btf's BTF FD is always >0 */
8094 mod_btf = &obj->btf_modules[i];
8095 btf = mod_btf->btf;
8096 id = btf__find_by_name_kind_own(btf, ksym_name, kind);
8097 if (id != -ENOENT)
8098 break;
8099 }
8100 }
8101 if (id <= 0)
8102 return -ESRCH;
8103
8104 *res_btf = btf;
8105 *res_mod_btf = mod_btf;
8106 return id;
8107 }
8108
8109 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
8110 struct extern_desc *ext)
8111 {
8112 const struct btf_type *targ_var, *targ_type;
8113 __u32 targ_type_id, local_type_id;
8114 struct module_btf *mod_btf = NULL;
8115 const char *targ_var_name;
8116 struct btf *btf = NULL;
8117 int id, err;
8118
8119 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
8120 if (id < 0) {
8121 if (id == -ESRCH && ext->is_weak)
8122 return 0;
8123 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
8124 ext->name);
8125 return id;
8126 }
8127
8128 /* find local type_id */
8129 local_type_id = ext->ksym.type_id;
8130
8131 /* find target type_id */
8132 targ_var = btf__type_by_id(btf, id);
8133 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8134 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
8135
8136 err = bpf_core_types_are_compat(obj->btf, local_type_id,
8137 btf, targ_type_id);
8138 if (err <= 0) {
8139 const struct btf_type *local_type;
8140 const char *targ_name, *local_name;
8141
8142 local_type = btf__type_by_id(obj->btf, local_type_id);
8143 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8144 targ_name = btf__name_by_offset(btf, targ_type->name_off);
8145
8146 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
8147 ext->name, local_type_id,
8148 btf_kind_str(local_type), local_name, targ_type_id,
8149 btf_kind_str(targ_type), targ_name);
8150 return -EINVAL;
8151 }
8152
8153 ext->is_set = true;
8154 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8155 ext->ksym.kernel_btf_id = id;
8156 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
8157 ext->name, id, btf_kind_str(targ_var), targ_var_name);
8158
8159 return 0;
8160 }
8161
8162 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8163 struct extern_desc *ext)
8164 {
8165 int local_func_proto_id, kfunc_proto_id, kfunc_id;
8166 struct module_btf *mod_btf = NULL;
8167 const struct btf_type *kern_func;
8168 struct btf *kern_btf = NULL;
8169 int ret;
8170
8171 local_func_proto_id = ext->ksym.type_id;
8172
8173 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8174 &mod_btf);
8175 if (kfunc_id < 0) {
8176 if (kfunc_id == -ESRCH && ext->is_weak)
8177 return 0;
8178 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
8179 ext->name);
8180 return kfunc_id;
8181 }
8182
8183 kern_func = btf__type_by_id(kern_btf, kfunc_id);
8184 kfunc_proto_id = kern_func->type;
8185
8186 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8187 kern_btf, kfunc_proto_id);
8188 if (ret <= 0) {
8189 if (ext->is_weak)
8190 return 0;
8191
8192 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
8193 ext->name, local_func_proto_id,
8194 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
8195 return -EINVAL;
8196 }
8197
8198 /* set index for module BTF fd in fd_array, if unset */
8199 if (mod_btf && !mod_btf->fd_array_idx) {
8200 /* insn->off is s16 */
8201 if (obj->fd_array_cnt == INT16_MAX) {
8202 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
8203 ext->name, mod_btf->fd_array_idx);
8204 return -E2BIG;
8205 }
8206 /* Cannot use index 0 for module BTF fd */
8207 if (!obj->fd_array_cnt)
8208 obj->fd_array_cnt = 1;
8209
8210 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8211 obj->fd_array_cnt + 1);
8212 if (ret)
8213 return ret;
8214 mod_btf->fd_array_idx = obj->fd_array_cnt;
8215 /* we assume module BTF FD is always >0 */
8216 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8217 }
8218
8219 ext->is_set = true;
8220 ext->ksym.kernel_btf_id = kfunc_id;
8221 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
8222 /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
8223 * populates FD into ld_imm64 insn when it's used to point to kfunc.
8224 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8225 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
8226 */
8227 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8228 pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
8229 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
8230
8231 return 0;
8232 }
8233
8234 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
8235 {
8236 const struct btf_type *t;
8237 struct extern_desc *ext;
8238 int i, err;
8239
8240 for (i = 0; i < obj->nr_extern; i++) {
8241 ext = &obj->externs[i];
8242 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
8243 continue;
8244
8245 if (obj->gen_loader) {
8246 ext->is_set = true;
8247 ext->ksym.kernel_btf_obj_fd = 0;
8248 ext->ksym.kernel_btf_id = 0;
8249 continue;
8250 }
8251 t = btf__type_by_id(obj->btf, ext->btf_id);
8252 if (btf_is_var(t))
8253 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
8254 else
8255 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
8256 if (err)
8257 return err;
8258 }
8259 return 0;
8260 }
8261
8262 static int bpf_object__resolve_externs(struct bpf_object *obj,
8263 const char *extra_kconfig)
8264 {
8265 bool need_config = false, need_kallsyms = false;
8266 bool need_vmlinux_btf = false;
8267 struct extern_desc *ext;
8268 void *kcfg_data = NULL;
8269 int err, i;
8270
8271 if (obj->nr_extern == 0)
8272 return 0;
8273
8274 if (obj->kconfig_map_idx >= 0)
8275 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
8276
8277 for (i = 0; i < obj->nr_extern; i++) {
8278 ext = &obj->externs[i];
8279
8280 if (ext->type == EXT_KSYM) {
8281 if (ext->ksym.type_id)
8282 need_vmlinux_btf = true;
8283 else
8284 need_kallsyms = true;
8285 continue;
8286 } else if (ext->type == EXT_KCFG) {
8287 void *ext_ptr = kcfg_data + ext->kcfg.data_off;
8288 __u64 value = 0;
8289
8290 /* Kconfig externs need actual /proc/config.gz */
8291 if (str_has_pfx(ext->name, "CONFIG_")) {
8292 need_config = true;
8293 continue;
8294 }
8295
			/* Virtual kcfg externs are handled specially by libbpf */
8297 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
8298 value = get_kernel_version();
8299 if (!value) {
8300 pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
8301 return -EINVAL;
8302 }
8303 } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
8304 value = kernel_supports(obj, FEAT_BPF_COOKIE);
8305 } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
8306 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
8307 } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
				/* Currently libbpf supports only CONFIG_ and LINUX_ prefixed
				 * __kconfig externs, where LINUX_ ones are virtual and filled
				 * out by libbpf itself (their values don't come from Kconfig).
				 * If a LINUX_xxx variable is not recognized by libbpf, but is
				 * marked __weak, it defaults to zero, just like CONFIG_xxx
				 * externs.
8314 */
8315 pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
8316 return -EINVAL;
8317 }
8318
8319 err = set_kcfg_value_num(ext, ext_ptr, value);
8320 if (err)
8321 return err;
8322 pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
8323 ext->name, (long long)value);
8324 } else {
8325 pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
8326 return -EINVAL;
8327 }
8328 }
8329 if (need_config && extra_kconfig) {
8330 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
8331 if (err)
8332 return -EINVAL;
8333 need_config = false;
8334 for (i = 0; i < obj->nr_extern; i++) {
8335 ext = &obj->externs[i];
8336 if (ext->type == EXT_KCFG && !ext->is_set) {
8337 need_config = true;
8338 break;
8339 }
8340 }
8341 }
8342 if (need_config) {
8343 err = bpf_object__read_kconfig_file(obj, kcfg_data);
8344 if (err)
8345 return -EINVAL;
8346 }
8347 if (need_kallsyms) {
8348 err = bpf_object__read_kallsyms_file(obj);
8349 if (err)
8350 return -EINVAL;
8351 }
8352 if (need_vmlinux_btf) {
8353 err = bpf_object__resolve_ksyms_btf_id(obj);
8354 if (err)
8355 return -EINVAL;
8356 }
8357 for (i = 0; i < obj->nr_extern; i++) {
8358 ext = &obj->externs[i];
8359
8360 if (!ext->is_set && !ext->is_weak) {
8361 pr_warn("extern '%s' (strong): not resolved\n", ext->name);
8362 return -ESRCH;
8363 } else if (!ext->is_set) {
8364 pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
8365 ext->name);
8366 }
8367 }
8368
8369 return 0;
8370 }
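
/* BPF-side counterpart of the externs resolved above (a sketch; needs
 * bpf_helpers.h, and CONFIG_HZ/bpf_rcu_read_lock are just examples):
 */
#if 0
extern unsigned long CONFIG_HZ __kconfig;	/* from Kconfig */
extern int LINUX_KERNEL_VERSION __kconfig;	/* virtual, filled by libbpf */
extern void bpf_rcu_read_lock(void) __ksym __weak; /* kfunc; weak may stay unset */
#endif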
8371
8372 static void bpf_map_prepare_vdata(const struct bpf_map *map)
8373 {
8374 struct bpf_struct_ops *st_ops;
8375 __u32 i;
8376
8377 st_ops = map->st_ops;
8378 for (i = 0; i < btf_vlen(st_ops->type); i++) {
8379 struct bpf_program *prog = st_ops->progs[i];
8380 void *kern_data;
8381 int prog_fd;
8382
8383 if (!prog)
8384 continue;
8385
8386 prog_fd = bpf_program__fd(prog);
8387 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
8388 *(unsigned long *)kern_data = prog_fd;
8389 }
8390 }
8391
8392 static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8393 {
8394 int i;
8395
8396 for (i = 0; i < obj->nr_maps; i++)
8397 if (bpf_map__is_struct_ops(&obj->maps[i]))
8398 bpf_map_prepare_vdata(&obj->maps[i]);
8399
8400 return 0;
8401 }
8402
8403 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
8404 {
8405 int err, i;
8406
8407 if (!obj)
8408 return libbpf_err(-EINVAL);
8409
8410 if (obj->loaded) {
8411 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8412 return libbpf_err(-EINVAL);
8413 }
8414
8415 if (obj->gen_loader)
8416 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
8417
8418 err = bpf_object__probe_loading(obj);
8419 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8420 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8421 err = err ? : bpf_object__sanitize_and_load_btf(obj);
8422 err = err ? : bpf_object__sanitize_maps(obj);
8423 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
8424 err = err ? : bpf_object__create_maps(obj);
8425 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
8426 err = err ? : bpf_object__load_progs(obj, extra_log_level);
8427 err = err ? : bpf_object_init_prog_arrays(obj);
8428 err = err ? : bpf_object_prepare_struct_ops(obj);
8429
8430 if (obj->gen_loader) {
8431 /* reset FDs */
8432 if (obj->btf)
8433 btf__set_fd(obj->btf, -1);
8434 for (i = 0; i < obj->nr_maps; i++)
8435 obj->maps[i].fd = -1;
8436 if (!err)
8437 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
8438 }
8439
8440 /* clean up fd_array */
8441 zfree(&obj->fd_array);
8442
8443 /* clean up module BTFs */
8444 for (i = 0; i < obj->btf_module_cnt; i++) {
8445 close(obj->btf_modules[i].fd);
8446 btf__free(obj->btf_modules[i].btf);
8447 free(obj->btf_modules[i].name);
8448 }
8449 free(obj->btf_modules);
8450
8451 /* clean up vmlinux BTF */
8452 btf__free(obj->btf_vmlinux);
8453 obj->btf_vmlinux = NULL;
8454
	obj->loaded = true; /* mark as loaded regardless of success */
8456
8457 if (err)
8458 goto out;
8459
8460 return 0;
8461 out:
8462 /* unpin any maps that were auto-pinned during load */
8463 for (i = 0; i < obj->nr_maps; i++)
8464 if (obj->maps[i].pinned && !obj->maps[i].reused)
8465 bpf_map__unpin(&obj->maps[i], NULL);
8466
8467 bpf_object_unload(obj);
8468 pr_warn("failed to load object '%s'\n", obj->path);
8469 return libbpf_err(err);
8470 }
8471
8472 int bpf_object__load(struct bpf_object *obj)
8473 {
8474 return bpf_object_load(obj, 0, NULL);
8475 }
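
/* Typical lifecycle around bpf_object__load() (illustrative sketch with a
 * hypothetical file name):
 */
#if 0
static int example_lifecycle(void)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (!obj)
		return -errno;

	err = bpf_object__load(obj);	/* creates maps, loads programs */
	if (err)
		goto out;

	/* ... attach programs, exchange data via maps ... */
out:
	bpf_object__close(obj);		/* unloads and frees everything */
	return err;
}
#endif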
8476
8477 static int make_parent_dir(const char *path)
8478 {
8479 char *cp, errmsg[STRERR_BUFSIZE];
8480 char *dname, *dir;
8481 int err = 0;
8482
8483 dname = strdup(path);
8484 if (dname == NULL)
8485 return -ENOMEM;
8486
8487 dir = dirname(dname);
8488 if (mkdir(dir, 0700) && errno != EEXIST)
8489 err = -errno;
8490
8491 free(dname);
8492 if (err) {
8493 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8494 pr_warn("failed to mkdir %s: %s\n", path, cp);
8495 }
8496 return err;
8497 }
8498
8499 static int check_path(const char *path)
8500 {
8501 char *cp, errmsg[STRERR_BUFSIZE];
8502 struct statfs st_fs;
8503 char *dname, *dir;
8504 int err = 0;
8505
8506 if (path == NULL)
8507 return -EINVAL;
8508
8509 dname = strdup(path);
8510 if (dname == NULL)
8511 return -ENOMEM;
8512
8513 dir = dirname(dname);
8514 if (statfs(dir, &st_fs)) {
8515 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
8516 pr_warn("failed to statfs %s: %s\n", dir, cp);
8517 err = -errno;
8518 }
8519 free(dname);
8520
8521 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
8522 pr_warn("specified path %s is not on BPF FS\n", path);
8523 err = -EINVAL;
8524 }
8525
8526 return err;
8527 }
8528
8529 int bpf_program__pin(struct bpf_program *prog, const char *path)
8530 {
8531 char *cp, errmsg[STRERR_BUFSIZE];
8532 int err;
8533
8534 if (prog->fd < 0) {
8535 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
8536 return libbpf_err(-EINVAL);
8537 }
8538
8539 err = make_parent_dir(path);
8540 if (err)
8541 return libbpf_err(err);
8542
8543 err = check_path(path);
8544 if (err)
8545 return libbpf_err(err);
8546
8547 if (bpf_obj_pin(prog->fd, path)) {
8548 err = -errno;
8549 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
8550 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
8551 return libbpf_err(err);
8552 }
8553
8554 pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
8555 return 0;
8556 }
8557
8558 int bpf_program__unpin(struct bpf_program *prog, const char *path)
8559 {
8560 int err;
8561
8562 if (prog->fd < 0) {
8563 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
8564 return libbpf_err(-EINVAL);
8565 }
8566
8567 err = check_path(path);
8568 if (err)
8569 return libbpf_err(err);
8570
8571 err = unlink(path);
8572 if (err)
8573 return libbpf_err(-errno);
8574
8575 pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
8576 return 0;
8577 }
8578
8579 int bpf_map__pin(struct bpf_map *map, const char *path)
8580 {
8581 char *cp, errmsg[STRERR_BUFSIZE];
8582 int err;
8583
8584 if (map == NULL) {
8585 pr_warn("invalid map pointer\n");
8586 return libbpf_err(-EINVAL);
8587 }
8588
8589 if (map->pin_path) {
8590 if (path && strcmp(path, map->pin_path)) {
8591 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8592 bpf_map__name(map), map->pin_path, path);
8593 return libbpf_err(-EINVAL);
8594 } else if (map->pinned) {
8595 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8596 bpf_map__name(map), map->pin_path);
8597 return 0;
8598 }
8599 } else {
8600 if (!path) {
8601 pr_warn("missing a path to pin map '%s' at\n",
8602 bpf_map__name(map));
8603 return libbpf_err(-EINVAL);
8604 } else if (map->pinned) {
8605 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
8606 return libbpf_err(-EEXIST);
8607 }
8608
8609 map->pin_path = strdup(path);
8610 if (!map->pin_path) {
8611 err = -errno;
8612 goto out_err;
8613 }
8614 }
8615
8616 err = make_parent_dir(map->pin_path);
8617 if (err)
8618 return libbpf_err(err);
8619
8620 err = check_path(map->pin_path);
8621 if (err)
8622 return libbpf_err(err);
8623
8624 if (bpf_obj_pin(map->fd, map->pin_path)) {
8625 err = -errno;
8626 goto out_err;
8627 }
8628
8629 map->pinned = true;
8630 pr_debug("pinned map '%s'\n", map->pin_path);
8631
8632 return 0;
8633
8634 out_err:
8635 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8636 pr_warn("failed to pin map: %s\n", cp);
8637 return libbpf_err(err);
8638 }
8639
8640 int bpf_map__unpin(struct bpf_map *map, const char *path)
8641 {
8642 int err;
8643
8644 if (map == NULL) {
8645 pr_warn("invalid map pointer\n");
8646 return libbpf_err(-EINVAL);
8647 }
8648
8649 if (map->pin_path) {
8650 if (path && strcmp(path, map->pin_path)) {
8651 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8652 bpf_map__name(map), map->pin_path, path);
8653 return libbpf_err(-EINVAL);
8654 }
8655 path = map->pin_path;
8656 } else if (!path) {
8657 pr_warn("no path to unpin map '%s' from\n",
8658 bpf_map__name(map));
8659 return libbpf_err(-EINVAL);
8660 }
8661
8662 err = check_path(path);
8663 if (err)
8664 return libbpf_err(err);
8665
8666 err = unlink(path);
8667 if (err != 0)
8668 return libbpf_err(-errno);
8669
8670 map->pinned = false;
8671 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
8672
8673 return 0;
8674 }
8675
8676 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8677 {
8678 char *new = NULL;
8679
8680 if (path) {
8681 new = strdup(path);
8682 if (!new)
8683 return libbpf_err(-errno);
8684 }
8685
8686 free(map->pin_path);
8687 map->pin_path = new;
8688 return 0;
8689 }
8690
8691 __alias(bpf_map__pin_path)
8692 const char *bpf_map__get_pin_path(const struct bpf_map *map);
8693
8694 const char *bpf_map__pin_path(const struct bpf_map *map)
8695 {
8696 return map->pin_path;
8697 }
8698
8699 bool bpf_map__is_pinned(const struct bpf_map *map)
8700 {
8701 return map->pinned;
8702 }
8703
8704 static void sanitize_pin_path(char *s)
8705 {
8706 /* bpffs disallows periods in path names */
8707 while (*s) {
8708 if (*s == '.')
8709 *s = '_';
8710 s++;
8711 }
8712 }
8713
8714 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8715 {
8716 struct bpf_map *map;
8717 int err;
8718
8719 if (!obj)
8720 return libbpf_err(-ENOENT);
8721
8722 if (!obj->loaded) {
8723 pr_warn("object not yet loaded; load it first\n");
8724 return libbpf_err(-ENOENT);
8725 }
8726
8727 bpf_object__for_each_map(map, obj) {
8728 char *pin_path = NULL;
8729 char buf[PATH_MAX];
8730
8731 if (!map->autocreate)
8732 continue;
8733
8734 if (path) {
8735 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8736 if (err)
8737 goto err_unpin_maps;
8738 sanitize_pin_path(buf);
8739 pin_path = buf;
8740 } else if (!map->pin_path) {
8741 continue;
8742 }
8743
8744 err = bpf_map__pin(map, pin_path);
8745 if (err)
8746 goto err_unpin_maps;
8747 }
8748
8749 return 0;
8750
8751 err_unpin_maps:
8752 while ((map = bpf_object__prev_map(obj, map))) {
8753 if (!map->pin_path)
8754 continue;
8755
8756 bpf_map__unpin(map, NULL);
8757 }
8758
8759 return libbpf_err(err);
8760 }
8761
8762 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8763 {
8764 struct bpf_map *map;
8765 int err;
8766
8767 if (!obj)
8768 return libbpf_err(-ENOENT);
8769
8770 bpf_object__for_each_map(map, obj) {
8771 char *pin_path = NULL;
8772 char buf[PATH_MAX];
8773
8774 if (path) {
8775 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8776 if (err)
8777 return libbpf_err(err);
8778 sanitize_pin_path(buf);
8779 pin_path = buf;
8780 } else if (!map->pin_path) {
8781 continue;
8782 }
8783
8784 err = bpf_map__unpin(map, pin_path);
8785 if (err)
8786 return libbpf_err(err);
8787 }
8788
8789 return 0;
8790 }
8791
8792 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8793 {
8794 struct bpf_program *prog;
8795 char buf[PATH_MAX];
8796 int err;
8797
8798 if (!obj)
8799 return libbpf_err(-ENOENT);
8800
8801 if (!obj->loaded) {
8802 pr_warn("object not yet loaded; load it first\n");
8803 return libbpf_err(-ENOENT);
8804 }
8805
8806 bpf_object__for_each_program(prog, obj) {
8807 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8808 if (err)
8809 goto err_unpin_programs;
8810
8811 err = bpf_program__pin(prog, buf);
8812 if (err)
8813 goto err_unpin_programs;
8814 }
8815
8816 return 0;
8817
8818 err_unpin_programs:
8819 while ((prog = bpf_object__prev_program(obj, prog))) {
8820 if (pathname_concat(buf, sizeof(buf), path, prog->name))
8821 continue;
8822
8823 bpf_program__unpin(prog, buf);
8824 }
8825
8826 return libbpf_err(err);
8827 }
8828
8829 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8830 {
8831 struct bpf_program *prog;
8832 int err;
8833
8834 if (!obj)
8835 return libbpf_err(-ENOENT);
8836
8837 bpf_object__for_each_program(prog, obj) {
8838 char buf[PATH_MAX];
8839
8840 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8841 if (err)
8842 return libbpf_err(err);
8843
8844 err = bpf_program__unpin(prog, buf);
8845 if (err)
8846 return libbpf_err(err);
8847 }
8848
8849 return 0;
8850 }
8851
8852 int bpf_object__pin(struct bpf_object *obj, const char *path)
8853 {
8854 int err;
8855
8856 err = bpf_object__pin_maps(obj, path);
8857 if (err)
8858 return libbpf_err(err);
8859
8860 err = bpf_object__pin_programs(obj, path);
8861 if (err) {
8862 bpf_object__unpin_maps(obj, path);
8863 return libbpf_err(err);
8864 }
8865
8866 return 0;
8867 }
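
/* Usage sketch: pin everything under one bpffs directory (the path is
 * just an example mount point); on failure, maps pinned so far are
 * unpinned again by the error path above:
 */
#if 0
static int example_pin(struct bpf_object *obj)
{
	return bpf_object__pin(obj, "/sys/fs/bpf/my_obj");
}
#endif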
8868
8869 int bpf_object__unpin(struct bpf_object *obj, const char *path)
8870 {
8871 int err;
8872
8873 err = bpf_object__unpin_programs(obj, path);
8874 if (err)
8875 return libbpf_err(err);
8876
8877 err = bpf_object__unpin_maps(obj, path);
8878 if (err)
8879 return libbpf_err(err);
8880
8881 return 0;
8882 }
8883
8884 static void bpf_map__destroy(struct bpf_map *map)
8885 {
8886 if (map->inner_map) {
8887 bpf_map__destroy(map->inner_map);
8888 zfree(&map->inner_map);
8889 }
8890
8891 zfree(&map->init_slots);
8892 map->init_slots_sz = 0;
8893
8894 if (map->mmaped) {
8895 size_t mmap_sz;
8896
8897 mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
8898 munmap(map->mmaped, mmap_sz);
8899 map->mmaped = NULL;
8900 }
8901
8902 if (map->st_ops) {
8903 zfree(&map->st_ops->data);
8904 zfree(&map->st_ops->progs);
8905 zfree(&map->st_ops->kern_func_off);
8906 zfree(&map->st_ops);
8907 }
8908
8909 zfree(&map->name);
8910 zfree(&map->real_name);
8911 zfree(&map->pin_path);
8912
8913 if (map->fd >= 0)
8914 zclose(map->fd);
8915 }
8916
8917 void bpf_object__close(struct bpf_object *obj)
8918 {
8919 size_t i;
8920
8921 if (IS_ERR_OR_NULL(obj))
8922 return;
8923 #ifdef HAVE_LIBELF
8924 usdt_manager_free(obj->usdt_man);
8925 obj->usdt_man = NULL;
8926 #endif //HAVE_LIBELF
8927 bpf_gen__free(obj->gen_loader);
8928 bpf_object__elf_finish(obj);
8929 bpf_object_unload(obj);
8930 btf__free(obj->btf);
8931 btf__free(obj->btf_vmlinux);
8932 btf_ext__free(obj->btf_ext);
8933
8934 for (i = 0; i < obj->nr_maps; i++)
8935 bpf_map__destroy(&obj->maps[i]);
8936
8937 zfree(&obj->btf_custom_path);
8938 zfree(&obj->kconfig);
8939
8940 for (i = 0; i < obj->nr_extern; i++)
8941 zfree(&obj->externs[i].essent_name);
8942
8943 zfree(&obj->externs);
8944 obj->nr_extern = 0;
8945
8946 zfree(&obj->maps);
8947 obj->nr_maps = 0;
8948
8949 if (obj->programs && obj->nr_programs) {
8950 for (i = 0; i < obj->nr_programs; i++)
8951 bpf_program__exit(&obj->programs[i]);
8952 }
8953 zfree(&obj->programs);
8954
8955 free(obj);
8956 }
8957
8958 const char *bpf_object__name(const struct bpf_object *obj)
8959 {
8960 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8961 }
8962
8963 unsigned int bpf_object__kversion(const struct bpf_object *obj)
8964 {
8965 return obj ? obj->kern_version : 0;
8966 }
8967
8968 struct btf *bpf_object__btf(const struct bpf_object *obj)
8969 {
8970 return obj ? obj->btf : NULL;
8971 }
8972
8973 int bpf_object__btf_fd(const struct bpf_object *obj)
8974 {
8975 return obj->btf ? btf__fd(obj->btf) : -1;
8976 }
8977
8978 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8979 {
8980 if (obj->loaded)
8981 return libbpf_err(-EINVAL);
8982
8983 obj->kern_version = kern_version;
8984
8985 return 0;
8986 }
8987
8988 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8989 {
8990 struct bpf_gen *gen;
8991
8992 if (!opts)
8993 return -EFAULT;
8994 if (!OPTS_VALID(opts, gen_loader_opts))
8995 return -EINVAL;
	gen = calloc(1, sizeof(*gen));
8997 if (!gen)
8998 return -ENOMEM;
8999 gen->opts = opts;
9000 obj->gen_loader = gen;
9001 return 0;
9002 }
9003
9004 static struct bpf_program *
9005 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
9006 bool forward)
9007 {
9008 size_t nr_programs = obj->nr_programs;
9009 ssize_t idx;
9010
9011 if (!nr_programs)
9012 return NULL;
9013
9014 if (!p)
9015 /* Iter from the beginning */
9016 return forward ? &obj->programs[0] :
9017 &obj->programs[nr_programs - 1];
9018
9019 if (p->obj != obj) {
9020 pr_warn("error: program handler doesn't match object\n");
9021 return errno = EINVAL, NULL;
9022 }
9023
9024 idx = (p - obj->programs) + (forward ? 1 : -1);
9025 if (idx >= obj->nr_programs || idx < 0)
9026 return NULL;
9027 return &obj->programs[idx];
9028 }
9029
9030 struct bpf_program *
9031 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
9032 {
9033 struct bpf_program *prog = prev;
9034
9035 do {
9036 prog = __bpf_program__iter(prog, obj, true);
9037 } while (prog && prog_is_subprog(obj, prog));
9038
9039 return prog;
9040 }
9041
9042 struct bpf_program *
9043 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
9044 {
9045 struct bpf_program *prog = next;
9046
9047 do {
9048 prog = __bpf_program__iter(prog, obj, false);
9049 } while (prog && prog_is_subprog(obj, prog));
9050
9051 return prog;
9052 }
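
/* Usage sketch: these iterators back the bpf_object__for_each_program()
 * macro from libbpf.h, which skips subprograms automatically:
 */
#if 0
static void example_iterate(struct bpf_object *obj)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj)
		printf("found prog '%s'\n", bpf_program__name(prog));
}
#endif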
9053
9054 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
9055 {
9056 prog->prog_ifindex = ifindex;
9057 }
9058
9059 const char *bpf_program__name(const struct bpf_program *prog)
9060 {
9061 return prog->name;
9062 }
9063
9064 const char *bpf_program__section_name(const struct bpf_program *prog)
9065 {
9066 return prog->sec_name;
9067 }
9068
9069 bool bpf_program__autoload(const struct bpf_program *prog)
9070 {
9071 return prog->autoload;
9072 }
9073
9074 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
9075 {
9076 if (prog->obj->loaded)
9077 return libbpf_err(-EINVAL);
9078
9079 prog->autoload = autoload;
9080 return 0;
9081 }
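
/* Usage sketch (hypothetical program name): disable loading of an
 * optional program before bpf_object__load():
 */
#if 0
static void example_autoload(struct bpf_object *obj)
{
	struct bpf_program *p;

	p = bpf_object__find_program_by_name(obj, "optional_probe");
	if (p)
		bpf_program__set_autoload(p, false);
}
#endif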
9082
9083 bool bpf_program__autoattach(const struct bpf_program *prog)
9084 {
9085 return prog->autoattach;
9086 }
9087
9088 void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
9089 {
9090 prog->autoattach = autoattach;
9091 }
9092
9093 const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
9094 {
9095 return prog->insns;
9096 }
9097
9098 size_t bpf_program__insn_cnt(const struct bpf_program *prog)
9099 {
9100 return prog->insns_cnt;
9101 }
9102
9103 int bpf_program__set_insns(struct bpf_program *prog,
9104 struct bpf_insn *new_insns, size_t new_insn_cnt)
9105 {
9106 struct bpf_insn *insns;
9107
9108 if (prog->obj->loaded)
9109 return -EBUSY;
9110
9111 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
9112 /* NULL is a valid return from reallocarray if the new count is zero */
9113 if (!insns && new_insn_cnt) {
9114 pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
9115 return -ENOMEM;
9116 }
9117 memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
9118
9119 prog->insns = insns;
9120 prog->insns_cnt = new_insn_cnt;
9121 return 0;
9122 }
9123
9124 int bpf_program__fd(const struct bpf_program *prog)
9125 {
9126 if (!prog)
9127 return libbpf_err(-EINVAL);
9128
9129 if (prog->fd < 0)
9130 return libbpf_err(-ENOENT);
9131
9132 return prog->fd;
9133 }
9134
9135 __alias(bpf_program__type)
9136 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
9137
9138 enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
9139 {
9140 return prog->type;
9141 }
9142
9143 static size_t custom_sec_def_cnt;
9144 static struct bpf_sec_def *custom_sec_defs;
9145 static struct bpf_sec_def custom_fallback_def;
9146 static bool has_custom_fallback_def;
9147 static int last_custom_sec_def_handler_id;
9148
9149 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
9150 {
9151 if (prog->obj->loaded)
9152 return libbpf_err(-EBUSY);
9153
9154 /* if type is not changed, do nothing */
9155 if (prog->type == type)
9156 return 0;
9157
9158 prog->type = type;
9159
9160 /* If a program type was changed, we need to reset associated SEC()
9161 * handler, as it will be invalid now. The only exception is a generic
9162 * fallback handler, which by definition is program type-agnostic and
9163 * is a catch-all custom handler, optionally set by the application,
9164 * so should be able to handle any type of BPF program.
9165 */
9166 if (prog->sec_def != &custom_fallback_def)
9167 prog->sec_def = NULL;
9168 return 0;
9169 }
9170
9171 __alias(bpf_program__expected_attach_type)
9172 enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
9173
9174 enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
9175 {
9176 return prog->expected_attach_type;
9177 }
9178
9179 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
9180 enum bpf_attach_type type)
9181 {
9182 if (prog->obj->loaded)
9183 return libbpf_err(-EBUSY);
9184
9185 prog->expected_attach_type = type;
9186 return 0;
9187 }
9188
9189 __u32 bpf_program__flags(const struct bpf_program *prog)
9190 {
9191 return prog->prog_flags;
9192 }
9193
9194 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
9195 {
9196 if (prog->obj->loaded)
9197 return libbpf_err(-EBUSY);
9198
9199 prog->prog_flags = flags;
9200 return 0;
9201 }
9202
9203 __u32 bpf_program__log_level(const struct bpf_program *prog)
9204 {
9205 return prog->log_level;
9206 }
9207
9208 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
9209 {
9210 if (prog->obj->loaded)
9211 return libbpf_err(-EBUSY);
9212
9213 prog->log_level = log_level;
9214 return 0;
9215 }
9216
9217 const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
9218 {
9219 *log_size = prog->log_size;
9220 return prog->log_buf;
9221 }
9222
9223 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
9224 {
9225 if (log_size && !log_buf)
9226 return -EINVAL;
	if (log_size > UINT_MAX)
9228 return -EINVAL;
9229 if (prog->obj->loaded)
9230 return -EBUSY;
9231
9232 prog->log_buf = log_buf;
9233 prog->log_size = log_size;
9234 return 0;
9235 }
9236
9237 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
9238 .sec = (char *)sec_pfx, \
9239 .prog_type = BPF_PROG_TYPE_##ptype, \
9240 .expected_attach_type = atype, \
9241 .cookie = (long)(flags), \
9242 .prog_prepare_load_fn = libbpf_prepare_prog_load, \
9243 __VA_ARGS__ \
9244 }
9245
9246 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9247 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9248 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9249 #ifdef HAVE_LIBELF
9250 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9251 #endif //HAVE_LIBELF
9252 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9253 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9254 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9255 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9256 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9257 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9258 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9259
9260 static const struct bpf_sec_def section_defs[] = {
9261 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE),
9262 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
9263 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
9264 SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
9265 SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
9266 SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9267 SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
9268 SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
9269 SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9270 SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9271 SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9272 SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9273 SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9274 SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9275 SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9276 SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9277 SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9278 #ifdef HAVE_LIBELF
9279 SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
9280 SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
9281 #endif /* HAVE_LIBELF */
9282 SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
9283 SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */
9284 SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
9285 SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),
9286 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9287 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9288 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9289 SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE),
9290 SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE),
9291 SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9292 SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9293 SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9294 SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9295 SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9296 SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9297 SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
9298 SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
9299 SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
9300 SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
9301 SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9302 SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9303 SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9304 SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace),
9305 SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
9306 SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
9307 SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
9308 SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
9309 SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
9310 SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
9311 SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
9312 SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
9313 SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
9314 SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
9315 SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
9316 SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
9317 SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE),
9318 SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE),
9319 SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE),
9320 SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE),
9321 SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE),
9322 SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
9323 SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
9324 SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
9325 SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE),
9326 SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
9327 SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
9328 SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
9329 SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
9330 SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
9331 SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE),
9332 SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
9333 SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
9334 SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
9335 SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
9336 SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
9337 SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
9338 SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
9339 SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
9340 SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
9341 SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE),
9342 SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
9343 SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
9344 SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE),
9345 SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
9346 SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
9347 SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE),
9348 SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
9349 SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
9350 SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE),
9351 SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
9352 SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
9353 SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE),
9354 SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
9355 SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
9356 SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
9357 SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
9358 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
9359 SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
9360 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
9361 SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE),
9362 };
9363
9364 int libbpf_register_prog_handler(const char *sec,
9365 enum bpf_prog_type prog_type,
9366 enum bpf_attach_type exp_attach_type,
9367 const struct libbpf_prog_handler_opts *opts)
9368 {
9369 struct bpf_sec_def *sec_def;
9370
9371 if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
9372 return libbpf_err(-EINVAL);
9373
9374 if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
9375 return libbpf_err(-E2BIG);
9376
9377 if (sec) {
9378 sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
9379 sizeof(*sec_def));
9380 if (!sec_def)
9381 return libbpf_err(-ENOMEM);
9382
9383 custom_sec_defs = sec_def;
9384 sec_def = &custom_sec_defs[custom_sec_def_cnt];
9385 } else {
9386 if (has_custom_fallback_def)
9387 return libbpf_err(-EBUSY);
9388
9389 sec_def = &custom_fallback_def;
9390 }
9391
9392 sec_def->sec = sec ? strdup(sec) : NULL;
9393 if (sec && !sec_def->sec)
9394 return libbpf_err(-ENOMEM);
9395
9396 sec_def->prog_type = prog_type;
9397 sec_def->expected_attach_type = exp_attach_type;
9398 sec_def->cookie = OPTS_GET(opts, cookie, 0);
9399
9400 sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9401 sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9402 sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9403
9404 sec_def->handler_id = ++last_custom_sec_def_handler_id;
9405
9406 if (sec)
9407 custom_sec_def_cnt++;
9408 else
9409 has_custom_fallback_def = true;
9410
9411 return sec_def->handler_id;
9412 }
9413
9414 int libbpf_unregister_prog_handler(int handler_id)
9415 {
9416 struct bpf_sec_def *sec_defs;
9417 int i;
9418
9419 if (handler_id <= 0)
9420 return libbpf_err(-EINVAL);
9421
9422 if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
9423 memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
9424 has_custom_fallback_def = false;
9425 return 0;
9426 }
9427
9428 for (i = 0; i < custom_sec_def_cnt; i++) {
9429 if (custom_sec_defs[i].handler_id == handler_id)
9430 break;
9431 }
9432
9433 if (i == custom_sec_def_cnt)
9434 return libbpf_err(-ENOENT);
9435
9436 free(custom_sec_defs[i].sec);
9437 for (i = i + 1; i < custom_sec_def_cnt; i++)
9438 custom_sec_defs[i - 1] = custom_sec_defs[i];
9439 custom_sec_def_cnt--;
9440
9441 /* try to shrink the array, but it's ok if we couldn't */
9442 sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
9443 /* if new count is zero, reallocarray can return a valid NULL result;
9444 * in this case the previous pointer will be freed, so we *have to*
9445 * reassign old pointer to the new value (even if it's NULL)
9446 */
9447 if (sec_defs || custom_sec_def_cnt == 0)
9448 custom_sec_defs = sec_defs;
9449
9450 return 0;
9451 }
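
/* Usage sketch (illustrative): registering and later removing a custom
 * section handler so that programs in a hypothetical SEC("my_kprobe/...")
 * are loaded as kprobes. The section prefix is an assumption of the
 * example.
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, hopts);
 *	int id;
 *
 *	id = libbpf_register_prog_handler("my_kprobe/", BPF_PROG_TYPE_KPROBE,
 *					  0, &hopts);
 *	if (id > 0) {
 *		... open and load objects ...
 *		libbpf_unregister_prog_handler(id);
 *	}
 */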
9452
9453 static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
9454 {
9455 size_t len = strlen(sec_def->sec);
9456
9457 /* "type/" always has to have proper SEC("type/extras") form */
9458 if (sec_def->sec[len - 1] == '/') {
9459 if (str_has_pfx(sec_name, sec_def->sec))
9460 return true;
9461 return false;
9462 }
9463
9464 /* "type+" means it can be either exact SEC("type") or
9465 * well-formed SEC("type/extras") with proper '/' separator
9466 */
9467 if (sec_def->sec[len - 1] == '+') {
9468 len--;
9469 /* not even a prefix */
9470 if (strncmp(sec_name, sec_def->sec, len) != 0)
9471 return false;
9472 /* exact match or has '/' separator */
9473 if (sec_name[len] == '\0' || sec_name[len] == '/')
9474 return true;
9475 return false;
9476 }
9477
9478 return strcmp(sec_name, sec_def->sec) == 0;
9479 }
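
/* Matching examples (illustrative): per the rules above, the "kprobe+"
 * definition matches both SEC("kprobe") and SEC("kprobe/do_unlinkat"); a
 * custom definition registered with a trailing '/', e.g. "my_sec/",
 * matches any SEC("my_sec/...") by prefix; and a plain "socket"
 * definition requires an exact SEC("socket") match.
 */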
9480
9481 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
9482 {
9483 const struct bpf_sec_def *sec_def;
9484 int i, n;
9485
9486 n = custom_sec_def_cnt;
9487 for (i = 0; i < n; i++) {
9488 sec_def = &custom_sec_defs[i];
9489 if (sec_def_matches(sec_def, sec_name))
9490 return sec_def;
9491 }
9492
9493 n = ARRAY_SIZE(section_defs);
9494 for (i = 0; i < n; i++) {
9495 sec_def = &section_defs[i];
9496 if (sec_def_matches(sec_def, sec_name))
9497 return sec_def;
9498 }
9499
9500 if (has_custom_fallback_def)
9501 return &custom_fallback_def;
9502
9503 return NULL;
9504 }
9505
9506 #define MAX_TYPE_NAME_SIZE 32
9507
9508 static char *libbpf_get_type_names(bool attach_type)
9509 {
9510 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
9511 char *buf;
9512
9513 buf = malloc(len);
9514 if (!buf)
9515 return NULL;
9516
9517 buf[0] = '\0';
9518 /* Fill buf with all available section names */
9519 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9520 const struct bpf_sec_def *sec_def = &section_defs[i];
9521
9522 if (attach_type) {
9523 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9524 continue;
9525
9526 if (!(sec_def->cookie & SEC_ATTACHABLE))
9527 continue;
9528 }
9529
9530 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
9531 free(buf);
9532 return NULL;
9533 }
9534 strcat(buf, " ");
9535 strcat(buf, section_defs[i].sec);
9536 }
9537
9538 return buf;
9539 }
9540
9541 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9542 enum bpf_attach_type *expected_attach_type)
9543 {
9544 const struct bpf_sec_def *sec_def;
9545 char *type_names;
9546
9547 if (!name)
9548 return libbpf_err(-EINVAL);
9549
9550 sec_def = find_sec_def(name);
9551 if (sec_def) {
9552 *prog_type = sec_def->prog_type;
9553 *expected_attach_type = sec_def->expected_attach_type;
9554 return 0;
9555 }
9556
9557 pr_debug("failed to guess program type from ELF section '%s'\n", name);
9558 type_names = libbpf_get_type_names(false);
9559 if (type_names != NULL) {
9560 pr_debug("supported section(type) names are:%s\n", type_names);
9561 free(type_names);
9562 }
9563
9564 return libbpf_err(-ESRCH);
9565 }
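
/* Usage sketch (illustrative): resolving types from a section name, e.g.
 * before loading hand-assembled instructions with bpf_prog_load().
 *
 *	enum bpf_prog_type ptype;
 *	enum bpf_attach_type atype;
 *	int err = libbpf_prog_type_by_name("cgroup/connect4", &ptype, &atype);
 *
 * On success this yields BPF_PROG_TYPE_CGROUP_SOCK_ADDR and
 * BPF_CGROUP_INET4_CONNECT, per section_defs[] above.
 */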
9566
9567 const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
9568 {
9569 if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
9570 return NULL;
9571
9572 return attach_type_name[t];
9573 }
9574
9575 const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
9576 {
9577 if (t < 0 || t >= ARRAY_SIZE(link_type_name))
9578 return NULL;
9579
9580 return link_type_name[t];
9581 }
9582
9583 const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
9584 {
9585 if (t < 0 || t >= ARRAY_SIZE(map_type_name))
9586 return NULL;
9587
9588 return map_type_name[t];
9589 }
9590
9591 const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
9592 {
9593 if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
9594 return NULL;
9595
9596 return prog_type_name[t];
9597 }
9598
9599 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9600 int sec_idx,
9601 size_t offset)
9602 {
9603 struct bpf_map *map;
9604 size_t i;
9605
9606 for (i = 0; i < obj->nr_maps; i++) {
9607 map = &obj->maps[i];
9608 if (!bpf_map__is_struct_ops(map))
9609 continue;
9610 if (map->sec_idx == sec_idx &&
9611 map->sec_offset <= offset &&
9612 offset - map->sec_offset < map->def.value_size)
9613 return map;
9614 }
9615
9616 return NULL;
9617 }
9618
9619 /* Collect the reloc from ELF and populate the st_ops->progs[] */
9620 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9621 Elf64_Shdr *shdr, Elf_Data *data)
9622 {
9623 const struct btf_member *member;
9624 struct bpf_struct_ops *st_ops;
9625 struct bpf_program *prog;
9626 unsigned int shdr_idx;
9627 const struct btf *btf;
9628 struct bpf_map *map;
9629 unsigned int moff, insn_idx;
9630 const char *name;
9631 __u32 member_idx;
9632 Elf64_Sym *sym;
9633 Elf64_Rel *rel;
9634 int i, nrels;
9635
9636 btf = obj->btf;
9637 nrels = shdr->sh_size / shdr->sh_entsize;
9638 for (i = 0; i < nrels; i++) {
9639 rel = elf_rel_by_idx(data, i);
9640 if (!rel) {
9641 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9642 return -LIBBPF_ERRNO__FORMAT;
9643 }
9644
9645 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9646 if (!sym) {
9647 pr_warn("struct_ops reloc: symbol %zx not found\n",
9648 (size_t)ELF64_R_SYM(rel->r_info));
9649 return -LIBBPF_ERRNO__FORMAT;
9650 }
9651
9652 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
9653 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
9654 if (!map) {
9655 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
9656 (size_t)rel->r_offset);
9657 return -EINVAL;
9658 }
9659
9660 moff = rel->r_offset - map->sec_offset;
9661 shdr_idx = sym->st_shndx;
9662 st_ops = map->st_ops;
9663 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
9664 map->name,
9665 (long long)(rel->r_info >> 32),
9666 (long long)sym->st_value,
9667 shdr_idx, (size_t)rel->r_offset,
9668 map->sec_offset, sym->st_name, name);
9669
9670 if (shdr_idx >= SHN_LORESERVE) {
9671 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
9672 map->name, (size_t)rel->r_offset, shdr_idx);
9673 return -LIBBPF_ERRNO__RELOC;
9674 }
9675 if (sym->st_value % BPF_INSN_SZ) {
9676 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
9677 map->name, (unsigned long long)sym->st_value);
9678 return -LIBBPF_ERRNO__FORMAT;
9679 }
9680 insn_idx = sym->st_value / BPF_INSN_SZ;
9681
9682 member = find_member_by_offset(st_ops->type, moff * 8);
9683 if (!member) {
9684 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9685 map->name, moff);
9686 return -EINVAL;
9687 }
9688 member_idx = member - btf_members(st_ops->type);
9689 name = btf__name_by_offset(btf, member->name_off);
9690
9691 if (!resolve_func_ptr(btf, member->type, NULL)) {
9692 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9693 map->name, name);
9694 return -EINVAL;
9695 }
9696
9697 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9698 if (!prog) {
9699 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9700 map->name, shdr_idx, name);
9701 return -EINVAL;
9702 }
9703
9704 /* prevent the use of BPF prog with invalid type */
9705 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
9706 pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
9707 map->name, prog->name);
9708 return -EINVAL;
9709 }
9710
9711 /* if we haven't yet processed this BPF program, record proper
9712 * attach_btf_id and member_idx
9713 */
9714 if (!prog->attach_btf_id) {
9715 prog->attach_btf_id = st_ops->type_id;
9716 prog->expected_attach_type = member_idx;
9717 }
9718
9719 /* struct_ops BPF prog can be re-used between multiple
9720 * .struct_ops & .struct_ops.link as long as it's the
9721 * same struct_ops struct definition and the same
9722 * function pointer field
9723 */
9724 if (prog->attach_btf_id != st_ops->type_id ||
9725 prog->expected_attach_type != member_idx) {
9726 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
9727 map->name, prog->name, prog->sec_name, prog->type,
9728 prog->attach_btf_id, prog->expected_attach_type, name);
9729 return -EINVAL;
9730 }
9731
9732 st_ops->progs[member_idx] = prog;
9733 }
9734
9735 return 0;
9736 }
9737
9738 #define BTF_TRACE_PREFIX "btf_trace_"
9739 #define BTF_LSM_PREFIX "bpf_lsm_"
9740 #define BTF_ITER_PREFIX "bpf_iter_"
9741 #define BTF_MAX_NAME_SIZE 128
9742
9743 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
9744 const char **prefix, int *kind)
9745 {
9746 switch (attach_type) {
9747 case BPF_TRACE_RAW_TP:
9748 *prefix = BTF_TRACE_PREFIX;
9749 *kind = BTF_KIND_TYPEDEF;
9750 break;
9751 case BPF_LSM_MAC:
9752 case BPF_LSM_CGROUP:
9753 *prefix = BTF_LSM_PREFIX;
9754 *kind = BTF_KIND_FUNC;
9755 break;
9756 case BPF_TRACE_ITER:
9757 *prefix = BTF_ITER_PREFIX;
9758 *kind = BTF_KIND_FUNC;
9759 break;
9760 default:
9761 *prefix = "";
9762 *kind = BTF_KIND_FUNC;
9763 }
9764 }
9765
9766 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9767 const char *name, __u32 kind)
9768 {
9769 char btf_type_name[BTF_MAX_NAME_SIZE];
9770 int ret;
9771
9772 ret = snprintf(btf_type_name, sizeof(btf_type_name),
9773 "%s%s", prefix, name);
9774 /* snprintf returns the number of characters that would have been
9775 * written, excluding the terminating null. So a return value
9776 * >= BTF_MAX_NAME_SIZE indicates truncation.
9777 */
9778 if (ret < 0 || ret >= sizeof(btf_type_name))
9779 return -ENAMETOOLONG;
9780 return btf__find_by_name_kind(btf, btf_type_name, kind);
9781 }
9782
9783 static inline int find_attach_btf_id(struct btf *btf, const char *name,
9784 enum bpf_attach_type attach_type)
9785 {
9786 const char *prefix;
9787 int kind;
9788
9789 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9790 return find_btf_by_prefix_kind(btf, prefix, name, kind);
9791 }
9792
9793 int libbpf_find_vmlinux_btf_id(const char *name,
9794 enum bpf_attach_type attach_type)
9795 {
9796 struct btf *btf;
9797 int err;
9798
9799 btf = btf__load_vmlinux_btf();
9800 err = libbpf_get_error(btf);
9801 if (err) {
9802 pr_warn("vmlinux BTF is not found\n");
9803 return libbpf_err(err);
9804 }
9805
9806 err = find_attach_btf_id(btf, name, attach_type);
9807 if (err <= 0)
9808 pr_warn("%s is not found in vmlinux BTF\n", name);
9809
9810 btf__free(btf);
9811 return libbpf_err(err);
9812 }
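
/* Usage sketch (illustrative): finding the BTF ID that an fentry program
 * targeting a kernel function would attach to; "do_unlinkat" is just a
 * commonly used example symbol.
 *
 *	int btf_id = libbpf_find_vmlinux_btf_id("do_unlinkat",
 *						BPF_TRACE_FENTRY);
 *
 * A positive return value can then be used as attach_btf_id when loading
 * the program manually.
 */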
9813
9814 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9815 {
9816 struct bpf_prog_info info;
9817 __u32 info_len = sizeof(info);
9818 struct btf *btf;
9819 int err;
9820
9821 memset(&info, 0, info_len);
9822 err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
9823 if (err) {
9824 pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
9825 attach_prog_fd, err);
9826 return err;
9827 }
9828
9829 err = -EINVAL;
9830 if (!info.btf_id) {
9831 pr_warn("The target program doesn't have BTF\n");
9832 goto out;
9833 }
9834 btf = btf__load_from_kernel_by_id(info.btf_id);
9835 err = libbpf_get_error(btf);
9836 if (err) {
9837 pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
9838 goto out;
9839 }
9840 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9841 btf__free(btf);
9842 if (err <= 0) {
9843 pr_warn("%s is not found in prog's BTF\n", name);
9844 goto out;
9845 }
9846 out:
9847 return err;
9848 }
9849
9850 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9851 enum bpf_attach_type attach_type,
9852 int *btf_obj_fd, int *btf_type_id)
9853 {
9854 int ret, i;
9855
9856 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9857 if (ret > 0) {
9858 *btf_obj_fd = 0; /* vmlinux BTF */
9859 *btf_type_id = ret;
9860 return 0;
9861 }
9862 if (ret != -ENOENT)
9863 return ret;
9864
9865 ret = load_module_btfs(obj);
9866 if (ret)
9867 return ret;
9868
9869 for (i = 0; i < obj->btf_module_cnt; i++) {
9870 const struct module_btf *mod = &obj->btf_modules[i];
9871
9872 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9873 if (ret > 0) {
9874 *btf_obj_fd = mod->fd;
9875 *btf_type_id = ret;
9876 return 0;
9877 }
9878 if (ret == -ENOENT)
9879 continue;
9880
9881 return ret;
9882 }
9883
9884 return -ESRCH;
9885 }
9886
9887 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9888 int *btf_obj_fd, int *btf_type_id)
9889 {
9890 enum bpf_attach_type attach_type = prog->expected_attach_type;
9891 __u32 attach_prog_fd = prog->attach_prog_fd;
9892 int err = 0;
9893
9894 /* BPF program's BTF ID */
9895 if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
9896 if (!attach_prog_fd) {
9897 pr_warn("prog '%s': attach program FD is not set\n", prog->name);
9898 return -EINVAL;
9899 }
9900 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9901 if (err < 0) {
9902 pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9903 prog->name, attach_prog_fd, attach_name, err);
9904 return err;
9905 }
9906 *btf_obj_fd = 0;
9907 *btf_type_id = err;
9908 return 0;
9909 }
9910
9911 /* kernel/module BTF ID */
9912 if (prog->obj->gen_loader) {
9913 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9914 *btf_obj_fd = 0;
9915 *btf_type_id = 1;
9916 } else {
9917 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9918 }
9919 if (err) {
9920 pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
9921 prog->name, attach_name, err);
9922 return err;
9923 }
9924 return 0;
9925 }
9926
9927 int libbpf_attach_type_by_name(const char *name,
9928 enum bpf_attach_type *attach_type)
9929 {
9930 char *type_names;
9931 const struct bpf_sec_def *sec_def;
9932
9933 if (!name)
9934 return libbpf_err(-EINVAL);
9935
9936 sec_def = find_sec_def(name);
9937 if (!sec_def) {
9938 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9939 type_names = libbpf_get_type_names(true);
9940 if (type_names != NULL) {
9941 pr_debug("attachable section(type) names are:%s\n", type_names);
9942 free(type_names);
9943 }
9944
9945 return libbpf_err(-EINVAL);
9946 }
9947
9948 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9949 return libbpf_err(-EINVAL);
9950 if (!(sec_def->cookie & SEC_ATTACHABLE))
9951 return libbpf_err(-EINVAL);
9952
9953 *attach_type = sec_def->expected_attach_type;
9954 return 0;
9955 }
9956
9957 int bpf_map__fd(const struct bpf_map *map)
9958 {
9959 return map ? map->fd : libbpf_err(-EINVAL);
9960 }
9961
9962 static bool map_uses_real_name(const struct bpf_map *map)
9963 {
9964 /* Since libbpf started to support custom .data.* and .rodata.* maps,
9965 * their user-visible name differs from the kernel-visible name. Users
9966 * see such a map's ELF section name as the map name.
9967 * This check distinguishes .data/.rodata from .data.* and .rodata.*
9968 * maps to know which name has to be returned to the user.
9969 */
9970 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
9971 return true;
9972 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
9973 return true;
9974 return false;
9975 }
9976
9977 const char *bpf_map__name(const struct bpf_map *map)
9978 {
9979 if (!map)
9980 return NULL;
9981
9982 if (map_uses_real_name(map))
9983 return map->real_name;
9984
9985 return map->name;
9986 }
9987
9988 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9989 {
9990 return map->def.type;
9991 }
9992
9993 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9994 {
9995 if (map->fd >= 0)
9996 return libbpf_err(-EBUSY);
9997 map->def.type = type;
9998 return 0;
9999 }
10000
10001 __u32 bpf_map__map_flags(const struct bpf_map *map)
10002 {
10003 return map->def.map_flags;
10004 }
10005
10006 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
10007 {
10008 if (map->fd >= 0)
10009 return libbpf_err(-EBUSY);
10010 map->def.map_flags = flags;
10011 return 0;
10012 }
10013
10014 __u64 bpf_map__map_extra(const struct bpf_map *map)
10015 {
10016 return map->map_extra;
10017 }
10018
10019 int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
10020 {
10021 if (map->fd >= 0)
10022 return libbpf_err(-EBUSY);
10023 map->map_extra = map_extra;
10024 return 0;
10025 }
10026
10027 __u32 bpf_map__numa_node(const struct bpf_map *map)
10028 {
10029 return map->numa_node;
10030 }
10031
10032 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
10033 {
10034 if (map->fd >= 0)
10035 return libbpf_err(-EBUSY);
10036 map->numa_node = numa_node;
10037 return 0;
10038 }
10039
10040 __u32 bpf_map__key_size(const struct bpf_map *map)
10041 {
10042 return map->def.key_size;
10043 }
10044
10045 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
10046 {
10047 if (map->fd >= 0)
10048 return libbpf_err(-EBUSY);
10049 map->def.key_size = size;
10050 return 0;
10051 }
10052
10053 __u32 bpf_map__value_size(const struct bpf_map *map)
10054 {
10055 return map->def.value_size;
10056 }
10057
10058 static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
10059 {
10060 struct btf *btf;
10061 struct btf_type *datasec_type, *var_type;
10062 struct btf_var_secinfo *var;
10063 const struct btf_type *array_type;
10064 const struct btf_array *array;
10065 int vlen, element_sz, new_array_id;
10066 __u32 nr_elements;
10067
10068 /* check btf existence */
10069 btf = bpf_object__btf(map->obj);
10070 if (!btf)
10071 return -ENOENT;
10072
10073 /* verify map is datasec */
10074 datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
10075 if (!btf_is_datasec(datasec_type)) {
10076 pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
10077 bpf_map__name(map));
10078 return -EINVAL;
10079 }
10080
10081 /* verify datasec has at least one var */
10082 vlen = btf_vlen(datasec_type);
10083 if (vlen == 0) {
10084 pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
10085 bpf_map__name(map));
10086 return -EINVAL;
10087 }
10088
10089 /* verify last var in the datasec is an array */
10090 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10091 var_type = btf_type_by_id(btf, var->type);
10092 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
10093 if (!btf_is_array(array_type)) {
10094 pr_warn("map '%s': cannot be resized, last var must be an array\n",
10095 bpf_map__name(map));
10096 return -EINVAL;
10097 }
10098
10099 /* verify request size aligns with array */
10100 array = btf_array(array_type);
10101 element_sz = btf__resolve_size(btf, array->type);
10102 if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
10103 pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
10104 bpf_map__name(map), element_sz, size);
10105 return -EINVAL;
10106 }
10107
10108 /* create a new array based on the existing array, but with new length */
10109 nr_elements = (size - var->offset) / element_sz;
10110 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10111 if (new_array_id < 0)
10112 return new_array_id;
10113
10114 /* adding a new btf type invalidates existing pointers to btf objects,
10115 * so refresh pointers before proceeding
10116 */
10117 datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10118 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10119 var_type = btf_type_by_id(btf, var->type);
10120
10121 /* finally update btf info */
10122 datasec_type->size = size;
10123 var->size = size - var->offset;
10124 var_type->type = new_array_id;
10125
10126 return 0;
10127 }
10128
10129 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
10130 {
10131 if (map->fd >= 0)
10132 return libbpf_err(-EBUSY);
10133
10134 if (map->mmaped) {
10135 int err;
10136 size_t mmap_old_sz, mmap_new_sz;
10137
10138 mmap_old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
10139 mmap_new_sz = bpf_map_mmap_sz(size, map->def.max_entries);
10140 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
10141 if (err) {
10142 pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
10143 bpf_map__name(map), err);
10144 return err;
10145 }
10146 err = map_btf_datasec_resize(map, size);
10147 if (err && err != -ENOENT) {
10148 pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
10149 bpf_map__name(map), err);
10150 map->btf_value_type_id = 0;
10151 map->btf_key_type_id = 0;
10152 }
10153 }
10154
10155 map->def.value_size = size;
10156 return 0;
10157 }
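
/* Usage sketch (illustrative): growing a global-data map before load,
 * assuming its last variable is an array (see map_btf_datasec_resize()
 * above). The map name ".data.my_buf" and the new size are assumptions
 * of the example.
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, ".data.my_buf");
 *
 *	if (m && bpf_map__set_value_size(m, 4096))
 *		... handle error ...
 */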
10158
10159 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
10160 {
10161 return map ? map->btf_key_type_id : 0;
10162 }
10163
10164 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
10165 {
10166 return map ? map->btf_value_type_id : 0;
10167 }
10168
10169 int bpf_map__set_initial_value(struct bpf_map *map,
10170 const void *data, size_t size)
10171 {
10172 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
10173 size != map->def.value_size || map->fd >= 0)
10174 return libbpf_err(-EINVAL);
10175
10176 memcpy(map->mmaped, data, size);
10177 return 0;
10178 }
10179
10180 void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
10181 {
10182 if (!map->mmaped)
10183 return NULL;
10184 *psize = map->def.value_size;
10185 return map->mmaped;
10186 }
10187
10188 bool bpf_map__is_internal(const struct bpf_map *map)
10189 {
10190 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10191 }
10192
10193 __u32 bpf_map__ifindex(const struct bpf_map *map)
10194 {
10195 return map->map_ifindex;
10196 }
10197
10198 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
10199 {
10200 if (map->fd >= 0)
10201 return libbpf_err(-EBUSY);
10202 map->map_ifindex = ifindex;
10203 return 0;
10204 }
10205
10206 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
10207 {
10208 if (!bpf_map_type__is_map_in_map(map->def.type)) {
10209 pr_warn("error: unsupported map type\n");
10210 return libbpf_err(-EINVAL);
10211 }
10212 if (map->inner_map_fd != -1) {
10213 pr_warn("error: inner_map_fd already specified\n");
10214 return libbpf_err(-EINVAL);
10215 }
10216 if (map->inner_map) {
10217 bpf_map__destroy(map->inner_map);
10218 zfree(&map->inner_map);
10219 }
10220 map->inner_map_fd = fd;
10221 return 0;
10222 }
10223
10224 static struct bpf_map *
10225 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
10226 {
10227 ssize_t idx;
10228 struct bpf_map *s, *e;
10229
10230 if (!obj || !obj->maps)
10231 return errno = EINVAL, NULL;
10232
10233 s = obj->maps;
10234 e = obj->maps + obj->nr_maps;
10235
10236 if ((m < s) || (m >= e)) {
10237 pr_warn("error in %s: map handler doesn't belong to object\n",
10238 __func__);
10239 return errno = EINVAL, NULL;
10240 }
10241
10242 idx = (m - obj->maps) + i;
10243 if (idx >= obj->nr_maps || idx < 0)
10244 return NULL;
10245 return &obj->maps[idx];
10246 }
10247
10248 struct bpf_map *
10249 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
10250 {
10251 if (prev == NULL)
10252 return obj->maps;
10253
10254 return __bpf_map__iter(prev, obj, 1);
10255 }
10256
10257 struct bpf_map *
10258 bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
10259 {
10260 if (next == NULL) {
10261 if (!obj->nr_maps)
10262 return NULL;
10263 return obj->maps + obj->nr_maps - 1;
10264 }
10265
10266 return __bpf_map__iter(next, obj, -1);
10267 }
10268
10269 struct bpf_map *
10270 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
10271 {
10272 struct bpf_map *pos;
10273
10274 bpf_object__for_each_map(pos, obj) {
10275 /* if it's a special internal map name (which always starts
10276 * with a dot) then check if that special name matches the
10277 * real map name (ELF section name)
10278 */
10279 if (name[0] == '.') {
10280 if (pos->real_name && strcmp(pos->real_name, name) == 0)
10281 return pos;
10282 continue;
10283 }
10284 /* otherwise map name has to be an exact match */
10285 if (map_uses_real_name(pos)) {
10286 if (strcmp(pos->real_name, name) == 0)
10287 return pos;
10288 continue;
10289 }
10290 if (strcmp(pos->name, name) == 0)
10291 return pos;
10292 }
10293 return errno = ENOENT, NULL;
10294 }
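
/* Usage sketch (illustrative): both internal (global data) and regular
 * maps can be looked up by name; the dot-prefixed form uses the real
 * (ELF section) name per the rules above. "counters" is a hypothetical
 * map name.
 *
 *	struct bpf_map *bss = bpf_object__find_map_by_name(obj, ".bss");
 *	struct bpf_map *cnt = bpf_object__find_map_by_name(obj, "counters");
 */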
10295
10296 int
10297 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
10298 {
10299 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10300 }
10301
10302 static int validate_map_op(const struct bpf_map *map, size_t key_sz,
10303 size_t value_sz, bool check_value_sz)
10304 {
10305 if (map->fd <= 0)
10306 return -ENOENT;
10307
10308 if (map->def.key_size != key_sz) {
10309 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
10310 map->name, key_sz, map->def.key_size);
10311 return -EINVAL;
10312 }
10313
10314 if (!check_value_sz)
10315 return 0;
10316
10317 switch (map->def.type) {
10318 case BPF_MAP_TYPE_PERCPU_ARRAY:
10319 case BPF_MAP_TYPE_PERCPU_HASH:
10320 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
10321 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
10322 int num_cpu = libbpf_num_possible_cpus();
10323 size_t elem_sz = roundup(map->def.value_size, 8);
10324
10325 if (value_sz != num_cpu * elem_sz) {
10326 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10327 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10328 return -EINVAL;
10329 }
10330 break;
10331 }
10332 default:
10333 if (map->def.value_size != value_sz) {
10334 pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
10335 map->name, value_sz, map->def.value_size);
10336 return -EINVAL;
10337 }
10338 break;
10339 }
10340 return 0;
10341 }
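
/* Sizing example (illustrative): for a BPF_MAP_TYPE_PERCPU_ARRAY with a
 * value_size of 4 on a host with, say, 8 possible CPUs, the expected
 * value buffer is 8 * roundup(4, 8) = 64 bytes, i.e. one 8-byte-aligned
 * slot per possible CPU:
 *
 *	__u32 key = 0;
 *	__u64 vals[8];
 *	int err = bpf_map__lookup_elem(map, &key, sizeof(key),
 *				       vals, sizeof(vals), 0);
 */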
10342
10343 int bpf_map__lookup_elem(const struct bpf_map *map,
10344 const void *key, size_t key_sz,
10345 void *value, size_t value_sz, __u64 flags)
10346 {
10347 int err;
10348
10349 err = validate_map_op(map, key_sz, value_sz, true);
10350 if (err)
10351 return libbpf_err(err);
10352
10353 return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10354 }
10355
10356 int bpf_map__update_elem(const struct bpf_map *map,
10357 const void *key, size_t key_sz,
10358 const void *value, size_t value_sz, __u64 flags)
10359 {
10360 int err;
10361
10362 err = validate_map_op(map, key_sz, value_sz, true);
10363 if (err)
10364 return libbpf_err(err);
10365
10366 return bpf_map_update_elem(map->fd, key, value, flags);
10367 }
10368
10369 int bpf_map__delete_elem(const struct bpf_map *map,
10370 const void *key, size_t key_sz, __u64 flags)
10371 {
10372 int err;
10373
10374 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10375 if (err)
10376 return libbpf_err(err);
10377
10378 return bpf_map_delete_elem_flags(map->fd, key, flags);
10379 }
10380
10381 int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
10382 const void *key, size_t key_sz,
10383 void *value, size_t value_sz, __u64 flags)
10384 {
10385 int err;
10386
10387 err = validate_map_op(map, key_sz, value_sz, true);
10388 if (err)
10389 return libbpf_err(err);
10390
10391 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10392 }
10393
10394 int bpf_map__get_next_key(const struct bpf_map *map,
10395 const void *cur_key, void *next_key, size_t key_sz)
10396 {
10397 int err;
10398
10399 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10400 if (err)
10401 return libbpf_err(err);
10402
10403 return bpf_map_get_next_key(map->fd, cur_key, next_key);
10404 }
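
/* Usage sketch (illustrative): iterating all keys of a loaded map with
 * __u32 keys; passing NULL as cur_key starts the iteration and a
 * negative return (no next key) ends it.
 *
 *	__u32 cur, next;
 *	void *prev = NULL;
 *
 *	while (!bpf_map__get_next_key(map, prev, &next, sizeof(next))) {
 *		cur = next;
 *		prev = &cur;
 *		... look up / process cur ...
 *	}
 */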
10405
10406 long libbpf_get_error(const void *ptr)
10407 {
10408 if (!IS_ERR_OR_NULL(ptr))
10409 return 0;
10410
10411 if (IS_ERR(ptr))
10412 errno = -PTR_ERR(ptr);
10413
10414 /* If ptr == NULL, then errno should be already set by the failing
10415 * API, because libbpf never returns NULL on success and it now always
10416 * sets errno on error. So no extra errno handling for ptr == NULL
10417 * case.
10418 */
10419 return -errno;
10420 }
10421
10422 /* Replace link's underlying BPF program with the new one */
10423 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10424 {
10425 int ret;
10426
10427 ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
10428 return libbpf_err_errno(ret);
10429 }
10430
10431 /* Release "ownership" of the underlying BPF resource (typically, a BPF
10432 * program attached to some BPF hook, e.g., tracepoint, kprobe, etc). A
10433 * disconnected link, when destroyed through a bpf_link__destroy() call,
10434 * won't attempt to detach/unregister that BPF resource. This is useful in
10435 * situations where, say, an attached BPF program has to outlive the
10436 * userspace program that attached it. Depending on the type of BPF
10437 * program, though, additional steps (like pinning the BPF program in BPF
10438 * FS) might be necessary to ensure that exit of the userspace program
10439 * doesn't trigger automatic detachment and cleanup inside the kernel.
10440 */
10441 void bpf_link__disconnect(struct bpf_link *link)
10442 {
10443 link->disconnected = true;
10444 }
10445
10446 int bpf_link__destroy(struct bpf_link *link)
10447 {
10448 int err = 0;
10449
10450 if (IS_ERR_OR_NULL(link))
10451 return 0;
10452
10453 if (!link->disconnected && link->detach)
10454 err = link->detach(link);
10455 if (link->pin_path)
10456 free(link->pin_path);
10457 if (link->dealloc)
10458 link->dealloc(link);
10459 else
10460 free(link);
10461
10462 return libbpf_err(err);
10463 }
10464
10465 int bpf_link__fd(const struct bpf_link *link)
10466 {
10467 return link->fd;
10468 }
10469
10470 const char *bpf_link__pin_path(const struct bpf_link *link)
10471 {
10472 return link->pin_path;
10473 }
10474
10475 static int bpf_link__detach_fd(struct bpf_link *link)
10476 {
10477 return libbpf_err_errno(close(link->fd));
10478 }
10479
10480 struct bpf_link *bpf_link__open(const char *path)
10481 {
10482 struct bpf_link *link;
10483 int fd;
10484
10485 fd = bpf_obj_get(path);
10486 if (fd < 0) {
10487 fd = -errno;
10488 pr_warn("failed to open link at %s: %d\n", path, fd);
10489 return libbpf_err_ptr(fd);
10490 }
10491
10492 link = calloc(1, sizeof(*link));
10493 if (!link) {
10494 close(fd);
10495 return libbpf_err_ptr(-ENOMEM);
10496 }
10497 link->detach = &bpf_link__detach_fd;
10498 link->fd = fd;
10499
10500 link->pin_path = strdup(path);
10501 if (!link->pin_path) {
10502 bpf_link__destroy(link);
10503 return libbpf_err_ptr(-ENOMEM);
10504 }
10505
10506 return link;
10507 }
10508
10509 int bpf_link__detach(struct bpf_link *link)
10510 {
10511 return bpf_link_detach(link->fd) ? -errno : 0;
10512 }
10513
10514 int bpf_link__pin(struct bpf_link *link, const char *path)
10515 {
10516 int err;
10517
10518 if (link->pin_path)
10519 return libbpf_err(-EBUSY);
10520 err = make_parent_dir(path);
10521 if (err)
10522 return libbpf_err(err);
10523 err = check_path(path);
10524 if (err)
10525 return libbpf_err(err);
10526
10527 link->pin_path = strdup(path);
10528 if (!link->pin_path)
10529 return libbpf_err(-ENOMEM);
10530
10531 if (bpf_obj_pin(link->fd, link->pin_path)) {
10532 err = -errno;
10533 zfree(&link->pin_path);
10534 return libbpf_err(err);
10535 }
10536
10537 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10538 return 0;
10539 }
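
/* Usage sketch (illustrative): pinning a link into BPF FS so the
 * attachment survives process exit; the pin path is an assumption of the
 * example.
 *
 *	struct bpf_link *l = bpf_program__attach(prog);
 *
 *	if (l && !bpf_link__pin(l, "/sys/fs/bpf/my_link"))
 *		bpf_link__disconnect(l);
 *
 * After disconnecting, bpf_link__destroy() frees only the userspace
 * object, while the pinned link keeps the attachment alive.
 */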
10540
10541 int bpf_link__unpin(struct bpf_link *link)
10542 {
10543 int err;
10544
10545 if (!link->pin_path)
10546 return libbpf_err(-EINVAL);
10547
10548 err = unlink(link->pin_path);
10549 if (err != 0)
10550 return -errno;
10551
10552 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10553 zfree(&link->pin_path);
10554 return 0;
10555 }
10556
10557 struct bpf_link_perf {
10558 struct bpf_link link;
10559 int perf_event_fd;
10560 /* legacy kprobe support: keep track of probe identifier and type */
10561 char *legacy_probe_name;
10562 bool legacy_is_kprobe;
10563 bool legacy_is_retprobe;
10564 };
10565
10566 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
10567 static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
10568
10569 static int bpf_link_perf_detach(struct bpf_link *link)
10570 {
10571 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10572 int err = 0;
10573
10574 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
10575 err = -errno;
10576
10577 if (perf_link->perf_event_fd != link->fd)
10578 close(perf_link->perf_event_fd);
10579 close(link->fd);
10580
10581 /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
10582 if (perf_link->legacy_probe_name) {
10583 if (perf_link->legacy_is_kprobe) {
10584 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10585 perf_link->legacy_is_retprobe);
10586 } else {
10587 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10588 perf_link->legacy_is_retprobe);
10589 }
10590 }
10591
10592 return err;
10593 }
10594
10595 static void bpf_link_perf_dealloc(struct bpf_link *link)
10596 {
10597 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10598
10599 free(perf_link->legacy_probe_name);
10600 free(perf_link);
10601 }
10602
10603 struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
10604 const struct bpf_perf_event_opts *opts)
10605 {
10606 char errmsg[STRERR_BUFSIZE];
10607 struct bpf_link_perf *link;
10608 int prog_fd, link_fd = -1, err;
10609 bool force_ioctl_attach;
10610
10611 if (!OPTS_VALID(opts, bpf_perf_event_opts))
10612 return libbpf_err_ptr(-EINVAL);
10613
10614 if (pfd < 0) {
10615 pr_warn("prog '%s': invalid perf event FD %d\n",
10616 prog->name, pfd);
10617 return libbpf_err_ptr(-EINVAL);
10618 }
10619 prog_fd = bpf_program__fd(prog);
10620 if (prog_fd < 0) {
10621 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
10622 prog->name);
10623 return libbpf_err_ptr(-EINVAL);
10624 }
10625
10626 link = calloc(1, sizeof(*link));
10627 if (!link)
10628 return libbpf_err_ptr(-ENOMEM);
10629 link->link.detach = &bpf_link_perf_detach;
10630 link->link.dealloc = &bpf_link_perf_dealloc;
10631 link->perf_event_fd = pfd;
10632
10633 force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
10634 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
10635 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
10636 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
10637
10638 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
10639 if (link_fd < 0) {
10640 err = -errno;
10641 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
10642 prog->name, pfd,
10643 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10644 goto err_out;
10645 }
10646 link->link.fd = link_fd;
10647 } else {
10648 if (OPTS_GET(opts, bpf_cookie, 0)) {
10649 pr_warn("prog '%s': user context value is not supported\n", prog->name);
10650 err = -EOPNOTSUPP;
10651 goto err_out;
10652 }
10653
10654 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10655 err = -errno;
10656 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
10657 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10658 if (err == -EPROTO)
10659 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10660 prog->name, pfd);
10661 goto err_out;
10662 }
10663 link->link.fd = pfd;
10664 }
10665 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10666 err = -errno;
10667 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
10668 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10669 goto err_out;
10670 }
10671
10672 return &link->link;
10673 err_out:
10674 if (link_fd >= 0)
10675 close(link_fd);
10676 free(link);
10677 return libbpf_err_ptr(err);
10678 }
10679
10680 struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
10681 {
10682 return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
10683 }
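
/* Usage sketch (illustrative): attaching a SEC("perf_event") program to
 * a caller-opened CPU-clock software event sampling at 99 Hz on CPU 0
 * across all processes (pid = -1, cpu = 0); the attribute values are
 * example choices, not requirements.
 *
 *	struct perf_event_attr pattr = {
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *		.size = sizeof(pattr),
 *		.freq = 1,
 *		.sample_freq = 99,
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &pattr, -1, 0, -1,
 *			  PERF_FLAG_FD_CLOEXEC);
 *	struct bpf_link *l = bpf_program__attach_perf_event(prog, pfd);
 */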
10684
10685 /*
10686 * this function is expected to parse an integer in the range of [0, 2^31-1]
10687 * from the given file using scanf format string fmt. If the actual parsed
10688 * value is negative, the result is indistinguishable from an error.
10689 */
10690 static int parse_uint_from_file(const char *file, const char *fmt)
10691 {
10692 char buf[STRERR_BUFSIZE];
10693 int err, ret;
10694 FILE *f;
10695
10696 f = fopen(file, "re");
10697 if (!f) {
10698 err = -errno;
10699 pr_debug("failed to open '%s': %s\n", file,
10700 libbpf_strerror_r(err, buf, sizeof(buf)));
10701 return err;
10702 }
10703 err = fscanf(f, fmt, &ret);
10704 if (err != 1) {
10705 err = err == EOF ? -EIO : -errno;
10706 pr_debug("failed to parse '%s': %s\n", file,
10707 libbpf_strerror_r(err, buf, sizeof(buf)));
10708 fclose(f);
10709 return err;
10710 }
10711 fclose(f);
10712 return ret;
10713 }
10714
10715 static int determine_kprobe_perf_type(void)
10716 {
10717 const char *file = "/sys/bus/event_source/devices/kprobe/type";
10718
10719 return parse_uint_from_file(file, "%d\n");
10720 }
10721
10722 static int determine_uprobe_perf_type(void)
10723 {
10724 const char *file = "/sys/bus/event_source/devices/uprobe/type";
10725
10726 return parse_uint_from_file(file, "%d\n");
10727 }
10728
10729 static int determine_kprobe_retprobe_bit(void)
10730 {
10731 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10732
10733 return parse_uint_from_file(file, "config:%d\n");
10734 }
10735
10736 static int determine_uprobe_retprobe_bit(void)
10737 {
10738 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10739
10740 return parse_uint_from_file(file, "config:%d\n");
10741 }
10742
10743 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
10744 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
10745
10746 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
10747 uint64_t offset, int pid, size_t ref_ctr_off)
10748 {
10749 const size_t attr_sz = sizeof(struct perf_event_attr);
10750 struct perf_event_attr attr;
10751 char errmsg[STRERR_BUFSIZE];
10752 int type, pfd;
10753
10754 if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
10755 return -EINVAL;
10756
10757 memset(&attr, 0, attr_sz);
10758
10759 type = uprobe ? determine_uprobe_perf_type()
10760 : determine_kprobe_perf_type();
10761 if (type < 0) {
10762 pr_warn("failed to determine %s perf type: %s\n",
10763 uprobe ? "uprobe" : "kprobe",
10764 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10765 return type;
10766 }
10767 if (retprobe) {
10768 int bit = uprobe ? determine_uprobe_retprobe_bit()
10769 : determine_kprobe_retprobe_bit();
10770
10771 if (bit < 0) {
10772 pr_warn("failed to determine %s retprobe bit: %s\n",
10773 uprobe ? "uprobe" : "kprobe",
10774 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
10775 return bit;
10776 }
10777 attr.config |= 1 << bit;
10778 }
10779 attr.size = attr_sz;
10780 attr.type = type;
10781 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10782 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10783 attr.config2 = offset; /* kprobe_addr or probe_offset */
10784
10785 /* pid filter is meaningful only for uprobes */
10786 pfd = syscall(__NR_perf_event_open, &attr,
10787 pid < 0 ? -1 : pid /* pid */,
10788 pid == -1 ? 0 : -1 /* cpu */,
10789 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10790 return pfd >= 0 ? pfd : -errno;
10791 }
10792
10793 static int append_to_file(const char *file, const char *fmt, ...)
10794 {
10795 int fd, n, err = 0;
10796 va_list ap;
10797 char buf[1024];
10798
10799 va_start(ap, fmt);
10800 n = vsnprintf(buf, sizeof(buf), fmt, ap);
10801 va_end(ap);
10802
10803 if (n < 0 || n >= sizeof(buf))
10804 return -EINVAL;
10805
10806 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
10807 if (fd < 0)
10808 return -errno;
10809
10810 if (write(fd, buf, n) < 0)
10811 err = -errno;
10812
10813 close(fd);
10814 return err;
10815 }
10816
10817 #define DEBUGFS "/sys/kernel/debug/tracing"
10818 #define TRACEFS "/sys/kernel/tracing"
10819
10820 static bool use_debugfs(void)
10821 {
10822 static int has_debugfs = -1;
10823
10824 if (has_debugfs < 0)
10825 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
10826
10827 return has_debugfs == 1;
10828 }
10829
10830 static const char *tracefs_path(void)
10831 {
10832 return use_debugfs() ? DEBUGFS : TRACEFS;
10833 }
10834
10835 static const char *tracefs_kprobe_events(void)
10836 {
10837 return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
10838 }
10839
10840 static const char *tracefs_uprobe_events(void)
10841 {
10842 return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
10843 }
10844
10845 static const char *tracefs_available_filter_functions(void)
10846 {
10847 return use_debugfs() ? DEBUGFS"/available_filter_functions"
10848 : TRACEFS"/available_filter_functions";
10849 }
10850
10851 static const char *tracefs_available_filter_functions_addrs(void)
10852 {
10853 return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
10854 : TRACEFS"/available_filter_functions_addrs";
10855 }
10856
10857 static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
10858 const char *kfunc_name, size_t offset)
10859 {
10860 static int index = 0;
10861 int i;
10862
10863 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
10864 __sync_fetch_and_add(&index, 1));
10865
10866 /* sanitize kfunc_name in the probe name: tracefs event names allow only alphanumerics and '_' */
10867 for (i = 0; buf[i]; i++) {
10868 if (!isalnum(buf[i]))
10869 buf[i] = '_';
10870 }
10871 }
10872
10873 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
10874 const char *kfunc_name, size_t offset)
10875 {
10876 return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
10877 retprobe ? 'r' : 'p',
10878 retprobe ? "kretprobes" : "kprobes",
10879 probe_name, kfunc_name, offset);
10880 }
10881
10882 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
10883 {
10884 return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
10885 retprobe ? "kretprobes" : "kprobes", probe_name);
10886 }
10887
10888 static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10889 {
10890 char file[256];
10891
10892 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10893 tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
10894
10895 return parse_uint_from_file(file, "%d\n");
10896 }
10897
10898 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10899 const char *kfunc_name, size_t offset, int pid)
10900 {
10901 const size_t attr_sz = sizeof(struct perf_event_attr);
10902 struct perf_event_attr attr;
10903 char errmsg[STRERR_BUFSIZE];
10904 int type, pfd, err;
10905
10906 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
10907 if (err < 0) {
10908 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
10909 kfunc_name, offset,
10910 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10911 return err;
10912 }
10913 type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
10914 if (type < 0) {
10915 err = type;
10916 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
10917 kfunc_name, offset,
10918 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10919 goto err_clean_legacy;
10920 }
10921
10922 memset(&attr, 0, attr_sz);
10923 attr.size = attr_sz;
10924 attr.config = type;
10925 attr.type = PERF_TYPE_TRACEPOINT;
10926
10927 pfd = syscall(__NR_perf_event_open, &attr,
10928 pid < 0 ? -1 : pid, /* pid */
10929 pid == -1 ? 0 : -1, /* cpu */
10930 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10931 if (pfd < 0) {
10932 err = -errno;
10933 pr_warn("legacy kprobe perf_event_open() failed: %s\n",
10934 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10935 goto err_clean_legacy;
10936 }
10937 return pfd;
10938
10939 err_clean_legacy:
10940 /* Clear the newly added legacy kprobe_event */
10941 remove_kprobe_event_legacy(probe_name, retprobe);
10942 return err;
10943 }
10944
10945 static const char *arch_specific_syscall_pfx(void)
10946 {
10947 #if defined(__x86_64__)
10948 return "x64";
10949 #elif defined(__i386__)
10950 return "ia32";
10951 #elif defined(__s390x__)
10952 return "s390x";
10953 #elif defined(__s390__)
10954 return "s390";
10955 #elif defined(__arm__)
10956 return "arm";
10957 #elif defined(__aarch64__)
10958 return "arm64";
10959 #elif defined(__mips__)
10960 return "mips";
10961 #elif defined(__riscv)
10962 return "riscv";
10963 #elif defined(__powerpc64__) /* must precede __powerpc__, which is also defined on 64-bit */
10964 return "powerpc64";
10965 #elif defined(__powerpc__)
10966 return "powerpc";
10967 #else
10968 return NULL;
10969 #endif
10970 }
10971
10972 static int probe_kern_syscall_wrapper(void)
10973 {
10974 char syscall_name[64];
10975 const char *ksys_pfx;
10976
10977 ksys_pfx = arch_specific_syscall_pfx();
10978 if (!ksys_pfx)
10979 return 0;
10980
10981 snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
10982
10983 if (determine_kprobe_perf_type() >= 0) {
10984 int pfd;
10985
10986 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
10987 if (pfd >= 0)
10988 close(pfd);
10989
10990 return pfd >= 0 ? 1 : 0;
10991 } else { /* legacy mode */
10992 char probe_name[128];
10993
10994 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
10995 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
10996 return 0;
10997
10998 (void)remove_kprobe_event_legacy(probe_name, false);
10999 return 1;
11000 }
11001 }
11002
11003 struct bpf_link *
11004 bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
11005 const char *func_name,
11006 const struct bpf_kprobe_opts *opts)
11007 {
11008 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11009 enum probe_attach_mode attach_mode;
11010 char errmsg[STRERR_BUFSIZE];
11011 char *legacy_probe = NULL;
11012 struct bpf_link *link;
11013 size_t offset;
11014 bool retprobe, legacy;
11015 int pfd, err;
11016
11017 if (!OPTS_VALID(opts, bpf_kprobe_opts))
11018 return libbpf_err_ptr(-EINVAL);
11019
11020 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
11021 retprobe = OPTS_GET(opts, retprobe, false);
11022 offset = OPTS_GET(opts, offset, 0);
11023 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11024
11025 legacy = determine_kprobe_perf_type() < 0;
11026 switch (attach_mode) {
11027 case PROBE_ATTACH_MODE_LEGACY:
11028 legacy = true;
11029 pe_opts.force_ioctl_attach = true;
11030 break;
11031 case PROBE_ATTACH_MODE_PERF:
11032 if (legacy)
11033 return libbpf_err_ptr(-ENOTSUP);
11034 pe_opts.force_ioctl_attach = true;
11035 break;
11036 case PROBE_ATTACH_MODE_LINK:
11037 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11038 return libbpf_err_ptr(-ENOTSUP);
11039 break;
11040 case PROBE_ATTACH_MODE_DEFAULT:
11041 break;
11042 default:
11043 return libbpf_err_ptr(-EINVAL);
11044 }
11045
11046 if (!legacy) {
11047 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
11048 func_name, offset,
11049 -1 /* pid */, 0 /* ref_ctr_off */);
11050 } else {
11051 char probe_name[256];
11052
11053 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
11054 func_name, offset);
11055
11056 legacy_probe = strdup(probe_name);
11057 if (!legacy_probe)
11058 return libbpf_err_ptr(-ENOMEM);
11059
11060 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
11061 offset, -1 /* pid */);
11062 }
11063 if (pfd < 0) {
11064 err = -errno;
11065 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
11066 prog->name, retprobe ? "kretprobe" : "kprobe",
11067 func_name, offset,
11068 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11069 goto err_out;
11070 }
11071 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11072 err = libbpf_get_error(link);
11073 if (err) {
11074 close(pfd);
11075 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
11076 prog->name, retprobe ? "kretprobe" : "kprobe",
11077 func_name, offset,
11078 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11079 goto err_clean_legacy;
11080 }
11081 if (legacy) {
11082 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11083
11084 perf_link->legacy_probe_name = legacy_probe;
11085 perf_link->legacy_is_kprobe = true;
11086 perf_link->legacy_is_retprobe = retprobe;
11087 }
11088
11089 return link;
11090
11091 err_clean_legacy:
11092 if (legacy)
11093 remove_kprobe_event_legacy(legacy_probe, retprobe);
11094 err_out:
11095 free(legacy_probe);
11096 return libbpf_err_ptr(err);
11097 }
11098
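/* A minimal usage sketch (the probed kernel function below is purely
 * illustrative):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_openat2");
 *	if (!link)
 *		goto handle_error; // errno is set by libbpf_err_ptr()
 */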
11099 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
11100 bool retprobe,
11101 const char *func_name)
11102 {
11103 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
11104 .retprobe = retprobe,
11105 );
11106
11107 return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
11108 }
11109
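/* Attach to a syscall entry/exit by syscall name, e.g. "bpf" or "openat",
 * resolving the arch- and kernel-config-specific kprobe symbol
 * ("__<arch>_sys_<name>" vs "__se_sys_<name>") automatically.
 */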
11110 struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
11111 const char *syscall_name,
11112 const struct bpf_ksyscall_opts *opts)
11113 {
11114 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
11115 char func_name[128];
11116
11117 if (!OPTS_VALID(opts, bpf_ksyscall_opts))
11118 return libbpf_err_ptr(-EINVAL);
11119
11120 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
		/* arch_specific_syscall_pfx() should never return NULL here
		 * because it is guarded by kernel_supports(). However, the
		 * compiler does not know that, so keep an explicit fallback
		 * conditional as well.
		 */
11126 snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
11127 arch_specific_syscall_pfx() ? : "", syscall_name);
11128 } else {
11129 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
11130 }
11131
11132 kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
11133 kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11134
11135 return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
11136 }
11137
11138 /* Adapted from perf/util/string.c */
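/* Shell-style matching: '?' matches any single character and '*' matches
 * any (possibly empty) sequence. For example, glob_match("tcp_v4_connect",
 * "tcp_v?_conn*") is true, while glob_match("udp_sendmsg", "tcp_*") is not.
 */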
11139 bool glob_match(const char *str, const char *pat)
11140 {
11141 while (*str && *pat && *pat != '*') {
11142 if (*pat == '?') { /* Matches any single character */
11143 str++;
11144 pat++;
11145 continue;
11146 }
11147 if (*str != *pat)
11148 return false;
11149 str++;
11150 pat++;
11151 }
11152 /* Check wild card */
11153 if (*pat == '*') {
11154 while (*pat == '*')
11155 pat++;
11156 if (!*pat) /* Tail wild card matches all */
11157 return true;
11158 while (*str)
11159 if (glob_match(str++, pat))
11160 return true;
11161 }
11162 return !*str && !*pat;
11163 }
11164
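/* Accumulates the kernel addresses of all symbols matching .pattern;
 * .addrs grows on demand via libbpf_ensure_mem(), with .cap tracking the
 * allocated capacity and .cnt the number of resolved entries.
 */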
11165 struct kprobe_multi_resolve {
11166 const char *pattern;
11167 unsigned long *addrs;
11168 size_t cap;
11169 size_t cnt;
11170 };
11171
11172 struct avail_kallsyms_data {
11173 char **syms;
11174 size_t cnt;
11175 struct kprobe_multi_resolve *res;
11176 };
11177
11178 static int avail_func_cmp(const void *a, const void *b)
11179 {
11180 return strcmp(*(const char **)a, *(const char **)b);
11181 }
11182
11183 static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
11184 const char *sym_name, void *ctx)
11185 {
11186 struct avail_kallsyms_data *data = ctx;
11187 struct kprobe_multi_resolve *res = data->res;
11188 int err;
11189
11190 if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
11191 return 0;
11192
11193 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
11194 if (err)
11195 return err;
11196
11197 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11198 return 0;
11199 }
11200
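/* Resolve res->pattern into kernel addresses in two passes: first collect
 * all names from tracefs available_filter_functions that glob-match the
 * pattern and sort them, then walk /proc/kallsyms and record the address
 * of every symbol found in that sorted set (via bsearch in the callback
 * above).
 */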
11201 static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
11202 {
11203 const char *available_functions_file = tracefs_available_filter_functions();
11204 struct avail_kallsyms_data data;
11205 char sym_name[500];
11206 FILE *f;
11207 int err = 0, ret, i;
11208 char **syms = NULL;
11209 size_t cap = 0, cnt = 0;
11210
11211 f = fopen(available_functions_file, "re");
11212 if (!f) {
11213 err = -errno;
11214 pr_warn("failed to open %s: %d\n", available_functions_file, err);
11215 return err;
11216 }
11217
11218 while (true) {
11219 char *name;
11220
11221 ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
11222 if (ret == EOF && feof(f))
11223 break;
11224
11225 if (ret != 1) {
11226 pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
11227 err = -EINVAL;
11228 goto cleanup;
11229 }
11230
11231 if (!glob_match(sym_name, res->pattern))
11232 continue;
11233
11234 err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
11235 if (err)
11236 goto cleanup;
11237
11238 name = strdup(sym_name);
11239 if (!name) {
11240 err = -errno;
11241 goto cleanup;
11242 }
11243
11244 syms[cnt++] = name;
11245 }
11246
11247 /* no entries found, bail out */
11248 if (cnt == 0) {
11249 err = -ENOENT;
11250 goto cleanup;
11251 }
11252
11253 /* sort available functions */
11254 qsort(syms, cnt, sizeof(*syms), avail_func_cmp);
11255
11256 data.syms = syms;
11257 data.res = res;
11258 data.cnt = cnt;
11259 libbpf_kallsyms_parse(avail_kallsyms_cb, &data);
11260
11261 if (res->cnt == 0)
11262 err = -ENOENT;
11263
11264 cleanup:
11265 for (i = 0; i < cnt; i++)
11266 free((char *)syms[i]);
11267 free(syms);
11268
11269 fclose(f);
11270 return err;
11271 }
11272
11273 static bool has_available_filter_functions_addrs(void)
11274 {
11275 return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
11276 }
11277
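/* Newer kernels expose available_filter_functions_addrs, which lists
 * "<addr> <name>" pairs directly, so pattern matching and address lookup
 * happen in a single pass with no /proc/kallsyms cross-referencing.
 */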
11278 static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
11279 {
11280 const char *available_path = tracefs_available_filter_functions_addrs();
11281 char sym_name[500];
11282 FILE *f;
11283 int ret, err = 0;
11284 unsigned long long sym_addr;
11285
11286 f = fopen(available_path, "re");
11287 if (!f) {
11288 err = -errno;
11289 pr_warn("failed to open %s: %d\n", available_path, err);
11290 return err;
11291 }
11292
11293 while (true) {
11294 ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
11295 if (ret == EOF && feof(f))
11296 break;
11297
11298 if (ret != 2) {
11299 pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
11300 ret);
11301 err = -EINVAL;
11302 goto cleanup;
11303 }
11304
11305 if (!glob_match(sym_name, res->pattern))
11306 continue;
11307
11308 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
11309 sizeof(*res->addrs), res->cnt + 1);
11310 if (err)
11311 goto cleanup;
11312
11313 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11314 }
11315
11316 if (res->cnt == 0)
11317 err = -ENOENT;
11318
11319 cleanup:
11320 fclose(f);
11321 return err;
11322 }
11323
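/* A minimal usage sketch of pattern-based multi-attach (the pattern below
 * is purely illustrative):
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
 *
 * Alternatively, pass pattern == NULL and fill opts.syms (or opts.addrs)
 * plus opts.cnt to attach to an explicit list of functions.
 */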
11324 struct bpf_link *
11325 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
11326 const char *pattern,
11327 const struct bpf_kprobe_multi_opts *opts)
11328 {
11329 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11330 struct kprobe_multi_resolve res = {
11331 .pattern = pattern,
11332 };
11333 struct bpf_link *link = NULL;
11334 char errmsg[STRERR_BUFSIZE];
11335 const unsigned long *addrs;
11336 int err, link_fd, prog_fd;
11337 const __u64 *cookies;
11338 const char **syms;
11339 bool retprobe;
11340 size_t cnt;
11341
11342 if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
11343 return libbpf_err_ptr(-EINVAL);
11344
	syms = OPTS_GET(opts, syms, NULL);
	addrs = OPTS_GET(opts, addrs, NULL);
	cnt = OPTS_GET(opts, cnt, 0);
	cookies = OPTS_GET(opts, cookies, NULL);
11349
11350 if (!pattern && !addrs && !syms)
11351 return libbpf_err_ptr(-EINVAL);
11352 if (pattern && (addrs || syms || cookies || cnt))
11353 return libbpf_err_ptr(-EINVAL);
11354 if (!pattern && !cnt)
11355 return libbpf_err_ptr(-EINVAL);
11356 if (addrs && syms)
11357 return libbpf_err_ptr(-EINVAL);
11358
11359 if (pattern) {
11360 if (has_available_filter_functions_addrs())
11361 err = libbpf_available_kprobes_parse(&res);
11362 else
11363 err = libbpf_available_kallsyms_parse(&res);
11364 if (err)
11365 goto error;
11366 addrs = res.addrs;
11367 cnt = res.cnt;
11368 }
11369
11370 retprobe = OPTS_GET(opts, retprobe, false);
11371
11372 lopts.kprobe_multi.syms = syms;
11373 lopts.kprobe_multi.addrs = addrs;
11374 lopts.kprobe_multi.cookies = cookies;
11375 lopts.kprobe_multi.cnt = cnt;
11376 lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
11377
11378 link = calloc(1, sizeof(*link));
11379 if (!link) {
11380 err = -ENOMEM;
11381 goto error;
11382 }
11383 link->detach = &bpf_link__detach_fd;
11384
11385 prog_fd = bpf_program__fd(prog);
11386 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
11387 if (link_fd < 0) {
11388 err = -errno;
11389 pr_warn("prog '%s': failed to attach: %s\n",
11390 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11391 goto error;
11392 }
11393 link->fd = link_fd;
11394 free(res.addrs);
11395 return link;
11396
11397 error:
11398 free(link);
11399 free(res.addrs);
11400 return libbpf_err_ptr(err);
11401 }
11402
11403 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11404 {
11405 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
11406 unsigned long offset = 0;
11407 const char *func_name;
11408 char *func;
11409 int n;
11410
11411 *link = NULL;
11412
11413 /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
11414 if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
11415 return 0;
11416
11417 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
11418 if (opts.retprobe)
11419 func_name = prog->sec_name + sizeof("kretprobe/") - 1;
11420 else
11421 func_name = prog->sec_name + sizeof("kprobe/") - 1;
11422
11423 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
11424 if (n < 1) {
11425 pr_warn("kprobe name is invalid: %s\n", func_name);
11426 return -EINVAL;
11427 }
11428 if (opts.retprobe && offset != 0) {
11429 free(func);
11430 pr_warn("kretprobes do not support offset specification\n");
11431 return -EINVAL;
11432 }
11433
11434 opts.offset = offset;
11435 *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
11436 free(func);
11437 return libbpf_get_error(*link);
11438 }
11439
11440 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11441 {
11442 LIBBPF_OPTS(bpf_ksyscall_opts, opts);
11443 const char *syscall_name;
11444
11445 *link = NULL;
11446
11447 /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
11448 if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
11449 return 0;
11450
11451 opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
11452 if (opts.retprobe)
11453 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
11454 else
11455 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
11456
11457 *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
11458 return *link ? 0 : -errno;
11459 }
11460
11461 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11462 {
11463 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
11464 const char *spec;
11465 char *pattern;
11466 int n;
11467
11468 *link = NULL;
11469
11470 /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
11471 if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
11472 strcmp(prog->sec_name, "kretprobe.multi") == 0)
11473 return 0;
11474
11475 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
11476 if (opts.retprobe)
11477 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
11478 else
11479 spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
11480
11481 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11482 if (n < 1) {
		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
11484 return -EINVAL;
11485 }
11486
11487 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11488 free(pattern);
11489 return libbpf_get_error(*link);
11490 }
11491
11492 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11493 {
11494 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
11495 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
11496 int n, ret = -EINVAL;
11497
11498 *link = NULL;
11499
11500 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
11501 &probe_type, &binary_path, &func_name);
11502 switch (n) {
11503 case 1:
		/* handle SEC("u[ret]probe.multi") - format is valid, but auto-attach is impossible. */
11505 ret = 0;
11506 break;
11507 case 3:
11508 opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
11509 *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
11510 ret = libbpf_get_error(*link);
11511 break;
11512 default:
11513 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11514 prog->sec_name);
11515 break;
11516 }
11517 free(probe_type);
11518 free(binary_path);
11519 free(func_name);
11520 return ret;
11521 }
11522
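/* Generate a tracefs-safe event name by prefixing with the current PID and
 * replacing every non-alphanumeric character with '_'. For example, pid
 * 1234 probing "/usr/lib/libc.so.6" at offset 0x9c0 would yield
 * "libbpf_1234__usr_lib_libc_so_6_0x9c0".
 */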
11523 static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
11524 const char *binary_path, uint64_t offset)
11525 {
11526 int i;
11527
11528 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
11529
11530 /* sanitize binary_path in the probe name */
11531 for (i = 0; buf[i]; i++) {
11532 if (!isalnum(buf[i]))
11533 buf[i] = '_';
11534 }
11535 }
11536
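/* Register the probe by appending a line like
 * "p:uprobes/<probe_name> <binary_path>:0x<offset>" (or "r:uretprobes/..."
 * for return probes) to the tracefs uprobe_events file.
 */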
11537 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
11538 const char *binary_path, size_t offset)
11539 {
11540 return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
11541 retprobe ? 'r' : 'p',
11542 retprobe ? "uretprobes" : "uprobes",
11543 probe_name, binary_path, offset);
11544 }
11545
11546 static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
11547 {
11548 return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
11549 retprobe ? "uretprobes" : "uprobes", probe_name);
11550 }
11551
11552 static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
11553 {
11554 char file[512];
11555
11556 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
11557 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
11558
11559 return parse_uint_from_file(file, "%d\n");
11560 }
11561
11562 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
11563 const char *binary_path, size_t offset, int pid)
11564 {
11565 const size_t attr_sz = sizeof(struct perf_event_attr);
11566 struct perf_event_attr attr;
11567 int type, pfd, err;
11568
11569 err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
11570 if (err < 0) {
11571 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
11572 binary_path, (size_t)offset, err);
11573 return err;
11574 }
11575 type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
11576 if (type < 0) {
11577 err = type;
11578 pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
11579 binary_path, offset, err);
11580 goto err_clean_legacy;
11581 }
11582
11583 memset(&attr, 0, attr_sz);
11584 attr.size = attr_sz;
11585 attr.config = type;
11586 attr.type = PERF_TYPE_TRACEPOINT;
11587
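	/* perf_event_open() needs a concrete target: a specific task
	 * measured on any CPU (pid >= 0, cpu == -1), or all tasks on one
	 * CPU (pid == -1, cpu == 0); pid == -1 with cpu == -1 is invalid.
	 */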
11588 pfd = syscall(__NR_perf_event_open, &attr,
11589 pid < 0 ? -1 : pid, /* pid */
11590 pid == -1 ? 0 : -1, /* cpu */
11591 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11592 if (pfd < 0) {
11593 err = -errno;
11594 pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
11595 goto err_clean_legacy;
11596 }
11597 return pfd;
11598
11599 err_clean_legacy:
11600 /* Clear the newly added legacy uprobe_event */
11601 remove_uprobe_event_legacy(probe_name, retprobe);
11602 return err;
11603 }
11604
11605 /* Find offset of function name in archive specified by path. Currently
11606 * supported are .zip files that do not compress their contents, as used on
11607 * Android in the form of APKs, for example. "file_name" is the name of the ELF
11608 * file inside the archive. "func_name" matches symbol name or name@@LIB for
11609 * library functions.
11610 *
 * An overview of the APK format specifically is provided here:
11612 * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
11613 */
11614 static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
11615 const char *func_name)
11616 {
11617 struct zip_archive *archive;
11618 struct zip_entry entry;
11619 long ret;
11620 #ifdef HAVE_LIBELF
11621 Elf *elf;
11622 #elif defined HAVE_ELFIO
11623 pelfio_t elf;
11624 #endif
11625
11626 archive = zip_archive_open(archive_path);
11627 if (IS_ERR(archive)) {
11628 ret = PTR_ERR(archive);
11629 pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
11630 return ret;
11631 }
11632
11633 ret = zip_archive_find_entry(archive, file_name, &entry);
11634 if (ret) {
11635 pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
11636 archive_path, ret);
11637 goto out;
11638 }
11639 pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
11640 (unsigned long)entry.data_offset);
11641
11642 if (entry.compression) {
11643 pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
11644 archive_path);
11645 ret = -LIBBPF_ERRNO__FORMAT;
11646 goto out;
11647 }
11648 #ifdef HAVE_LIBELF
11649 elf = elf_memory((void *)entry.data, entry.data_length);
11650 #elif defined HAVE_ELFIO
	char memfd_path[PATH_MAX] = {0};
	/* stage the archive member in a memfd so ELFIO can load it via /proc */
	int fdm = syscall(__NR_memfd_create, "bpfelf", MFD_CLOEXEC);

	elf = NULL;
	if (fdm >= 0 &&
	    write(fdm, (char *)entry.data, entry.data_length) == (ssize_t)entry.data_length) {
		snprintf(memfd_path, PATH_MAX, "/proc/self/fd/%d", fdm);
		elf = elfio_new();
		if (elf)
			elfio_load(elf, memfd_path);
	}
	if (fdm >= 0)
		close(fdm);
11658 #endif
	if (!elf) {
#ifdef HAVE_LIBELF
		pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
			elf_errmsg(-1));
#else
		pr_warn("elf: could not read elf file %s from %s\n", file_name, archive_path);
#endif
		ret = -LIBBPF_ERRNO__LIBELF;
		goto out;
	}
11665
11666 ret = elf_find_func_offset(elf, file_name, func_name);
11667 if (ret > 0) {
11668 pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
11669 func_name, file_name, archive_path, entry.data_offset, ret,
11670 ret + entry.data_offset);
11671 ret += entry.data_offset;
11672 }
#ifdef HAVE_LIBELF
	elf_end(elf);
#elif defined HAVE_ELFIO
	elfio_delete(elf);
#endif
11678 out:
11679 zip_archive_close(archive);
11680 return ret;
11681 }
11682
11683 static const char *arch_specific_lib_paths(void)
11684 {
11685 /*
11686 * Based on https://packages.debian.org/sid/libc6.
11687 *
11688 * Assume that the traced program is built for the same architecture
11689 * as libbpf, which should cover the vast majority of cases.
11690 */
11691 #if defined(__x86_64__)
11692 return "/lib/x86_64-linux-gnu";
11693 #elif defined(__i386__)
11694 return "/lib/i386-linux-gnu";
11695 #elif defined(__s390x__)
11696 return "/lib/s390x-linux-gnu";
11697 #elif defined(__s390__)
11698 return "/lib/s390-linux-gnu";
11699 #elif defined(__arm__) && defined(__SOFTFP__)
11700 return "/lib/arm-linux-gnueabi";
11701 #elif defined(__arm__) && !defined(__SOFTFP__)
11702 return "/lib/arm-linux-gnueabihf";
11703 #elif defined(__aarch64__)
11704 return "/lib/aarch64-linux-gnu";
11705 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
11706 return "/lib/mips64el-linux-gnuabi64";
11707 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
11708 return "/lib/mipsel-linux-gnu";
11709 #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11710 return "/lib/powerpc64le-linux-gnu";
11711 #elif defined(__sparc__) && defined(__arch64__)
11712 return "/lib/sparc64-linux-gnu";
11713 #elif defined(__riscv) && __riscv_xlen == 64
11714 return "/lib/riscv64-linux-gnu";
11715 #else
11716 return NULL;
11717 #endif
11718 }
11719
11720 /* Get full path to program/shared library. */
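/* For example, assuming a Debian-style multiarch layout, "libc.so.6" may
 * resolve to "/lib/x86_64-linux-gnu/libc.so.6", while a bare "ls" may
 * resolve to "/usr/bin/ls" via PATH.
 */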
11721 static int resolve_full_path(const char *file, char *result, size_t result_sz)
11722 {
11723 const char *search_paths[3] = {};
11724 int i, perm;
11725
11726 if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
11727 search_paths[0] = getenv("LD_LIBRARY_PATH");
11728 search_paths[1] = "/usr/lib64:/usr/lib";
11729 search_paths[2] = arch_specific_lib_paths();
11730 perm = R_OK;
11731 } else {
11732 search_paths[0] = getenv("PATH");
11733 search_paths[1] = "/usr/bin:/usr/sbin";
11734 perm = R_OK | X_OK;
11735 }
11736
11737 for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
11738 const char *s;
11739
11740 if (!search_paths[i])
11741 continue;
11742 for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
11743 char *next_path;
11744 int seg_len;
11745
11746 if (s[0] == ':')
11747 s++;
11748 next_path = strchr(s, ':');
11749 seg_len = next_path ? next_path - s : strlen(s);
11750 if (!seg_len)
11751 continue;
11752 snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
11753 /* ensure it has required permissions */
11754 if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
11755 continue;
11756 pr_debug("resolved '%s' to '%s'\n", file, result);
11757 return 0;
11758 }
11759 }
11760 return -ENOENT;
11761 }
11762
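/* A minimal usage sketch (the library and symbol pattern below are purely
 * illustrative):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe_multi(prog, -1, "libc.so.6",
 *						"malloc*", NULL);
 */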
11763 struct bpf_link *
11764 bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
11765 pid_t pid,
11766 const char *path,
11767 const char *func_pattern,
11768 const struct bpf_uprobe_multi_opts *opts)
11769 {
11770 const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
11771 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11772 unsigned long *resolved_offsets = NULL;
11773 int err = 0, link_fd, prog_fd;
11774 struct bpf_link *link = NULL;
11775 char errmsg[STRERR_BUFSIZE];
11776 char full_path[PATH_MAX];
11777 const __u64 *cookies;
11778 const char **syms;
11779 size_t cnt;
11780
11781 if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
11782 return libbpf_err_ptr(-EINVAL);
11783
11784 syms = OPTS_GET(opts, syms, NULL);
11785 offsets = OPTS_GET(opts, offsets, NULL);
11786 ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
11787 cookies = OPTS_GET(opts, cookies, NULL);
11788 cnt = OPTS_GET(opts, cnt, 0);
11789
	/*
	 * The user can specify two mutually exclusive sets of inputs:
	 *
	 * 1) use only path/func_pattern/pid arguments
	 *
	 * 2) use path/pid with allowed combinations of
	 *    syms/offsets/ref_ctr_offsets/cookies/cnt
	 *
	 *    - syms and offsets are mutually exclusive
	 *    - ref_ctr_offsets and cookies are optional
	 *
	 * Any other usage results in an error.
	 */
11803
11804 if (!path)
11805 return libbpf_err_ptr(-EINVAL);
11806 if (!func_pattern && cnt == 0)
11807 return libbpf_err_ptr(-EINVAL);
11808
11809 if (func_pattern) {
11810 if (syms || offsets || ref_ctr_offsets || cookies || cnt)
11811 return libbpf_err_ptr(-EINVAL);
11812 } else {
11813 if (!!syms == !!offsets)
11814 return libbpf_err_ptr(-EINVAL);
11815 }
11816
11817 if (func_pattern) {
11818 if (!strchr(path, '/')) {
11819 err = resolve_full_path(path, full_path, sizeof(full_path));
11820 if (err) {
11821 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11822 prog->name, path, err);
11823 return libbpf_err_ptr(err);
11824 }
11825 path = full_path;
11826 }
11827
11828 err = elf_resolve_pattern_offsets(path, func_pattern,
11829 &resolved_offsets, &cnt);
11830 if (err < 0)
11831 return libbpf_err_ptr(err);
11832 offsets = resolved_offsets;
11833 } else if (syms) {
11834 err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets);
11835 if (err < 0)
11836 return libbpf_err_ptr(err);
11837 offsets = resolved_offsets;
11838 }
11839
11840 lopts.uprobe_multi.path = path;
11841 lopts.uprobe_multi.offsets = offsets;
11842 lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
11843 lopts.uprobe_multi.cookies = cookies;
11844 lopts.uprobe_multi.cnt = cnt;
11845 lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
11846
11847 if (pid == 0)
11848 pid = getpid();
11849 if (pid > 0)
11850 lopts.uprobe_multi.pid = pid;
11851
11852 link = calloc(1, sizeof(*link));
11853 if (!link) {
11854 err = -ENOMEM;
11855 goto error;
11856 }
11857 link->detach = &bpf_link__detach_fd;
11858
11859 prog_fd = bpf_program__fd(prog);
11860 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
11861 if (link_fd < 0) {
11862 err = -errno;
11863 pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
11864 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11865 goto error;
11866 }
11867 link->fd = link_fd;
11868 free(resolved_offsets);
11869 return link;
11870
11871 error:
11872 free(resolved_offsets);
11873 free(link);
11874 return libbpf_err_ptr(err);
11875 }
11876
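/* binary_path may also point inside a zip archive using the "!/" separator,
 * e.g. "/system/app/test.apk!/lib/arm64-v8a/libtest.so" (path shown is
 * illustrative): the part before "!/" names the archive and the part after
 * names the ELF member within it.
 */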
11877 LIBBPF_API struct bpf_link *
11878 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
11879 const char *binary_path, size_t func_offset,
11880 const struct bpf_uprobe_opts *opts)
11881 {
11882 const char *archive_path = NULL, *archive_sep = NULL;
11883 char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
11884 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11885 enum probe_attach_mode attach_mode;
11886 char full_path[PATH_MAX];
11887 struct bpf_link *link;
11888 size_t ref_ctr_off;
11889 int pfd, err;
11890 bool retprobe, legacy;
11891 const char *func_name;
11892
11893 if (!OPTS_VALID(opts, bpf_uprobe_opts))
11894 return libbpf_err_ptr(-EINVAL);
11895
11896 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
11897 retprobe = OPTS_GET(opts, retprobe, false);
11898 ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
11899 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11900
11901 if (!binary_path)
11902 return libbpf_err_ptr(-EINVAL);
11903
11904 /* Check if "binary_path" refers to an archive. */
11905 archive_sep = strstr(binary_path, "!/");
11906 if (archive_sep) {
11907 full_path[0] = '\0';
11908 libbpf_strlcpy(full_path, binary_path,
11909 min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
11910 archive_path = full_path;
11911 binary_path = archive_sep + 2;
11912 } else if (!strchr(binary_path, '/')) {
11913 err = resolve_full_path(binary_path, full_path, sizeof(full_path));
11914 if (err) {
11915 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11916 prog->name, binary_path, err);
11917 return libbpf_err_ptr(err);
11918 }
11919 binary_path = full_path;
11920 }
11921 func_name = OPTS_GET(opts, func_name, NULL);
11922 if (func_name) {
11923 long sym_off;
11924
11925 if (archive_path) {
11926 sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
11927 func_name);
11928 binary_path = archive_path;
11929 } else {
11930 sym_off = elf_find_func_offset_from_file(binary_path, func_name);
11931 }
11932 if (sym_off < 0)
11933 return libbpf_err_ptr(sym_off);
11934 func_offset += sym_off;
11935 }
11936
11937 legacy = determine_uprobe_perf_type() < 0;
11938 switch (attach_mode) {
11939 case PROBE_ATTACH_MODE_LEGACY:
11940 legacy = true;
11941 pe_opts.force_ioctl_attach = true;
11942 break;
11943 case PROBE_ATTACH_MODE_PERF:
11944 if (legacy)
11945 return libbpf_err_ptr(-ENOTSUP);
11946 pe_opts.force_ioctl_attach = true;
11947 break;
11948 case PROBE_ATTACH_MODE_LINK:
11949 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11950 return libbpf_err_ptr(-ENOTSUP);
11951 break;
11952 case PROBE_ATTACH_MODE_DEFAULT:
11953 break;
11954 default:
11955 return libbpf_err_ptr(-EINVAL);
11956 }
11957
11958 if (!legacy) {
11959 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
11960 func_offset, pid, ref_ctr_off);
11961 } else {
11962 char probe_name[PATH_MAX + 64];
11963
11964 if (ref_ctr_off)
11965 return libbpf_err_ptr(-EINVAL);
11966
11967 gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
11968 binary_path, func_offset);
11969
11970 legacy_probe = strdup(probe_name);
11971 if (!legacy_probe)
11972 return libbpf_err_ptr(-ENOMEM);
11973
11974 pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
11975 binary_path, func_offset, pid);
11976 }
11977 if (pfd < 0) {
11978 err = -errno;
11979 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
11980 prog->name, retprobe ? "uretprobe" : "uprobe",
11981 binary_path, func_offset,
11982 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11983 goto err_out;
11984 }
11985
11986 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11987 err = libbpf_get_error(link);
11988 if (err) {
11989 close(pfd);
11990 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
11991 prog->name, retprobe ? "uretprobe" : "uprobe",
11992 binary_path, func_offset,
11993 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11994 goto err_clean_legacy;
11995 }
11996 if (legacy) {
11997 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11998
11999 perf_link->legacy_probe_name = legacy_probe;
12000 perf_link->legacy_is_kprobe = false;
12001 perf_link->legacy_is_retprobe = retprobe;
12002 }
12003 return link;
12004
12005 err_clean_legacy:
12006 if (legacy)
12007 remove_uprobe_event_legacy(legacy_probe, retprobe);
12008 err_out:
12009 free(legacy_probe);
12010 return libbpf_err_ptr(err);
12011 }
12012
12013 /* Format of u[ret]probe section definition supporting auto-attach:
12014 * u[ret]probe/binary:function[+offset]
12015 *
12016 * binary can be an absolute/relative path or a filename; the latter is resolved to a
12017 * full binary path via bpf_program__attach_uprobe_opts.
12018 *
12019 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
12020 * specified (and auto-attach is not possible) or the above format is specified for
12021 * auto-attach.
12022 */
12023 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12024 {
12025 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
12026 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off;
12027 int n, c, ret = -EINVAL;
12028 long offset = 0;
12029
12030 *link = NULL;
12031
12032 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
12033 &probe_type, &binary_path, &func_name);
12034 switch (n) {
12035 case 1:
12036 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
12037 ret = 0;
12038 break;
12039 case 2:
12040 pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
12041 prog->name, prog->sec_name);
12042 break;
12043 case 3:
		/* check if user specified `+offset`; if so, it must be the
		 * last part of the string, so make sure sscanf read to EOL
		 */
12047 func_off = strrchr(func_name, '+');
12048 if (func_off) {
12049 n = sscanf(func_off, "+%li%n", &offset, &c);
12050 if (n == 1 && *(func_off + c) == '\0')
12051 func_off[0] = '\0';
12052 else
12053 offset = 0;
12054 }
12055 opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
12056 strcmp(probe_type, "uretprobe.s") == 0;
12057 if (opts.retprobe && offset != 0) {
12058 pr_warn("prog '%s': uretprobes do not support offset specification\n",
12059 prog->name);
12060 break;
12061 }
12062 opts.func_name = func_name;
12063 *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
12064 ret = libbpf_get_error(*link);
12065 break;
12066 default:
12067 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
12068 prog->sec_name);
12069 break;
12070 }
12071 free(probe_type);
12072 free(binary_path);
12073 free(func_name);
12074
12075 return ret;
12076 }
12077
12078 struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
12079 bool retprobe, pid_t pid,
12080 const char *binary_path,
12081 size_t func_offset)
12082 {
12083 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
12084
12085 return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
12086 }
12087
12088 #ifdef HAVE_LIBELF
12089 struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
12090 pid_t pid, const char *binary_path,
12091 const char *usdt_provider, const char *usdt_name,
12092 const struct bpf_usdt_opts *opts)
12093 {
12094 char resolved_path[512];
12095 struct bpf_object *obj = prog->obj;
12096 struct bpf_link *link;
12097 __u64 usdt_cookie;
12098 int err;
12099
	if (!OPTS_VALID(opts, bpf_usdt_opts))
12101 return libbpf_err_ptr(-EINVAL);
12102
12103 if (bpf_program__fd(prog) < 0) {
12104 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
12105 prog->name);
12106 return libbpf_err_ptr(-EINVAL);
12107 }
12108
12109 if (!binary_path)
12110 return libbpf_err_ptr(-EINVAL);
12111
12112 if (!strchr(binary_path, '/')) {
12113 err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
12114 if (err) {
12115 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12116 prog->name, binary_path, err);
12117 return libbpf_err_ptr(err);
12118 }
12119 binary_path = resolved_path;
12120 }
12121
12122 /* USDT manager is instantiated lazily on first USDT attach. It will
12123 * be destroyed together with BPF object in bpf_object__close().
12124 */
12125 if (IS_ERR(obj->usdt_man))
12126 return libbpf_ptr(obj->usdt_man);
12127 if (!obj->usdt_man) {
12128 obj->usdt_man = usdt_manager_new(obj);
12129 if (IS_ERR(obj->usdt_man))
12130 return libbpf_ptr(obj->usdt_man);
12131 }
12132
12133 usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
12134 link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
12135 usdt_provider, usdt_name, usdt_cookie);
12136 err = libbpf_get_error(link);
12137 if (err)
12138 return libbpf_err_ptr(err);
12139 return link;
12140 }
12141 #endif //HAVE_LIBELF
12142
12143 #ifdef HAVE_LIBELF
12144 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12145 {
12146 char *path = NULL, *provider = NULL, *name = NULL;
12147 const char *sec_name;
12148 int n, err;
12149
12150 sec_name = bpf_program__section_name(prog);
12151 if (strcmp(sec_name, "usdt") == 0) {
12152 /* no auto-attach for just SEC("usdt") */
12153 *link = NULL;
12154 return 0;
12155 }
12156
12157 n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
12158 if (n != 3) {
12159 pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
12160 sec_name);
12161 err = -EINVAL;
12162 } else {
12163 *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
12164 provider, name, NULL);
12165 err = libbpf_get_error(*link);
12166 }
12167 free(path);
12168 free(provider);
12169 free(name);
12170 return err;
12171 }
12172 #endif //HAVE_LIBELF
12173
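/* Read the tracepoint's perf event ID from tracefs, i.e. from a path like
 * <tracefs>/events/<category>/<name>/id (for example
 * events/sched/sched_switch/id).
 */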
12174 static int determine_tracepoint_id(const char *tp_category,
12175 const char *tp_name)
12176 {
12177 char file[PATH_MAX];
12178 int ret;
12179
12180 ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
12181 tracefs_path(), tp_category, tp_name);
12182 if (ret < 0)
12183 return -errno;
12184 if (ret >= sizeof(file)) {
12185 pr_debug("tracepoint %s/%s path is too long\n",
12186 tp_category, tp_name);
12187 return -E2BIG;
12188 }
12189 return parse_uint_from_file(file, "%d\n");
12190 }
12191
12192 static int perf_event_open_tracepoint(const char *tp_category,
12193 const char *tp_name)
12194 {
12195 const size_t attr_sz = sizeof(struct perf_event_attr);
12196 struct perf_event_attr attr;
12197 char errmsg[STRERR_BUFSIZE];
12198 int tp_id, pfd, err;
12199
12200 tp_id = determine_tracepoint_id(tp_category, tp_name);
12201 if (tp_id < 0) {
12202 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
12203 tp_category, tp_name,
12204 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
12205 return tp_id;
12206 }
12207
12208 memset(&attr, 0, attr_sz);
12209 attr.type = PERF_TYPE_TRACEPOINT;
12210 attr.size = attr_sz;
12211 attr.config = tp_id;
12212
12213 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
12214 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12215 if (pfd < 0) {
12216 err = -errno;
12217 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
12218 tp_category, tp_name,
12219 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12220 return err;
12221 }
12222 return pfd;
12223 }
12224
12225 struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
12226 const char *tp_category,
12227 const char *tp_name,
12228 const struct bpf_tracepoint_opts *opts)
12229 {
12230 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
12231 char errmsg[STRERR_BUFSIZE];
12232 struct bpf_link *link;
12233 int pfd, err;
12234
12235 if (!OPTS_VALID(opts, bpf_tracepoint_opts))
12236 return libbpf_err_ptr(-EINVAL);
12237
12238 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12239
12240 pfd = perf_event_open_tracepoint(tp_category, tp_name);
12241 if (pfd < 0) {
12242 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
12243 prog->name, tp_category, tp_name,
12244 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12245 return libbpf_err_ptr(pfd);
12246 }
12247 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
12248 err = libbpf_get_error(link);
12249 if (err) {
12250 close(pfd);
12251 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
12252 prog->name, tp_category, tp_name,
12253 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12254 return libbpf_err_ptr(err);
12255 }
12256 return link;
12257 }
12258
12259 struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
12260 const char *tp_category,
12261 const char *tp_name)
12262 {
12263 return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
12264 }
12265
12266 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12267 {
12268 char *sec_name, *tp_cat, *tp_name;
12269
12270 *link = NULL;
12271
12272 /* no auto-attach for SEC("tp") or SEC("tracepoint") */
12273 if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
12274 return 0;
12275
12276 sec_name = strdup(prog->sec_name);
12277 if (!sec_name)
12278 return -ENOMEM;
12279
12280 /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
12281 if (str_has_pfx(prog->sec_name, "tp/"))
12282 tp_cat = sec_name + sizeof("tp/") - 1;
12283 else
12284 tp_cat = sec_name + sizeof("tracepoint/") - 1;
12285 tp_name = strchr(tp_cat, '/');
12286 if (!tp_name) {
12287 free(sec_name);
12288 return -EINVAL;
12289 }
12290 *tp_name = '\0';
12291 tp_name++;
12292
12293 *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
12294 free(sec_name);
12295 return libbpf_get_error(*link);
12296 }
12297
12298 struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
12299 const char *tp_name)
12300 {
12301 char errmsg[STRERR_BUFSIZE];
12302 struct bpf_link *link;
12303 int prog_fd, pfd;
12304
12305 prog_fd = bpf_program__fd(prog);
12306 if (prog_fd < 0) {
12307 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12308 return libbpf_err_ptr(-EINVAL);
12309 }
12310
12311 link = calloc(1, sizeof(*link));
12312 if (!link)
12313 return libbpf_err_ptr(-ENOMEM);
12314 link->detach = &bpf_link__detach_fd;
12315
12316 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
12317 if (pfd < 0) {
12318 pfd = -errno;
12319 free(link);
12320 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
12321 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12322 return libbpf_err_ptr(pfd);
12323 }
12324 link->fd = pfd;
12325 return link;
12326 }
12327
12328 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12329 {
12330 static const char *const prefixes[] = {
12331 "raw_tp",
12332 "raw_tracepoint",
12333 "raw_tp.w",
12334 "raw_tracepoint.w",
12335 };
12336 size_t i;
12337 const char *tp_name = NULL;
12338
12339 *link = NULL;
12340
12341 for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
12342 size_t pfx_len;
12343
12344 if (!str_has_pfx(prog->sec_name, prefixes[i]))
12345 continue;
12346
12347 pfx_len = strlen(prefixes[i]);
		/* no auto-attach case of, e.g., plain SEC("raw_tp") */
12349 if (prog->sec_name[pfx_len] == '\0')
12350 return 0;
12351
12352 if (prog->sec_name[pfx_len] != '/')
12353 continue;
12354
12355 tp_name = prog->sec_name + pfx_len + 1;
12356 break;
12357 }
12358
12359 if (!tp_name) {
12360 pr_warn("prog '%s': invalid section name '%s'\n",
12361 prog->name, prog->sec_name);
12362 return -EINVAL;
12363 }
12364
12365 *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
12366 return libbpf_get_error(*link);
12367 }
12368
12369 /* Common logic for all BPF program types that attach to a btf_id */
12370 static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
12371 const struct bpf_trace_opts *opts)
12372 {
12373 LIBBPF_OPTS(bpf_link_create_opts, link_opts);
12374 char errmsg[STRERR_BUFSIZE];
12375 struct bpf_link *link;
12376 int prog_fd, pfd;
12377
12378 if (!OPTS_VALID(opts, bpf_trace_opts))
12379 return libbpf_err_ptr(-EINVAL);
12380
12381 prog_fd = bpf_program__fd(prog);
12382 if (prog_fd < 0) {
12383 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12384 return libbpf_err_ptr(-EINVAL);
12385 }
12386
12387 link = calloc(1, sizeof(*link));
12388 if (!link)
12389 return libbpf_err_ptr(-ENOMEM);
12390 link->detach = &bpf_link__detach_fd;
12391
12392 /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
12393 link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
12394 pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
12395 if (pfd < 0) {
12396 pfd = -errno;
12397 free(link);
12398 pr_warn("prog '%s': failed to attach: %s\n",
12399 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12400 return libbpf_err_ptr(pfd);
12401 }
12402 link->fd = pfd;
12403 return link;
12404 }
12405
12406 struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
12407 {
12408 return bpf_program__attach_btf_id(prog, NULL);
12409 }
12410
12411 struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
12412 const struct bpf_trace_opts *opts)
12413 {
12414 return bpf_program__attach_btf_id(prog, opts);
12415 }
12416
12417 struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
12418 {
12419 return bpf_program__attach_btf_id(prog, NULL);
12420 }
12421
12422 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12423 {
12424 *link = bpf_program__attach_trace(prog);
12425 return libbpf_get_error(*link);
12426 }
12427
12428 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12429 {
12430 *link = bpf_program__attach_lsm(prog);
12431 return libbpf_get_error(*link);
12432 }
12433
12434 static struct bpf_link *
12435 bpf_program_attach_fd(const struct bpf_program *prog,
12436 int target_fd, const char *target_name,
12437 const struct bpf_link_create_opts *opts)
12438 {
12439 enum bpf_attach_type attach_type;
12440 char errmsg[STRERR_BUFSIZE];
12441 struct bpf_link *link;
12442 int prog_fd, link_fd;
12443
12444 prog_fd = bpf_program__fd(prog);
12445 if (prog_fd < 0) {
12446 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12447 return libbpf_err_ptr(-EINVAL);
12448 }
12449
12450 link = calloc(1, sizeof(*link));
12451 if (!link)
12452 return libbpf_err_ptr(-ENOMEM);
12453 link->detach = &bpf_link__detach_fd;
12454
12455 attach_type = bpf_program__expected_attach_type(prog);
12456 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
12457 if (link_fd < 0) {
12458 link_fd = -errno;
12459 free(link);
12460 pr_warn("prog '%s': failed to attach to %s: %s\n",
12461 prog->name, target_name,
12462 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12463 return libbpf_err_ptr(link_fd);
12464 }
12465 link->fd = link_fd;
12466 return link;
12467 }
12468
12469 struct bpf_link *
12470 bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
12471 {
12472 return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
12473 }
12474
12475 struct bpf_link *
12476 bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
12477 {
12478 return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
12479 }
12480
12481 struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
12482 {
12483 /* target_fd/target_ifindex use the same field in LINK_CREATE */
12484 return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
12485 }
12486
12487 struct bpf_link *
12488 bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
12489 const struct bpf_tcx_opts *opts)
12490 {
12491 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12492 __u32 relative_id;
12493 int relative_fd;
12494
12495 if (!OPTS_VALID(opts, bpf_tcx_opts))
12496 return libbpf_err_ptr(-EINVAL);
12497
12498 relative_id = OPTS_GET(opts, relative_id, 0);
12499 relative_fd = OPTS_GET(opts, relative_fd, 0);
12500
12501 /* validate we don't have unexpected combinations of non-zero fields */
12502 if (!ifindex) {
12503 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12504 prog->name);
12505 return libbpf_err_ptr(-EINVAL);
12506 }
12507 if (relative_fd && relative_id) {
12508 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12509 prog->name);
12510 return libbpf_err_ptr(-EINVAL);
12511 }
12512
12513 link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
12514 link_create_opts.tcx.relative_fd = relative_fd;
12515 link_create_opts.tcx.relative_id = relative_id;
12516 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12517
12518 /* target_fd/target_ifindex use the same field in LINK_CREATE */
12519 return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
12520 }
12521
12522 struct bpf_link *
12523 bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
12524 const struct bpf_netkit_opts *opts)
12525 {
12526 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12527 __u32 relative_id;
12528 int relative_fd;
12529
12530 if (!OPTS_VALID(opts, bpf_netkit_opts))
12531 return libbpf_err_ptr(-EINVAL);
12532
12533 relative_id = OPTS_GET(opts, relative_id, 0);
12534 relative_fd = OPTS_GET(opts, relative_fd, 0);
12535
12536 /* validate we don't have unexpected combinations of non-zero fields */
12537 if (!ifindex) {
12538 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12539 prog->name);
12540 return libbpf_err_ptr(-EINVAL);
12541 }
12542 if (relative_fd && relative_id) {
12543 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12544 prog->name);
12545 return libbpf_err_ptr(-EINVAL);
12546 }
12547
12548 link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0);
12549 link_create_opts.netkit.relative_fd = relative_fd;
12550 link_create_opts.netkit.relative_id = relative_id;
12551 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12552
12553 return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts);
12554 }
12555
12556 struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
12557 int target_fd,
12558 const char *attach_func_name)
12559 {
12560 int btf_id;
12561
12562 if (!!target_fd != !!attach_func_name) {
12563 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
12564 prog->name);
12565 return libbpf_err_ptr(-EINVAL);
12566 }
12567
12568 if (prog->type != BPF_PROG_TYPE_EXT) {
12569 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
12570 prog->name);
12571 return libbpf_err_ptr(-EINVAL);
12572 }
12573
12574 if (target_fd) {
12575 LIBBPF_OPTS(bpf_link_create_opts, target_opts);
12576
12577 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
12578 if (btf_id < 0)
12579 return libbpf_err_ptr(btf_id);
12580
12581 target_opts.target_btf_id = btf_id;
12582
12583 return bpf_program_attach_fd(prog, target_fd, "freplace",
12584 &target_opts);
12585 } else {
12586 /* no target, so use raw_tracepoint_open for compatibility
12587 * with old kernels
12588 */
12589 return bpf_program__attach_trace(prog);
12590 }
12591 }
12592
12593 struct bpf_link *
12594 bpf_program__attach_iter(const struct bpf_program *prog,
12595 const struct bpf_iter_attach_opts *opts)
12596 {
12597 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12598 char errmsg[STRERR_BUFSIZE];
12599 struct bpf_link *link;
12600 int prog_fd, link_fd;
12601 __u32 target_fd = 0;
12602
12603 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
12604 return libbpf_err_ptr(-EINVAL);
12605
12606 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
12607 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
12608
12609 prog_fd = bpf_program__fd(prog);
12610 if (prog_fd < 0) {
12611 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12612 return libbpf_err_ptr(-EINVAL);
12613 }
12614
12615 link = calloc(1, sizeof(*link));
12616 if (!link)
12617 return libbpf_err_ptr(-ENOMEM);
12618 link->detach = &bpf_link__detach_fd;
12619
12620 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
12621 &link_create_opts);
12622 if (link_fd < 0) {
12623 link_fd = -errno;
12624 free(link);
12625 pr_warn("prog '%s': failed to attach to iterator: %s\n",
12626 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12627 return libbpf_err_ptr(link_fd);
12628 }
12629 link->fd = link_fd;
12630 return link;
12631 }
12632
12633 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12634 {
12635 *link = bpf_program__attach_iter(prog, NULL);
12636 return libbpf_get_error(*link);
12637 }
12638
12639 struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
12640 const struct bpf_netfilter_opts *opts)
12641 {
12642 LIBBPF_OPTS(bpf_link_create_opts, lopts);
12643 struct bpf_link *link;
12644 int prog_fd, link_fd;
12645
12646 if (!OPTS_VALID(opts, bpf_netfilter_opts))
12647 return libbpf_err_ptr(-EINVAL);
12648
12649 prog_fd = bpf_program__fd(prog);
12650 if (prog_fd < 0) {
12651 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12652 return libbpf_err_ptr(-EINVAL);
12653 }
12654
12655 link = calloc(1, sizeof(*link));
12656 if (!link)
12657 return libbpf_err_ptr(-ENOMEM);
12658
12659 link->detach = &bpf_link__detach_fd;
12660
12661 lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
12662 lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
12663 lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
12664 lopts.netfilter.flags = OPTS_GET(opts, flags, 0);
12665
12666 link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
12667 if (link_fd < 0) {
12668 char errmsg[STRERR_BUFSIZE];
12669
12670 link_fd = -errno;
12671 free(link);
12672 pr_warn("prog '%s': failed to attach to netfilter: %s\n",
12673 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12674 return libbpf_err_ptr(link_fd);
12675 }
12676 link->fd = link_fd;
12677
12678 return link;
12679 }
12680
12681 struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
12682 {
12683 struct bpf_link *link = NULL;
12684 int err;
12685
12686 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
12687 return libbpf_err_ptr(-EOPNOTSUPP);
12688
12689 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
12690 if (err)
12691 return libbpf_err_ptr(err);
12692
12693 /* When calling bpf_program__attach() explicitly, auto-attach support
12694 * is expected to work, so NULL returned link is considered an error.
12695 * This is different for skeleton's attach, see comment in
12696 * bpf_object__attach_skeleton().
12697 */
12698 if (!link)
12699 return libbpf_err_ptr(-EOPNOTSUPP);
12700
12701 return link;
12702 }
12703
12704 struct bpf_link_struct_ops {
12705 struct bpf_link link;
12706 int map_fd;
12707 };
12708
12709 static int bpf_link__detach_struct_ops(struct bpf_link *link)
12710 {
12711 struct bpf_link_struct_ops *st_link;
12712 __u32 zero = 0;
12713
12714 st_link = container_of(link, struct bpf_link_struct_ops, link);
12715
12716 if (st_link->map_fd < 0)
12717 /* w/o a real link */
12718 return bpf_map_delete_elem(link->fd, &zero);
12719
12720 return close(link->fd);
12721 }
12722
12723 struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
12724 {
12725 struct bpf_link_struct_ops *link;
12726 __u32 zero = 0;
12727 int err, fd;
12728
12729 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
12730 return libbpf_err_ptr(-EINVAL);
12731
12732 link = calloc(1, sizeof(*link));
12733 if (!link)
		return libbpf_err_ptr(-ENOMEM);
12735
12736 /* kern_vdata should be prepared during the loading phase. */
12737 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
	/* It can be EBUSY if the map has been used to create or
	 * update a link before. We don't allow updating the value of
	 * a struct_ops once it is set, which ensures the value never
	 * changes, so it is safe to skip EBUSY.
	 */
12743 if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
12744 free(link);
12745 return libbpf_err_ptr(err);
12746 }
12747
12748 link->link.detach = bpf_link__detach_struct_ops;
12749
12750 if (!(map->def.map_flags & BPF_F_LINK)) {
12751 /* w/o a real link */
12752 link->link.fd = map->fd;
12753 link->map_fd = -1;
12754 return &link->link;
12755 }
12756
12757 fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
12758 if (fd < 0) {
12759 free(link);
12760 return libbpf_err_ptr(fd);
12761 }
12762
12763 link->link.fd = fd;
12764 link->map_fd = map->fd;
12765
12766 return &link->link;
12767 }
12768
/*
 * Swap the backing struct_ops map of a link with a new struct_ops map.
 */
12772 int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
12773 {
12774 struct bpf_link_struct_ops *st_ops_link;
12775 __u32 zero = 0;
12776 int err;
12777
12778 if (!bpf_map__is_struct_ops(map) || map->fd < 0)
12779 return -EINVAL;
12780
12781 st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
12782 /* Ensure the type of a link is correct */
12783 if (st_ops_link->map_fd < 0)
12784 return -EINVAL;
12785
12786 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
	/* It can be EBUSY if the map has been used to create or
	 * update a link before. We don't allow updating the value of
	 * a struct_ops once it is set, which ensures the value never
	 * changes, so it is safe to skip EBUSY.
	 */
12792 if (err && err != -EBUSY)
12793 return err;
12794
12795 err = bpf_link_update(link->fd, map->fd, NULL);
12796 if (err < 0)
12797 return err;
12798
12799 st_ops_link->map_fd = map->fd;
12800
12801 return 0;
12802 }
12803
12804 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
12805 void *private_data);
12806
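/* Drain all pending records from one mmap()'ed perf ring: read the producer
 * head, iterate records from the consumer tail, linearize any record that
 * wraps around the end of the ring into the caller-provided scratch buffer
 * (*copy_mem, grown on demand), invoke fn on each record, and finally
 * publish the new tail so the kernel can reuse the space.
 */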
12807 static enum bpf_perf_event_ret
12808 perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
12809 void **copy_mem, size_t *copy_size,
12810 bpf_perf_event_print_t fn, void *private_data)
12811 {
12812 struct perf_event_mmap_page *header = mmap_mem;
12813 __u64 data_head = ring_buffer_read_head(header);
12814 __u64 data_tail = header->data_tail;
12815 void *base = ((__u8 *)header) + page_size;
12816 int ret = LIBBPF_PERF_EVENT_CONT;
12817 struct perf_event_header *ehdr;
12818 size_t ehdr_size;
12819
12820 while (data_head != data_tail) {
12821 ehdr = base + (data_tail & (mmap_size - 1));
12822 ehdr_size = ehdr->size;
12823
12824 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
12825 void *copy_start = ehdr;
12826 size_t len_first = base + mmap_size - copy_start;
			size_t len_second = ehdr_size - len_first;
12828
12829 if (*copy_size < ehdr_size) {
12830 free(*copy_mem);
12831 *copy_mem = malloc(ehdr_size);
12832 if (!*copy_mem) {
12833 *copy_size = 0;
12834 ret = LIBBPF_PERF_EVENT_ERROR;
12835 break;
12836 }
12837 *copy_size = ehdr_size;
12838 }
12839
12840 memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_second);
12842 ehdr = *copy_mem;
12843 }
12844
12845 ret = fn(ehdr, private_data);
12846 data_tail += ehdr_size;
12847 if (ret != LIBBPF_PERF_EVENT_CONT)
12848 break;
12849 }
12850
12851 ring_buffer_write_tail(header, data_tail);
12852 return libbpf_err(ret);
12853 }
12854
12855 struct perf_buffer;
12856
12857 struct perf_buffer_params {
12858 struct perf_event_attr *attr;
	/* if event_cb is specified, it takes precedence */
12860 perf_buffer_event_fn event_cb;
12861 /* sample_cb and lost_cb are higher-level common-case callbacks */
12862 perf_buffer_sample_fn sample_cb;
12863 perf_buffer_lost_fn lost_cb;
12864 void *ctx;
12865 int cpu_cnt;
12866 int *cpus;
12867 int *map_keys;
12868 };
12869
12870 struct perf_cpu_buf {
12871 struct perf_buffer *pb;
12872 void *base; /* mmap()'ed memory */
12873 void *buf; /* for reconstructing segmented data */
12874 size_t buf_size;
12875 int fd;
12876 int cpu;
12877 int map_key;
12878 };
12879
12880 struct perf_buffer {
12881 perf_buffer_event_fn event_cb;
12882 perf_buffer_sample_fn sample_cb;
12883 perf_buffer_lost_fn lost_cb;
12884 void *ctx; /* passed into callbacks */
12885
12886 size_t page_size;
12887 size_t mmap_size;
12888 struct perf_cpu_buf **cpu_bufs;
12889 struct epoll_event *events;
12890 int cpu_cnt; /* number of allocated CPU buffers */
	int epoll_fd; /* epoll instance FD */
12892 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
12893 };
12894
12895 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
12896 struct perf_cpu_buf *cpu_buf)
12897 {
12898 if (!cpu_buf)
12899 return;
12900 if (cpu_buf->base &&
12901 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
12902 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
12903 if (cpu_buf->fd >= 0) {
12904 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
12905 close(cpu_buf->fd);
12906 }
12907 free(cpu_buf->buf);
12908 free(cpu_buf);
12909 }
12910
12911 void perf_buffer__free(struct perf_buffer *pb)
12912 {
12913 int i;
12914
12915 if (IS_ERR_OR_NULL(pb))
12916 return;
12917 if (pb->cpu_bufs) {
12918 for (i = 0; i < pb->cpu_cnt; i++) {
12919 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
12920
12921 if (!cpu_buf)
12922 continue;
12923
12924 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
12925 perf_buffer__free_cpu_buf(pb, cpu_buf);
12926 }
12927 free(pb->cpu_bufs);
12928 }
12929 if (pb->epoll_fd >= 0)
12930 close(pb->epoll_fd);
12931 free(pb->events);
12932 free(pb);
12933 }
12934
12935 static struct perf_cpu_buf *
12936 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
12937 int cpu, int map_key)
12938 {
12939 struct perf_cpu_buf *cpu_buf;
12940 char msg[STRERR_BUFSIZE];
12941 int err;
12942
12943 cpu_buf = calloc(1, sizeof(*cpu_buf));
12944 if (!cpu_buf)
12945 return ERR_PTR(-ENOMEM);
12946
12947 cpu_buf->pb = pb;
12948 cpu_buf->cpu = cpu;
12949 cpu_buf->map_key = map_key;
12950
12951 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
12952 -1, PERF_FLAG_FD_CLOEXEC);
12953 if (cpu_buf->fd < 0) {
12954 err = -errno;
12955 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
12956 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12957 goto error;
12958 }
12959
12960 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
12961 PROT_READ | PROT_WRITE, MAP_SHARED,
12962 cpu_buf->fd, 0);
12963 if (cpu_buf->base == MAP_FAILED) {
12964 cpu_buf->base = NULL;
12965 err = -errno;
12966 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
12967 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12968 goto error;
12969 }
12970
12971 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
12972 err = -errno;
12973 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
12974 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12975 goto error;
12976 }
12977
12978 return cpu_buf;
12979
12980 error:
12981 perf_buffer__free_cpu_buf(pb, cpu_buf);
12982 return (struct perf_cpu_buf *)ERR_PTR(err);
12983 }
12984
12985 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
12986 struct perf_buffer_params *p);
12987
12988 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
12989 perf_buffer_sample_fn sample_cb,
12990 perf_buffer_lost_fn lost_cb,
12991 void *ctx,
12992 const struct perf_buffer_opts *opts)
12993 {
12994 const size_t attr_sz = sizeof(struct perf_event_attr);
12995 struct perf_buffer_params p = {};
12996 struct perf_event_attr attr;
12997 __u32 sample_period;
12998
12999 if (!OPTS_VALID(opts, perf_buffer_opts))
13000 return libbpf_err_ptr(-EINVAL);
13001
13002 sample_period = OPTS_GET(opts, sample_period, 1);
13003 if (!sample_period)
13004 sample_period = 1;
13005
13006 memset(&attr, 0, attr_sz);
13007 attr.size = attr_sz;
13008 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
13009 attr.type = PERF_TYPE_SOFTWARE;
13010 attr.sample_type = PERF_SAMPLE_RAW;
13011 attr.sample_period = sample_period;
13012 attr.wakeup_events = sample_period;
13013
13014 p.attr = &attr;
13015 p.sample_cb = sample_cb;
13016 p.lost_cb = lost_cb;
13017 p.ctx = ctx;
13018
13019 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
13020 }
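/* Example sketch: setting up a perf buffer for a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map and polling it. The callback and
 * map handle are hypothetical; 8 pages per CPU is an arbitrary choice.
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// process one raw sample emitted by bpf_perf_event_output()
 *	}
 *
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(bpf_map__fd(map), 8, on_sample, NULL, NULL, NULL);
 *	if (!pb)
 *		return -errno; // NULL + errno per libbpf 1.0 conventions
 *	while (perf_buffer__poll(pb, 100) >= 0)
 *		; // callbacks fire from within poll
 *	perf_buffer__free(pb);
 */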
13021
13022 struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
13023 struct perf_event_attr *attr,
13024 perf_buffer_event_fn event_cb, void *ctx,
13025 const struct perf_buffer_raw_opts *opts)
13026 {
13027 struct perf_buffer_params p = {};
13028
13029 if (!attr)
13030 return libbpf_err_ptr(-EINVAL);
13031
13032 if (!OPTS_VALID(opts, perf_buffer_raw_opts))
13033 return libbpf_err_ptr(-EINVAL);
13034
13035 p.attr = attr;
13036 p.event_cb = event_cb;
13037 p.ctx = ctx;
13038 p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
13039 p.cpus = OPTS_GET(opts, cpus, NULL);
13040 p.map_keys = OPTS_GET(opts, map_keys, NULL);
13041
13042 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
13043 }
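/* Example sketch: the raw variant with a caller-provided
 * perf_event_attr and a low-level event callback. The callback name is
 * hypothetical; it receives every record, including PERF_RECORD_LOST.
 *
 *	static enum bpf_perf_event_ret
 *	on_event(void *ctx, int cpu, struct perf_event_header *ev)
 *	{
 *		// full control over record parsing
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_BPF_OUTPUT,
 *		.sample_type = PERF_SAMPLE_RAW,
 *		.sample_period = 1,
 *		.wakeup_events = 1,
 *	};
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new_raw(map_fd, 8, &attr, on_event, NULL, NULL);
 */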
13044
13045 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13046 struct perf_buffer_params *p)
13047 {
13048 const char *online_cpus_file = "/sys/devices/system/cpu/online";
13049 struct bpf_map_info map;
13050 char msg[STRERR_BUFSIZE];
13051 struct perf_buffer *pb;
13052 bool *online = NULL;
13053 __u32 map_info_len;
13054 int err, i, j, n;
13055
13056 if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
13057 pr_warn("page count should be power of two, but is %zu\n",
13058 page_cnt);
13059 return ERR_PTR(-EINVAL);
13060 }
13061
13062 /* best-effort sanity checks */
13063 memset(&map, 0, sizeof(map));
13064 map_info_len = sizeof(map);
13065 err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
13066 if (err) {
13067 err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
		 * -EBADFD, -EFAULT, or -E2BIG on a real error
		 */
13071 if (err != -EINVAL) {
13072 pr_warn("failed to get map info for map FD %d: %s\n",
13073 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
13074 return ERR_PTR(err);
13075 }
13076 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
13077 map_fd);
13078 } else {
13079 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
13080 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
13081 map.name);
13082 return ERR_PTR(-EINVAL);
13083 }
13084 }
13085
13086 pb = calloc(1, sizeof(*pb));
13087 if (!pb)
13088 return ERR_PTR(-ENOMEM);
13089
13090 pb->event_cb = p->event_cb;
13091 pb->sample_cb = p->sample_cb;
13092 pb->lost_cb = p->lost_cb;
13093 pb->ctx = p->ctx;
13094
13095 pb->page_size = getpagesize();
13096 pb->mmap_size = pb->page_size * page_cnt;
13097 pb->map_fd = map_fd;
13098
13099 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
13100 if (pb->epoll_fd < 0) {
13101 err = -errno;
13102 pr_warn("failed to create epoll instance: %s\n",
13103 libbpf_strerror_r(err, msg, sizeof(msg)));
13104 goto error;
13105 }
13106
13107 if (p->cpu_cnt > 0) {
13108 pb->cpu_cnt = p->cpu_cnt;
13109 } else {
13110 pb->cpu_cnt = libbpf_num_possible_cpus();
13111 if (pb->cpu_cnt < 0) {
13112 err = pb->cpu_cnt;
13113 goto error;
13114 }
13115 if (map.max_entries && map.max_entries < pb->cpu_cnt)
13116 pb->cpu_cnt = map.max_entries;
13117 }
13118
13119 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13120 if (!pb->events) {
13121 err = -ENOMEM;
13122 pr_warn("failed to allocate events: out of memory\n");
13123 goto error;
13124 }
13125 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13126 if (!pb->cpu_bufs) {
13127 err = -ENOMEM;
13128 pr_warn("failed to allocate buffers: out of memory\n");
13129 goto error;
13130 }
13131
13132 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
13133 if (err) {
13134 pr_warn("failed to get online CPU mask: %d\n", err);
13135 goto error;
13136 }
13137
13138 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
13139 struct perf_cpu_buf *cpu_buf;
13140 int cpu, map_key;
13141
13142 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
13143 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
13144
		/* if the user didn't explicitly request particular CPUs to
		 * be attached to, skip offline/not-present CPUs
		 */
13148 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
13149 continue;
13150
13151 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13152 if (IS_ERR(cpu_buf)) {
13153 err = PTR_ERR(cpu_buf);
13154 goto error;
13155 }
13156
13157 pb->cpu_bufs[j] = cpu_buf;
13158
13159 err = bpf_map_update_elem(pb->map_fd, &map_key,
13160 &cpu_buf->fd, 0);
13161 if (err) {
13162 err = -errno;
13163 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
13164 cpu, map_key, cpu_buf->fd,
13165 libbpf_strerror_r(err, msg, sizeof(msg)));
13166 goto error;
13167 }
13168
13169 pb->events[j].events = EPOLLIN;
13170 pb->events[j].data.ptr = cpu_buf;
13171 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
13172 &pb->events[j]) < 0) {
13173 err = -errno;
13174 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
13175 cpu, cpu_buf->fd,
13176 libbpf_strerror_r(err, msg, sizeof(msg)));
13177 goto error;
13178 }
13179 j++;
13180 }
13181 pb->cpu_cnt = j;
13182 free(online);
13183
13184 return pb;
13185
13186 error:
13187 free(online);
13188 if (pb)
13189 perf_buffer__free(pb);
13190 return ERR_PTR(err);
13191 }
13192
13193 struct perf_sample_raw {
13194 struct perf_event_header header;
13195 uint32_t size;
13196 char data[];
13197 };
13198
13199 struct perf_sample_lost {
13200 struct perf_event_header header;
13201 uint64_t id;
13202 uint64_t lost;
13203 uint64_t sample_id;
13204 };
13205
13206 static enum bpf_perf_event_ret
13207 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
13208 {
13209 struct perf_cpu_buf *cpu_buf = ctx;
13210 struct perf_buffer *pb = cpu_buf->pb;
13211 void *data = e;
13212
13213 /* user wants full control over parsing perf event */
13214 if (pb->event_cb)
13215 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
13216
13217 switch (e->type) {
13218 case PERF_RECORD_SAMPLE: {
13219 struct perf_sample_raw *s = data;
13220
13221 if (pb->sample_cb)
13222 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
13223 break;
13224 }
13225 case PERF_RECORD_LOST: {
13226 struct perf_sample_lost *s = data;
13227
13228 if (pb->lost_cb)
13229 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
13230 break;
13231 }
13232 default:
13233 pr_warn("unknown perf sample type %d\n", e->type);
13234 return LIBBPF_PERF_EVENT_ERROR;
13235 }
13236 return LIBBPF_PERF_EVENT_CONT;
13237 }
13238
13239 static int perf_buffer__process_records(struct perf_buffer *pb,
13240 struct perf_cpu_buf *cpu_buf)
13241 {
13242 enum bpf_perf_event_ret ret;
13243
13244 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
13245 pb->page_size, &cpu_buf->buf,
13246 &cpu_buf->buf_size,
13247 perf_buffer__process_record, cpu_buf);
13248 if (ret != LIBBPF_PERF_EVENT_CONT)
13249 return ret;
13250 return 0;
13251 }
13252
13253 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
13254 {
13255 return pb->epoll_fd;
13256 }
13257
13258 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
13259 {
13260 int i, cnt, err;
13261
13262 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
13263 if (cnt < 0)
		return libbpf_err(-errno);
13265
13266 for (i = 0; i < cnt; i++) {
13267 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
13268
13269 err = perf_buffer__process_records(pb, cpu_buf);
13270 if (err) {
13271 pr_warn("error while processing records: %d\n", err);
13272 return libbpf_err(err);
13273 }
13274 }
13275 return cnt;
13276 }
13277
13278 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
13279 * manager.
13280 */
13281 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
13282 {
13283 return pb->cpu_cnt;
13284 }
13285
13286 /*
13287 * Return perf_event FD of a ring buffer in *buf_idx* slot of
13288 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
13289 * select()/poll()/epoll() Linux syscalls.
13290 */
13291 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
13292 {
13293 struct perf_cpu_buf *cpu_buf;
13294
13295 if (buf_idx >= pb->cpu_cnt)
13296 return libbpf_err(-EINVAL);
13297
13298 cpu_buf = pb->cpu_bufs[buf_idx];
13299 if (!cpu_buf)
13300 return libbpf_err(-ENOENT);
13301
13302 return cpu_buf->fd;
13303 }
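/* Example sketch: feeding individual buffer FDs into a caller-owned
 * event loop instead of perf_buffer__poll(). Error handling elided.
 *
 *	size_t i, n = perf_buffer__buffer_cnt(pb);
 *
 *	for (i = 0; i < n; i++) {
 *		int fd = perf_buffer__buffer_fd(pb, i);
 *		// register fd with your own epoll/select loop; once it
 *		// becomes readable, call perf_buffer__consume_buffer(pb, i)
 *	}
 */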
13304
13305 int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
13306 {
13307 struct perf_cpu_buf *cpu_buf;
13308
13309 if (buf_idx >= pb->cpu_cnt)
13310 return libbpf_err(-EINVAL);
13311
13312 cpu_buf = pb->cpu_bufs[buf_idx];
13313 if (!cpu_buf)
13314 return libbpf_err(-ENOENT);
13315
13316 *buf = cpu_buf->base;
13317 *buf_size = pb->mmap_size;
13318 return 0;
13319 }
13320
13321 /*
13322 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
13323 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
13324 * consume, do nothing and return success.
13325 * Returns:
13326 * - 0 on success;
13327 * - <0 on failure.
13328 */
13329 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
13330 {
13331 struct perf_cpu_buf *cpu_buf;
13332
13333 if (buf_idx >= pb->cpu_cnt)
13334 return libbpf_err(-EINVAL);
13335
13336 cpu_buf = pb->cpu_bufs[buf_idx];
13337 if (!cpu_buf)
13338 return libbpf_err(-ENOENT);
13339
13340 return perf_buffer__process_records(pb, cpu_buf);
13341 }
13342
13343 int perf_buffer__consume(struct perf_buffer *pb)
13344 {
13345 int i, err;
13346
13347 for (i = 0; i < pb->cpu_cnt; i++) {
13348 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13349
13350 if (!cpu_buf)
13351 continue;
13352
13353 err = perf_buffer__process_records(pb, cpu_buf);
13354 if (err) {
13355 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
13356 return libbpf_err(err);
13357 }
13358 }
13359 return 0;
13360 }
13361
13362 int bpf_program__set_attach_target(struct bpf_program *prog,
13363 int attach_prog_fd,
13364 const char *attach_func_name)
13365 {
13366 int btf_obj_fd = 0, btf_id = 0, err;
13367
13368 if (!prog || attach_prog_fd < 0)
13369 return libbpf_err(-EINVAL);
13370
13371 if (prog->obj->loaded)
13372 return libbpf_err(-EINVAL);
13373
13374 if (attach_prog_fd && !attach_func_name) {
13375 /* remember attach_prog_fd and let bpf_program__load() find
13376 * BTF ID during the program load
13377 */
13378 prog->attach_prog_fd = attach_prog_fd;
13379 return 0;
13380 }
13381
13382 if (attach_prog_fd) {
13383 btf_id = libbpf_find_prog_btf_id(attach_func_name,
13384 attach_prog_fd);
13385 if (btf_id < 0)
13386 return libbpf_err(btf_id);
13387 } else {
13388 if (!attach_func_name)
13389 return libbpf_err(-EINVAL);
13390
13391 /* load btf_vmlinux, if not yet */
13392 err = bpf_object__load_vmlinux_btf(prog->obj, true);
13393 if (err)
13394 return libbpf_err(err);
13395 err = find_kernel_btf_id(prog->obj, attach_func_name,
13396 prog->expected_attach_type,
13397 &btf_obj_fd, &btf_id);
13398 if (err)
13399 return libbpf_err(err);
13400 }
13401
13402 prog->attach_btf_id = btf_id;
13403 prog->attach_btf_obj_fd = btf_obj_fd;
13404 prog->attach_prog_fd = attach_prog_fd;
13405 return 0;
13406 }
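/* Example sketch: retargeting a program at a different kernel function
 * before load. obj is a hypothetical opened-but-not-loaded object and
 * "handler" a hypothetical program declared with SEC("fentry/...");
 * passing attach_prog_fd of 0 selects the kernel-BTF path above.
 *
 *	struct bpf_program *prog;
 *	int err;
 *
 *	prog = bpf_object__find_program_by_name(obj, "handler");
 *	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
 *	if (err)
 *		return err;
 *	err = bpf_object__load(obj);
 */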
13407
13408 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
13409 {
13410 int err = 0, n, len, start, end = -1;
13411 bool *tmp;
13412
13413 *mask = NULL;
13414 *mask_sz = 0;
13415
	/* Each substring separated by ',' has the format \d+-\d+ or \d+ */
13417 while (*s) {
13418 if (*s == ',' || *s == '\n') {
13419 s++;
13420 continue;
13421 }
13422 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
13423 if (n <= 0 || n > 2) {
13424 pr_warn("Failed to get CPU range %s: %d\n", s, n);
13425 err = -EINVAL;
13426 goto cleanup;
13427 } else if (n == 1) {
13428 end = start;
13429 }
13430 if (start < 0 || start > end) {
13431 pr_warn("Invalid CPU range [%d,%d] in %s\n",
13432 start, end, s);
13433 err = -EINVAL;
13434 goto cleanup;
13435 }
13436 tmp = realloc(*mask, end + 1);
13437 if (!tmp) {
13438 err = -ENOMEM;
13439 goto cleanup;
13440 }
13441 *mask = tmp;
13442 memset(tmp + *mask_sz, 0, start - *mask_sz);
13443 memset(tmp + start, 1, end - start + 1);
13444 *mask_sz = end + 1;
13445 s += len;
13446 }
13447 if (!*mask_sz) {
13448 pr_warn("Empty CPU range\n");
13449 return -EINVAL;
13450 }
13451 return 0;
13452 cleanup:
13453 free(*mask);
13454 *mask = NULL;
13455 return err;
13456 }
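/* Example sketch of the accepted format: "0-3,5\n" yields a six-entry
 * mask with entries 0..3 and 5 set; the caller must free() the mask.
 *
 *	bool *mask;
 *	int n, err;
 *
 *	err = parse_cpu_mask_str("0-3,5\n", &mask, &n);
 *	if (!err) {
 *		// n == 6; mask[4] == false, mask[5] == true
 *		free(mask);
 *	}
 */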
13457
13458 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
13459 {
13460 int fd, err = 0, len;
13461 char buf[128];
13462
13463 fd = open(fcpu, O_RDONLY | O_CLOEXEC);
13464 if (fd < 0) {
13465 err = -errno;
13466 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
13467 return err;
13468 }
13469 len = read(fd, buf, sizeof(buf));
13470 close(fd);
13471 if (len <= 0) {
13472 err = len ? -errno : -EINVAL;
13473 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
13474 return err;
13475 }
13476 if (len >= sizeof(buf)) {
13477 pr_warn("CPU mask is too big in file %s\n", fcpu);
13478 return -E2BIG;
13479 }
13480 buf[len] = '\0';
13481
13482 return parse_cpu_mask_str(buf, mask, mask_sz);
13483 }
13484
13485 int libbpf_num_possible_cpus(void)
13486 {
13487 static const char *fcpu = "/sys/devices/system/cpu/possible";
13488 static int cpus;
13489 int err, n, i, tmp_cpus;
13490 bool *mask;
13491
13492 tmp_cpus = READ_ONCE(cpus);
13493 if (tmp_cpus > 0)
13494 return tmp_cpus;
13495
13496 err = parse_cpu_mask_file(fcpu, &mask, &n);
13497 if (err)
13498 return libbpf_err(err);
13499
13500 tmp_cpus = 0;
13501 for (i = 0; i < n; i++) {
13502 if (mask[i])
13503 tmp_cpus++;
13504 }
13505 free(mask);
13506
13507 WRITE_ONCE(cpus, tmp_cpus);
13508 return tmp_cpus;
13509 }
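/* Example sketch: the typical use of libbpf_num_possible_cpus() is
 * sizing the value buffer for a per-CPU map lookup. struct datarec is
 * a hypothetical per-CPU value type.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	if (ncpus < 0)
 *		return ncpus;
 *	struct datarec *values = calloc(ncpus, sizeof(*values));
 */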
13510
13511 static int populate_skeleton_maps(const struct bpf_object *obj,
13512 struct bpf_map_skeleton *maps,
13513 size_t map_cnt, size_t map_skel_sz)
13514 {
13515 int i;
13516
13517 for (i = 0; i < map_cnt; i++) {
13518 struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
13519 struct bpf_map **map = map_skel->map;
13520 const char *name = map_skel->name;
13521 void **mmaped = map_skel->mmaped;
13522
13523 *map = bpf_object__find_map_by_name(obj, name);
13524 if (!*map) {
13525 pr_warn("failed to find skeleton map '%s'\n", name);
13526 return -ESRCH;
13527 }
13528
		/* externs shouldn't be pre-initialized from user code */
13530 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
13531 *mmaped = (*map)->mmaped;
13532 }
13533 return 0;
13534 }
13535
13536 static int populate_skeleton_progs(const struct bpf_object *obj,
13537 struct bpf_prog_skeleton *progs,
13538 size_t prog_cnt, size_t prog_skel_sz)
13539 {
13540 int i;
13541
13542 for (i = 0; i < prog_cnt; i++) {
13543 struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
13544 struct bpf_program **prog = prog_skel->prog;
13545 const char *name = prog_skel->name;
13546
13547 *prog = bpf_object__find_program_by_name(obj, name);
13548 if (!*prog) {
13549 pr_warn("failed to find skeleton program '%s'\n", name);
13550 return -ESRCH;
13551 }
13552 }
13553 return 0;
13554 }
13555
13556 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
13557 const struct bpf_object_open_opts *opts)
13558 {
13559 struct bpf_object *obj;
13560 int err;
13561
13562 obj = bpf_object_open(NULL, s->data, s->data_sz, s->name, opts);
13563 if (IS_ERR(obj)) {
13564 err = PTR_ERR(obj);
13565 pr_warn("failed to initialize skeleton BPF object '%s': %d\n", s->name, err);
13566 return libbpf_err(err);
13567 }
13568
13569 *s->obj = obj;
13570 err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
13571 if (err) {
13572 pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
13573 return libbpf_err(err);
13574 }
13575
13576 err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
13577 if (err) {
13578 pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
13579 return libbpf_err(err);
13580 }
13581
13582 return 0;
13583 }
13584
13585 int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
13586 {
13587 int err, len, var_idx, i;
13588 const char *var_name;
13589 const struct bpf_map *map;
13590 struct btf *btf;
13591 __u32 map_type_id;
13592 const struct btf_type *map_type, *var_type;
13593 const struct bpf_var_skeleton *var_skel;
13594 struct btf_var_secinfo *var;
13595
13596 if (!s->obj)
13597 return libbpf_err(-EINVAL);
13598
13599 btf = bpf_object__btf(s->obj);
13600 if (!btf) {
13601 pr_warn("subskeletons require BTF at runtime (object %s)\n",
13602 bpf_object__name(s->obj));
		return libbpf_err(-EINVAL);
13604 }
13605
13606 err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
13607 if (err) {
13608 pr_warn("failed to populate subskeleton maps: %d\n", err);
13609 return libbpf_err(err);
13610 }
13611
13612 err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
13613 if (err) {
13614 pr_warn("failed to populate subskeleton maps: %d\n", err);
13615 return libbpf_err(err);
13616 }
13617
13618 for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
13619 var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
13620 map = *var_skel->map;
13621 map_type_id = bpf_map__btf_value_type_id(map);
13622 map_type = btf__type_by_id(btf, map_type_id);
13623
13624 if (!btf_is_datasec(map_type)) {
13625 pr_warn("type for map '%1$s' is not a datasec: %2$s",
13626 bpf_map__name(map),
13627 __btf_kind_str(btf_kind(map_type)));
13628 return libbpf_err(-EINVAL);
13629 }
13630
13631 len = btf_vlen(map_type);
13632 var = btf_var_secinfos(map_type);
13633 for (i = 0; i < len; i++, var++) {
13634 var_type = btf__type_by_id(btf, var->type);
13635 var_name = btf__name_by_offset(btf, var_type->name_off);
13636 if (strcmp(var_name, var_skel->name) == 0) {
13637 *var_skel->addr = map->mmaped + var->offset;
13638 break;
13639 }
13640 }
13641 }
13642 return 0;
13643 }
13644
13645 void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
13646 {
13647 if (!s)
13648 return;
13649 free(s->maps);
13650 free(s->progs);
13651 free(s->vars);
13652 free(s);
13653 }
13654
13655 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
13656 {
13657 int i, err;
13658
13659 err = bpf_object__load(*s->obj);
13660 if (err) {
13661 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
13662 return libbpf_err(err);
13663 }
13664
13665 for (i = 0; i < s->map_cnt; i++) {
13666 struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
13667 struct bpf_map *map = *map_skel->map;
13668 size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
13669 int prot, map_fd = map->fd;
13670 void **mmaped = map_skel->mmaped;
13671
13672 if (!mmaped)
13673 continue;
13674
13675 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
13676 *mmaped = NULL;
13677 continue;
13678 }
13679
13680 if (map->def.map_flags & BPF_F_RDONLY_PROG)
13681 prot = PROT_READ;
13682 else
13683 prot = PROT_READ | PROT_WRITE;
13684
		/* Remap the anonymous mmap()-ed "map initialization image" as
		 * BPF map-backed mmap()-ed memory, preserving the same memory
		 * address. This causes the kernel to change the process's page
		 * tables to point to a different piece of kernel memory, but
		 * from the userspace point of view the memory address (and its
		 * contents, identical at this point) stays the same. This
		 * mapping is released by bpf_object__close() as part of the
		 * normal clean-up procedure, so we don't need to worry about
		 * it from the skeleton's clean-up perspective.
		 */
13695 *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
13696 if (*mmaped == MAP_FAILED) {
13697 err = -errno;
13698 *mmaped = NULL;
13699 pr_warn("failed to re-mmap() map '%s': %d\n",
13700 bpf_map__name(map), err);
13701 return libbpf_err(err);
13702 }
13703 }
13704
13705 return 0;
13706 }
13707
13708 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
13709 {
13710 int i, err;
13711
13712 for (i = 0; i < s->prog_cnt; i++) {
13713 struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
13714 struct bpf_program *prog = *prog_skel->prog;
13715 struct bpf_link **link = prog_skel->link;
13716
13717 if (!prog->autoload || !prog->autoattach)
13718 continue;
13719
13720 /* auto-attaching not supported for this program */
13721 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
13722 continue;
13723
13724 /* if user already set the link manually, don't attempt auto-attach */
13725 if (*link)
13726 continue;
13727
13728 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
13729 if (err) {
13730 pr_warn("prog '%s': failed to auto-attach: %d\n",
13731 bpf_program__name(prog), err);
13732 return libbpf_err(err);
13733 }
13734
		/* It's possible that for some SEC() definitions auto-attach
		 * is supported only in some cases (e.g., if the definition
		 * completely specifies target information), but not in others.
		 * SEC("uprobe") is one such case. If the user specified the
		 * target binary and function name, such a BPF program can be
		 * auto-attached. But if not, it shouldn't cause the skeleton's
		 * attach to fail; it should just be skipped. attach_fn signals
		 * such a case by returning 0 (no error) and setting the link
		 * to NULL.
		 */
13745 }
13746
13747 return 0;
13748 }
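/* Example sketch: the skeleton lifecycle as driven by generated code.
 * The skeleton type my_prog_bpf and its functions are hypothetical
 * names produced by "bpftool gen skeleton".
 *
 *	struct my_prog_bpf *skel;
 *	int err;
 *
 *	skel = my_prog_bpf__open();		// bpf_object__open_skeleton()
 *	if (!skel)
 *		return -errno;
 *	err = my_prog_bpf__load(skel);		// bpf_object__load_skeleton()
 *	if (!err)
 *		err = my_prog_bpf__attach(skel);	// bpf_object__attach_skeleton()
 *	...
 *	my_prog_bpf__destroy(skel);		// bpf_object__destroy_skeleton()
 */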
13749
13750 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
13751 {
13752 int i;
13753
13754 for (i = 0; i < s->prog_cnt; i++) {
13755 struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
13756 struct bpf_link **link = prog_skel->link;
13757
13758 bpf_link__destroy(*link);
13759 *link = NULL;
13760 }
13761 }
13762
13763 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
13764 {
13765 if (!s)
13766 return;
13767
13768 if (s->progs)
13769 bpf_object__detach_skeleton(s);
13770 if (s->obj)
13771 bpf_object__close(*s->obj);
13772 free(s->maps);
13773 free(s->progs);
13774 free(s);
13775 }
13776