• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
18 
19 #ifdef HAVE_LIBELF
20 #include <gelf.h>
21 #endif
22 
23 #ifdef HAVE_ELFIO
24 #include "elfio_c_wrapper.h"
25 
26 typedef struct Elf64_Ehdr Elf64_Ehdr;
27 typedef struct Elf64_Shdr Elf64_Shdr;
28 typedef struct {
29   void *d_buf;
30   size_t d_size;
31 } Elf_Data;
32 #endif
33 
34 #include "btf.h"
35 #include "bpf.h"
36 #include "libbpf.h"
37 #include "libbpf_internal.h"
38 #include "hashmap.h"
39 #include "strset.h"
40 
41 #define BTF_MAX_NR_TYPES 0x7fffffffU
42 #define BTF_MAX_STR_OFFSET 0x7fffffffU
43 
/* Placeholder returned for type ID 0 (the implicit VOID type). */
static struct btf_type btf_void;

struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The hdr, type_data, and, strs_data
	 * point inside that memory region to their respective parts of BTF
	 * representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., due to types added or
	 * removed, BTF deduplication performed, etc, this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions to be able to modify them independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to total size of base BTF's string section size.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* BTF object FD, if loaded into kernel; -1 otherwise */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF;
	 * 0 means "not determined yet", negative means heuristic failed
	 */
	int ptr_sz;
};
140 
/* Reinterpret a host pointer as a 64-bit integer (kernel uAPI convention
 * for passing user-space pointers in bpf_attr fields).
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
145 
/* Ensure given dynamically allocated memory region pointed to by *data* with
 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
 * are already used. At most *max_cnt* elements can be ever allocated.
 * If necessary, memory is reallocated and all existing data is copied over,
 * new pointer to the memory region is stored at *data, new memory region
 * capacity (in number of elements) is stored in *cap.
 * On success, memory pointer to the beginning of unused memory is returned.
 * On error, NULL is returned.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	/* requested more than the set limit; formulated so that
	 * cur_cnt + add_cnt can't overflow size_t and silently pass
	 * the capacity checks below
	 */
	if (add_cnt > max_cnt || cur_cnt > max_cnt - add_cnt)
		return NULL;

	/* fast path: existing capacity already suffices */
	if (cur_cnt + add_cnt <= *cap_cnt)
		return *data + cur_cnt * elem_sz;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}
189 
190 /* Ensure given dynamically allocated memory region has enough allocated space
191  * to accommodate *need_cnt* elements of size *elem_sz* bytes each
192  */
libbpf_ensure_mem(void ** data,size_t * cap_cnt,size_t elem_sz,size_t need_cnt)193 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
194 {
195 	void *p;
196 
197 	if (need_cnt <= *cap_cnt)
198 		return 0;
199 
200 	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
201 	if (!p)
202 		return -ENOMEM;
203 
204 	return 0;
205 }
206 
/* Reserve room for add_cnt more entries in btf's type offset index;
 * returns pointer to the first new slot, or NULL on OOM/limit overflow.
 */
static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
{
	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
}
212 
/* Append one type offset to btf's type ID -> offset lookup index. */
static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{
	__u32 *slot = btf_add_type_offs_mem(btf, 1);

	if (!slot)
		return -ENOMEM;

	*slot = type_off;
	return 0;
}
224 
/* Byte-swap every multi-byte field of a BTF header in place. */
static void btf_bswap_hdr(struct btf_header *h)
{
	h->magic = bswap_16(h->magic);
	h->hdr_len = bswap_32(h->hdr_len);
	h->type_off = bswap_32(h->type_off);
	h->type_len = bswap_32(h->type_len);
	h->str_off = bswap_32(h->str_off);
	h->str_len = bswap_32(h->str_len);
}
234 
/* Validate btf->hdr against btf->raw_size, byte-swapping the header into
 * native endianness first if the magic indicates foreign endianness.
 * Checks magic, header length, overall data size, section layout, and
 * type section alignment. Returns 0 on success, negative error otherwise.
 */
static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->raw_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	/* byte-swapped magic means the data came from the other endianness */
	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		btf->swapped_endian = true;
		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				bswap_32(hdr->hdr_len));
			return -ENOTSUP;
		}
		/* bring the header itself into native endianness */
		btf_bswap_hdr(hdr);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
		return -EINVAL;
	}

	if (btf->raw_size < hdr->hdr_len) {
		pr_debug("BTF header len %u larger than data size %u\n",
			 hdr->hdr_len, btf->raw_size);
		return -EINVAL;
	}

	/* data past the header must cover the string section; the sum is
	 * computed in long long to avoid __u32 overflow
	 */
	meta_left = btf->raw_size - hdr->hdr_len;
	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
		return -EINVAL;
	}

	/* type section must precede (or abut) the string section */
	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
		return -EINVAL;
	}

	if (hdr->type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	return 0;
}
283 
/* Validate the string section: non-empty (unless split BTF with no own
 * strings), within BTF_MAX_STR_OFFSET, NUL-terminated, and for base BTF
 * starting with the mandatory empty string at offset 0.
 */
static int btf_parse_str_sec(struct btf *btf)
{
	const struct btf_header *hdr = btf->hdr;
	const char *start = btf->strs_data;
	const char *end = start + btf->hdr->str_len;

	/* split BTF may legitimately add no strings of its own */
	if (btf->base_btf && hdr->str_len == 0)
		return 0;
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	/* base BTF's string section must begin with an empty string */
	if (!btf->base_btf && start[0]) {
		pr_debug("Invalid BTF string section\n");
		return -EINVAL;
	}
	return 0;
}
302 
/* Size in bytes of a single type descriptor: common struct btf_type plus
 * kind-specific trailing data (vlen-sized for STRUCT/UNION/ENUM/etc).
 * Returns -EINVAL for an unrecognized kind.
 */
static int btf_type_size(const struct btf_type *t)
{
	const int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		/* no extra data beyond the common descriptor */
		return base_size;
	case BTF_KIND_INT:
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ENUM64:
		return base_size + vlen * sizeof(struct btf_enum64);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	case BTF_KIND_DECL_TAG:
		return base_size + sizeof(struct btf_decl_tag);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
343 
/* Byte-swap the fields common to every BTF type descriptor. */
static void btf_bswap_type_base(struct btf_type *t)
{
	t->name_off = bswap_32(t->name_off);
	t->info = bswap_32(t->info);
	t->type = bswap_32(t->type);
}
350 
/* Byte-swap the kind-specific data that follows the common type
 * descriptor (which must have been swapped already via
 * btf_bswap_type_base()). Returns 0 on success, -EINVAL for an
 * unrecognized kind.
 */
static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_enum64 *e64;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		/* nothing beyond the common descriptor */
		return 0;
	case BTF_KIND_INT:
		/* single __u32 of encoding/offset/bits info */
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ENUM64:
		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
			e64->name_off = bswap_32(e64->name_off);
			/* 64-bit value is stored as two 32-bit halves */
			e64->val_lo32 = bswap_32(e64->val_lo32);
			e64->val_hi32 = bswap_32(e64->val_hi32);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_DECL_TAG:
		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
427 
/* Walk the type section record by record: byte-swap each record if the
 * data is foreign-endian, validate that the record fits inside the
 * section, and register its offset in the type lookup index. The section
 * must be consumed exactly, with no trailing bytes.
 */
static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *next_type = btf->types_data;
	void *end_type = next_type + hdr->type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		/* common part must be swapped first so btf_type_size() can
		 * read kind/vlen correctly
		 */
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(next_type);
		if (type_size < 0)
			return type_size;
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	/* leftover bytes smaller than a type descriptor mean corruption */
	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}
465 
/* Total number of type IDs covered by this BTF (including base BTF and
 * the implicit VOID type [0]); equivalently, max valid type ID plus 1.
 */
__u32 btf__type_cnt(const struct btf *btf)
{
	return btf->start_id + btf->nr_types;
}
470 
/* Return base BTF this split BTF is built on top of, or NULL for base BTF. */
const struct btf *btf__base_btf(const struct btf *btf)
{
	return btf->base_btf;
}
475 
476 /* internal helper returning non-const pointer to a type */
btf_type_by_id(const struct btf * btf,__u32 type_id)477 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
478 {
479 	if (type_id == 0)
480 		return &btf_void;
481 	if (type_id < btf->start_id)
482 		return btf_type_by_id(btf->base_btf, type_id);
483 	return btf->types_data + btf->type_offs[type_id - btf->start_id];
484 }
485 
/* Public type lookup: validates the ID range, sets errno on failure. */
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
	if (type_id < btf->start_id + btf->nr_types)
		return btf_type_by_id((struct btf *)btf, type_id);

	errno = EINVAL;
	return NULL;
}
492 
/* Heuristically determine the target's pointer size by finding an integer
 * type spelled like C's 'long' and taking its size (4 or 8 bytes only).
 * Returns the size in bytes, or -1 if no suitable type exists.
 */
static int determine_ptr_size(const struct btf *btf)
{
	/* all token orderings in which C allows 'long' to be spelled */
	static const char * const long_aliases[] = {
		"long",
		"long int",
		"int long",
		"unsigned long",
		"long unsigned",
		"unsigned long int",
		"unsigned int long",
		"long unsigned int",
		"long int unsigned",
		"int unsigned long",
		"int long unsigned",
	};
	const struct btf_type *t;
	const char *name;
	int i, j, n;

	/* split BTF inherits the answer already computed for its base */
	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
		return btf->base_btf->ptr_sz;

	n = btf__type_cnt(btf);
	for (i = 1; i < n; i++) {
		t = btf__type_by_id(btf, i);
		if (!btf_is_int(t))
			continue;

		/* only plausible pointer sizes are of interest */
		if (t->size != 4 && t->size != 8)
			continue;

		name = btf__name_by_offset(btf, t->name_off);
		if (!name)
			continue;

		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
			if (strcmp(name, long_aliases[j]) == 0)
				return t->size;
		}
	}

	return -1;
}
536 
btf_ptr_sz(const struct btf * btf)537 static size_t btf_ptr_sz(const struct btf *btf)
538 {
539 	if (!btf->ptr_sz)
540 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
541 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
542 }
543 
544 /* Return pointer size this BTF instance assumes. The size is heuristically
545  * determined by looking for 'long' or 'unsigned long' integer type and
546  * recording its size in bytes. If BTF type information doesn't have any such
547  * type, this function returns 0. In the latter case, native architecture's
548  * pointer size is assumed, so will be either 4 or 8, depending on
549  * architecture that libbpf was compiled for. It's possible to override
550  * guessed value by using btf__set_pointer_size() API.
551  */
btf__pointer_size(const struct btf * btf)552 size_t btf__pointer_size(const struct btf *btf)
553 {
554 	if (!btf->ptr_sz)
555 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
556 
557 	if (btf->ptr_sz < 0)
558 		/* not enough BTF type info to guess */
559 		return 0;
560 
561 	return btf->ptr_sz;
562 }
563 
564 /* Override or set pointer size in bytes. Only values of 4 and 8 are
565  * supported.
566  */
btf__set_pointer_size(struct btf * btf,size_t ptr_sz)567 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
568 {
569 	if (ptr_sz != 4 && ptr_sz != 8)
570 		return libbpf_err(-EINVAL);
571 	btf->ptr_sz = ptr_sz;
572 	return 0;
573 }
574 
/* Compile-time detection of host byte order; unknown orders fail the build. */
static bool is_host_big_endian(void)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}
585 
btf__endianness(const struct btf * btf)586 enum btf_endianness btf__endianness(const struct btf *btf)
587 {
588 	if (is_host_big_endian())
589 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
590 	else
591 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
592 }
593 
btf__set_endianness(struct btf * btf,enum btf_endianness endian)594 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
595 {
596 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
597 		return libbpf_err(-EINVAL);
598 
599 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
600 	if (!btf->swapped_endian) {
601 		free(btf->raw_data_swapped);
602 		btf->raw_data_swapped = NULL;
603 	}
604 	return 0;
605 }
606 
btf_type_is_void(const struct btf_type * t)607 static bool btf_type_is_void(const struct btf_type *t)
608 {
609 	return t == &btf_void || btf_is_fwd(t);
610 }
611 
/* NULL-tolerant variant of btf_type_is_void(). */
static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return t == NULL || btf_type_is_void(t);
}
616 
617 #define MAX_RESOLVE_DEPTH 32
618 
/* Resolve the byte size of a type: follow typedefs, modifiers, vars, and
 * tags down to a sized type, multiplying in array element counts along
 * the way (bounded by MAX_RESOLVE_DEPTH to break reference cycles).
 * Returns total size in bytes, or a negative error.
 */
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;	/* product of nested array dimensions */
	__s64 size = -1;	/* stays negative if no sized type is reached */
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			/* directly sized types terminate the walk */
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			/* transparent wrappers: follow the referenced type */
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			/* guard nelems accumulation against __u32 overflow */
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	if (size < 0)
		return libbpf_err(-EINVAL);
	/* guard total size against __u32 overflow */
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}
673 
/* Determine natural alignment (in bytes) of a type. For structs/unions,
 * misaligned member offsets or a size not divisible by the max member
 * alignment indicate a packed layout (alignment 1). Returns alignment > 0
 * on success, 0 with errno = EINVAL on unsupported kind, or a negative
 * error propagated from member resolution.
 */
int btf__align_of(const struct btf *btf, __u32 id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);
	__u16 kind = btf_kind(t);

	switch (kind) {
	case BTF_KIND_INT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
	case BTF_KIND_FLOAT:
		/* scalars align to their size, capped at pointer size */
		return min(btf_ptr_sz(btf), (size_t)t->size);
	case BTF_KIND_PTR:
		return btf_ptr_sz(btf);
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		/* modifiers don't change alignment */
		return btf__align_of(btf, t->type);
	case BTF_KIND_ARRAY:
		return btf__align_of(btf, btf_array(t)->type);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);
		__u16 vlen = btf_vlen(t);
		int i, max_align = 1, align;

		for (i = 0; i < vlen; i++, m++) {
			align = btf__align_of(btf, m->type);
			if (align <= 0)
				return libbpf_err(align);
			max_align = max(max_align, align);

			/* if field offset isn't aligned according to field
			 * type's alignment, then struct must be packed
			 */
			if (btf_member_bitfield_size(t, i) == 0 &&
			    (m->offset % (8 * align)) != 0)
				return 1;
		}

		/* if struct/union size isn't a multiple of its alignment,
		 * then struct must be packed
		 */
		if ((t->size % max_align) != 0)
			return 1;

		return max_align;
	}
	default:
		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
		return errno = EINVAL, 0;
	}
}
728 
/* Follow modifiers, typedefs, and vars to the underlying type; bounded
 * by MAX_RESOLVE_DEPTH so reference cycles can't loop forever. Returns
 * the resolved type ID or a negative error.
 */
int btf__resolve_type(const struct btf *btf, __u32 type_id)
{
	const struct btf_type *t = btf__type_by_id(btf, type_id);
	int depth;

	for (depth = 0; depth < MAX_RESOLVE_DEPTH; depth++) {
		if (btf_type_is_void_or_null(t))
			break;
		if (!btf_is_mod(t) && !btf_is_typedef(t) && !btf_is_var(t))
			break;
		type_id = t->type;
		t = btf__type_by_id(btf, type_id);
	}

	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
		return libbpf_err(-EINVAL);

	return type_id;
}
748 
btf__find_by_name(const struct btf * btf,const char * type_name)749 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
750 {
751 	__u32 i, nr_types = btf__type_cnt(btf);
752 
753 	if (!strcmp(type_name, "void"))
754 		return 0;
755 
756 	for (i = 1; i < nr_types; i++) {
757 		const struct btf_type *t = btf__type_by_id(btf, i);
758 		const char *name = btf__name_by_offset(btf, t->name_off);
759 
760 		if (name && !strcmp(type_name, name))
761 			return i;
762 	}
763 
764 	return libbpf_err(-ENOENT);
765 }
766 
btf_find_by_name_kind(const struct btf * btf,int start_id,const char * type_name,__u32 kind)767 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
768 				   const char *type_name, __u32 kind)
769 {
770 	__u32 i, nr_types = btf__type_cnt(btf);
771 
772 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
773 		return 0;
774 
775 	for (i = start_id; i < nr_types; i++) {
776 		const struct btf_type *t = btf__type_by_id(btf, i);
777 		const char *name;
778 
779 		if (btf_kind(t) != kind)
780 			continue;
781 		name = btf__name_by_offset(btf, t->name_off);
782 		if (name && !strcmp(type_name, name))
783 			return i;
784 	}
785 
786 	return libbpf_err(-ENOENT);
787 }
788 
/* Like btf__find_by_name_kind(), but searches only this BTF's own types,
 * skipping any base BTF this split BTF is built on.
 */
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}
794 
/* Find a type by name and kind across base and split BTF (from ID 1 on). */
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}
800 
/* BTF is in modifiable state once its header no longer points into the
 * contiguous raw_data blob (see the layout comment on struct btf).
 */
static bool btf_is_modifiable(const struct btf *btf)
{
	return (void *)btf->hdr != btf->raw_data;
}
805 
/* Release a BTF object and all memory it owns; NULL/error pointers are
 * tolerated. Closes the kernel FD if the BTF was loaded.
 */
void btf__free(struct btf *btf)
{
	if (IS_ERR_OR_NULL(btf))
		return;

	if (btf->fd >= 0)
		close(btf->fd);

	if (btf_is_modifiable(btf)) {
		/* if BTF was modified after loading, it will have a split
		 * in-memory representation for header, types, and strings
		 * sections, so we need to free all of them individually. It
		 * might still have a cached contiguous raw data present,
		 * which will be unconditionally freed below.
		 */
		free(btf->hdr);
		free(btf->types_data);
		strset__free(btf->strs_set);
	}
	free(btf->raw_data);
	free(btf->raw_data_swapped);
	free(btf->type_offs);
	free(btf);
}
830 
/* Create a fresh BTF object with no types, optionally stacked on top of
 * base_btf as split BTF. Returns the new object or ERR_PTR(-ENOMEM).
 */
static struct btf *btf_new_empty(struct btf *base_btf)
{
	struct btf *btf;

	btf = calloc(1, sizeof(*btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;
	btf->ptr_sz = sizeof(void *);
	btf->swapped_endian = false;

	if (base_btf) {
		/* split BTF continues base BTF's type ID and string spaces */
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	/* +1 for empty string at offset 0 */
	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
	btf->raw_data = calloc(1, btf->raw_size);
	if (!btf->raw_data) {
		free(btf);
		return ERR_PTR(-ENOMEM);
	}

	btf->hdr = btf->raw_data;
	btf->hdr->hdr_len = sizeof(struct btf_header);
	btf->hdr->magic = BTF_MAGIC;
	btf->hdr->version = BTF_VERSION;

	/* type and string sections both start right after the header */
	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */

	return btf;
}
871 
/* Public wrapper: create an empty base BTF object. */
struct btf *btf__new_empty(void)
{
	return libbpf_ptr(btf_new_empty(NULL));
}
876 
/* Public wrapper: create an empty split BTF on top of base_btf. */
struct btf *btf__new_empty_split(struct btf *base_btf)
{
	return libbpf_ptr(btf_new_empty(base_btf));
}
881 
/* Construct a BTF object from a private copy of raw BTF bytes, parsing
 * and validating the header, string, and type sections. Optionally
 * stacked on base_btf as split BTF. Returns the object or ERR_PTR().
 */
static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
	struct btf *btf;
	int err;

	btf = calloc(1, sizeof(struct btf));
	if (!btf)
		return ERR_PTR(-ENOMEM);

	btf->nr_types = 0;
	btf->start_id = 1;
	btf->start_str_off = 0;
	btf->fd = -1;

	if (base_btf) {
		/* split BTF continues base BTF's type ID and string spaces */
		btf->base_btf = base_btf;
		btf->start_id = btf__type_cnt(base_btf);
		btf->start_str_off = base_btf->hdr->str_len;
	}

	/* keep a private copy so the caller's buffer can be freed/reused */
	btf->raw_data = malloc(size);
	if (!btf->raw_data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf->raw_data, data, size);
	btf->raw_size = size;

	btf->hdr = btf->raw_data;
	err = btf_parse_hdr(btf);
	if (err)
		goto done;

	/* section pointers are only valid once the header is validated */
	btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
	btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;

	err = btf_parse_str_sec(btf);
	err = err ?: btf_parse_type_sec(btf);
	if (err)
		goto done;

done:
	if (err) {
		btf__free(btf);
		return ERR_PTR(err);
	}

	return btf;
}
931 
/* Public wrapper: build a base BTF object from raw BTF bytes. */
struct btf *btf__new(const void *data, __u32 size)
{
	return libbpf_ptr(btf_new(data, size, NULL));
}
936 
937 #ifdef HAVE_ELFIO
/* Fill a caller-provided Elf64_Shdr from ELFIO section accessors for the
 * section at the given index (HAVE_ELFIO builds emulate gelf_getshdr()).
 * NOTE(review): assumes idx is valid — elfio_get_section_by_index()'s
 * failure mode isn't checked here; confirm against the ELFIO wrapper API.
 */
static Elf64_Shdr *elf_sec_hdr_by_idx(pelfio_t elf, size_t idx, Elf64_Shdr *sheader)
{
	psection_t psection = elfio_get_section_by_index(elf, idx);

	sheader->sh_name = elfio_section_get_name_string_offset(psection);
	sheader->sh_type = elfio_section_get_type(psection);
	sheader->sh_flags = elfio_section_get_flags(psection);
	sheader->sh_addr = elfio_section_get_address(psection);
	sheader->sh_offset = elfio_section_get_offset(psection);
	sheader->sh_size = elfio_section_get_size(psection);
	sheader->sh_link = elfio_section_get_link(psection);
	sheader->sh_info = elfio_section_get_info(psection);
	sheader->sh_addralign = elfio_section_get_addr_align(psection);
	sheader->sh_entsize = elfio_section_get_entry_size(psection);

	return sheader;
}
955 #endif
956 
btf_parse_elf(const char * path,struct btf * base_btf,struct btf_ext ** btf_ext)957 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
958 				 struct btf_ext **btf_ext)
959 {
960 	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
961 	int err = 0, idx = 0;
962 	struct btf *btf = NULL;
963 #ifdef HAVE_LIBELF
964 	Elf_Scn *scn = NULL;
965 	Elf *elf = NULL;
966 	GElf_Ehdr ehdr;
967 #elif defined HAVE_ELFIO
968 	pelfio_t elf;
969 	Elf64_Ehdr ehdr;
970 	Elf_Data btf_data_temp, btf_ext_data_temp;
971 #endif
972 	size_t shstrndx;
973 
974 #ifdef HAVE_LIBELF
975 	if (elf_version(EV_CURRENT) == EV_NONE) {
976 		pr_warn("failed to init libelf for %s\n", path);
977 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
978 	}
979 #endif
980 
981 	int fd = open(path, O_RDONLY | O_CLOEXEC);
982 	if (fd < 0) {
983 		err = -errno;
984 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
985 		return ERR_PTR(err);
986 	}
987 
988 	err = -LIBBPF_ERRNO__FORMAT;
989 
990 #ifdef HAVE_LIBELF
991 	elf = elf_begin(fd, ELF_C_READ, NULL);
992 #elif defined HAVE_ELFIO
993 	elf = elfio_new();
994 	if (!elfio_load(elf, path)) {
995 		printf( "Can't load ELF file\n" );
996 		goto done;
997     }
998 #endif
999 	if (!elf) {
1000 		pr_warn("failed to open %s as ELF file\n", path);
1001 		goto done;
1002 	}
1003 
1004 #ifdef HAVE_LIBELF
1005 	if (!gelf_getehdr(elf, &ehdr)) {
1006 #elif defined HAVE_ELFIO
1007 	ssize_t nread = read(fd, &ehdr, sizeof(Elf64_Ehdr));
1008 	if(nread != sizeof(Elf64_Ehdr)) {
1009 #endif
1010 		pr_warn("failed to get EHDR from %s\n", path);
1011 		goto done;
1012 	}
1013 
1014 #ifdef HAVE_LIBELF
1015 	if (elf_getshdrstrndx(elf, &shstrndx)) {
1016 #elif defined HAVE_ELFIO
1017 	shstrndx = elfio_get_section_name_str_index(elf);
1018 	if(shstrndx == 0) {
1019 #endif
1020 		pr_warn("failed to get section names section index for %s\n",
1021 			path);
1022 		goto done;
1023 	}
1024 
1025 #ifdef HAVE_LIBELF
1026 	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
1027 		pr_warn("failed to get e_shstrndx from %s\n", path);
1028 		goto done;
1029 	}
1030 #endif
1031 
1032 #if defined HAVE_LIBELF
1033 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1034 		GElf_Shdr sh;
1035 #elif defined HAVE_ELFIO
1036 	psection_t psection = elfio_get_section_by_name(elf, ".shstrtab");
1037 	if (!psection )
1038 		goto done;
1039 
1040 	pstring_t pstring = elfio_string_section_accessor_new(psection);
1041 	if (!pstring )
1042 		goto done;
1043 
1044 	int secno = elfio_get_sections_num(elf);
1045 
1046 	for ( int i = 0; i < secno; i++ ) {
1047 		Elf64_Shdr sh;
1048 #endif
1049 		char *name;
1050 
1051 		idx++;
1052 #if defined HAVE_LIBELF
1053 		if (gelf_getshdr(scn, &sh) != &sh) {
1054 #elif defined HAVE_ELFIO
1055 		if (!elf_sec_hdr_by_idx(elf, i, &sh)) {
1056 #endif
1057 			pr_warn("failed to get section(%d) header from %s\n",
1058 				idx, path);
1059 			goto done;
1060 		}
1061 #if defined HAVE_LIBELF
1062 		name = elf_strptr(elf, shstrndx, sh.sh_name);
1063 #elif defined HAVE_ELFIO
1064 		name = elfio_string_get_string(pstring, sh.sh_name);
1065 #endif
1066 		if (!name) {
1067 			pr_warn("failed to get section(%d) name from %s\n",
1068 				idx, path);
1069 			goto done;
1070 		}
1071 		if (strcmp(name, BTF_ELF_SEC) == 0) {
1072 #if defined HAVE_LIBELF
1073 			btf_data = elf_getdata(scn, 0);
1074 #elif defined HAVE_ELFIO
1075 			psection_t psection_index = elfio_get_section_by_index(elf, i);
1076 			btf_data_temp.d_buf = (void*)elfio_section_get_data(psection_index);
1077 			btf_data_temp.d_size = elfio_section_get_size(psection_index);
1078 			btf_data = &btf_data_temp;
1079 #endif
1080 			if (!btf_data) {
1081 				pr_warn("failed to get section(%d, %s) data from %s\n",
1082 					idx, name, path);
1083 				goto done;
1084 			}
1085 			continue;
1086 		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1087 #if defined HAVE_LIBELF
1088 			btf_ext_data = elf_getdata(scn, 0);
1089 #elif defined HAVE_ELFIO
1090 			psection_t psection_index = elfio_get_section_by_index(elf, i);
1091 			btf_ext_data_temp.d_buf = (void*)elfio_section_get_data(psection_index);
1092 			btf_ext_data_temp.d_size = elfio_section_get_size(psection_index);
1093 			btf_ext_data = &btf_ext_data_temp;
1094 #endif
1095 			if (!btf_ext_data) {
1096 				pr_warn("failed to get section(%d, %s) data from %s\n",
1097 					idx, name, path);
1098 				goto done;
1099 			}
1100 			continue;
1101 		}
1102 	}
1103 
1104 	err = 0;
1105 
1106 	if (!btf_data) {
1107 		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1108 		err = -ENOENT;
1109 		goto done;
1110 	}
1111 	btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
1112 	err = libbpf_get_error(btf);
1113 	if (err)
1114 		goto done;
1115 #ifdef HAVE_LIBELF
1116 	switch (gelf_getclass(elf)) {
1117 #elif defined HAVE_ELFIO
1118 	switch (elfio_get_class(elf)) {
1119 #endif
1120 
1121 	case ELFCLASS32:
1122 		btf__set_pointer_size(btf, 4);
1123 		break;
1124 	case ELFCLASS64:
1125 		btf__set_pointer_size(btf, 8);
1126 		break;
1127 	default:
1128 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
1129 		break;
1130 	}
1131 
1132 	if (btf_ext && btf_ext_data) {
1133 		*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
1134 		err = libbpf_get_error(*btf_ext);
1135 		if (err)
1136 			goto done;
1137 	} else if (btf_ext) {
1138 		*btf_ext = NULL;
1139 	}
1140 done:
1141 	if (elf)
1142 #ifdef HAVE_LIBELF
1143 		elf_end(elf);
1144 #elif defined HAVE_ELFIO
1145 		elfio_delete(elf);
1146 	if(pstring)
1147 		elfio_string_section_accessor_delete(pstring);
1148 #endif
1149 	close(fd);
1150 
1151 	if (!err)
1152 		return btf;
1153 
1154 	if (btf_ext)
1155 		btf_ext__free(*btf_ext);
1156 	btf__free(btf);
1157 
1158 	return ERR_PTR(err);
1159 }
1160 
/* Public API: parse standalone BTF out of the '.BTF' ELF section of *path*;
 * optionally also parse '.BTF.ext' into *btf_ext*. Errors are converted to
 * errno-style return by libbpf_ptr().
 */
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
}
1165 
/* Public API: like btf__parse_elf(), but resulting BTF is split BTF on top
 * of *base_btf*; '.BTF.ext' is not parsed.
 */
struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
}
1170 
1171 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
1172 {
1173 	struct btf *btf = NULL;
1174 	void *data = NULL;
1175 	FILE *f = NULL;
1176 	__u16 magic;
1177 	int err = 0;
1178 	long sz;
1179 
1180 	f = fopen(path, "rb");
1181 	if (!f) {
1182 		err = -errno;
1183 		goto err_out;
1184 	}
1185 
1186 	/* check BTF magic */
1187 	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1188 		err = -EIO;
1189 		goto err_out;
1190 	}
1191 	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1192 		/* definitely not a raw BTF */
1193 		err = -EPROTO;
1194 		goto err_out;
1195 	}
1196 
1197 	/* get file size */
1198 	if (fseek(f, 0, SEEK_END)) {
1199 		err = -errno;
1200 		goto err_out;
1201 	}
1202 	sz = ftell(f);
1203 	if (sz < 0) {
1204 		err = -errno;
1205 		goto err_out;
1206 	}
1207 	/* rewind to the start */
1208 	if (fseek(f, 0, SEEK_SET)) {
1209 		err = -errno;
1210 		goto err_out;
1211 	}
1212 
1213 	/* pre-alloc memory and read all of BTF data */
1214 	data = malloc(sz);
1215 	if (!data) {
1216 		err = -ENOMEM;
1217 		goto err_out;
1218 	}
1219 	if (fread(data, 1, sz, f) < sz) {
1220 		err = -EIO;
1221 		goto err_out;
1222 	}
1223 
1224 	/* finally parse BTF data */
1225 	btf = btf_new(data, sz, base_btf);
1226 
1227 err_out:
1228 	free(data);
1229 	if (f)
1230 		fclose(f);
1231 	return err ? ERR_PTR(err) : btf;
1232 }
1233 
/* Public API: parse raw BTF data from the file at *path*. */
struct btf *btf__parse_raw(const char *path)
{
	return libbpf_ptr(btf_parse_raw(path, NULL));
}
1238 
/* Public API: parse raw BTF from *path* as split BTF on top of *base_btf*. */
struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_raw(path, base_btf));
}
1243 
1244 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1245 {
1246 	struct btf *btf;
1247 	int err;
1248 
1249 	if (btf_ext)
1250 		*btf_ext = NULL;
1251 
1252 	btf = btf_parse_raw(path, base_btf);
1253 	err = libbpf_get_error(btf);
1254 	if (!err)
1255 		return btf;
1256 	if (err != -EPROTO)
1257 		return ERR_PTR(err);
1258 	return btf_parse_elf(path, base_btf, btf_ext);
1259 }
1260 
/* Public API: parse BTF (raw or ELF) from *path*; optionally also parse
 * '.BTF.ext' into *btf_ext* when the input turns out to be an ELF file.
 */
struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
}
1265 
/* Public API: parse BTF (raw or ELF) from *path* as split BTF on top of
 * *base_btf*.
 */
struct btf *btf__parse_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
1270 
1271 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1272 
/* Load BTF into the kernel via bpf_btf_load(), retrying with verifier log
 * enabled on failure. If *log_buf*/*log_sz* are provided, the verifier log
 * goes into the caller's buffer; otherwise an internal buffer is grown
 * geometrically as needed and dumped via pr_warn() on terminal failure.
 * On success, btf->fd holds the kernel-assigned BTF FD and 0 is returned;
 * on failure, -errno-style error is returned.
 */
int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	/* already loaded: loading twice is an error */
	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	/* a log size without a log buffer makes no sense */
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
	 * initially. Only if BTF loading fails, we bump log_level to 1 and
	 * retry, using either auto-allocated or custom log_buf. This way
	 * non-NULL custom log_buf provides a buffer just in case, but hopes
	 * for successful load and no need for log_buf.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		err = -errno;
		pr_warn("BTF loading error: %d\n", err);
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	free(buf);
	return libbpf_err(err);
}
1347 
/* Public API: load *btf* into the kernel with default log settings
 * (no custom log buffer, log only enabled on failure retry).
 */
int btf__load_into_kernel(struct btf *btf)
{
	return btf_load_into_kernel(btf, NULL, 0, 0);
}
1352 
/* Return the FD associated with this BTF object (as set by
 * btf__load_into_kernel() or btf__set_fd()).
 */
int btf__fd(const struct btf *btf)
{
	return btf->fd;
}
1357 
/* Associate a (caller-owned) kernel BTF FD with this BTF object. */
void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}
1362 
1363 static const void *btf_strs_data(const struct btf *btf)
1364 {
1365 	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1366 }
1367 
/* Materialize BTF as one contiguous blob (header, then types, then strings)
 * in the requested endianness. If a cached blob of that endianness exists,
 * return it as-is. Otherwise allocate a fresh one; callers that want caching
 * store the returned pointer back themselves (see btf__raw_data() and
 * btf_load_into_kernel()). Returns NULL on allocation failure or if
 * byte-swapping encounters a malformed type.
 */
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	struct btf_header *hdr = btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	/* fast path: cached representation of the right endianness */
	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	memcpy(p, hdr, hdr->hdr_len);
	if (swap_endian)
		btf_bswap_hdr(p);
	p += hdr->hdr_len;

	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	memcpy(p, btf_strs_data(btf), hdr->str_len);
	p += hdr->str_len;

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}
1417 
1418 const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
1419 {
1420 	struct btf *btf = (struct btf *)btf_ro;
1421 	__u32 data_sz;
1422 	void *data;
1423 
1424 	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
1425 	if (!data)
1426 		return errno = ENOMEM, NULL;
1427 
1428 	btf->raw_size = data_sz;
1429 	if (btf->swapped_endian)
1430 		btf->raw_data_swapped = data;
1431 	else
1432 		btf->raw_data = data;
1433 	*size = data_sz;
1434 	return data;
1435 }
1436 
1437 __attribute__((alias("btf__raw_data")))
1438 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1439 
1440 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1441 {
1442 	if (offset < btf->start_str_off)
1443 		return btf__str_by_offset(btf->base_btf, offset);
1444 	else if (offset - btf->start_str_off < btf->hdr->str_len)
1445 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1446 	else
1447 		return errno = EINVAL, NULL;
1448 }
1449 
/* Public API: resolve a name offset; thin wrapper around
 * btf__str_by_offset().
 */
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	return btf__str_by_offset(btf, offset);
}
1454 
/* Fetch raw BTF data from the kernel for *btf_fd* and instantiate a struct
 * btf from it (split on top of *base_btf*, if non-NULL). Starts with a 4KiB
 * buffer and retries once with the exact size the kernel reports.
 * Returns new struct btf or ERR_PTR(-err).
 */
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		/* first call only told us the real size; grow and re-fetch */
		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	if (err || btf_info.btf_size > last_size) {
		/* NOTE(review): on err this relies on errno still holding the
		 * bpf_obj_get_info_by_fd() failure; keep errno-clobbering
		 * calls out of the path between the syscall and here.
		 */
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	/* btf_new() copies the data, so ptr can be freed unconditionally */
	btf = btf_new(ptr, btf_info.btf_size, base_btf);

exit_free:
	free(ptr);
	return btf;
}
1508 
1509 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1510 {
1511 	struct btf *btf;
1512 	int btf_fd;
1513 
1514 	btf_fd = bpf_btf_get_fd_by_id(id);
1515 	if (btf_fd < 0)
1516 		return libbpf_err_ptr(-errno);
1517 
1518 	btf = btf_get_from_fd(btf_fd, base_btf);
1519 	close(btf_fd);
1520 
1521 	return libbpf_ptr(btf);
1522 }
1523 
/* Public API: load standalone (non-split) BTF from the kernel by its ID. */
struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	return btf__load_from_kernel_by_id_split(id, NULL);
}
1528 
1529 static void btf_invalidate_raw_data(struct btf *btf)
1530 {
1531 	if (btf->raw_data) {
1532 		free(btf->raw_data);
1533 		btf->raw_data = NULL;
1534 	}
1535 	if (btf->raw_data_swapped) {
1536 		free(btf->raw_data_swapped);
1537 		btf->raw_data_swapped = NULL;
1538 	}
1539 }
1540 
/* Ensure BTF is ready to be modified (by splitting into a three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any.
 *
 * All new state is built on the side first; btf fields are only updated
 * once everything succeeded, so on failure btf is left untouched.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	btf->strs_data = NULL;
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	/* NOTE(review): on the IS_ERR(set) path this passes an ERR_PTR to
	 * strset__free(); assumes strset__free() tolerates that — confirm.
	 */
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}
1598 
1599 /* Find an offset in BTF string section that corresponds to a given string *s*.
1600  * Returns:
1601  *   - >0 offset into string section, if string is found;
1602  *   - -ENOENT, if string is not in the string section;
1603  *   - <0, on any other error.
1604  */
1605 int btf__find_str(struct btf *btf, const char *s)
1606 {
1607 	int off;
1608 
1609 	if (btf->base_btf) {
1610 		off = btf__find_str(btf->base_btf, s);
1611 		if (off != -ENOENT)
1612 			return off;
1613 	}
1614 
1615 	/* BTF needs to be in a modifiable state to build string lookup index */
1616 	if (btf_ensure_modifiable(btf))
1617 		return libbpf_err(-ENOMEM);
1618 
1619 	off = strset__find_str(btf->strs_set, s);
1620 	if (off < 0)
1621 		return libbpf_err(off);
1622 
1623 	return btf->start_str_off + off;
1624 }
1625 
1626 /* Add a string s to the BTF string section.
1627  * Returns:
1628  *   - > 0 offset into string section, on success;
1629  *   - < 0, on error.
1630  */
1631 int btf__add_str(struct btf *btf, const char *s)
1632 {
1633 	int off;
1634 
1635 	if (btf->base_btf) {
1636 		off = btf__find_str(btf->base_btf, s);
1637 		if (off != -ENOENT)
1638 			return off;
1639 	}
1640 
1641 	if (btf_ensure_modifiable(btf))
1642 		return libbpf_err(-ENOMEM);
1643 
1644 	off = strset__add_str(btf->strs_set, s);
1645 	if (off < 0)
1646 		return libbpf_err(off);
1647 
1648 	btf->hdr->str_len = strset__data_size(btf->strs_set);
1649 
1650 	return btf->start_str_off + off;
1651 }
1652 
/* Reserve *add_sz* bytes at the end of the types data area, growing the
 * backing buffer if necessary. Returns pointer to the reserved space, or
 * NULL on allocation failure / size overflow.
 */
static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
			      btf->hdr->type_len, UINT_MAX, add_sz);
}
1658 
/* Bump the vlen (e.g., member/param count) encoded in t->info by one,
 * preserving the type's kind and kflag.
 */
static void btf_type_inc_vlen(struct btf_type *t)
{
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
}
1663 
/* Finalize a type already written into the types data area: record its
 * offset in the type index, advance header type/string section offsets by
 * *data_sz*, and return the new type's ID.
 */
static int btf_commit_type(struct btf *btf, int data_sz)
{
	int err;

	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
	if (err)
		return libbpf_err(err);

	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types++;
	/* type IDs are 1-based and offset by start_id for split BTF */
	return btf->start_id + btf->nr_types - 1;
}
1677 
/* State shared by the type-copying helpers (btf_rewrite_str and friends)
 * when transferring types and strings from one BTF object into another.
 */
struct btf_pipe {
	const struct btf *src;
	struct btf *dst;
	struct hashmap *str_off_map; /* map string offsets from src to dst */
};
1683 
1684 static int btf_rewrite_str(__u32 *str_off, void *ctx)
1685 {
1686 	struct btf_pipe *p = ctx;
1687 	long mapped_off;
1688 	int off, err;
1689 
1690 	if (!*str_off) /* nothing to do for empty strings */
1691 		return 0;
1692 
1693 	if (p->str_off_map &&
1694 	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
1695 		*str_off = mapped_off;
1696 		return 0;
1697 	}
1698 
1699 	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
1700 	if (off < 0)
1701 		return off;
1702 
1703 	/* Remember string mapping from src to dst.  It avoids
1704 	 * performing expensive string comparisons.
1705 	 */
1706 	if (p->str_off_map) {
1707 		err = hashmap__append(p->str_off_map, *str_off, off);
1708 		if (err)
1709 			return err;
1710 	}
1711 
1712 	*str_off = off;
1713 	return 0;
1714 }
1715 
/* Public API: copy a single type *src_type* from *src_btf* into *btf*.
 * Referenced strings are added to *btf*'s string section with offsets
 * rewritten; referenced type IDs are copied as-is (no ID remapping here).
 * Returns new type's ID on success, <0 on error.
 */
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	struct btf_type *t;
	int sz, err;

	sz = btf_type_size(src_type);
	if (sz < 0)
		return libbpf_err(sz);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	memcpy(t, src_type, sz);

	err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
	if (err)
		return libbpf_err(err);

	return btf_commit_type(btf, sz);
}
1742 
1743 static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
1744 {
1745 	struct btf *btf = ctx;
1746 
1747 	if (!*type_id) /* nothing to do for VOID references */
1748 		return 0;
1749 
1750 	/* we haven't updated btf's type count yet, so
1751 	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
1752 	 * add to all newly added BTF types
1753 	 */
1754 	*type_id += btf->start_id + btf->nr_types - 1;
1755 	return 0;
1756 }
1757 
1758 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1759 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1760 
/* Public API: append all types from *src_btf* into *btf*, remapping type IDs
 * and deduplicating/remapping string offsets along the way. Returns the type
 * ID that src_btf's first type received in *btf* (all appended types keep
 * their relative order). On error, partially added string data is rolled
 * back and *btf* is left effectively unmodified.
 */
int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__type_cnt(src_btf) - 1;

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
		if (err)
			goto err_out;

		/* remap all type IDs referenced from this BTF type */
		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
		if (err)
			goto err_out;

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying.  But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and made them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}
1860 
1861 /*
1862  * Append new BTF_KIND_INT type with:
1863  *   - *name* - non-empty, non-NULL type name;
1864  *   - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
1865  *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
1866  * Returns:
1867  *   - >0, type ID of newly added BTF type;
1868  *   - <0, on error.
1869  */
1870 int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
1871 {
1872 	struct btf_type *t;
1873 	int sz, name_off;
1874 
1875 	/* non-empty name */
1876 	if (!name || !name[0])
1877 		return libbpf_err(-EINVAL);
1878 	/* byte_sz must be power of 2 */
1879 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
1880 		return libbpf_err(-EINVAL);
1881 	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
1882 		return libbpf_err(-EINVAL);
1883 
1884 	/* deconstruct BTF, if necessary, and invalidate raw_data */
1885 	if (btf_ensure_modifiable(btf))
1886 		return libbpf_err(-ENOMEM);
1887 
1888 	sz = sizeof(struct btf_type) + sizeof(int);
1889 	t = btf_add_type_mem(btf, sz);
1890 	if (!t)
1891 		return libbpf_err(-ENOMEM);
1892 
1893 	/* if something goes wrong later, we might end up with an extra string,
1894 	 * but that shouldn't be a problem, because BTF can't be constructed
1895 	 * completely anyway and will most probably be just discarded
1896 	 */
1897 	name_off = btf__add_str(btf, name);
1898 	if (name_off < 0)
1899 		return name_off;
1900 
1901 	t->name_off = name_off;
1902 	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
1903 	t->size = byte_sz;
1904 	/* set INT info, we don't allow setting legacy bit offset/size */
1905 	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
1906 
1907 	return btf_commit_type(btf, sz);
1908 }
1909 
1910 /*
1911  * Append new BTF_KIND_FLOAT type with:
1912  *   - *name* - non-empty, non-NULL type name;
1913  *   - *sz* - size of the type, in bytes;
1914  * Returns:
1915  *   - >0, type ID of newly added BTF type;
1916  *   - <0, on error.
1917  */
1918 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
1919 {
1920 	struct btf_type *t;
1921 	int sz, name_off;
1922 
1923 	/* non-empty name */
1924 	if (!name || !name[0])
1925 		return libbpf_err(-EINVAL);
1926 
1927 	/* byte_sz must be one of the explicitly allowed values */
1928 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
1929 	    byte_sz != 16)
1930 		return libbpf_err(-EINVAL);
1931 
1932 	if (btf_ensure_modifiable(btf))
1933 		return libbpf_err(-ENOMEM);
1934 
1935 	sz = sizeof(struct btf_type);
1936 	t = btf_add_type_mem(btf, sz);
1937 	if (!t)
1938 		return libbpf_err(-ENOMEM);
1939 
1940 	name_off = btf__add_str(btf, name);
1941 	if (name_off < 0)
1942 		return name_off;
1943 
1944 	t->name_off = name_off;
1945 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
1946 	t->size = byte_sz;
1947 
1948 	return btf_commit_type(btf, sz);
1949 }
1950 
1951 /* it's completely legal to append BTF types with type IDs pointing forward to
1952  * types that haven't been appended yet, so we only make sure that id looks
1953  * sane, we can't guarantee that ID will always be valid
1954  */
1955 static int validate_type_id(int id)
1956 {
1957 	if (id < 0 || id > BTF_MAX_NR_TYPES)
1958 		return -EINVAL;
1959 	return 0;
1960 }
1961 
1962 /* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
1963 static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
1964 {
1965 	struct btf_type *t;
1966 	int sz, name_off = 0;
1967 
1968 	if (validate_type_id(ref_type_id))
1969 		return libbpf_err(-EINVAL);
1970 
1971 	if (btf_ensure_modifiable(btf))
1972 		return libbpf_err(-ENOMEM);
1973 
1974 	sz = sizeof(struct btf_type);
1975 	t = btf_add_type_mem(btf, sz);
1976 	if (!t)
1977 		return libbpf_err(-ENOMEM);
1978 
1979 	if (name && name[0]) {
1980 		name_off = btf__add_str(btf, name);
1981 		if (name_off < 0)
1982 			return name_off;
1983 	}
1984 
1985 	t->name_off = name_off;
1986 	t->info = btf_type_info(kind, 0, 0);
1987 	t->type = ref_type_id;
1988 
1989 	return btf_commit_type(btf, sz);
1990 }
1991 
/*
 * Append new BTF_KIND_PTR type with:
 *   - *ref_type_id* - referenced type ID, it might not exist yet;
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 *
 * Thin wrapper around btf_add_ref_kind() with an anonymous name.
 */
int btf__add_ptr(struct btf *btf, int ref_type_id)
{
	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
}
2003 
2004 /*
2005  * Append new BTF_KIND_ARRAY type with:
2006  *   - *index_type_id* - type ID of the type describing array index;
2007  *   - *elem_type_id* - type ID of the type describing array element;
2008  *   - *nr_elems* - the size of the array;
2009  * Returns:
2010  *   - >0, type ID of newly added BTF type;
2011  *   - <0, on error.
2012  */
2013 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2014 {
2015 	struct btf_type *t;
2016 	struct btf_array *a;
2017 	int sz;
2018 
2019 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2020 		return libbpf_err(-EINVAL);
2021 
2022 	if (btf_ensure_modifiable(btf))
2023 		return libbpf_err(-ENOMEM);
2024 
2025 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2026 	t = btf_add_type_mem(btf, sz);
2027 	if (!t)
2028 		return libbpf_err(-ENOMEM);
2029 
2030 	t->name_off = 0;
2031 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2032 	t->size = 0;
2033 
2034 	a = btf_array(t);
2035 	a->type = elem_type_id;
2036 	a->index_type = index_type_id;
2037 	a->nelems = nr_elems;
2038 
2039 	return btf_commit_type(btf, sz);
2040 }
2041 
2042 /* generic STRUCT/UNION append function */
2043 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2044 {
2045 	struct btf_type *t;
2046 	int sz, name_off = 0;
2047 
2048 	if (btf_ensure_modifiable(btf))
2049 		return libbpf_err(-ENOMEM);
2050 
2051 	sz = sizeof(struct btf_type);
2052 	t = btf_add_type_mem(btf, sz);
2053 	if (!t)
2054 		return libbpf_err(-ENOMEM);
2055 
2056 	if (name && name[0]) {
2057 		name_off = btf__add_str(btf, name);
2058 		if (name_off < 0)
2059 			return name_off;
2060 	}
2061 
2062 	/* start out with vlen=0 and no kflag; this will be adjusted when
2063 	 * adding each member
2064 	 */
2065 	t->name_off = name_off;
2066 	t->info = btf_type_info(kind, 0, 0);
2067 	t->size = bytes_sz;
2068 
2069 	return btf_commit_type(btf, sz);
2070 }
2071 
/*
 * Append new BTF_KIND_STRUCT type with:
 *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
 *   - *byte_sz* - size of the struct, in bytes;
 *
 * Struct initially has no fields in it. Fields can be added by
 * btf__add_field() right after btf__add_struct() succeeds.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
{
	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
}
2088 
/*
 * Append new BTF_KIND_UNION type with:
 *   - *name* - name of the union, can be NULL or empty for anonymous union;
 *   - *byte_sz* - size of the union, in bytes;
 *
 * Union initially has no fields in it. Fields can be added by
 * btf__add_field() right after btf__add_union() succeeds. All fields
 * should have *bit_offset* of 0.
 *
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
{
	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
}
2106 
/* Return a modifiable pointer to the most recently added type in *btf*. */
static struct btf_type *btf_last_type(struct btf *btf)
{
	return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
}
2111 
2112 /*
2113  * Append new field for the current STRUCT/UNION type with:
2114  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2115  *   - *type_id* - type ID for the type describing field type;
2116  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2117  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2118  * Returns:
2119  *   -  0, on success;
2120  *   - <0, on error.
2121  */
2122 int btf__add_field(struct btf *btf, const char *name, int type_id,
2123 		   __u32 bit_offset, __u32 bit_size)
2124 {
2125 	struct btf_type *t;
2126 	struct btf_member *m;
2127 	bool is_bitfield;
2128 	int sz, name_off = 0;
2129 
2130 	/* last type should be union/struct */
2131 	if (btf->nr_types == 0)
2132 		return libbpf_err(-EINVAL);
2133 	t = btf_last_type(btf);
2134 	if (!btf_is_composite(t))
2135 		return libbpf_err(-EINVAL);
2136 
2137 	if (validate_type_id(type_id))
2138 		return libbpf_err(-EINVAL);
2139 	/* best-effort bit field offset/size enforcement */
2140 	is_bitfield = bit_size || (bit_offset % 8 != 0);
2141 	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
2142 		return libbpf_err(-EINVAL);
2143 
2144 	/* only offset 0 is allowed for unions */
2145 	if (btf_is_union(t) && bit_offset)
2146 		return libbpf_err(-EINVAL);
2147 
2148 	/* decompose and invalidate raw data */
2149 	if (btf_ensure_modifiable(btf))
2150 		return libbpf_err(-ENOMEM);
2151 
2152 	sz = sizeof(struct btf_member);
2153 	m = btf_add_type_mem(btf, sz);
2154 	if (!m)
2155 		return libbpf_err(-ENOMEM);
2156 
2157 	if (name && name[0]) {
2158 		name_off = btf__add_str(btf, name);
2159 		if (name_off < 0)
2160 			return name_off;
2161 	}
2162 
2163 	m->name_off = name_off;
2164 	m->type = type_id;
2165 	m->offset = bit_offset | (bit_size << 24);
2166 
2167 	/* btf_add_type_mem can invalidate t pointer */
2168 	t = btf_last_type(btf);
2169 	/* update parent type's vlen and kflag */
2170 	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
2171 
2172 	btf->hdr->type_len += sz;
2173 	btf->hdr->str_off += sz;
2174 	return 0;
2175 }
2176 
2177 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2178 			       bool is_signed, __u8 kind)
2179 {
2180 	struct btf_type *t;
2181 	int sz, name_off = 0;
2182 
2183 	/* byte_sz must be power of 2 */
2184 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2185 		return libbpf_err(-EINVAL);
2186 
2187 	if (btf_ensure_modifiable(btf))
2188 		return libbpf_err(-ENOMEM);
2189 
2190 	sz = sizeof(struct btf_type);
2191 	t = btf_add_type_mem(btf, sz);
2192 	if (!t)
2193 		return libbpf_err(-ENOMEM);
2194 
2195 	if (name && name[0]) {
2196 		name_off = btf__add_str(btf, name);
2197 		if (name_off < 0)
2198 			return name_off;
2199 	}
2200 
2201 	/* start out with vlen=0; it will be adjusted when adding enum values */
2202 	t->name_off = name_off;
2203 	t->info = btf_type_info(kind, 0, is_signed);
2204 	t->size = byte_sz;
2205 
2206 	return btf_commit_type(btf, sz);
2207 }
2208 
2209 /*
2210  * Append new BTF_KIND_ENUM type with:
2211  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2212  *   - *byte_sz* - size of the enum, in bytes.
2213  *
2214  * Enum initially has no enum values in it (and corresponds to enum forward
2215  * declaration). Enumerator values can be added by btf__add_enum_value()
2216  * immediately after btf__add_enum() succeeds.
2217  *
2218  * Returns:
2219  *   - >0, type ID of newly added BTF type;
2220  *   - <0, on error.
2221  */
2222 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2223 {
2224 	/*
2225 	 * set the signedness to be unsigned, it will change to signed
2226 	 * if any later enumerator is negative.
2227 	 */
2228 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2229 }
2230 
2231 /*
2232  * Append new enum value for the current ENUM type with:
2233  *   - *name* - name of the enumerator value, can't be NULL or empty;
2234  *   - *value* - integer value corresponding to enum value *name*;
2235  * Returns:
2236  *   -  0, on success;
2237  *   - <0, on error.
2238  */
2239 int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
2240 {
2241 	struct btf_type *t;
2242 	struct btf_enum *v;
2243 	int sz, name_off;
2244 
2245 	/* last type should be BTF_KIND_ENUM */
2246 	if (btf->nr_types == 0)
2247 		return libbpf_err(-EINVAL);
2248 	t = btf_last_type(btf);
2249 	if (!btf_is_enum(t))
2250 		return libbpf_err(-EINVAL);
2251 
2252 	/* non-empty name */
2253 	if (!name || !name[0])
2254 		return libbpf_err(-EINVAL);
2255 	if (value < INT_MIN || value > UINT_MAX)
2256 		return libbpf_err(-E2BIG);
2257 
2258 	/* decompose and invalidate raw data */
2259 	if (btf_ensure_modifiable(btf))
2260 		return libbpf_err(-ENOMEM);
2261 
2262 	sz = sizeof(struct btf_enum);
2263 	v = btf_add_type_mem(btf, sz);
2264 	if (!v)
2265 		return libbpf_err(-ENOMEM);
2266 
2267 	name_off = btf__add_str(btf, name);
2268 	if (name_off < 0)
2269 		return name_off;
2270 
2271 	v->name_off = name_off;
2272 	v->val = value;
2273 
2274 	/* update parent type's vlen */
2275 	t = btf_last_type(btf);
2276 	btf_type_inc_vlen(t);
2277 
2278 	/* if negative value, set signedness to signed */
2279 	if (value < 0)
2280 		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
2281 
2282 	btf->hdr->type_len += sz;
2283 	btf->hdr->str_off += sz;
2284 	return 0;
2285 }
2286 
2287 /*
2288  * Append new BTF_KIND_ENUM64 type with:
2289  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2290  *   - *byte_sz* - size of the enum, in bytes.
2291  *   - *is_signed* - whether the enum values are signed or not;
2292  *
2293  * Enum initially has no enum values in it (and corresponds to enum forward
2294  * declaration). Enumerator values can be added by btf__add_enum64_value()
2295  * immediately after btf__add_enum64() succeeds.
2296  *
2297  * Returns:
2298  *   - >0, type ID of newly added BTF type;
2299  *   - <0, on error.
2300  */
2301 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2302 		    bool is_signed)
2303 {
2304 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2305 				   BTF_KIND_ENUM64);
2306 }
2307 
2308 /*
2309  * Append new enum value for the current ENUM64 type with:
2310  *   - *name* - name of the enumerator value, can't be NULL or empty;
2311  *   - *value* - integer value corresponding to enum value *name*;
2312  * Returns:
2313  *   -  0, on success;
2314  *   - <0, on error.
2315  */
2316 int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
2317 {
2318 	struct btf_enum64 *v;
2319 	struct btf_type *t;
2320 	int sz, name_off;
2321 
2322 	/* last type should be BTF_KIND_ENUM64 */
2323 	if (btf->nr_types == 0)
2324 		return libbpf_err(-EINVAL);
2325 	t = btf_last_type(btf);
2326 	if (!btf_is_enum64(t))
2327 		return libbpf_err(-EINVAL);
2328 
2329 	/* non-empty name */
2330 	if (!name || !name[0])
2331 		return libbpf_err(-EINVAL);
2332 
2333 	/* decompose and invalidate raw data */
2334 	if (btf_ensure_modifiable(btf))
2335 		return libbpf_err(-ENOMEM);
2336 
2337 	sz = sizeof(struct btf_enum64);
2338 	v = btf_add_type_mem(btf, sz);
2339 	if (!v)
2340 		return libbpf_err(-ENOMEM);
2341 
2342 	name_off = btf__add_str(btf, name);
2343 	if (name_off < 0)
2344 		return name_off;
2345 
2346 	v->name_off = name_off;
2347 	v->val_lo32 = (__u32)value;
2348 	v->val_hi32 = value >> 32;
2349 
2350 	/* update parent type's vlen */
2351 	t = btf_last_type(btf);
2352 	btf_type_inc_vlen(t);
2353 
2354 	btf->hdr->type_len += sz;
2355 	btf->hdr->str_off += sz;
2356 	return 0;
2357 }
2358 
2359 /*
2360  * Append new BTF_KIND_FWD type with:
2361  *   - *name*, non-empty/non-NULL name;
2362  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2363  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2364  * Returns:
2365  *   - >0, type ID of newly added BTF type;
2366  *   - <0, on error.
2367  */
2368 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2369 {
2370 	if (!name || !name[0])
2371 		return libbpf_err(-EINVAL);
2372 
2373 	switch (fwd_kind) {
2374 	case BTF_FWD_STRUCT:
2375 	case BTF_FWD_UNION: {
2376 		struct btf_type *t;
2377 		int id;
2378 
2379 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2380 		if (id <= 0)
2381 			return id;
2382 		t = btf_type_by_id(btf, id);
2383 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2384 		return id;
2385 	}
2386 	case BTF_FWD_ENUM:
2387 		/* enum forward in BTF currently is just an enum with no enum
2388 		 * values; we also assume a standard 4-byte size for it
2389 		 */
2390 		return btf__add_enum(btf, name, sizeof(int));
2391 	default:
2392 		return libbpf_err(-EINVAL);
2393 	}
2394 }
2395 
2396 /*
2397  * Append new BTF_KING_TYPEDEF type with:
2398  *   - *name*, non-empty/non-NULL name;
2399  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2400  * Returns:
2401  *   - >0, type ID of newly added BTF type;
2402  *   - <0, on error.
2403  */
2404 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2405 {
2406 	if (!name || !name[0])
2407 		return libbpf_err(-EINVAL);
2408 
2409 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2410 }
2411 
2412 /*
2413  * Append new BTF_KIND_VOLATILE type with:
2414  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2415  * Returns:
2416  *   - >0, type ID of newly added BTF type;
2417  *   - <0, on error.
2418  */
2419 int btf__add_volatile(struct btf *btf, int ref_type_id)
2420 {
2421 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2422 }
2423 
2424 /*
2425  * Append new BTF_KIND_CONST type with:
2426  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2427  * Returns:
2428  *   - >0, type ID of newly added BTF type;
2429  *   - <0, on error.
2430  */
2431 int btf__add_const(struct btf *btf, int ref_type_id)
2432 {
2433 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2434 }
2435 
2436 /*
2437  * Append new BTF_KIND_RESTRICT type with:
2438  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2439  * Returns:
2440  *   - >0, type ID of newly added BTF type;
2441  *   - <0, on error.
2442  */
2443 int btf__add_restrict(struct btf *btf, int ref_type_id)
2444 {
2445 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2446 }
2447 
2448 /*
2449  * Append new BTF_KIND_TYPE_TAG type with:
2450  *   - *value*, non-empty/non-NULL tag value;
2451  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2452  * Returns:
2453  *   - >0, type ID of newly added BTF type;
2454  *   - <0, on error.
2455  */
2456 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2457 {
2458 	if (!value || !value[0])
2459 		return libbpf_err(-EINVAL);
2460 
2461 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2462 }
2463 
2464 /*
2465  * Append new BTF_KIND_FUNC type with:
2466  *   - *name*, non-empty/non-NULL name;
2467  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2468  * Returns:
2469  *   - >0, type ID of newly added BTF type;
2470  *   - <0, on error.
2471  */
2472 int btf__add_func(struct btf *btf, const char *name,
2473 		  enum btf_func_linkage linkage, int proto_type_id)
2474 {
2475 	int id;
2476 
2477 	if (!name || !name[0])
2478 		return libbpf_err(-EINVAL);
2479 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2480 	    linkage != BTF_FUNC_EXTERN)
2481 		return libbpf_err(-EINVAL);
2482 
2483 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2484 	if (id > 0) {
2485 		struct btf_type *t = btf_type_by_id(btf, id);
2486 
2487 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2488 	}
2489 	return libbpf_err(id);
2490 }
2491 
2492 /*
2493  * Append new BTF_KIND_FUNC_PROTO with:
2494  *   - *ret_type_id* - type ID for return result of a function.
2495  *
2496  * Function prototype initially has no arguments, but they can be added by
2497  * btf__add_func_param() one by one, immediately after
2498  * btf__add_func_proto() succeeded.
2499  *
2500  * Returns:
2501  *   - >0, type ID of newly added BTF type;
2502  *   - <0, on error.
2503  */
2504 int btf__add_func_proto(struct btf *btf, int ret_type_id)
2505 {
2506 	struct btf_type *t;
2507 	int sz;
2508 
2509 	if (validate_type_id(ret_type_id))
2510 		return libbpf_err(-EINVAL);
2511 
2512 	if (btf_ensure_modifiable(btf))
2513 		return libbpf_err(-ENOMEM);
2514 
2515 	sz = sizeof(struct btf_type);
2516 	t = btf_add_type_mem(btf, sz);
2517 	if (!t)
2518 		return libbpf_err(-ENOMEM);
2519 
2520 	/* start out with vlen=0; this will be adjusted when adding enum
2521 	 * values, if necessary
2522 	 */
2523 	t->name_off = 0;
2524 	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
2525 	t->type = ret_type_id;
2526 
2527 	return btf_commit_type(btf, sz);
2528 }
2529 
2530 /*
2531  * Append new function parameter for current FUNC_PROTO type with:
2532  *   - *name* - parameter name, can be NULL or empty;
2533  *   - *type_id* - type ID describing the type of the parameter.
2534  * Returns:
2535  *   -  0, on success;
2536  *   - <0, on error.
2537  */
2538 int btf__add_func_param(struct btf *btf, const char *name, int type_id)
2539 {
2540 	struct btf_type *t;
2541 	struct btf_param *p;
2542 	int sz, name_off = 0;
2543 
2544 	if (validate_type_id(type_id))
2545 		return libbpf_err(-EINVAL);
2546 
2547 	/* last type should be BTF_KIND_FUNC_PROTO */
2548 	if (btf->nr_types == 0)
2549 		return libbpf_err(-EINVAL);
2550 	t = btf_last_type(btf);
2551 	if (!btf_is_func_proto(t))
2552 		return libbpf_err(-EINVAL);
2553 
2554 	/* decompose and invalidate raw data */
2555 	if (btf_ensure_modifiable(btf))
2556 		return libbpf_err(-ENOMEM);
2557 
2558 	sz = sizeof(struct btf_param);
2559 	p = btf_add_type_mem(btf, sz);
2560 	if (!p)
2561 		return libbpf_err(-ENOMEM);
2562 
2563 	if (name && name[0]) {
2564 		name_off = btf__add_str(btf, name);
2565 		if (name_off < 0)
2566 			return name_off;
2567 	}
2568 
2569 	p->name_off = name_off;
2570 	p->type = type_id;
2571 
2572 	/* update parent type's vlen */
2573 	t = btf_last_type(btf);
2574 	btf_type_inc_vlen(t);
2575 
2576 	btf->hdr->type_len += sz;
2577 	btf->hdr->str_off += sz;
2578 	return 0;
2579 }
2580 
2581 /*
2582  * Append new BTF_KIND_VAR type with:
2583  *   - *name* - non-empty/non-NULL name;
2584  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2585  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2586  *   - *type_id* - type ID of the type describing the type of the variable.
2587  * Returns:
2588  *   - >0, type ID of newly added BTF type;
2589  *   - <0, on error.
2590  */
2591 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2592 {
2593 	struct btf_type *t;
2594 	struct btf_var *v;
2595 	int sz, name_off;
2596 
2597 	/* non-empty name */
2598 	if (!name || !name[0])
2599 		return libbpf_err(-EINVAL);
2600 	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2601 	    linkage != BTF_VAR_GLOBAL_EXTERN)
2602 		return libbpf_err(-EINVAL);
2603 	if (validate_type_id(type_id))
2604 		return libbpf_err(-EINVAL);
2605 
2606 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2607 	if (btf_ensure_modifiable(btf))
2608 		return libbpf_err(-ENOMEM);
2609 
2610 	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2611 	t = btf_add_type_mem(btf, sz);
2612 	if (!t)
2613 		return libbpf_err(-ENOMEM);
2614 
2615 	name_off = btf__add_str(btf, name);
2616 	if (name_off < 0)
2617 		return name_off;
2618 
2619 	t->name_off = name_off;
2620 	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2621 	t->type = type_id;
2622 
2623 	v = btf_var(t);
2624 	v->linkage = linkage;
2625 
2626 	return btf_commit_type(btf, sz);
2627 }
2628 
2629 /*
2630  * Append new BTF_KIND_DATASEC type with:
2631  *   - *name* - non-empty/non-NULL name;
2632  *   - *byte_sz* - data section size, in bytes.
2633  *
2634  * Data section is initially empty. Variables info can be added with
2635  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2636  *
2637  * Returns:
2638  *   - >0, type ID of newly added BTF type;
2639  *   - <0, on error.
2640  */
2641 int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
2642 {
2643 	struct btf_type *t;
2644 	int sz, name_off;
2645 
2646 	/* non-empty name */
2647 	if (!name || !name[0])
2648 		return libbpf_err(-EINVAL);
2649 
2650 	if (btf_ensure_modifiable(btf))
2651 		return libbpf_err(-ENOMEM);
2652 
2653 	sz = sizeof(struct btf_type);
2654 	t = btf_add_type_mem(btf, sz);
2655 	if (!t)
2656 		return libbpf_err(-ENOMEM);
2657 
2658 	name_off = btf__add_str(btf, name);
2659 	if (name_off < 0)
2660 		return name_off;
2661 
2662 	/* start with vlen=0, which will be update as var_secinfos are added */
2663 	t->name_off = name_off;
2664 	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
2665 	t->size = byte_sz;
2666 
2667 	return btf_commit_type(btf, sz);
2668 }
2669 
2670 /*
2671  * Append new data section variable information entry for current DATASEC type:
2672  *   - *var_type_id* - type ID, describing type of the variable;
2673  *   - *offset* - variable offset within data section, in bytes;
2674  *   - *byte_sz* - variable size, in bytes.
2675  *
2676  * Returns:
2677  *   -  0, on success;
2678  *   - <0, on error.
2679  */
2680 int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
2681 {
2682 	struct btf_type *t;
2683 	struct btf_var_secinfo *v;
2684 	int sz;
2685 
2686 	/* last type should be BTF_KIND_DATASEC */
2687 	if (btf->nr_types == 0)
2688 		return libbpf_err(-EINVAL);
2689 	t = btf_last_type(btf);
2690 	if (!btf_is_datasec(t))
2691 		return libbpf_err(-EINVAL);
2692 
2693 	if (validate_type_id(var_type_id))
2694 		return libbpf_err(-EINVAL);
2695 
2696 	/* decompose and invalidate raw data */
2697 	if (btf_ensure_modifiable(btf))
2698 		return libbpf_err(-ENOMEM);
2699 
2700 	sz = sizeof(struct btf_var_secinfo);
2701 	v = btf_add_type_mem(btf, sz);
2702 	if (!v)
2703 		return libbpf_err(-ENOMEM);
2704 
2705 	v->type = var_type_id;
2706 	v->offset = offset;
2707 	v->size = byte_sz;
2708 
2709 	/* update parent type's vlen */
2710 	t = btf_last_type(btf);
2711 	btf_type_inc_vlen(t);
2712 
2713 	btf->hdr->type_len += sz;
2714 	btf->hdr->str_off += sz;
2715 	return 0;
2716 }
2717 
2718 /*
2719  * Append new BTF_KIND_DECL_TAG type with:
2720  *   - *value* - non-empty/non-NULL string;
2721  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2722  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2723  *     member or function argument index;
2724  * Returns:
2725  *   - >0, type ID of newly added BTF type;
2726  *   - <0, on error.
2727  */
2728 int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
2729 		 int component_idx)
2730 {
2731 	struct btf_type *t;
2732 	int sz, value_off;
2733 
2734 	if (!value || !value[0] || component_idx < -1)
2735 		return libbpf_err(-EINVAL);
2736 
2737 	if (validate_type_id(ref_type_id))
2738 		return libbpf_err(-EINVAL);
2739 
2740 	if (btf_ensure_modifiable(btf))
2741 		return libbpf_err(-ENOMEM);
2742 
2743 	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
2744 	t = btf_add_type_mem(btf, sz);
2745 	if (!t)
2746 		return libbpf_err(-ENOMEM);
2747 
2748 	value_off = btf__add_str(btf, value);
2749 	if (value_off < 0)
2750 		return value_off;
2751 
2752 	t->name_off = value_off;
2753 	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
2754 	t->type = ref_type_id;
2755 	btf_decl_tag(t)->component_idx = component_idx;
2756 
2757 	return btf_commit_type(btf, sz);
2758 }
2759 
/* Description of one metadata section (func_info/line_info/core_relo)
 * inside a .BTF.ext blob, consumed by btf_ext_setup_info().
 */
struct btf_ext_sec_setup_param {
	__u32 off;			/* section offset, relative to end of .BTF.ext header */
	__u32 len;			/* section length, in bytes */
	__u32 min_rec_size;		/* minimal acceptable per-record size */
	struct btf_ext_info *ext_info;	/* destination info filled in on success */
	const char *desc;		/* human-readable section name for log messages */
};
2767 
2768 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2769 			      struct btf_ext_sec_setup_param *ext_sec)
2770 {
2771 	const struct btf_ext_info_sec *sinfo;
2772 	struct btf_ext_info *ext_info;
2773 	__u32 info_left, record_size;
2774 	size_t sec_cnt = 0;
2775 	/* The start of the info sec (including the __u32 record_size). */
2776 	void *info;
2777 
2778 	if (ext_sec->len == 0)
2779 		return 0;
2780 
2781 	if (ext_sec->off & 0x03) {
2782 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2783 		     ext_sec->desc);
2784 		return -EINVAL;
2785 	}
2786 
2787 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2788 	info_left = ext_sec->len;
2789 
2790 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2791 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2792 			 ext_sec->desc, ext_sec->off, ext_sec->len);
2793 		return -EINVAL;
2794 	}
2795 
2796 	/* At least a record size */
2797 	if (info_left < sizeof(__u32)) {
2798 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2799 		return -EINVAL;
2800 	}
2801 
2802 	/* The record size needs to meet the minimum standard */
2803 	record_size = *(__u32 *)info;
2804 	if (record_size < ext_sec->min_rec_size ||
2805 	    record_size & 0x03) {
2806 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2807 			 ext_sec->desc, record_size);
2808 		return -EINVAL;
2809 	}
2810 
2811 	sinfo = info + sizeof(__u32);
2812 	info_left -= sizeof(__u32);
2813 
2814 	/* If no records, return failure now so .BTF.ext won't be used. */
2815 	if (!info_left) {
2816 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
2817 		return -EINVAL;
2818 	}
2819 
2820 	while (info_left) {
2821 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2822 		__u64 total_record_size;
2823 		__u32 num_records;
2824 
2825 		if (info_left < sec_hdrlen) {
2826 			pr_debug("%s section header is not found in .BTF.ext\n",
2827 			     ext_sec->desc);
2828 			return -EINVAL;
2829 		}
2830 
2831 		num_records = sinfo->num_info;
2832 		if (num_records == 0) {
2833 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2834 			     ext_sec->desc);
2835 			return -EINVAL;
2836 		}
2837 
2838 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2839 		if (info_left < total_record_size) {
2840 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2841 			     ext_sec->desc);
2842 			return -EINVAL;
2843 		}
2844 
2845 		info_left -= total_record_size;
2846 		sinfo = (void *)sinfo + total_record_size;
2847 		sec_cnt++;
2848 	}
2849 
2850 	ext_info = ext_sec->ext_info;
2851 	ext_info->len = ext_sec->len - sizeof(__u32);
2852 	ext_info->rec_size = record_size;
2853 	ext_info->info = info + sizeof(__u32);
2854 	ext_info->sec_cnt = sec_cnt;
2855 
2856 	return 0;
2857 }
2858 
2859 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
2860 {
2861 	struct btf_ext_sec_setup_param param = {
2862 		.off = btf_ext->hdr->func_info_off,
2863 		.len = btf_ext->hdr->func_info_len,
2864 		.min_rec_size = sizeof(struct bpf_func_info_min),
2865 		.ext_info = &btf_ext->func_info,
2866 		.desc = "func_info"
2867 	};
2868 
2869 	return btf_ext_setup_info(btf_ext, &param);
2870 }
2871 
2872 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
2873 {
2874 	struct btf_ext_sec_setup_param param = {
2875 		.off = btf_ext->hdr->line_info_off,
2876 		.len = btf_ext->hdr->line_info_len,
2877 		.min_rec_size = sizeof(struct bpf_line_info_min),
2878 		.ext_info = &btf_ext->line_info,
2879 		.desc = "line_info",
2880 	};
2881 
2882 	return btf_ext_setup_info(btf_ext, &param);
2883 }
2884 
2885 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
2886 {
2887 	struct btf_ext_sec_setup_param param = {
2888 		.off = btf_ext->hdr->core_relo_off,
2889 		.len = btf_ext->hdr->core_relo_len,
2890 		.min_rec_size = sizeof(struct bpf_core_relo),
2891 		.ext_info = &btf_ext->core_relo_info,
2892 		.desc = "core_relo",
2893 	};
2894 
2895 	return btf_ext_setup_info(btf_ext, &param);
2896 }
2897 
2898 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
2899 {
2900 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
2901 
2902 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
2903 	    data_size < hdr->hdr_len) {
2904 		pr_debug("BTF.ext header not found");
2905 		return -EINVAL;
2906 	}
2907 
2908 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
2909 		pr_warn("BTF.ext in non-native endianness is not supported\n");
2910 		return -ENOTSUP;
2911 	} else if (hdr->magic != BTF_MAGIC) {
2912 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
2913 		return -EINVAL;
2914 	}
2915 
2916 	if (hdr->version != BTF_VERSION) {
2917 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
2918 		return -ENOTSUP;
2919 	}
2920 
2921 	if (hdr->flags) {
2922 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
2923 		return -ENOTSUP;
2924 	}
2925 
2926 	if (data_size == hdr->hdr_len) {
2927 		pr_debug("BTF.ext has no data\n");
2928 		return -EINVAL;
2929 	}
2930 
2931 	return 0;
2932 }
2933 
2934 void btf_ext__free(struct btf_ext *btf_ext)
2935 {
2936 	if (IS_ERR_OR_NULL(btf_ext))
2937 		return;
2938 	free(btf_ext->func_info.sec_idxs);
2939 	free(btf_ext->line_info.sec_idxs);
2940 	free(btf_ext->core_relo_info.sec_idxs);
2941 	free(btf_ext->data);
2942 	free(btf_ext);
2943 }
2944 
/*
 * Parse raw .BTF.ext data into a newly allocated struct btf_ext. A private
 * copy of *data* is made, the header is validated, and func_info/line_info
 * (and, if present, CO-RE relocation) section views are set up over that
 * copy. On failure, all partially-constructed state is freed and the
 * error is reported via libbpf_err_ptr().
 */
struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
{
	struct btf_ext *btf_ext;
	int err;

	btf_ext = calloc(1, sizeof(struct btf_ext));
	if (!btf_ext)
		return libbpf_err_ptr(-ENOMEM);

	/* keep a private copy, so caller's buffer may be freed/reused */
	btf_ext->data_size = size;
	btf_ext->data = malloc(size);
	if (!btf_ext->data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf_ext->data, data, size);

	err = btf_ext_parse_hdr(btf_ext->data, size);
	if (err)
		goto done;

	/* header must be big enough to cover func_info/line_info fields */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
		err = -EINVAL;
		goto done;
	}

	err = btf_ext_setup_func_info(btf_ext);
	if (err)
		goto done;

	err = btf_ext_setup_line_info(btf_ext);
	if (err)
		goto done;

	/* older .BTF.ext headers have no core_relo fields; that's valid */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		goto done; /* skip core relos parsing */

	err = btf_ext_setup_core_relos(btf_ext);
	if (err)
		goto done;

done:
	/* success path also lands here with err == 0 */
	if (err) {
		btf_ext__free(btf_ext);
		return libbpf_err_ptr(err);
	}

	return btf_ext;
}
2994 
2995 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
2996 {
2997 	*size = btf_ext->data_size;
2998 	return btf_ext->data;
2999 }
3000 
3001 struct btf_dedup;
3002 
3003 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3004 static void btf_dedup_free(struct btf_dedup *d);
3005 static int btf_dedup_prep(struct btf_dedup *d);
3006 static int btf_dedup_strings(struct btf_dedup *d);
3007 static int btf_dedup_prim_types(struct btf_dedup *d);
3008 static int btf_dedup_struct_types(struct btf_dedup *d);
3009 static int btf_dedup_ref_types(struct btf_dedup *d);
3010 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3011 static int btf_dedup_compact_types(struct btf_dedup *d);
3012 static int btf_dedup_remap_types(struct btf_dedup *d);
3013 
3014 /*
3015  * Deduplicate BTF types and strings.
3016  *
3017  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3018  * section with all BTF type descriptors and string data. It overwrites that
3019  * memory in-place with deduplicated types and strings without any loss of
3020  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3021  * is provided, all the strings referenced from .BTF.ext section are honored
3022  * and updated to point to the right offsets after deduplication.
3023  *
3024  * If function returns with error, type/string data might be garbled and should
3025  * be discarded.
3026  *
 * A more verbose and detailed description of both the problem btf_dedup is
 * solving and its solution can be found at:
3029  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3030  *
3031  * Problem description and justification
3032  * =====================================
3033  *
3034  * BTF type information is typically emitted either as a result of conversion
3035  * from DWARF to BTF or directly by compiler. In both cases, each compilation
3036  * unit contains information about a subset of all the types that are used
3037  * in an application. These subsets are frequently overlapping and contain a lot
3038  * of duplicated information when later concatenated together into a single
3039  * binary. This algorithm ensures that each unique type is represented by single
3040  * BTF type descriptor, greatly reducing resulting size of BTF data.
3041  *
3042  * Compilation unit isolation and subsequent duplication of data is not the only
3043  * problem. The same type hierarchy (e.g., struct and all the type that struct
3044  * references) in different compilation units can be represented in BTF to
3045  * various degrees of completeness (or, rather, incompleteness) due to
3046  * struct/union forward declarations.
3047  *
3048  * Let's take a look at an example, that we'll use to better understand the
3049  * problem (and solution). Suppose we have two compilation units, each using
3050  * same `struct S`, but each of them having incomplete type information about
3051  * struct's fields:
3052  *
3053  * // CU #1:
3054  * struct S;
3055  * struct A {
3056  *	int a;
3057  *	struct A* self;
3058  *	struct S* parent;
3059  * };
3060  * struct B;
3061  * struct S {
3062  *	struct A* a_ptr;
3063  *	struct B* b_ptr;
3064  * };
3065  *
3066  * // CU #2:
3067  * struct S;
3068  * struct A;
3069  * struct B {
3070  *	int b;
3071  *	struct B* self;
3072  *	struct S* parent;
3073  * };
3074  * struct S {
3075  *	struct A* a_ptr;
3076  *	struct B* b_ptr;
3077  * };
3078  *
3079  * In case of CU #1, BTF data will know only that `struct B` exist (but no
3080  * more), but will know the complete type information about `struct A`. While
3081  * for CU #2, it will know full type information about `struct B`, but will
 * only know about forward declaration of `struct A` (in BTF terms, it will
 * have `BTF_KIND_FWD` type descriptor with name `A`).
3084  *
3085  * This compilation unit isolation means that it's possible that there is no
3086  * single CU with complete type information describing structs `S`, `A`, and
3087  * `B`. Also, we might get tons of duplicated and redundant type information.
3088  *
3089  * Additional complication we need to keep in mind comes from the fact that
3090  * types, in general, can form graphs containing cycles, not just DAGs.
3091  *
3092  * While algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
3094  * E.g., in the example above with two compilation units having partial type
3095  * information for structs `A` and `B`, the output of algorithm will emit
3096  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3097  * (as well as type information for `int` and pointers), as if they were defined
3098  * in a single compilation unit as:
3099  *
3100  * struct A {
3101  *	int a;
3102  *	struct A* self;
3103  *	struct S* parent;
3104  * };
3105  * struct B {
3106  *	int b;
3107  *	struct B* self;
3108  *	struct S* parent;
3109  * };
3110  * struct S {
3111  *	struct A* a_ptr;
3112  *	struct B* b_ptr;
3113  * };
3114  *
3115  * Algorithm summary
3116  * =================
3117  *
3118  * Algorithm completes its work in 7 separate passes:
3119  *
3120  * 1. Strings deduplication.
3121  * 2. Primitive types deduplication (int, enum, fwd).
3122  * 3. Struct/union types deduplication.
3123  * 4. Resolve unambiguous forward declarations.
3124  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3125  *    protos, and const/volatile/restrict modifiers).
3126  * 6. Types compaction.
3127  * 7. Types remapping.
3128  *
3129  * Algorithm determines canonical type descriptor, which is a single
3130  * representative type for each truly unique type. This canonical type is the
3131  * one that will go into final deduplicated BTF type information. For
3132  * struct/unions, it is also the type that algorithm will merge additional type
3133  * information into (while resolving FWDs), as it discovers it from data in
3134  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3135  * that type is canonical, or to some other type, if that type is equivalent
3136  * and was chosen as canonical representative. This mapping is stored in
3137  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3138  * FWD type got resolved to.
3139  *
3140  * To facilitate fast discovery of canonical types, we also maintain canonical
3141  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3142  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3143  * that match that signature. With sufficiently good choice of type signature
3144  * hashing function, we can limit number of canonical types for each unique type
3145  * signature to a very small number, allowing to find canonical type for any
3146  * duplicated type very quickly.
3147  *
3148  * Struct/union deduplication is the most critical part and algorithm for
3149  * deduplicating structs/unions is described in greater details in comments for
3150  * `btf_dedup_is_equiv` function.
3151  */
3152 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3153 {
3154 	struct btf_dedup *d;
3155 	int err;
3156 
3157 	if (!OPTS_VALID(opts, btf_dedup_opts))
3158 		return libbpf_err(-EINVAL);
3159 
3160 	d = btf_dedup_new(btf, opts);
3161 	if (IS_ERR(d)) {
3162 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
3163 		return libbpf_err(-EINVAL);
3164 	}
3165 
3166 	if (btf_ensure_modifiable(btf)) {
3167 		err = -ENOMEM;
3168 		goto done;
3169 	}
3170 
3171 	err = btf_dedup_prep(d);
3172 	if (err) {
3173 		pr_debug("btf_dedup_prep failed:%d\n", err);
3174 		goto done;
3175 	}
3176 	err = btf_dedup_strings(d);
3177 	if (err < 0) {
3178 		pr_debug("btf_dedup_strings failed:%d\n", err);
3179 		goto done;
3180 	}
3181 	err = btf_dedup_prim_types(d);
3182 	if (err < 0) {
3183 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
3184 		goto done;
3185 	}
3186 	err = btf_dedup_struct_types(d);
3187 	if (err < 0) {
3188 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
3189 		goto done;
3190 	}
3191 	err = btf_dedup_resolve_fwds(d);
3192 	if (err < 0) {
3193 		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
3194 		goto done;
3195 	}
3196 	err = btf_dedup_ref_types(d);
3197 	if (err < 0) {
3198 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
3199 		goto done;
3200 	}
3201 	err = btf_dedup_compact_types(d);
3202 	if (err < 0) {
3203 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
3204 		goto done;
3205 	}
3206 	err = btf_dedup_remap_types(d);
3207 	if (err < 0) {
3208 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
3209 		goto done;
3210 	}
3211 
3212 done:
3213 	btf_dedup_free(d);
3214 	return libbpf_err(err);
3215 }
3216 
/* sentinel: type has not been visited/mapped by dedup algorithm yet */
#define BTF_UNPROCESSED_ID ((__u32)-1)
/* sentinel: type is currently being processed */
#define BTF_IN_PROGRESS_ID ((__u32)-2)

/* All state needed by a single btf__dedup() invocation. */
struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	/* list of type IDs that currently have entries in hypot_map; allows
	 * btf_dedup_clear_hypot_map() to reset only touched entries
	 */
	__u32 *hypot_list;
	/* number of used entries in hypot_list */
	size_t hypot_cnt;
	/* allocated capacity (in entries) of hypot_list */
	size_t hypot_cap;
	/* Whether hypothetical mapping, if successful, would need to adjust
	 * already canonicalized types (due to a new forward declaration to
	 * concrete type resolution). In such case, during split BTF dedup
	 * candidate type would still be considered as different, because base
	 * BTF is considered to be immutable.
	 */
	bool hypot_adjust_canon;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
	/* temporary strings deduplication state */
	struct strset *strs_set;
};
3255 
/* Fold @value into running hash @h using a simple 31-multiplier scheme. */
static long hash_combine(long h, long value)
{
	const long multiplier = 31;

	return h * multiplier + value;
}
3260 
/* Iterate over all dedup-table candidate entries sharing signature @hash;
 * @node is the struct hashmap_entry * iteration cursor.
 */
#define for_each_dedup_cand(d, node, hash) \
	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3263 
/* Register @type_id as a canonical candidate under signature @hash.
 * Returns 0 on success, negative error on allocation failure.
 */
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
	return hashmap__append(d->dedup_table, hash, type_id);
}
3268 
3269 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3270 				   __u32 from_id, __u32 to_id)
3271 {
3272 	if (d->hypot_cnt == d->hypot_cap) {
3273 		__u32 *new_list;
3274 
3275 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3276 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3277 		if (!new_list)
3278 			return -ENOMEM;
3279 		d->hypot_list = new_list;
3280 	}
3281 	d->hypot_list[d->hypot_cnt++] = from_id;
3282 	d->hypot_map[from_id] = to_id;
3283 	return 0;
3284 }
3285 
3286 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3287 {
3288 	int i;
3289 
3290 	for (i = 0; i < d->hypot_cnt; i++)
3291 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3292 	d->hypot_cnt = 0;
3293 	d->hypot_adjust_canon = false;
3294 }
3295 
/* Free all dedup state, including @d itself. Safe on a partially-initialized
 * @d (fields are zeroed by calloc() in btf_dedup_new()); pointers are NULLed
 * defensively before the final free().
 */
static void btf_dedup_free(struct btf_dedup *d)
{
	hashmap__free(d->dedup_table);
	d->dedup_table = NULL;

	free(d->map);
	d->map = NULL;

	free(d->hypot_map);
	d->hypot_map = NULL;

	free(d->hypot_list);
	d->hypot_list = NULL;

	free(d);
}
3312 
/* Keys stored in the dedup table are already signature hashes, so the
 * hashmap hash function is a plain identity.
 */
static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
	(void)ctx;
	return (size_t)key;
}
3317 
/* Degenerate hash that maps every key to bucket 0, forcing maximal
 * collisions (selected via the force_collisions dedup option).
 */
static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
	(void)key;
	(void)ctx;
	return 0;
}
3322 
/* Hashmap key equality: keys are plain integers (signature hashes). */
static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
	(void)ctx;
	if (k1 != k2)
		return false;
	return true;
}
3327 
/* Allocate and initialize dedup state for @btf with given @opts.
 * Returns ERR_PTR(-errno) on failure; on any partial-init error path all
 * already-allocated pieces are released via btf_dedup_free() (safe because
 * calloc() zeroed all fields up-front).
 */
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
	int i, err = 0, type_cnt;

	if (!d)
		return ERR_PTR(-ENOMEM);

	/* testing-only option: force all hashes into one bucket */
	if (OPTS_GET(opts, force_collisions, false))
		hash_fn = btf_dedup_collision_hash_fn;

	d->btf = btf;
	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);

	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(d->dedup_table)) {
		err = PTR_ERR(d->dedup_table);
		d->dedup_table = NULL;
		goto done;
	}

	/* map/hypot_map are sized for all type IDs, including base BTF ones */
	type_cnt = btf__type_cnt(btf);
	d->map = malloc(sizeof(__u32) * type_cnt);
	if (!d->map) {
		err = -ENOMEM;
		goto done;
	}
	/* special BTF "void" type is made canonical immediately */
	d->map[0] = 0;
	for (i = 1; i < type_cnt; i++) {
		struct btf_type *t = btf_type_by_id(d->btf, i);

		/* VAR and DATASEC are never deduped and are self-canonical */
		if (btf_is_var(t) || btf_is_datasec(t))
			d->map[i] = i;
		else
			d->map[i] = BTF_UNPROCESSED_ID;
	}

	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
	if (!d->hypot_map) {
		err = -ENOMEM;
		goto done;
	}
	for (i = 0; i < type_cnt; i++)
		d->hypot_map[i] = BTF_UNPROCESSED_ID;

done:
	if (err) {
		btf_dedup_free(d);
		return ERR_PTR(err);
	}

	return d;
}
3384 
3385 /*
3386  * Iterate over all possible places in .BTF and .BTF.ext that can reference
3387  * string and pass pointer to it to a provided callback `fn`.
3388  */
3389 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3390 {
3391 	int i, r;
3392 
3393 	for (i = 0; i < d->btf->nr_types; i++) {
3394 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3395 
3396 		r = btf_type_visit_str_offs(t, fn, ctx);
3397 		if (r)
3398 			return r;
3399 	}
3400 
3401 	if (!d->btf_ext)
3402 		return 0;
3403 
3404 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3405 	if (r)
3406 		return r;
3407 
3408 	return 0;
3409 }
3410 
3411 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3412 {
3413 	struct btf_dedup *d = ctx;
3414 	__u32 str_off = *str_off_ptr;
3415 	const char *s;
3416 	int off, err;
3417 
3418 	/* don't touch empty string or string in main BTF */
3419 	if (str_off == 0 || str_off < d->btf->start_str_off)
3420 		return 0;
3421 
3422 	s = btf__str_by_offset(d->btf, str_off);
3423 	if (d->btf->base_btf) {
3424 		err = btf__find_str(d->btf->base_btf, s);
3425 		if (err >= 0) {
3426 			*str_off_ptr = err;
3427 			return 0;
3428 		}
3429 		if (err != -ENOENT)
3430 			return err;
3431 	}
3432 
3433 	off = strset__add_str(d->strs_set, s);
3434 	if (off < 0)
3435 		return off;
3436 
3437 	*str_off_ptr = d->btf->start_str_off + off;
3438 	return 0;
3439 }
3440 
3441 /*
3442  * Dedup string and filter out those that are not referenced from either .BTF
3443  * or .BTF.ext (if provided) sections.
3444  *
3445  * This is done by building index of all strings in BTF's string section,
3446  * then iterating over all entities that can reference strings (e.g., type
3447  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3448  * strings as used. After that all used strings are deduped and compacted into
3449  * sequential blob of memory and new offsets are calculated. Then all the string
3450  * references are iterated again and rewritten using new offsets.
3451  */
static int btf_dedup_strings(struct btf_dedup *d)
{
	int err;

	/* idempotent: strings are deduped at most once per btf object */
	if (d->btf->strs_deduped)
		return 0;

	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
	if (IS_ERR(d->strs_set)) {
		err = PTR_ERR(d->strs_set);
		goto err_out;
	}

	if (!d->btf->base_btf) {
		/* insert empty string; we won't be looking it up during strings
		 * dedup, but it's good to have it for generic BTF string lookups
		 */
		err = strset__add_str(d->strs_set, "");
		if (err < 0)
			goto err_out;
	}

	/* remap string offsets */
	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
	if (err)
		goto err_out;

	/* replace BTF string data and hash with deduped ones; ownership of
	 * d->strs_set transfers to d->btf, so it is NULLed out here
	 */
	strset__free(d->btf->strs_set);
	d->btf->hdr->str_len = strset__data_size(d->strs_set);
	d->btf->strs_set = d->strs_set;
	d->strs_set = NULL;
	d->btf->strs_deduped = true;
	return 0;

err_out:
	strset__free(d->strs_set);
	d->strs_set = NULL;

	return err;
}
3493 
3494 static long btf_hash_common(struct btf_type *t)
3495 {
3496 	long h;
3497 
3498 	h = hash_combine(0, t->name_off);
3499 	h = hash_combine(h, t->info);
3500 	h = hash_combine(h, t->size);
3501 	return h;
3502 }
3503 
/* Field-by-field equality of the btf_type header shared by all kinds. */
static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
{
	if (t1->name_off != t2->name_off)
		return false;
	if (t1->info != t2->info)
		return false;
	return t1->size == t2->size;
}
3510 
3511 /* Calculate type signature hash of INT or TAG. */
3512 static long btf_hash_int_decl_tag(struct btf_type *t)
3513 {
3514 	__u32 info = *(__u32 *)(t + 1);
3515 	long h;
3516 
3517 	h = btf_hash_common(t);
3518 	h = hash_combine(h, info);
3519 	return h;
3520 }
3521 
3522 /* Check structural equality of two INTs or TAGs. */
3523 static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
3524 {
3525 	__u32 info1, info2;
3526 
3527 	if (!btf_equal_common(t1, t2))
3528 		return false;
3529 	info1 = *(__u32 *)(t1 + 1);
3530 	info2 = *(__u32 *)(t2 + 1);
3531 	return info1 == info2;
3532 }
3533 
3534 /* Calculate type signature hash of ENUM/ENUM64. */
3535 static long btf_hash_enum(struct btf_type *t)
3536 {
3537 	long h;
3538 
3539 	/* don't hash vlen, enum members and size to support enum fwd resolving */
3540 	h = hash_combine(0, t->name_off);
3541 	return h;
3542 }
3543 
3544 static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
3545 {
3546 	const struct btf_enum *m1, *m2;
3547 	__u16 vlen;
3548 	int i;
3549 
3550 	vlen = btf_vlen(t1);
3551 	m1 = btf_enum(t1);
3552 	m2 = btf_enum(t2);
3553 	for (i = 0; i < vlen; i++) {
3554 		if (m1->name_off != m2->name_off || m1->val != m2->val)
3555 			return false;
3556 		m1++;
3557 		m2++;
3558 	}
3559 	return true;
3560 }
3561 
3562 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3563 {
3564 	const struct btf_enum64 *m1, *m2;
3565 	__u16 vlen;
3566 	int i;
3567 
3568 	vlen = btf_vlen(t1);
3569 	m1 = btf_enum64(t1);
3570 	m2 = btf_enum64(t2);
3571 	for (i = 0; i < vlen; i++) {
3572 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3573 		    m1->val_hi32 != m2->val_hi32)
3574 			return false;
3575 		m1++;
3576 		m2++;
3577 	}
3578 	return true;
3579 }
3580 
3581 /* Check structural equality of two ENUMs or ENUM64s. */
3582 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
3583 {
3584 	if (!btf_equal_common(t1, t2))
3585 		return false;
3586 
3587 	/* t1 & t2 kinds are identical because of btf_equal_common */
3588 	if (btf_kind(t1) == BTF_KIND_ENUM)
3589 		return btf_equal_enum_members(t1, t2);
3590 	else
3591 		return btf_equal_enum64_members(t1, t2);
3592 }
3593 
/* An enum "forward declaration" is an ENUM/ENUM64 with zero members. */
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
	if (!btf_is_any_enum(t))
		return false;
	return btf_vlen(t) == 0;
}
3598 
3599 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
3600 {
3601 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
3602 		return btf_equal_enum(t1, t2);
3603 	/* At this point either t1 or t2 or both are forward declarations, thus:
3604 	 * - skip comparing vlen because it is zero for forward declarations;
3605 	 * - skip comparing size to allow enum forward declarations
3606 	 *   to be compatible with enum64 full declarations;
3607 	 * - skip comparing kind for the same reason.
3608 	 */
3609 	return t1->name_off == t2->name_off &&
3610 	       btf_is_any_enum(t1) && btf_is_any_enum(t2);
3611 }
3612 
3613 /*
3614  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3615  * as referenced type IDs equivalence is established separately during type
3616  * graph equivalence check algorithm.
3617  */
3618 static long btf_hash_struct(struct btf_type *t)
3619 {
3620 	const struct btf_member *member = btf_members(t);
3621 	__u32 vlen = btf_vlen(t);
3622 	long h = btf_hash_common(t);
3623 	int i;
3624 
3625 	for (i = 0; i < vlen; i++) {
3626 		h = hash_combine(h, member->name_off);
3627 		h = hash_combine(h, member->offset);
3628 		/* no hashing of referenced type ID, it can be unresolved yet */
3629 		member++;
3630 	}
3631 	return h;
3632 }
3633 
3634 /*
3635  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3636  * type IDs. This check is performed during type graph equivalence check and
3637  * referenced types equivalence is checked separately.
3638  */
3639 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3640 {
3641 	const struct btf_member *m1, *m2;
3642 	__u16 vlen;
3643 	int i;
3644 
3645 	if (!btf_equal_common(t1, t2))
3646 		return false;
3647 
3648 	vlen = btf_vlen(t1);
3649 	m1 = btf_members(t1);
3650 	m2 = btf_members(t2);
3651 	for (i = 0; i < vlen; i++) {
3652 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3653 			return false;
3654 		m1++;
3655 		m2++;
3656 	}
3657 	return true;
3658 }
3659 
3660 /*
3661  * Calculate type signature hash of ARRAY, including referenced type IDs,
3662  * under assumption that they were already resolved to canonical type IDs and
3663  * are not going to change.
3664  */
3665 static long btf_hash_array(struct btf_type *t)
3666 {
3667 	const struct btf_array *info = btf_array(t);
3668 	long h = btf_hash_common(t);
3669 
3670 	h = hash_combine(h, info->type);
3671 	h = hash_combine(h, info->index_type);
3672 	h = hash_combine(h, info->nelems);
3673 	return h;
3674 }
3675 
3676 /*
3677  * Check exact equality of two ARRAYs, taking into account referenced
3678  * type IDs, under assumption that they were already resolved to canonical
3679  * type IDs and are not going to change.
3680  * This function is called during reference types deduplication to compare
3681  * ARRAY to potential canonical representative.
3682  */
3683 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3684 {
3685 	const struct btf_array *info1, *info2;
3686 
3687 	if (!btf_equal_common(t1, t2))
3688 		return false;
3689 
3690 	info1 = btf_array(t1);
3691 	info2 = btf_array(t2);
3692 	return info1->type == info2->type &&
3693 	       info1->index_type == info2->index_type &&
3694 	       info1->nelems == info2->nelems;
3695 }
3696 
3697 /*
3698  * Check structural compatibility of two ARRAYs, ignoring referenced type
3699  * IDs. This check is performed during type graph equivalence check and
3700  * referenced types equivalence is checked separately.
3701  */
3702 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3703 {
3704 	if (!btf_equal_common(t1, t2))
3705 		return false;
3706 
3707 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
3708 }
3709 
3710 /*
3711  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3712  * under assumption that they were already resolved to canonical type IDs and
3713  * are not going to change.
3714  */
3715 static long btf_hash_fnproto(struct btf_type *t)
3716 {
3717 	const struct btf_param *member = btf_params(t);
3718 	__u16 vlen = btf_vlen(t);
3719 	long h = btf_hash_common(t);
3720 	int i;
3721 
3722 	for (i = 0; i < vlen; i++) {
3723 		h = hash_combine(h, member->name_off);
3724 		h = hash_combine(h, member->type);
3725 		member++;
3726 	}
3727 	return h;
3728 }
3729 
3730 /*
3731  * Check exact equality of two FUNC_PROTOs, taking into account referenced
3732  * type IDs, under assumption that they were already resolved to canonical
3733  * type IDs and are not going to change.
3734  * This function is called during reference types deduplication to compare
3735  * FUNC_PROTO to potential canonical representative.
3736  */
3737 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3738 {
3739 	const struct btf_param *m1, *m2;
3740 	__u16 vlen;
3741 	int i;
3742 
3743 	if (!btf_equal_common(t1, t2))
3744 		return false;
3745 
3746 	vlen = btf_vlen(t1);
3747 	m1 = btf_params(t1);
3748 	m2 = btf_params(t2);
3749 	for (i = 0; i < vlen; i++) {
3750 		if (m1->name_off != m2->name_off || m1->type != m2->type)
3751 			return false;
3752 		m1++;
3753 		m2++;
3754 	}
3755 	return true;
3756 }
3757 
3758 /*
3759  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3760  * IDs. This check is performed during type graph equivalence check and
3761  * referenced types equivalence is checked separately.
3762  */
3763 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3764 {
3765 	const struct btf_param *m1, *m2;
3766 	__u16 vlen;
3767 	int i;
3768 
3769 	/* skip return type ID */
3770 	if (t1->name_off != t2->name_off || t1->info != t2->info)
3771 		return false;
3772 
3773 	vlen = btf_vlen(t1);
3774 	m1 = btf_params(t1);
3775 	m2 = btf_params(t2);
3776 	for (i = 0; i < vlen; i++) {
3777 		if (m1->name_off != m2->name_off)
3778 			return false;
3779 		m1++;
3780 		m2++;
3781 	}
3782 	return true;
3783 }
3784 
/* Prepare split BTF for deduplication by calculating hashes of base BTF's
 * types and initializing the rest of the state (canonical type mapping) for
 * the fixed base BTF part.
 * No-op (returns 0) for non-split BTF. Returns negative error on failure.
 */
static int btf_dedup_prep(struct btf_dedup *d)
{
	struct btf_type *t;
	int type_id;
	long h;

	if (!d->btf->base_btf)
		return 0;

	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
		t = btf_type_by_id(d->btf, type_id);

		/* all base BTF types are self-canonical by definition */
		d->map[type_id] = type_id;

		/* hash each base type with the same function that the dedup
		 * passes use for that kind, so split BTF types can find base
		 * candidates through the dedup table
		 */
		switch (btf_kind(t)) {
		case BTF_KIND_VAR:
		case BTF_KIND_DATASEC:
			/* VAR and DATASEC are never hash/deduplicated */
			continue;
		case BTF_KIND_CONST:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_FWD:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_FLOAT:
		case BTF_KIND_TYPE_TAG:
			h = btf_hash_common(t);
			break;
		case BTF_KIND_INT:
		case BTF_KIND_DECL_TAG:
			h = btf_hash_int_decl_tag(t);
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			h = btf_hash_enum(t);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			h = btf_hash_struct(t);
			break;
		case BTF_KIND_ARRAY:
			h = btf_hash_array(t);
			break;
		case BTF_KIND_FUNC_PROTO:
			h = btf_hash_fnproto(t);
			break;
		default:
			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
			return -EINVAL;
		}
		if (btf_dedup_table_add(d, h, type_id))
			return -ENOMEM;
	}

	return 0;
}
3848 
3849 /*
3850  * Deduplicate primitive types, that can't reference other types, by calculating
3851  * their type signature hash and comparing them with any possible canonical
3852  * candidate. If no canonical candidate matches, type itself is marked as
3853  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
3854  */
3855 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
3856 {
3857 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
3858 	struct hashmap_entry *hash_entry;
3859 	struct btf_type *cand;
3860 	/* if we don't find equivalent type, then we are canonical */
3861 	__u32 new_id = type_id;
3862 	__u32 cand_id;
3863 	long h;
3864 
3865 	switch (btf_kind(t)) {
3866 	case BTF_KIND_CONST:
3867 	case BTF_KIND_VOLATILE:
3868 	case BTF_KIND_RESTRICT:
3869 	case BTF_KIND_PTR:
3870 	case BTF_KIND_TYPEDEF:
3871 	case BTF_KIND_ARRAY:
3872 	case BTF_KIND_STRUCT:
3873 	case BTF_KIND_UNION:
3874 	case BTF_KIND_FUNC:
3875 	case BTF_KIND_FUNC_PROTO:
3876 	case BTF_KIND_VAR:
3877 	case BTF_KIND_DATASEC:
3878 	case BTF_KIND_DECL_TAG:
3879 	case BTF_KIND_TYPE_TAG:
3880 		return 0;
3881 
3882 	case BTF_KIND_INT:
3883 		h = btf_hash_int_decl_tag(t);
3884 		for_each_dedup_cand(d, hash_entry, h) {
3885 			cand_id = hash_entry->value;
3886 			cand = btf_type_by_id(d->btf, cand_id);
3887 			if (btf_equal_int_tag(t, cand)) {
3888 				new_id = cand_id;
3889 				break;
3890 			}
3891 		}
3892 		break;
3893 
3894 	case BTF_KIND_ENUM:
3895 	case BTF_KIND_ENUM64:
3896 		h = btf_hash_enum(t);
3897 		for_each_dedup_cand(d, hash_entry, h) {
3898 			cand_id = hash_entry->value;
3899 			cand = btf_type_by_id(d->btf, cand_id);
3900 			if (btf_equal_enum(t, cand)) {
3901 				new_id = cand_id;
3902 				break;
3903 			}
3904 			if (btf_compat_enum(t, cand)) {
3905 				if (btf_is_enum_fwd(t)) {
3906 					/* resolve fwd to full enum */
3907 					new_id = cand_id;
3908 					break;
3909 				}
3910 				/* resolve canonical enum fwd to full enum */
3911 				d->map[cand_id] = type_id;
3912 			}
3913 		}
3914 		break;
3915 
3916 	case BTF_KIND_FWD:
3917 	case BTF_KIND_FLOAT:
3918 		h = btf_hash_common(t);
3919 		for_each_dedup_cand(d, hash_entry, h) {
3920 			cand_id = hash_entry->value;
3921 			cand = btf_type_by_id(d->btf, cand_id);
3922 			if (btf_equal_common(t, cand)) {
3923 				new_id = cand_id;
3924 				break;
3925 			}
3926 		}
3927 		break;
3928 
3929 	default:
3930 		return -EINVAL;
3931 	}
3932 
3933 	d->map[type_id] = new_id;
3934 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
3935 		return -ENOMEM;
3936 
3937 	return 0;
3938 }
3939 
3940 static int btf_dedup_prim_types(struct btf_dedup *d)
3941 {
3942 	int i, err;
3943 
3944 	for (i = 0; i < d->btf->nr_types; i++) {
3945 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
3946 		if (err)
3947 			return err;
3948 	}
3949 	return 0;
3950 }
3951 
3952 /*
3953  * Check whether type is already mapped into canonical one (could be to itself).
3954  */
3955 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
3956 {
3957 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
3958 }
3959 
3960 /*
3961  * Resolve type ID into its canonical type ID, if any; otherwise return original
3962  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
3963  * STRUCT/UNION link and resolve it into canonical type ID as well.
3964  */
3965 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
3966 {
3967 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
3968 		type_id = d->map[type_id];
3969 	return type_id;
3970 }
3971 
3972 /*
3973  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
3974  * type ID.
3975  */
3976 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
3977 {
3978 	__u32 orig_type_id = type_id;
3979 
3980 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
3981 		return type_id;
3982 
3983 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
3984 		type_id = d->map[type_id];
3985 
3986 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
3987 		return type_id;
3988 
3989 	return orig_type_id;
3990 }
3991 
3992 
3993 static inline __u16 btf_fwd_kind(struct btf_type *t)
3994 {
3995 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
3996 }
3997 
3998 /* Check if given two types are identical ARRAY definitions */
3999 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4000 {
4001 	struct btf_type *t1, *t2;
4002 
4003 	t1 = btf_type_by_id(d->btf, id1);
4004 	t2 = btf_type_by_id(d->btf, id2);
4005 	if (!btf_is_array(t1) || !btf_is_array(t2))
4006 		return false;
4007 
4008 	return btf_equal_array(t1, t2);
4009 }
4010 
4011 /* Check if given two types are identical STRUCT/UNION definitions */
4012 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4013 {
4014 	const struct btf_member *m1, *m2;
4015 	struct btf_type *t1, *t2;
4016 	int n, i;
4017 
4018 	t1 = btf_type_by_id(d->btf, id1);
4019 	t2 = btf_type_by_id(d->btf, id2);
4020 
4021 	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4022 		return false;
4023 
4024 	if (!btf_shallow_equal_struct(t1, t2))
4025 		return false;
4026 
4027 	m1 = btf_members(t1);
4028 	m2 = btf_members(t2);
4029 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4030 		if (m1->type != m2->type &&
4031 		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4032 		    !btf_dedup_identical_structs(d, m1->type, m2->type))
4033 			return false;
4034 	}
4035 	return true;
4036 }
4037 
4038 /*
4039  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4040  * call it "candidate graph" in this description for brevity) to a type graph
4041  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4042  * here, though keep in mind that not all types in canonical graph are
4043  * necessarily canonical representatives themselves, some of them might be
4044  * duplicates or its uniqueness might not have been established yet).
4045  * Returns:
4046  *  - >0, if type graphs are equivalent;
4047  *  -  0, if not equivalent;
4048  *  - <0, on error.
4049  *
4050  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4051  * equivalence of BTF types at each step. If at any point BTF types in candidate
4052  * and canonical graphs are not compatible structurally, whole graphs are
4053  * incompatible. If types are structurally equivalent (i.e., all information
4054  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
4055  * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
4056  * If a type references other types, then those referenced types are checked
4057  * for equivalence recursively.
4058  *
4059  * During DFS traversal, if we find that for current `canon_id` type we
4060  * already have some mapping in hypothetical map, we check for two possible
4061  * situations:
4062  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4063  *     happen when type graphs have cycles. In this case we assume those two
4064  *     types are equivalent.
4065  *   - `canon_id` is mapped to different type. This is contradiction in our
4066  *     hypothetical mapping, because same graph in canonical graph corresponds
4067  *     to two different types in candidate graph, which for equivalent type
4068  *     graphs shouldn't happen. This condition terminates equivalence check
4069  *     with negative result.
4070  *
4071  * If type graphs traversal exhausts types to check and find no contradiction,
4072  * then type graphs are equivalent.
4073  *
4074  * When checking types for equivalence, there is one special case: FWD types.
4075  * If FWD type resolution is allowed and one of the types (either from canonical
4076  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4077  * flag) and their names match, hypothetical mapping is updated to point from
4078  * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
4079  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4080  *
4081  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4082  * if there are two exactly named (or anonymous) structs/unions that are
4083  * compatible structurally, one of which has FWD field, while other is concrete
4084  * STRUCT/UNION, but according to C sources they are different structs/unions
4085  * that are referencing different types with the same name. This is extremely
4086  * unlikely to happen, but btf_dedup API allows to disable FWD resolution if
4087  * this logic is causing problems.
4088  *
4089  * Doing FWD resolution means that both candidate and/or canonical graphs can
4090  * consist of portions of the graph that come from multiple compilation units.
4091  * This is due to the fact that types within single compilation unit are always
4092  * deduplicated and FWDs are already resolved, if referenced struct/union
4093  * definition is available. So, if we had unresolved FWD and found corresponding
4094  * STRUCT/UNION, they will be from different compilation units. This
4095  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4096  * type graph will likely have at least two different BTF types that describe
4097  * same type (e.g., most probably there will be two different BTF types for the
4098  * same 'int' primitive type) and could even have "overlapping" parts of type
4099  * graph that describe same subset of types.
4100  *
4101  * This in turn means that our assumption that each type in canonical graph
4102  * must correspond to exactly one type in candidate graph might not hold
4103  * anymore and will make it harder to detect contradictions using hypothetical
4104  * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
4105  * resolution only in canonical graph. FWDs in candidate graphs are never
4106  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4107  * that can occur:
4108  *   - Both types in canonical and candidate graphs are FWDs. If they are
4109  *     structurally equivalent, then they can either be both resolved to the
4110  *     same STRUCT/UNION or not resolved at all. In both cases they are
4111  *     equivalent and there is no need to resolve FWD on candidate side.
4112  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4113  *     so nothing to resolve as well, algorithm will check equivalence anyway.
4114  *   - Type in canonical graph is FWD, while type in candidate is concrete
4115  *     STRUCT/UNION. In this case candidate graph comes from single compilation
4116  *     unit, so there is exactly one BTF type for each unique C type. After
4117  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
4118  *     in canonical graph mapping to single BTF type in candidate graph, but
4119  *     because hypothetical mapping maps from canonical to candidate types, it's
4120  *     alright, and we still maintain the property of having single `canon_id`
4121  *     mapping to single `cand_id` (there could be two different `canon_id`
4122  *     mapped to the same `cand_id`, but it's not contradictory).
4123  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4124  *     graph is FWD. In this case we are just going to check compatibility of
4125  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4126  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4127  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4128  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4129  *     canonical graph.
4130  */
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
{
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;
	__u16 cand_kind;
	__u16 canon_kind;
	int i, eq;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	/* follow previously recorded FWD -> STRUCT/UNION mapping, but only on
	 * the canonical side; candidate-side FWDs are never resolved (see the
	 * big comment above for why that's sufficient)
	 */
	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
		/* canon_id was already hypothetically mapped during this
		 * equivalence check; it has to map to cand_id (or to a type
		 * that's byte-identical to it), otherwise graphs contradict
		 */
		if (hypot_type_id == cand_id)
			return 1;
		/* In some cases compiler will generate different DWARF types
		 * for *identical* array type definitions and use them for
		 * different fields within the *same* struct. This breaks type
		 * equivalence check, which makes an assumption that candidate
		 * types sub-graph has a consistent and deduped-by-compiler
		 * types within a single CU. So work around that by explicitly
		 * allowing identical array types here.
		 */
		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
			return 1;
		/* It turns out that similar situation can happen with
		 * struct/union sometimes, sigh... Handle the case where
		 * structs/unions are exactly the same, down to the referenced
		 * type IDs. Anything more complicated (e.g., if referenced
		 * types are different, but equivalent) is *way more*
		 * complicated and requires a many-to-many equivalence mapping.
		 */
		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
			return 1;
		return 0;
	}

	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = btf_type_by_id(d->btf, cand_id);
	canon_type = btf_type_by_id(d->btf, canon_id);
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	/* names must match exactly (both can be anonymous, i.e., zero) */
	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;

		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);
			/* we'd need to resolve base FWD to STRUCT/UNION */
			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
				d->hypot_adjust_canon = true;
		}
		/* equivalent iff FWD's struct/union flavor matches the
		 * concrete type's kind
		 */
		return fwd_kind == real_kind;
	}

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int_tag(cand_type, canon_type);

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		return btf_equal_common(cand_type, canon_type);

	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		if (cand_type->info != canon_type->info)
			return 0;
		/* reference types are equivalent if referenced types are */
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		/* recurse into index type, then element type */
		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;
		__u16 vlen;

		/* member names/offsets are compared shallowly first,
		 * member types are recursed into below
		 */
		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
			if (eq <= 0)
				return eq;
			cand_m++;
			canon_m++;
		}

		return 1;
	}

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;
		__u16 vlen;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		/* check return type first, then each parameter type */
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}
		return 1;
	}

	default:
		return -EINVAL;
	}
	return 0;
}
4290 
4291 /*
4292  * Use hypothetical mapping, produced by successful type graph equivalence
4293  * check, to augment existing struct/union canonical mapping, where possible.
4294  *
4295  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4296  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4297  * it doesn't matter if FWD type was part of canonical graph or candidate one,
4298  * we are recording the mapping anyway. As opposed to carefulness required
4299  * for struct/union correspondence mapping (described below), for FWD resolution
4300  * it's not important, as by the time that FWD type (reference type) will be
4301  * deduplicated all structs/unions will be deduped already anyway.
4302  *
4303  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4304  * not required for correctness. It needs to be done carefully to ensure that
4305  * struct/union from candidate's type graph is not mapped into corresponding
4306  * struct/union from canonical type graph that itself hasn't been resolved into
4307  * canonical representative. The only guarantee we have is that canonical
4308  * struct/union was determined as canonical and that won't change. But any
4309  * types referenced through that struct/union fields could have been not yet
4310  * resolved, so in case like that it's too early to establish any kind of
4311  * correspondence between structs/unions.
4312  *
 * No canonical correspondence is derived for primitive types (they are
 * already fully deduplicated anyway) or reference types (they rely on
4315  * stability of struct/union canonical relationship for equivalence checks).
4316  */
static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
{
	__u32 canon_type_id, targ_type_id;
	__u16 t_kind, c_kind;
	__u32 t_id, c_id;
	int i;

	/* hypot_list holds all canonical-side IDs recorded during the last
	 * successful btf_dedup_is_equiv() run
	 */
	for (i = 0; i < d->hypot_cnt; i++) {
		canon_type_id = d->hypot_list[i];
		targ_type_id = d->hypot_map[canon_type_id];
		t_id = resolve_type_id(d, targ_type_id);
		c_id = resolve_type_id(d, canon_type_id);
		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
		/*
		 * Resolve FWD into STRUCT/UNION.
		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
		 * mapped to canonical representative (as opposed to
		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
		 * eventually that struct is going to be mapped and all resolved
		 * FWDs will automatically resolve to correct canonical
		 * representative. This will happen before ref type deduping,
		 * which critically depends on stability of these mappings. This
		 * stability is not a requirement for STRUCT/UNION equivalence
		 * checks, though.
		 */

		/* if it's the split BTF case, we still need to point base FWD
		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
		 * will be resolved against base FWD. If we don't point base
		 * canonical FWD to the resolved STRUCT/UNION, then all the
		 * FWDs in split BTF won't be correctly resolved to a proper
		 * STRUCT/UNION.
		 */
		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
			d->map[c_id] = t_id;

		/* if graph equivalence determined that we'd need to adjust
		 * base canonical types, then we need to only point base FWDs
		 * to STRUCTs/UNIONs and do no more modifications. For all
		 * other purposes the type graphs were not equivalent.
		 */
		if (d->hypot_adjust_canon)
			continue;

		/* candidate-side FWD resolves to the canonical STRUCT/UNION */
		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
			d->map[t_id] = c_id;

		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
		    c_kind != BTF_KIND_FWD &&
		    is_type_mapped(d, c_id) &&
		    !is_type_mapped(d, t_id)) {
			/*
			 * as a perf optimization, we can map struct/union
			 * that's part of type graph we just verified for
			 * equivalence. We can do that for struct/union that has
			 * canonical representative only, though.
			 */
			d->map[t_id] = c_id;
		}
	}
}
4379 
4380 /*
4381  * Deduplicate struct/union types.
4382  *
4383  * For each struct/union type its type signature hash is calculated, taking
4384  * into account type's name, size, number, order and names of fields, but
4385  * ignoring type ID's referenced from fields, because they might not be deduped
4386  * completely until after reference types deduplication phase. This type hash
4387  * is used to iterate over all potential canonical types, sharing same hash.
4388  * For each canonical candidate we check whether type graphs that they form
4389  * (through referenced types in fields and so on) are equivalent using algorithm
4390  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4391  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4392  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4393  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4394  * potentially map other structs/unions to their canonical representatives,
4395  * if such relationship hasn't yet been established. This speeds up algorithm
4396  * by eliminating some of the duplicate work.
4397  *
4398  * If no matching canonical representative was found, struct/union is marked
4399  * as canonical for itself and is added into btf_dedup->dedup_table hash map
4400  * for further look ups.
4401  */
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *cand_type, *t;
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u16 kind;
	long h;

	/* already deduped or is in process of deduping (loop detected) */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return 0;

	t = btf_type_by_id(d->btf, type_id);
	kind = btf_kind(t);

	/* this pass handles only structs/unions; everything else is covered
	 * by the primitive and reference type dedup passes
	 */
	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
		return 0;

	/* hash ignores referenced type IDs, see comment above the function */
	h = btf_hash_struct(t);
	for_each_dedup_cand(d, hash_entry, h) {
		__u32 cand_id = hash_entry->value;
		int eq;

		/*
		 * Even though btf_dedup_is_equiv() checks for
		 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * from picking matching FWD type as a dedup candidate.
		 * This can happen due to hash collision. In such case just
		 * relying on btf_dedup_is_equiv() would lead to potentially
		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
		 * FWD and compatible STRUCT/UNION are considered equivalent.
		 */
		cand_type = btf_type_by_id(d->btf, cand_id);
		if (!btf_shallow_equal_struct(t, cand_type))
			continue;

		/* start each full type-graph comparison from a clean slate */
		btf_dedup_clear_hypot_map(d);
		eq = btf_dedup_is_equiv(d, type_id, cand_id);
		if (eq < 0)
			return eq;
		if (!eq)
			continue;
		/* equivalence proven - commit FWD and struct/union mappings */
		btf_dedup_merge_hypot_map(d);
		if (d->hypot_adjust_canon) /* not really equivalent */
			continue;
		new_id = cand_id;
		break;
	}

	d->map[type_id] = new_id;
	/* self-canonical types become candidates for later lookups */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
4459 
4460 static int btf_dedup_struct_types(struct btf_dedup *d)
4461 {
4462 	int i, err;
4463 
4464 	for (i = 0; i < d->btf->nr_types; i++) {
4465 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4466 		if (err)
4467 			return err;
4468 	}
4469 	return 0;
4470 }
4471 
4472 /*
4473  * Deduplicate reference type.
4474  *
4475  * Once all primitive and struct/union types got deduplicated, we can easily
4476  * deduplicate all other (reference) BTF types. This is done in two steps:
4477  *
4478  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4479  * resolution can be done either immediately for primitive or struct/union types
4480  * (because they were deduped in previous two phases) or recursively for
4481  * reference types. Recursion will always terminate at either primitive or
4482  * struct/union type, at which point we can "unwind" chain of reference types
4483  * one by one. There is no danger of encountering cycles because in C type
4484  * system the only way to form type cycle is through struct/union, so any chain
4485  * of reference types, even those taking part in a type cycle, will inevitably
4486  * reach struct/union at some point.
4487  *
4488  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4489  * becomes "stable", in the sense that no further deduplication will cause
4490  * any changes to it. With that, it's now possible to calculate type's signature
4491  * hash (this time taking into account referenced type IDs) and loop over all
4492  * potential canonical representatives. If no match was found, current type
4493  * will become canonical representative of itself and will be added into
4494  * btf_dedup->dedup_table as another possible canonical representative.
4495  */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	/* if we don't find equivalent type, then we are representative type */
	int ref_type_id;
	long h;

	/* a cycle through pure reference types can't happen in valid C BTF
	 * (cycles must go through struct/union); treat it as malformed input
	 */
	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = btf_type_by_id(d->btf, type_id);
	/* mark as in-progress to detect cycles during recursion below */
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		/* canonicalize referenced type ID first, then look for an
		 * already-seen identical type
		 */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_DECL_TAG:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		/* both element and index types need canonicalization */
		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		/* canonicalize return type, then every parameter type */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		/* non-reference kinds were handled by earlier dedup passes;
		 * any error aborts the whole dedup, so the in-progress marker
		 * left in d->map[type_id] is never observed afterwards
		 */
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	/* self-canonical types become candidates for later lookups */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}
4621 
4622 static int btf_dedup_ref_types(struct btf_dedup *d)
4623 {
4624 	int i, err;
4625 
4626 	for (i = 0; i < d->btf->nr_types; i++) {
4627 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4628 		if (err < 0)
4629 			return err;
4630 	}
4631 	/* we won't need d->dedup_table anymore */
4632 	hashmap__free(d->dedup_table);
4633 	d->dedup_table = NULL;
4634 	return 0;
4635 }
4636 
4637 /*
4638  * Collect a map from type names to type ids for all canonical structs
4639  * and unions. If the same name is shared by several canonical types
4640  * use a special value 0 to indicate this fact.
4641  */
4642 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
4643 {
4644 	__u32 nr_types = btf__type_cnt(d->btf);
4645 	struct btf_type *t;
4646 	__u32 type_id;
4647 	__u16 kind;
4648 	int err;
4649 
4650 	/*
4651 	 * Iterate over base and split module ids in order to get all
4652 	 * available structs in the map.
4653 	 */
4654 	for (type_id = 1; type_id < nr_types; ++type_id) {
4655 		t = btf_type_by_id(d->btf, type_id);
4656 		kind = btf_kind(t);
4657 
4658 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4659 			continue;
4660 
4661 		/* Skip non-canonical types */
4662 		if (type_id != d->map[type_id])
4663 			continue;
4664 
4665 		err = hashmap__add(names_map, t->name_off, type_id);
4666 		if (err == -EEXIST)
4667 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
4668 
4669 		if (err)
4670 			return err;
4671 	}
4672 
4673 	return 0;
4674 }
4675 
4676 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
4677 {
4678 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4679 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
4680 	__u16 cand_kind, kind = btf_kind(t);
4681 	struct btf_type *cand_t;
4682 	uintptr_t cand_id;
4683 
4684 	if (kind != BTF_KIND_FWD)
4685 		return 0;
4686 
4687 	/* Skip if this FWD already has a mapping */
4688 	if (type_id != d->map[type_id])
4689 		return 0;
4690 
4691 	if (!hashmap__find(names_map, t->name_off, &cand_id))
4692 		return 0;
4693 
4694 	/* Zero is a special value indicating that name is not unique */
4695 	if (!cand_id)
4696 		return 0;
4697 
4698 	cand_t = btf_type_by_id(d->btf, cand_id);
4699 	cand_kind = btf_kind(cand_t);
4700 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
4701 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
4702 		return 0;
4703 
4704 	d->map[type_id] = cand_id;
4705 
4706 	return 0;
4707 }
4708 
4709 /*
4710  * Resolve unambiguous forward declarations.
4711  *
4712  * The lion's share of all FWD declarations is resolved during
4713  * `btf_dedup_struct_types` phase when different type graphs are
4714  * compared against each other. However, if in some compilation unit a
4715  * FWD declaration is not a part of a type graph compared against
4716  * another type graph that declaration's canonical type would not be
4717  * changed. Example:
4718  *
4719  * CU #1:
4720  *
4721  * struct foo;
4722  * struct foo *some_global;
4723  *
4724  * CU #2:
4725  *
4726  * struct foo { int u; };
4727  * struct foo *another_global;
4728  *
4729  * After `btf_dedup_struct_types` the BTF looks as follows:
4730  *
4731  * [1] STRUCT 'foo' size=4 vlen=1 ...
4732  * [2] INT 'int' size=4 ...
4733  * [3] PTR '(anon)' type_id=1
4734  * [4] FWD 'foo' fwd_kind=struct
4735  * [5] PTR '(anon)' type_id=4
4736  *
 * This pass assumes that such FWD declarations should be mapped to
 * structs or unions with an identical name, provided that the name
 * is not ambiguous.
4740  */
4741 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
4742 {
4743 	int i, err;
4744 	struct hashmap *names_map;
4745 
4746 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
4747 	if (IS_ERR(names_map))
4748 		return PTR_ERR(names_map);
4749 
4750 	err = btf_dedup_fill_unique_names_map(d, names_map);
4751 	if (err < 0)
4752 		goto exit;
4753 
4754 	for (i = 0; i < d->btf->nr_types; i++) {
4755 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
4756 		if (err < 0)
4757 			break;
4758 	}
4759 
4760 exit:
4761 	hashmap__free(names_map);
4762 	return err;
4763 }
4764 
4765 /*
4766  * Compact types.
4767  *
4768  * After we established for each type its corresponding canonical representative
4769  * type, we now can eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
4771  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
4772  * a map from original type ID to a new compacted type ID, which will be used
4773  * during next phase to "fix up" type IDs, referenced from struct/union and
4774  * reference types.
4775  */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	__u32 *new_offs;
	__u32 next_type_id = d->btf->start_id;
	const struct btf_type *t;
	void *p;
	int i, id, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	/* base BTF types are not renumbered */
	for (id = 1; id < d->btf->start_id; id++)
		d->hypot_map[id] = id;
	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
		d->hypot_map[id] = BTF_UNPROCESSED_ID;

	/* write pointer; canonical types are packed towards the front */
	p = d->btf->types_data;

	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
		/* only canonical types survive compaction */
		if (d->map[id] != id)
			continue;

		t = btf__type_by_id(d->btf, id);
		len = btf_type_size(t);
		if (len < 0)
			return len;

		/* source and destination may overlap, hence memmove */
		memmove(p, t, len);
		d->hypot_map[id] = next_type_id;
		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - d->btf->start_id;
	d->btf->type_offs_cap = d->btf->nr_types;
	d->btf->hdr->type_len = p - d->btf->types_data;
	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
				       sizeof(*new_offs));
	/* shrinking realloc shouldn't fail, but handle it just in case;
	 * reallocarray(.., 0, ..) may legitimately return NULL
	 */
	if (d->btf->type_offs_cap && !new_offs)
		return -ENOMEM;
	d->btf->type_offs = new_offs;
	d->btf->hdr->str_off = d->btf->hdr->type_len;
	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
	return 0;
}
4823 
4824 /*
4825  * Figure out final (deduplicated and compacted) type ID for provided original
4826  * `type_id` by first resolving it into corresponding canonical type ID and
4827  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4828  * which is populated during compaction phase.
4829  */
4830 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4831 {
4832 	struct btf_dedup *d = ctx;
4833 	__u32 resolved_type_id, new_type_id;
4834 
4835 	resolved_type_id = resolve_type_id(d, *type_id);
4836 	new_type_id = d->hypot_map[resolved_type_id];
4837 	if (new_type_id > BTF_MAX_NR_TYPES)
4838 		return -EINVAL;
4839 
4840 	*type_id = new_type_id;
4841 	return 0;
4842 }
4843 
4844 /*
4845  * Remap referenced type IDs into deduped type IDs.
4846  *
4847  * After BTF types are deduplicated and compacted, their final type IDs may
4848  * differ from original ones. The map from original to a corresponding
4849  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4850  * compaction phase. During remapping phase we are rewriting all type IDs
4851  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
4852  * their final deduped type IDs.
4853  */
4854 static int btf_dedup_remap_types(struct btf_dedup *d)
4855 {
4856 	int i, r;
4857 
4858 	for (i = 0; i < d->btf->nr_types; i++) {
4859 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
4860 
4861 		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
4862 		if (r)
4863 			return r;
4864 	}
4865 
4866 	if (!d->btf_ext)
4867 		return 0;
4868 
4869 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
4870 	if (r)
4871 		return r;
4872 
4873 	return 0;
4874 }
4875 
4876 /*
4877  * Probe few well-known locations for vmlinux kernel image and try to load BTF
4878  * data out of it to use for target BTF.
4879  */
4880 struct btf *btf__load_vmlinux_btf(void)
4881 {
4882 	const char *locations[] = {
4883 		/* try canonical vmlinux BTF through sysfs first */
4884 		"/sys/kernel/btf/vmlinux",
4885 		/* fall back to trying to find vmlinux on disk otherwise */
4886 		"/boot/vmlinux-%1$s",
4887 		"/lib/modules/%1$s/vmlinux-%1$s",
4888 		"/lib/modules/%1$s/build/vmlinux",
4889 		"/usr/lib/modules/%1$s/kernel/vmlinux",
4890 		"/usr/lib/debug/boot/vmlinux-%1$s",
4891 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
4892 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
4893 	};
4894 	char path[PATH_MAX + 1];
4895 	struct utsname buf;
4896 	struct btf *btf;
4897 	int i, err;
4898 
4899 	uname(&buf);
4900 
4901 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
4902 		snprintf(path, PATH_MAX, locations[i], buf.release);
4903 
4904 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
4905 			continue;
4906 
4907 		btf = btf__parse(path, NULL);
4908 		err = libbpf_get_error(btf);
4909 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
4910 		if (err)
4911 			continue;
4912 
4913 		return btf;
4914 	}
4915 
4916 	pr_warn("failed to find valid kernel BTF\n");
4917 	return libbpf_err_ptr(-ESRCH);
4918 }
4919 
4920 struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
4921 
/* Load BTF for the given kernel module, split on top of vmlinux BTF. */
struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
{
	char sysfs_path[80];

	/* the kernel exposes per-module BTF under /sys/kernel/btf/<module> */
	snprintf(sysfs_path, sizeof(sysfs_path), "/sys/kernel/btf/%s", module_name);

	return btf__parse_split(sysfs_path, vmlinux_btf);
}
4929 
/* Invoke visit() callback on every type ID field referenced by BTF type @t
 * (t->type, array element/index, member/param/var-secinfo types). Stops at
 * and returns the first non-zero value returned by the callback.
 */
int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
{
	int i, n, err;

	switch (btf_kind(t)) {
	/* kinds that reference no other types */
	case BTF_KIND_INT:
	case BTF_KIND_FLOAT:
	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		return 0;

	/* kinds with a single referenced type in t->type */
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		return visit(&t->type, ctx);

	case BTF_KIND_ARRAY: {
		struct btf_array *a = btf_array(t);

		/* element type, then index type (skipped if element failed) */
		err = visit(&a->type, ctx);
		err = err ?: visit(&a->index_type, ctx);
		return err;
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *m = btf_params(t);

		/* return type first, then each parameter type */
		err = visit(&t->type, ctx);
		if (err)
			return err;
		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	case BTF_KIND_DATASEC: {
		struct btf_var_secinfo *m = btf_var_secinfos(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->type, ctx);
			if (err)
				return err;
		}
		return 0;
	}

	default:
		return -EINVAL;
	}
}
5002 
/* Invoke visit() callback on every string offset within BTF type @t: the
 * type's own name_off plus member/enumerator/parameter names, where present.
 * Stops at and returns the first non-zero value returned by the callback.
 */
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
{
	int i, n, err;

	/* every type has its own name_off (possibly 0 for anonymous types) */
	err = visit(&t->name_off, ctx);
	if (err)
		return err;

	switch (btf_kind(t)) {
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		struct btf_member *m = btf_members(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		struct btf_enum *m = btf_enum(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		struct btf_enum64 *m = btf_enum64(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *m = btf_params(t);

		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
			err = visit(&m->name_off, ctx);
			if (err)
				return err;
		}
		break;
	}
	default:
		/* remaining kinds carry no extra string offsets */
		break;
	}

	return 0;
}
5059 
5060 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5061 {
5062 	const struct btf_ext_info *seg;
5063 	struct btf_ext_info_sec *sec;
5064 	int i, err;
5065 
5066 	seg = &btf_ext->func_info;
5067 	for_each_btf_ext_sec(seg, sec) {
5068 		struct bpf_func_info_min *rec;
5069 
5070 		for_each_btf_ext_rec(seg, sec, i, rec) {
5071 			err = visit(&rec->type_id, ctx);
5072 			if (err < 0)
5073 				return err;
5074 		}
5075 	}
5076 
5077 	seg = &btf_ext->core_relo_info;
5078 	for_each_btf_ext_sec(seg, sec) {
5079 		struct bpf_core_relo *rec;
5080 
5081 		for_each_btf_ext_rec(seg, sec, i, rec) {
5082 			err = visit(&rec->type_id, ctx);
5083 			if (err < 0)
5084 				return err;
5085 		}
5086 	}
5087 
5088 	return 0;
5089 }
5090 
5091 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5092 {
5093 	const struct btf_ext_info *seg;
5094 	struct btf_ext_info_sec *sec;
5095 	int i, err;
5096 
5097 	seg = &btf_ext->func_info;
5098 	for_each_btf_ext_sec(seg, sec) {
5099 		err = visit(&sec->sec_name_off, ctx);
5100 		if (err)
5101 			return err;
5102 	}
5103 
5104 	seg = &btf_ext->line_info;
5105 	for_each_btf_ext_sec(seg, sec) {
5106 		struct bpf_line_info_min *rec;
5107 
5108 		err = visit(&sec->sec_name_off, ctx);
5109 		if (err)
5110 			return err;
5111 
5112 		for_each_btf_ext_rec(seg, sec, i, rec) {
5113 			err = visit(&rec->file_name_off, ctx);
5114 			if (err)
5115 				return err;
5116 			err = visit(&rec->line_off, ctx);
5117 			if (err)
5118 				return err;
5119 		}
5120 	}
5121 
5122 	seg = &btf_ext->core_relo_info;
5123 	for_each_btf_ext_sec(seg, sec) {
5124 		struct bpf_core_relo *rec;
5125 
5126 		err = visit(&sec->sec_name_off, ctx);
5127 		if (err)
5128 			return err;
5129 
5130 		for_each_btf_ext_rec(seg, sec, i, rec) {
5131 			err = visit(&rec->access_str_off, ctx);
5132 			if (err)
5133 				return err;
5134 		}
5135 	}
5136 
5137 	return 0;
5138 }
5139