• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <byteswap.h>
5 #include <endian.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <fcntl.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <sys/utsname.h>
13 #include <sys/param.h>
14 #include <sys/stat.h>
15 #include <linux/kernel.h>
16 #include <linux/err.h>
17 #include <linux/btf.h>
18 
19 #ifdef HAVE_LIBELF
20 #include <gelf.h>
21 #endif
22 
23 #include "btf.h"
24 #include "bpf.h"
25 #include "libbpf.h"
26 #include "libbpf_internal.h"
27 #include "hashmap.h"
28 #include "strset.h"
29 
30 #define BTF_MAX_NR_TYPES 0x7fffffffU
31 #define BTF_MAX_STR_OFFSET 0x7fffffffU
32 
33 static struct btf_type btf_void;
34 
/* Internal representation of a BTF object (either base or split BTF) */
struct btf {
	/* raw BTF data in native endianness */
	void *raw_data;
	/* raw BTF data in non-native endianness */
	void *raw_data_swapped;
	__u32 raw_size;
	/* whether target endianness differs from the native one */
	bool swapped_endian;

	/*
	 * When BTF is loaded from an ELF or raw memory it is stored
	 * in a contiguous memory block. The hdr, type_data, and, strs_data
	 * point inside that memory region to their respective parts of BTF
	 * representation:
	 *
	 * +--------------------------------+
	 * |  Header  |  Types  |  Strings  |
	 * +--------------------------------+
	 * ^          ^         ^
	 * |          |         |
	 * hdr        |         |
	 * types_data-+         |
	 * strs_data------------+
	 *
	 * If BTF data is later modified, e.g., due to types added or
	 * removed, BTF deduplication performed, etc, this contiguous
	 * representation is broken up into three independently allocated
	 * memory regions to be able to modify them independently.
	 * raw_data is nulled out at that point, but can be later allocated
	 * and cached again if user calls btf__raw_data(), at which point
	 * raw_data will contain a contiguous copy of header, types, and
	 * strings:
	 *
	 * +----------+  +---------+  +-----------+
	 * |  Header  |  |  Types  |  |  Strings  |
	 * +----------+  +---------+  +-----------+
	 * ^             ^            ^
	 * |             |            |
	 * hdr           |            |
	 * types_data----+            |
	 * strset__data(strs_set)-----+
	 *
	 *               +----------+---------+-----------+
	 *               |  Header  |  Types  |  Strings  |
	 * raw_data----->+----------+---------+-----------+
	 */
	struct btf_header *hdr;

	/* type descriptors section (separately allocated once modifiable) */
	void *types_data;
	size_t types_data_cap; /* used size stored in hdr->type_len */

	/* type ID to `struct btf_type *` lookup index
	 * type_offs[0] corresponds to the first non-VOID type:
	 *   - for base BTF it's type [1];
	 *   - for split BTF it's the first non-base BTF type.
	 */
	__u32 *type_offs;
	/* allocated capacity of type_offs, in entries */
	size_t type_offs_cap;
	/* number of types in this BTF instance:
	 *   - doesn't include special [0] void type;
	 *   - for split BTF counts number of types added on top of base BTF.
	 */
	__u32 nr_types;
	/* if not NULL, points to the base BTF on top of which the current
	 * split BTF is based
	 */
	struct btf *base_btf;
	/* BTF type ID of the first type in this BTF instance:
	 *   - for base BTF it's equal to 1;
	 *   - for split BTF it's equal to biggest type ID of base BTF plus 1.
	 */
	int start_id;
	/* logical string offset of this BTF instance:
	 *   - for base BTF it's equal to 0;
	 *   - for split BTF it's equal to total size of base BTF's string section size.
	 */
	int start_str_off;

	/* only one of strs_data or strs_set can be non-NULL, depending on
	 * whether BTF is in a modifiable state (strs_set is used) or not
	 * (strs_data points inside raw_data)
	 */
	void *strs_data;
	/* a set of unique strings */
	struct strset *strs_set;
	/* whether strings are already deduplicated */
	bool strs_deduped;

	/* BTF object FD, if loaded into kernel; -1 otherwise */
	int fd;

	/* Pointer size (in bytes) for a target architecture of this BTF;
	 * 0 means "not determined yet", negative means "couldn't guess"
	 */
	int ptr_sz;
};
129 
/* Losslessly widen a host pointer into the __u64 form used by BPF UAPI */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (__u64)addr;
}
134 
/* Ensure given dynamically allocated memory region pointed to by *data* with
 * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
 * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
 * are already used. At most *max_cnt* elements can be ever allocated.
 * If necessary, memory is reallocated and all existing data is copied over,
 * new pointer to the memory region is stored at *data, new memory region
 * capacity (in number of elements) is stored in *cap.
 * On success, memory pointer to the beginning of unused memory is returned.
 * On error, NULL is returned.
 */
void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		     size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
	size_t new_cnt;
	void *new_data;

	/* enough capacity already; written so cur_cnt + add_cnt can't
	 * wrap around SIZE_MAX for huge (attacker-controlled) counts
	 */
	if (cur_cnt <= *cap_cnt && add_cnt <= *cap_cnt - cur_cnt)
		return *data + cur_cnt * elem_sz;

	/* requested more than the set limit (overflow-safe form of
	 * cur_cnt + add_cnt > max_cnt)
	 */
	if (cur_cnt > max_cnt || add_cnt > max_cnt - cur_cnt)
		return NULL;

	new_cnt = *cap_cnt;
	new_cnt += new_cnt / 4;		  /* expand by 25% */
	if (new_cnt < 16)		  /* but at least 16 elements */
		new_cnt = 16;
	if (new_cnt > max_cnt)		  /* but not exceeding a set limit */
		new_cnt = max_cnt;
	if (new_cnt < cur_cnt + add_cnt)  /* also ensure we have enough memory */
		new_cnt = cur_cnt + add_cnt;

	new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
	if (!new_data)
		return NULL;

	/* zero out newly allocated portion of memory */
	memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);

	*data = new_data;
	*cap_cnt = new_cnt;
	return new_data + cur_cnt * elem_sz;
}
178 
179 /* Ensure given dynamically allocated memory region has enough allocated space
180  * to accommodate *need_cnt* elements of size *elem_sz* bytes each
181  */
libbpf_ensure_mem(void ** data,size_t * cap_cnt,size_t elem_sz,size_t need_cnt)182 int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
183 {
184 	void *p;
185 
186 	if (need_cnt <= *cap_cnt)
187 		return 0;
188 
189 	p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
190 	if (!p)
191 		return -ENOMEM;
192 
193 	return 0;
194 }
195 
/* Reserve room for add_cnt more entries in the type offset index,
 * returning a pointer to the first new slot, or NULL on OOM or when
 * the BTF_MAX_NR_TYPES limit would be exceeded.
 */
static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
{
	return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
			      btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
}
201 
/* Append one type offset to the ID -> offset index.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
{
	__u32 *slot = btf_add_type_offs_mem(btf, 1);

	if (!slot)
		return -ENOMEM;

	*slot = type_off;
	return 0;
}
213 
/* Byte-swap every multi-byte field of a BTF header in place
 * (version and flags are single bytes and need no swapping)
 */
static void btf_bswap_hdr(struct btf_header *h)
{
	h->str_len = bswap_32(h->str_len);
	h->str_off = bswap_32(h->str_off);
	h->type_len = bswap_32(h->type_len);
	h->type_off = bswap_32(h->type_off);
	h->hdr_len = bswap_32(h->hdr_len);
	h->magic = bswap_16(h->magic);
}
223 
/* Parse and validate the BTF header at the start of btf->raw_data.
 * If the data is in non-native endianness, the header is byte-swapped
 * in place and btf->swapped_endian is set. Returns 0 on success,
 * negative error otherwise.
 */
static int btf_parse_hdr(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	__u32 meta_left;

	if (btf->raw_size < sizeof(struct btf_header)) {
		pr_debug("BTF header not found\n");
		return -EINVAL;
	}

	/* byte-swapped magic means BTF produced for the opposite
	 * endianness; only the canonical header length is supported then
	 */
	if (hdr->magic == bswap_16(BTF_MAGIC)) {
		btf->swapped_endian = true;
		if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
			pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
				bswap_32(hdr->hdr_len));
			return -ENOTSUP;
		}
		btf_bswap_hdr(hdr);
	} else if (hdr->magic != BTF_MAGIC) {
		pr_debug("Invalid BTF magic: %x\n", hdr->magic);
		return -EINVAL;
	}

	if (btf->raw_size < hdr->hdr_len) {
		pr_debug("BTF header len %u larger than data size %u\n",
			 hdr->hdr_len, btf->raw_size);
		return -EINVAL;
	}

	/* use 64-bit math so str_off + str_len can't overflow __u32 */
	meta_left = btf->raw_size - hdr->hdr_len;
	if (meta_left < (long long)hdr->str_off + hdr->str_len) {
		pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
		return -EINVAL;
	}

	/* type section must precede and not overlap the string section */
	if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
		pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
			 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
		return -EINVAL;
	}

	if (hdr->type_off % 4) {
		pr_debug("BTF type section is not aligned to 4 bytes\n");
		return -EINVAL;
	}

	return 0;
}
272 
btf_parse_str_sec(struct btf * btf)273 static int btf_parse_str_sec(struct btf *btf)
274 {
275 	const struct btf_header *hdr = btf->hdr;
276 	const char *start = btf->strs_data;
277 	const char *end = start + btf->hdr->str_len;
278 
279 	if (btf->base_btf && hdr->str_len == 0)
280 		return 0;
281 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
282 		pr_debug("Invalid BTF string section\n");
283 		return -EINVAL;
284 	}
285 	if (!btf->base_btf && start[0]) {
286 		pr_debug("Invalid BTF string section\n");
287 		return -EINVAL;
288 	}
289 	return 0;
290 }
291 
/* Return the total size (in bytes) of type descriptor *t*, including
 * its kind-specific trailing data, or -EINVAL for an unknown kind.
 */
static int btf_type_size(const struct btf_type *t)
{
	const int base_size = sizeof(struct btf_type);
	__u16 vlen = btf_vlen(t);

	switch (btf_kind(t)) {
	/* kinds below carry no extra data beyond struct btf_type */
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		return base_size;
	case BTF_KIND_INT:
		/* followed by a single __u32 of encoding info */
		return base_size + sizeof(__u32);
	case BTF_KIND_ENUM:
		return base_size + vlen * sizeof(struct btf_enum);
	case BTF_KIND_ENUM64:
		return base_size + vlen * sizeof(struct btf_enum64);
	case BTF_KIND_ARRAY:
		return base_size + sizeof(struct btf_array);
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		return base_size + vlen * sizeof(struct btf_member);
	case BTF_KIND_FUNC_PROTO:
		return base_size + vlen * sizeof(struct btf_param);
	case BTF_KIND_VAR:
		return base_size + sizeof(struct btf_var);
	case BTF_KIND_DATASEC:
		return base_size + vlen * sizeof(struct btf_var_secinfo);
	case BTF_KIND_DECL_TAG:
		return base_size + sizeof(struct btf_decl_tag);
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
332 
/* Byte-swap the common (kind-independent) part of a type descriptor */
static void btf_bswap_type_base(struct btf_type *t)
{
	t->type = bswap_32(t->type);
	t->info = bswap_32(t->info);
	t->name_off = bswap_32(t->name_off);
}
339 
/* Byte-swap the kind-specific trailing data of type *t* in place.
 * Assumes the common part was already swapped (so btf_kind() and
 * btf_vlen() return native-endian values). Returns 0 on success,
 * -EINVAL for an unknown kind.
 */
static int btf_bswap_type_rest(struct btf_type *t)
{
	struct btf_var_secinfo *v;
	struct btf_enum64 *e64;
	struct btf_member *m;
	struct btf_array *a;
	struct btf_param *p;
	struct btf_enum *e;
	__u16 vlen = btf_vlen(t);
	int i;

	switch (btf_kind(t)) {
	/* kinds below carry no extra data, nothing to swap */
	case BTF_KIND_FWD:
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_FLOAT:
	case BTF_KIND_TYPE_TAG:
		return 0;
	case BTF_KIND_INT:
		/* trailing __u32 of int encoding info */
		*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
		return 0;
	case BTF_KIND_ENUM:
		for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
			e->name_off = bswap_32(e->name_off);
			e->val = bswap_32(e->val);
		}
		return 0;
	case BTF_KIND_ENUM64:
		for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
			e64->name_off = bswap_32(e64->name_off);
			e64->val_lo32 = bswap_32(e64->val_lo32);
			e64->val_hi32 = bswap_32(e64->val_hi32);
		}
		return 0;
	case BTF_KIND_ARRAY:
		a = btf_array(t);
		a->type = bswap_32(a->type);
		a->index_type = bswap_32(a->index_type);
		a->nelems = bswap_32(a->nelems);
		return 0;
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
		for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
			m->name_off = bswap_32(m->name_off);
			m->type = bswap_32(m->type);
			m->offset = bswap_32(m->offset);
		}
		return 0;
	case BTF_KIND_FUNC_PROTO:
		for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
			p->name_off = bswap_32(p->name_off);
			p->type = bswap_32(p->type);
		}
		return 0;
	case BTF_KIND_VAR:
		btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
		return 0;
	case BTF_KIND_DATASEC:
		for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
			v->type = bswap_32(v->type);
			v->offset = bswap_32(v->offset);
			v->size = bswap_32(v->size);
		}
		return 0;
	case BTF_KIND_DECL_TAG:
		btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
		return 0;
	default:
		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
		return -EINVAL;
	}
}
416 
/* Walk the type section, byte-swapping each type descriptor if needed,
 * validating per-type size bounds, and recording each type's offset in
 * the type_offs index. Returns 0 on success, negative error otherwise.
 */
static int btf_parse_type_sec(struct btf *btf)
{
	struct btf_header *hdr = btf->hdr;
	void *next_type = btf->types_data;
	void *end_type = next_type + hdr->type_len;
	int err, type_size;

	while (next_type + sizeof(struct btf_type) <= end_type) {
		/* common part must be swapped first, so btf_type_size()
		 * can interpret kind and vlen correctly
		 */
		if (btf->swapped_endian)
			btf_bswap_type_base(next_type);

		type_size = btf_type_size(next_type);
		if (type_size < 0)
			return type_size;
		if (next_type + type_size > end_type) {
			pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
			return -EINVAL;
		}

		if (btf->swapped_endian && btf_bswap_type_rest(next_type))
			return -EINVAL;

		err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
		if (err)
			return err;

		next_type += type_size;
		btf->nr_types++;
	}

	/* trailing bytes that don't form a complete type are invalid */
	if (next_type != end_type) {
		pr_warn("BTF types data is malformed\n");
		return -EINVAL;
	}

	return 0;
}
454 
/* Check that str_off points at a valid string; *what* names the field
 * for the warning message. Returns 0 if valid, -EINVAL otherwise.
 */
static int btf_validate_str(const struct btf *btf, __u32 str_off, const char *what, __u32 type_id)
{
	if (!btf__str_by_offset(btf, str_off)) {
		pr_warn("btf: type [%u]: invalid %s (string offset %u)\n", type_id, what, str_off);
		return -EINVAL;
	}
	return 0;
}
467 
/* Check that *id* refers to an existing type; ctx_id identifies the
 * referencing type for the warning message. Returns 0 or -EINVAL.
 */
static int btf_validate_id(const struct btf *btf, __u32 id, __u32 ctx_id)
{
	if (!btf__type_by_id(btf, id)) {
		pr_warn("btf: type [%u]: invalid referenced type ID %u\n", ctx_id, id);
		return -EINVAL;
	}
	return 0;
}
480 
/* Validate a single type descriptor: its name string and every type ID
 * and string offset it references must be valid. Returns 0 if the type
 * is well-formed, -EINVAL otherwise.
 */
static int btf_validate_type(const struct btf *btf, const struct btf_type *t, __u32 id)
{
	__u32 kind = btf_kind(t);
	int err, i, n;

	err = btf_validate_str(btf, t->name_off, "type name", id);
	if (err)
		return err;

	switch (kind) {
	/* kinds below reference no other types */
	case BTF_KIND_UNKN:
	case BTF_KIND_INT:
	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		break;
	/* kinds below reference exactly one type through t->type */
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_VAR:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		break;
	case BTF_KIND_ARRAY: {
		const struct btf_array *a = btf_array(t);

		err = btf_validate_id(btf, a->type, id);
		err = err ?: btf_validate_id(btf, a->index_type, id);
		if (err)
			return err;
		break;
	}
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *m = btf_members(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "field name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM: {
		const struct btf_enum *m = btf_enum(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_ENUM64: {
		const struct btf_enum64 *m = btf_enum64(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "enum name", id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_FUNC: {
		const struct btf_type *ft;

		err = btf_validate_id(btf, t->type, id);
		if (err)
			return err;
		/* FUNC may only point at a FUNC_PROTO */
		ft = btf__type_by_id(btf, t->type);
		if (btf_kind(ft) != BTF_KIND_FUNC_PROTO) {
			pr_warn("btf: type [%u]: referenced type [%u] is not FUNC_PROTO\n", id, t->type);
			return -EINVAL;
		}
		break;
	}
	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *m = btf_params(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_str(btf, m->name_off, "param name", id);
			err = err ?: btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	case BTF_KIND_DATASEC: {
		const struct btf_var_secinfo *m = btf_var_secinfos(t);

		n = btf_vlen(t);
		for (i = 0; i < n; i++, m++) {
			err = btf_validate_id(btf, m->type, id);
			if (err)
				return err;
		}
		break;
	}
	default:
		pr_warn("btf: type [%u]: unrecognized kind %u\n", id, kind);
		return -EINVAL;
	}
	return 0;
}
594 
/* Validate basic sanity of BTF. It's intentionally less thorough than
 * kernel's validation and validates only properties of BTF that libbpf relies
 * on to be correct (e.g., valid type IDs, valid string offsets, etc)
 */
static int btf_sanity_check(const struct btf *btf)
{
	__u32 id, n = btf__type_cnt(btf);
	int err;

	/* type ID 0 is the implicit VOID type, nothing to validate there */
	for (id = 1; id < n; id++) {
		err = btf_validate_type(btf, btf_type_by_id(btf, id), id);
		if (err)
			return err;
	}
	return 0;
}
613 
btf__type_cnt(const struct btf * btf)614 __u32 btf__type_cnt(const struct btf *btf)
615 {
616 	return btf->start_id + btf->nr_types;
617 }
618 
/* Return the base BTF this split BTF is built on top of, or NULL for
 * a standalone (base) BTF
 */
const struct btf *btf__base_btf(const struct btf *btf)
{
	return btf->base_btf;
}
623 
624 /* internal helper returning non-const pointer to a type */
btf_type_by_id(const struct btf * btf,__u32 type_id)625 struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
626 {
627 	if (type_id == 0)
628 		return &btf_void;
629 	if (type_id < btf->start_id)
630 		return btf_type_by_id(btf->base_btf, type_id);
631 	return btf->types_data + btf->type_offs[type_id - btf->start_id];
632 }
633 
btf__type_by_id(const struct btf * btf,__u32 type_id)634 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
635 {
636 	if (type_id >= btf->start_id + btf->nr_types)
637 		return errno = EINVAL, NULL;
638 	return btf_type_by_id((struct btf *)btf, type_id);
639 }
640 
determine_ptr_size(const struct btf * btf)641 static int determine_ptr_size(const struct btf *btf)
642 {
643 	static const char * const long_aliases[] = {
644 		"long",
645 		"long int",
646 		"int long",
647 		"unsigned long",
648 		"long unsigned",
649 		"unsigned long int",
650 		"unsigned int long",
651 		"long unsigned int",
652 		"long int unsigned",
653 		"int unsigned long",
654 		"int long unsigned",
655 	};
656 	const struct btf_type *t;
657 	const char *name;
658 	int i, j, n;
659 
660 	if (btf->base_btf && btf->base_btf->ptr_sz > 0)
661 		return btf->base_btf->ptr_sz;
662 
663 	n = btf__type_cnt(btf);
664 	for (i = 1; i < n; i++) {
665 		t = btf__type_by_id(btf, i);
666 		if (!btf_is_int(t))
667 			continue;
668 
669 		if (t->size != 4 && t->size != 8)
670 			continue;
671 
672 		name = btf__name_by_offset(btf, t->name_off);
673 		if (!name)
674 			continue;
675 
676 		for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
677 			if (strcmp(name, long_aliases[j]) == 0)
678 				return t->size;
679 		}
680 	}
681 
682 	return -1;
683 }
684 
btf_ptr_sz(const struct btf * btf)685 static size_t btf_ptr_sz(const struct btf *btf)
686 {
687 	if (!btf->ptr_sz)
688 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
689 	return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
690 }
691 
692 /* Return pointer size this BTF instance assumes. The size is heuristically
693  * determined by looking for 'long' or 'unsigned long' integer type and
694  * recording its size in bytes. If BTF type information doesn't have any such
695  * type, this function returns 0. In the latter case, native architecture's
696  * pointer size is assumed, so will be either 4 or 8, depending on
697  * architecture that libbpf was compiled for. It's possible to override
698  * guessed value by using btf__set_pointer_size() API.
699  */
btf__pointer_size(const struct btf * btf)700 size_t btf__pointer_size(const struct btf *btf)
701 {
702 	if (!btf->ptr_sz)
703 		((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
704 
705 	if (btf->ptr_sz < 0)
706 		/* not enough BTF type info to guess */
707 		return 0;
708 
709 	return btf->ptr_sz;
710 }
711 
712 /* Override or set pointer size in bytes. Only values of 4 and 8 are
713  * supported.
714  */
btf__set_pointer_size(struct btf * btf,size_t ptr_sz)715 int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
716 {
717 	if (ptr_sz != 4 && ptr_sz != 8)
718 		return libbpf_err(-EINVAL);
719 	btf->ptr_sz = ptr_sz;
720 	return 0;
721 }
722 
/* Compile-time detection of the host byte order */
static bool is_host_big_endian(void)
{
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return true;
#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return false;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
}
733 
btf__endianness(const struct btf * btf)734 enum btf_endianness btf__endianness(const struct btf *btf)
735 {
736 	if (is_host_big_endian())
737 		return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
738 	else
739 		return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
740 }
741 
btf__set_endianness(struct btf * btf,enum btf_endianness endian)742 int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
743 {
744 	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
745 		return libbpf_err(-EINVAL);
746 
747 	btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
748 	if (!btf->swapped_endian) {
749 		free(btf->raw_data_swapped);
750 		btf->raw_data_swapped = NULL;
751 	}
752 	return 0;
753 }
754 
btf_type_is_void(const struct btf_type * t)755 static bool btf_type_is_void(const struct btf_type *t)
756 {
757 	return t == &btf_void || btf_is_fwd(t);
758 }
759 
/* Convenience: NULL counts the same as VOID/FWD for resolution loops */
static bool btf_type_is_void_or_null(const struct btf_type *t)
{
	return t == NULL || btf_type_is_void(t);
}
764 
765 #define MAX_RESOLVE_DEPTH 32
766 
/* Return the byte size of the type identified by *type_id*, following
 * typedefs/modifiers/vars and accumulating array element counts along
 * the way (up to MAX_RESOLVE_DEPTH indirections). Returns a negative
 * error for unsized/unknown types or on arithmetic overflow.
 */
__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
{
	const struct btf_array *array;
	const struct btf_type *t;
	__u32 nelems = 1;
	__s64 size = -1;
	int i;

	t = btf__type_by_id(btf, type_id);
	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
		switch (btf_kind(t)) {
		/* kinds with an explicit size field */
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
		case BTF_KIND_DATASEC:
		case BTF_KIND_FLOAT:
			size = t->size;
			goto done;
		case BTF_KIND_PTR:
			size = btf_ptr_sz(btf);
			goto done;
		/* transparent kinds: follow to the underlying type */
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_VAR:
		case BTF_KIND_DECL_TAG:
		case BTF_KIND_TYPE_TAG:
			type_id = t->type;
			break;
		case BTF_KIND_ARRAY:
			array = btf_array(t);
			/* guard nelems accumulation against __u32 overflow */
			if (nelems && array->nelems > UINT32_MAX / nelems)
				return libbpf_err(-E2BIG);
			nelems *= array->nelems;
			type_id = array->type;
			break;
		default:
			return libbpf_err(-EINVAL);
		}

		t = btf__type_by_id(btf, type_id);
	}

done:
	/* size stays -1 if the chain never reached a sized type */
	if (size < 0)
		return libbpf_err(-EINVAL);
	if (nelems && size > UINT32_MAX / nelems)
		return libbpf_err(-E2BIG);

	return nelems * size;
}
821 
btf__align_of(const struct btf * btf,__u32 id)822 int btf__align_of(const struct btf *btf, __u32 id)
823 {
824 	const struct btf_type *t = btf__type_by_id(btf, id);
825 	__u16 kind = btf_kind(t);
826 
827 	switch (kind) {
828 	case BTF_KIND_INT:
829 	case BTF_KIND_ENUM:
830 	case BTF_KIND_ENUM64:
831 	case BTF_KIND_FLOAT:
832 		return min(btf_ptr_sz(btf), (size_t)t->size);
833 	case BTF_KIND_PTR:
834 		return btf_ptr_sz(btf);
835 	case BTF_KIND_TYPEDEF:
836 	case BTF_KIND_VOLATILE:
837 	case BTF_KIND_CONST:
838 	case BTF_KIND_RESTRICT:
839 	case BTF_KIND_TYPE_TAG:
840 		return btf__align_of(btf, t->type);
841 	case BTF_KIND_ARRAY:
842 		return btf__align_of(btf, btf_array(t)->type);
843 	case BTF_KIND_STRUCT:
844 	case BTF_KIND_UNION: {
845 		const struct btf_member *m = btf_members(t);
846 		__u16 vlen = btf_vlen(t);
847 		int i, max_align = 1, align;
848 
849 		for (i = 0; i < vlen; i++, m++) {
850 			align = btf__align_of(btf, m->type);
851 			if (align <= 0)
852 				return libbpf_err(align);
853 			max_align = max(max_align, align);
854 
855 			/* if field offset isn't aligned according to field
856 			 * type's alignment, then struct must be packed
857 			 */
858 			if (btf_member_bitfield_size(t, i) == 0 &&
859 			    (m->offset % (8 * align)) != 0)
860 				return 1;
861 		}
862 
863 		/* if struct/union size isn't a multiple of its alignment,
864 		 * then struct must be packed
865 		 */
866 		if ((t->size % max_align) != 0)
867 			return 1;
868 
869 		return max_align;
870 	}
871 	default:
872 		pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
873 		return errno = EINVAL, 0;
874 	}
875 }
876 
btf__resolve_type(const struct btf * btf,__u32 type_id)877 int btf__resolve_type(const struct btf *btf, __u32 type_id)
878 {
879 	const struct btf_type *t;
880 	int depth = 0;
881 
882 	t = btf__type_by_id(btf, type_id);
883 	while (depth < MAX_RESOLVE_DEPTH &&
884 	       !btf_type_is_void_or_null(t) &&
885 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
886 		type_id = t->type;
887 		t = btf__type_by_id(btf, type_id);
888 		depth++;
889 	}
890 
891 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
892 		return libbpf_err(-EINVAL);
893 
894 	return type_id;
895 }
896 
btf__find_by_name(const struct btf * btf,const char * type_name)897 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
898 {
899 	__u32 i, nr_types = btf__type_cnt(btf);
900 
901 	if (!strcmp(type_name, "void"))
902 		return 0;
903 
904 	for (i = 1; i < nr_types; i++) {
905 		const struct btf_type *t = btf__type_by_id(btf, i);
906 		const char *name = btf__name_by_offset(btf, t->name_off);
907 
908 		if (name && !strcmp(type_name, name))
909 			return i;
910 	}
911 
912 	return libbpf_err(-ENOENT);
913 }
914 
btf_find_by_name_kind(const struct btf * btf,int start_id,const char * type_name,__u32 kind)915 static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
916 				   const char *type_name, __u32 kind)
917 {
918 	__u32 i, nr_types = btf__type_cnt(btf);
919 
920 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
921 		return 0;
922 
923 	for (i = start_id; i < nr_types; i++) {
924 		const struct btf_type *t = btf__type_by_id(btf, i);
925 		const char *name;
926 
927 		if (btf_kind(t) != kind)
928 			continue;
929 		name = btf__name_by_offset(btf, t->name_off);
930 		if (name && !strcmp(type_name, name))
931 			return i;
932 	}
933 
934 	return libbpf_err(-ENOENT);
935 }
936 
/* Like btf__find_by_name_kind(), but search only types added by this
 * (split) BTF itself, skipping all base BTF types.
 */
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
				 __u32 kind)
{
	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}
942 
/* Find a type with given name and kind, searching all types (including
 * base BTF types for split BTF). Returns type ID or negative error.
 */
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
			     __u32 kind)
{
	return btf_find_by_name_kind(btf, 1, type_name, kind);
}
948 
btf_is_modifiable(const struct btf * btf)949 static bool btf_is_modifiable(const struct btf *btf)
950 {
951 	return (void *)btf->hdr != btf->raw_data;
952 }
953 
btf__free(struct btf * btf)954 void btf__free(struct btf *btf)
955 {
956 	if (IS_ERR_OR_NULL(btf))
957 		return;
958 
959 	if (btf->fd >= 0)
960 		close(btf->fd);
961 
962 	if (btf_is_modifiable(btf)) {
963 		/* if BTF was modified after loading, it will have a split
964 		 * in-memory representation for header, types, and strings
965 		 * sections, so we need to free all of them individually. It
966 		 * might still have a cached contiguous raw data present,
967 		 * which will be unconditionally freed below.
968 		 */
969 		free(btf->hdr);
970 		free(btf->types_data);
971 		strset__free(btf->strs_set);
972 	}
973 	free(btf->raw_data);
974 	free(btf->raw_data_swapped);
975 	free(btf->type_offs);
976 	free(btf);
977 }
978 
btf_new_empty(struct btf * base_btf)979 static struct btf *btf_new_empty(struct btf *base_btf)
980 {
981 	struct btf *btf;
982 
983 	btf = calloc(1, sizeof(*btf));
984 	if (!btf)
985 		return ERR_PTR(-ENOMEM);
986 
987 	btf->nr_types = 0;
988 	btf->start_id = 1;
989 	btf->start_str_off = 0;
990 	btf->fd = -1;
991 	btf->ptr_sz = sizeof(void *);
992 	btf->swapped_endian = false;
993 
994 	if (base_btf) {
995 		btf->base_btf = base_btf;
996 		btf->start_id = btf__type_cnt(base_btf);
997 		btf->start_str_off = base_btf->hdr->str_len;
998 	}
999 
1000 	/* +1 for empty string at offset 0 */
1001 	btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
1002 	btf->raw_data = calloc(1, btf->raw_size);
1003 	if (!btf->raw_data) {
1004 		free(btf);
1005 		return ERR_PTR(-ENOMEM);
1006 	}
1007 
1008 	btf->hdr = btf->raw_data;
1009 	btf->hdr->hdr_len = sizeof(struct btf_header);
1010 	btf->hdr->magic = BTF_MAGIC;
1011 	btf->hdr->version = BTF_VERSION;
1012 
1013 	btf->types_data = btf->raw_data + btf->hdr->hdr_len;
1014 	btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
1015 	btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
1016 
1017 	return btf;
1018 }
1019 
/* Create a new empty base BTF object */
struct btf *btf__new_empty(void)
{
	return libbpf_ptr(btf_new_empty(NULL));
}
1024 
/* Create a new empty split BTF on top of the given base BTF */
struct btf *btf__new_empty_split(struct btf *base_btf)
{
	return libbpf_ptr(btf_new_empty(base_btf));
}
1029 
/* Parse and validate a raw BTF blob *data* of *size* bytes, optionally as
 * split BTF on top of *base_btf*. The data is copied; the caller keeps
 * ownership of the input buffer. Returns new btf object or ERR_PTR.
 */
static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
	struct btf *res;
	int err = 0;

	res = calloc(1, sizeof(*res));
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->nr_types = 0;
	res->start_id = 1;
	res->start_str_off = 0;
	res->fd = -1;

	if (base_btf) {
		/* split BTF continues base's type ID and string offset spaces */
		res->base_btf = base_btf;
		res->start_id = btf__type_cnt(base_btf);
		res->start_str_off = base_btf->hdr->str_len;
	}

	/* take a private copy of the raw data */
	res->raw_data = malloc(size);
	if (!res->raw_data) {
		err = -ENOMEM;
		goto out;
	}
	memcpy(res->raw_data, data, size);
	res->raw_size = size;

	res->hdr = res->raw_data;
	err = btf_parse_hdr(res);
	if (err)
		goto out;

	res->strs_data = res->raw_data + res->hdr->hdr_len + res->hdr->str_off;
	res->types_data = res->raw_data + res->hdr->hdr_len + res->hdr->type_off;

	/* validate strings, then types, then overall sanity */
	err = btf_parse_str_sec(res);
	err = err ?: btf_parse_type_sec(res);
	err = err ?: btf_sanity_check(res);

out:
	if (err) {
		btf__free(res);
		return ERR_PTR(err);
	}
	return res;
}
1080 
/* Public API: parse a raw BTF data blob. Data is copied; the caller retains
 * ownership of the original buffer.
 */
struct btf *btf__new(const void *data, __u32 size)
{
	return libbpf_ptr(btf_new(data, size, NULL));
}
1085 
#ifdef HAVE_ELFIO
/* Fill *sheader* with an Elf64_Shdr-style view of section *idx*.
 * Returns *sheader* on success, NULL if the section doesn't exist.
 */
static Elf64_Shdr *elf_sec_hdr_by_idx(pelfio_t elf, size_t idx, Elf64_Shdr *sheader)
{
	psection_t psection = elfio_get_section_by_index(elf, idx);

	/* invalid/out-of-range index: previously this would dereference
	 * a NULL section handle; now report failure to the caller, which
	 * already checks for a NULL return
	 */
	if (!psection)
		return NULL;

	sheader->sh_name = elfio_section_get_name_string_offset(psection);
	sheader->sh_type = elfio_section_get_type(psection);
	sheader->sh_flags = elfio_section_get_flags(psection);
	sheader->sh_addr = elfio_section_get_address(psection);
	sheader->sh_offset = elfio_section_get_offset(psection);
	sheader->sh_size = elfio_section_get_size(psection);
	sheader->sh_link = elfio_section_get_link(psection);
	sheader->sh_info = elfio_section_get_info(psection);
	sheader->sh_addralign = elfio_section_get_addr_align(psection);
	sheader->sh_entsize = elfio_section_get_entry_size(psection);

	return sheader;
}
#endif
1105 
btf_parse_elf(const char * path,struct btf * base_btf,struct btf_ext ** btf_ext)1106 static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
1107 				 struct btf_ext **btf_ext)
1108 {
1109 	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
1110 	int err = 0, fd = -1, idx = 0;
1111 	struct btf *btf = NULL;
1112 #ifdef HAVE_LIBELF
1113 	Elf_Scn *scn = NULL;
1114 	Elf *elf = NULL;
1115 	GElf_Ehdr ehdr;
1116 #elif defined HAVE_ELFIO
1117 	pelfio_t elf;
1118 	Elf64_Ehdr ehdr;
1119 	Elf_Data btf_data_temp, btf_ext_data_temp;
1120 #endif
1121 	size_t shstrndx;
1122 
1123 #ifdef HAVE_LIBELF
1124 	if (elf_version(EV_CURRENT) == EV_NONE) {
1125 		pr_warn("failed to init libelf for %s\n", path);
1126 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1127 	}
1128 #endif
1129 
1130 	fd = open(path, O_RDONLY | O_CLOEXEC);
1131 	if (fd < 0) {
1132 		err = -errno;
1133 		pr_warn("failed to open %s: %s\n", path, strerror(errno));
1134 		return ERR_PTR(err);
1135 	}
1136 
1137 	err = -LIBBPF_ERRNO__FORMAT;
1138 
1139 #ifdef HAVE_LIBELF
1140 	elf = elf_begin(fd, ELF_C_READ, NULL);
1141 #elif defined HAVE_ELFIO
1142 	elf = elfio_new();
1143 	if (!elfio_load(elf, path)) {
1144 		printf( "Can't load ELF file\n" );
1145 		goto done;
1146     }
1147 #endif
1148 	if (!elf) {
1149 		pr_warn("failed to open %s as ELF file\n", path);
1150 		goto done;
1151 	}
1152 
1153 #ifdef HAVE_LIBELF
1154 	if (!gelf_getehdr(elf, &ehdr)) {
1155 #elif defined HAVE_ELFIO
1156 	ssize_t nread = read(fd, &ehdr, sizeof(Elf64_Ehdr));
1157 	if(nread != sizeof(Elf64_Ehdr)) {
1158 #endif
1159 		pr_warn("failed to get EHDR from %s\n", path);
1160 		goto done;
1161 	}
1162 
1163 #ifdef HAVE_LIBELF
1164 	if (elf_getshdrstrndx(elf, &shstrndx)) {
1165 #elif defined HAVE_ELFIO
1166 	shstrndx = elfio_get_section_name_str_index(elf);
1167 	if(shstrndx == 0) {
1168 #endif
1169 		pr_warn("failed to get section names section index for %s\n",
1170 			path);
1171 		goto done;
1172 	}
1173 
1174 #ifdef HAVE_LIBELF
1175 	if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
1176 		pr_warn("failed to get e_shstrndx from %s\n", path);
1177 		goto done;
1178 	}
1179 #endif
1180 
1181 #if defined HAVE_LIBELF
1182 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
1183 		GElf_Shdr sh;
1184 #elif defined HAVE_ELFIO
1185 	psection_t psection = elfio_get_section_by_name(elf, ".shstrtab");
1186 	if (!psection )
1187 		goto done;
1188 
1189 	pstring_t pstring = elfio_string_section_accessor_new(psection);
1190 	if (!pstring )
1191 		goto done;
1192 
1193 	int secno = elfio_get_sections_num(elf);
1194 
1195 	for ( int i = 0; i < secno; i++ ) {
1196 		Elf64_Shdr sh;
1197 #endif
1198 		char *name;
1199 
1200 		idx++;
1201 #if defined HAVE_LIBELF
1202 		if (gelf_getshdr(scn, &sh) != &sh) {
1203 #elif defined HAVE_ELFIO
1204 		if (!elf_sec_hdr_by_idx(elf, i, &sh)) {
1205 #endif
1206 			pr_warn("failed to get section(%d) header from %s\n",
1207 				idx, path);
1208 			goto done;
1209 		}
1210 #if defined HAVE_LIBELF
1211 		name = elf_strptr(elf, shstrndx, sh.sh_name);
1212 #elif defined HAVE_ELFIO
1213 		name = elfio_string_get_string(pstring, sh.sh_name);
1214 #endif
1215 		if (!name) {
1216 			pr_warn("failed to get section(%d) name from %s\n",
1217 				idx, path);
1218 			goto done;
1219 		}
1220 		if (strcmp(name, BTF_ELF_SEC) == 0) {
1221 #if defined HAVE_LIBELF
1222 			btf_data = elf_getdata(scn, 0);
1223 #elif defined HAVE_ELFIO
1224 			psection_t psection_index = elfio_get_section_by_index(elf, i);
1225 			btf_data_temp.d_buf = (void*)elfio_section_get_data(psection_index);
1226 			btf_data_temp.d_size = elfio_section_get_size(psection_index);
1227 			btf_data = &btf_data_temp;
1228 #endif
1229 			if (!btf_data) {
1230 				pr_warn("failed to get section(%d, %s) data from %s\n",
1231 					idx, name, path);
1232 				goto done;
1233 			}
1234 			continue;
1235 		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
1236 #if defined HAVE_LIBELF
1237 			btf_ext_data = elf_getdata(scn, 0);
1238 #elif defined HAVE_ELFIO
1239 			psection_t psection_index = elfio_get_section_by_index(elf, i);
1240 			btf_ext_data_temp.d_buf = (void*)elfio_section_get_data(psection_index);
1241 			btf_ext_data_temp.d_size = elfio_section_get_size(psection_index);
1242 			btf_ext_data = &btf_ext_data_temp;
1243 #endif
1244 			if (!btf_ext_data) {
1245 				pr_warn("failed to get section(%d, %s) data from %s\n",
1246 					idx, name, path);
1247 				goto done;
1248 			}
1249 			continue;
1250 		}
1251 	}
1252 
1253 	err = 0;
1254 
1255 	if (!btf_data) {
1256 		pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
1257 		err = -ENODATA;
1258 		goto done;
1259 	}
1260 	btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
1261 	err = libbpf_get_error(btf);
1262 	if (err)
1263 		goto done;
1264 #ifdef HAVE_LIBELF
1265 	switch (gelf_getclass(elf)) {
1266 #elif defined HAVE_ELFIO
1267 	switch (elfio_get_class(elf)) {
1268 #endif
1269 
1270 	case ELFCLASS32:
1271 		btf__set_pointer_size(btf, 4);
1272 		break;
1273 	case ELFCLASS64:
1274 		btf__set_pointer_size(btf, 8);
1275 		break;
1276 	default:
1277 		pr_warn("failed to get ELF class (bitness) for %s\n", path);
1278 		break;
1279 	}
1280 
1281 	if (btf_ext && btf_ext_data) {
1282 		*btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
1283 		err = libbpf_get_error(*btf_ext);
1284 		if (err)
1285 			goto done;
1286 	} else if (btf_ext) {
1287 		*btf_ext = NULL;
1288 	}
1289 done:
1290 	if (elf)
1291 #ifdef HAVE_LIBELF
1292 		elf_end(elf);
1293 #elif defined HAVE_ELFIO
1294 		elfio_delete(elf);
1295 	if(pstring)
1296 		elfio_string_section_accessor_delete(pstring);
1297 #endif
1298 	close(fd);
1299 
1300 	if (!err)
1301 		return btf;
1302 
1303 	if (btf_ext)
1304 		btf_ext__free(*btf_ext);
1305 	btf__free(btf);
1306 
1307 	return ERR_PTR(err);
1308 }
1309 
/* Public API: parse BTF (and optionally .BTF.ext) from an ELF file. */
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
}
1314 
/* Public API: parse split BTF from an ELF file, on top of *base_btf*. */
struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
}
1319 
1320 static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
1321 {
1322 	struct btf *btf = NULL;
1323 	void *data = NULL;
1324 	FILE *f = NULL;
1325 	__u16 magic;
1326 	int err = 0;
1327 	long sz;
1328 
1329 	f = fopen(path, "rbe");
1330 	if (!f) {
1331 		err = -errno;
1332 		goto err_out;
1333 	}
1334 
1335 	/* check BTF magic */
1336 	if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
1337 		err = -EIO;
1338 		goto err_out;
1339 	}
1340 	if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
1341 		/* definitely not a raw BTF */
1342 		err = -EPROTO;
1343 		goto err_out;
1344 	}
1345 
1346 	/* get file size */
1347 	if (fseek(f, 0, SEEK_END)) {
1348 		err = -errno;
1349 		goto err_out;
1350 	}
1351 	sz = ftell(f);
1352 	if (sz < 0) {
1353 		err = -errno;
1354 		goto err_out;
1355 	}
1356 	/* rewind to the start */
1357 	if (fseek(f, 0, SEEK_SET)) {
1358 		err = -errno;
1359 		goto err_out;
1360 	}
1361 
1362 	/* pre-alloc memory and read all of BTF data */
1363 	data = malloc(sz);
1364 	if (!data) {
1365 		err = -ENOMEM;
1366 		goto err_out;
1367 	}
1368 	if (fread(data, 1, sz, f) < sz) {
1369 		err = -EIO;
1370 		goto err_out;
1371 	}
1372 
1373 	/* finally parse BTF data */
1374 	btf = btf_new(data, sz, base_btf);
1375 
1376 err_out:
1377 	free(data);
1378 	if (f)
1379 		fclose(f);
1380 	return err ? ERR_PTR(err) : btf;
1381 }
1382 
/* Public API: parse raw BTF from a file. */
struct btf *btf__parse_raw(const char *path)
{
	return libbpf_ptr(btf_parse_raw(path, NULL));
}
1387 
/* Public API: parse raw split BTF from a file, on top of *base_btf*. */
struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse_raw(path, base_btf));
}
1392 
1393 static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
1394 {
1395 	struct btf *btf;
1396 	int err;
1397 
1398 	if (btf_ext)
1399 		*btf_ext = NULL;
1400 
1401 	btf = btf_parse_raw(path, base_btf);
1402 	err = libbpf_get_error(btf);
1403 	if (!err)
1404 		return btf;
1405 	if (err != -EPROTO)
1406 		return ERR_PTR(err);
1407 	return btf_parse_elf(path, base_btf, btf_ext);
1408 }
1409 
/* Public API: parse BTF from a file (raw or ELF, auto-detected). */
struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
{
	return libbpf_ptr(btf_parse(path, NULL, btf_ext));
}
1414 
/* Public API: parse split BTF from a file, on top of *base_btf*. */
struct btf *btf__parse_split(const char *path, struct btf *base_btf)
{
	return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
1419 
1420 static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
1421 
/* Load BTF into the kernel and, on success, keep its fd in btf->fd. The
 * native raw data representation is built and cached as a side effect.
 * On verifier rejection, retries with progressively larger auto-allocated
 * log buffers (or the caller-provided one) so a useful log can be emitted.
 * Returns 0 on success, negative error code otherwise.
 */
int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	__u32 buf_sz = 0, raw_size;
	char *buf = NULL, *tmp;
	void *raw_data;
	int err = 0;

	/* already loaded once; refuse double-load */
	if (btf->fd >= 0)
		return libbpf_err(-EEXIST);
	/* a log size without a buffer makes no sense */
	if (log_sz && !log_buf)
		return libbpf_err(-EINVAL);

	/* cache native raw data representation */
	raw_data = btf_get_raw_data(btf, &raw_size, false);
	if (!raw_data) {
		err = -ENOMEM;
		goto done;
	}
	btf->raw_size = raw_size;
	btf->raw_data = raw_data;

retry_load:
	/* if log_level is 0, we won't provide log_buf/log_size to the kernel,
	 * initially. Only if BTF loading fails, we bump log_level to 1 and
	 * retry, using either auto-allocated or custom log_buf. This way
	 * non-NULL custom log_buf provides a buffer just in case, but hopes
	 * for successful load and no need for log_buf.
	 */
	if (log_level) {
		/* if caller didn't provide custom log_buf, we'll keep
		 * allocating our own progressively bigger buffers for BTF
		 * verification log
		 */
		if (!log_buf) {
			buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
			tmp = realloc(buf, buf_sz);
			if (!tmp) {
				err = -ENOMEM;
				goto done;
			}
			buf = tmp;
			buf[0] = '\0';
		}

		opts.log_buf = log_buf ? log_buf : buf;
		opts.log_size = log_buf ? log_sz : buf_sz;
		opts.log_level = log_level;
	}

	btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
	if (btf->fd < 0) {
		/* time to turn on verbose mode and try again */
		if (log_level == 0) {
			log_level = 1;
			goto retry_load;
		}
		/* only retry if caller didn't provide custom log_buf, but
		 * make sure we can never overflow buf_sz
		 */
		if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
			goto retry_load;

		/* capture errno before any further calls can clobber it */
		err = -errno;
		pr_warn("BTF loading error: %d\n", err);
		/* don't print out contents of custom log_buf */
		if (!log_buf && buf[0])
			pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
	}

done:
	free(buf);
	return libbpf_err(err);
}
1496 
/* Public API: load BTF into the kernel with default (quiet) logging. */
int btf__load_into_kernel(struct btf *btf)
{
	return btf_load_into_kernel(btf, NULL, 0, 0);
}
1501 
/* Return BTF's kernel fd (-1 if not loaded into the kernel). */
int btf__fd(const struct btf *btf)
{
	return btf->fd;
}
1506 
/* Override BTF's kernel fd; caller is responsible for the old fd, if any. */
void btf__set_fd(struct btf *btf, int fd)
{
	btf->fd = fd;
}
1511 
1512 static const void *btf_strs_data(const struct btf *btf)
1513 {
1514 	return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
1515 }
1516 
/* Produce a contiguous raw BTF representation (header + types + strings)
 * in the requested endianness. If a cached copy of the right endianness
 * exists, it is returned as-is; otherwise a fresh buffer is allocated and
 * returned (callers are expected to cache or free it). Returns NULL on
 * allocation failure or corrupted type data.
 */
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
	struct btf_header *hdr = btf->hdr;
	struct btf_type *t;
	void *data, *p;
	__u32 data_sz;
	int i;

	/* fast path: matching cached representation already exists */
	data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
	if (data) {
		*size = btf->raw_size;
		return data;
	}

	data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
	data = calloc(1, data_sz);
	if (!data)
		return NULL;
	p = data;

	memcpy(p, hdr, hdr->hdr_len);
	if (swap_endian)
		btf_bswap_hdr(p);
	p += hdr->hdr_len;

	memcpy(p, btf->types_data, hdr->type_len);
	if (swap_endian) {
		for (i = 0; i < btf->nr_types; i++) {
			t = p + btf->type_offs[i];
			/* btf_bswap_type_rest() relies on native t->info, so
			 * we swap base type info after we swapped all the
			 * additional information
			 */
			if (btf_bswap_type_rest(t))
				goto err_out;
			btf_bswap_type_base(t);
		}
	}
	p += hdr->type_len;

	/* strings are byte data, no swapping needed */
	memcpy(p, btf_strs_data(btf), hdr->str_len);
	p += hdr->str_len;

	*size = data_sz;
	return data;
err_out:
	free(data);
	return NULL;
}
1566 
/* Public API: return (and cache on the btf object) the contiguous raw data
 * representation in BTF's target endianness. Returns NULL and sets errno
 * to ENOMEM on allocation failure. The returned memory is owned by btf.
 */
const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
{
	/* const cast: we only mutate internal caches, not logical state */
	struct btf *btf = (struct btf *)btf_ro;
	__u32 data_sz;
	void *data;

	data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
	if (!data)
		return errno = ENOMEM, NULL;

	/* cache under the slot matching the endianness we produced */
	btf->raw_size = data_sz;
	if (btf->swapped_endian)
		btf->raw_data_swapped = data;
	else
		btf->raw_data = data;
	*size = data_sz;
	return data;
}
1585 
1586 __attribute__((alias("btf__raw_data")))
1587 const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
1588 
1589 const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
1590 {
1591 	if (offset < btf->start_str_off)
1592 		return btf__str_by_offset(btf->base_btf, offset);
1593 	else if (offset - btf->start_str_off < btf->hdr->str_len)
1594 		return btf_strs_data(btf) + (offset - btf->start_str_off);
1595 	else
1596 		return errno = EINVAL, NULL;
1597 }
1598 
/* Alias of btf__str_by_offset(), kept for API compatibility. */
const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
{
	return btf__str_by_offset(btf, offset);
}
1603 
/* Fetch BTF data of the kernel object behind *btf_fd* and parse it
 * (optionally on top of *base_btf*). Returns btf object or ERR_PTR.
 */
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
{
	struct bpf_btf_info btf_info;
	__u32 len = sizeof(btf_info);
	__u32 last_size;
	struct btf *btf;
	void *ptr;
	int err;

	/* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
	 * let's start with a sane default - 4KiB here - and resize it only if
	 * bpf_btf_get_info_by_fd() needs a bigger buffer.
	 */
	last_size = 4096;
	ptr = malloc(last_size);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	memset(&btf_info, 0, sizeof(btf_info));
	btf_info.btf = ptr_to_u64(ptr);
	btf_info.btf_size = last_size;
	err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);

	if (!err && btf_info.btf_size > last_size) {
		void *temp_ptr;

		/* initial buffer was too small; grow to the reported size
		 * and try exactly once more
		 */
		last_size = btf_info.btf_size;
		temp_ptr = realloc(ptr, last_size);
		if (!temp_ptr) {
			btf = ERR_PTR(-ENOMEM);
			goto exit_free;
		}
		ptr = temp_ptr;

		len = sizeof(btf_info);
		memset(&btf_info, 0, sizeof(btf_info));
		btf_info.btf = ptr_to_u64(ptr);
		btf_info.btf_size = last_size;

		err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
	}

	if (err || btf_info.btf_size > last_size) {
		/* on err, errno presumably still holds the syscall failure
		 * cause; a second size growth means something changed under us
		 */
		btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
		goto exit_free;
	}

	btf = btf_new(ptr, btf_info.btf_size, base_btf);

exit_free:
	free(ptr);
	return btf;
}
1657 
1658 struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
1659 {
1660 	struct btf *btf;
1661 	int btf_fd;
1662 
1663 	btf_fd = bpf_btf_get_fd_by_id(id);
1664 	if (btf_fd < 0)
1665 		return libbpf_err_ptr(-errno);
1666 
1667 	btf = btf_get_from_fd(btf_fd, base_btf);
1668 	close(btf_fd);
1669 
1670 	return libbpf_ptr(btf);
1671 }
1672 
/* Public API: load standalone (non-split) kernel BTF by its ID. */
struct btf *btf__load_from_kernel_by_id(__u32 id)
{
	return btf__load_from_kernel_by_id_split(id, NULL);
}
1677 
1678 static void btf_invalidate_raw_data(struct btf *btf)
1679 {
1680 	if (btf->raw_data) {
1681 		free(btf->raw_data);
1682 		btf->raw_data = NULL;
1683 	}
1684 	if (btf->raw_data_swapped) {
1685 		free(btf->raw_data_swapped);
1686 		btf->raw_data_swapped = NULL;
1687 	}
1688 }
1689 
/* Ensure BTF is ready to be modified (by splitting into a three memory
 * regions for header, types, and strings). Also invalidate cached
 * raw_data, if any. Returns 0 on success, negative error on failure;
 * on failure the original representation is left intact.
 */
static int btf_ensure_modifiable(struct btf *btf)
{
	void *hdr, *types;
	struct strset *set = NULL;
	int err = -ENOMEM;

	if (btf_is_modifiable(btf)) {
		/* any BTF modification invalidates raw_data */
		btf_invalidate_raw_data(btf);
		return 0;
	}

	/* split raw data into three memory regions */
	hdr = malloc(btf->hdr->hdr_len);
	types = malloc(btf->hdr->type_len);
	if (!hdr || !types)
		goto err_out;

	memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
	memcpy(types, btf->types_data, btf->hdr->type_len);

	/* build lookup index for all strings */
	set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
	if (IS_ERR(set)) {
		err = PTR_ERR(set);
		goto err_out;
	}

	/* only when everything was successful, update internal state */
	btf->hdr = hdr;
	btf->types_data = types;
	btf->types_data_cap = btf->hdr->type_len;
	btf->strs_data = NULL;
	btf->strs_set = set;
	/* if BTF was created from scratch, all strings are guaranteed to be
	 * unique and deduplicated
	 */
	if (btf->hdr->str_len == 0)
		btf->strs_deduped = true;
	if (!btf->base_btf && btf->hdr->str_len == 1)
		btf->strs_deduped = true;

	/* invalidate raw_data representation */
	btf_invalidate_raw_data(btf);

	return 0;

err_out:
	/* roll back partial allocations; strset__free handles NULL */
	strset__free(set);
	free(hdr);
	free(types);
	return err;
}
1747 
/* Find an offset in BTF string section that corresponds to a given string *s*.
 * Returns:
 *   - >0 offset into string section, if string is found;
 *   - -ENOENT, if string is not in the string section;
 *   - <0, on any other error.
 */
int btf__find_str(struct btf *btf, const char *s)
{
	int off;

	/* for split BTF, search the base first; its offsets take precedence */
	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	/* BTF needs to be in a modifiable state to build string lookup index */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	off = strset__find_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	/* translate set-local offset into global string offset */
	return btf->start_str_off + off;
}
1774 
/* Add a string s to the BTF string section.
 * Returns:
 *   - > 0 offset into string section, on success;
 *   - < 0, on error.
 */
int btf__add_str(struct btf *btf, const char *s)
{
	int off;

	/* if the string already exists in the base BTF, reuse its offset
	 * instead of duplicating it locally
	 */
	if (btf->base_btf) {
		off = btf__find_str(btf->base_btf, s);
		if (off != -ENOENT)
			return off;
	}

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* strset deduplicates: existing strings return their old offset */
	off = strset__add_str(btf->strs_set, s);
	if (off < 0)
		return libbpf_err(off);

	btf->hdr->str_len = strset__data_size(btf->strs_set);

	return btf->start_str_off + off;
}
1801 
/* Reserve add_sz bytes at the end of the types data section, growing the
 * backing buffer if needed. Returns pointer to the reserved region or NULL.
 */
static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
{
	return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
			      btf->hdr->type_len, UINT_MAX, add_sz);
}
1807 
/* Increment type's vlen (number of members/params/etc) by one, preserving
 * its kind and kflag bits.
 */
static void btf_type_inc_vlen(struct btf_type *t)
{
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
}
1812 
/* Finalize a type just written into types_data: record its offset in the
 * type index, bump section sizes and the type count. Returns the new
 * type's ID, or negative error.
 */
static int btf_commit_type(struct btf *btf, int data_sz)
{
	int err;

	err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
	if (err)
		return libbpf_err(err);

	btf->hdr->type_len += data_sz;
	/* strings section follows types, so its offset shifts accordingly */
	btf->hdr->str_off += data_sz;
	btf->nr_types++;
	return btf->start_id + btf->nr_types - 1;
}
1826 
/* Context for copying BTF types from one object (src) into another (dst). */
struct btf_pipe {
	const struct btf *src;
	struct btf *dst;
	struct hashmap *str_off_map; /* map string offsets from src to dst */
};
1832 
/* Visitor callback: rewrite a single string offset from src BTF's string
 * space into dst BTF's, adding the string to dst if needed. Uses the
 * optional str_off_map cache to avoid repeated lookups.
 */
static int btf_rewrite_str(__u32 *str_off, void *ctx)
{
	struct btf_pipe *p = ctx;
	long mapped_off;
	int off, err;

	if (!*str_off) /* nothing to do for empty strings */
		return 0;

	/* cache hit: this src offset was already translated */
	if (p->str_off_map &&
	    hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
		*str_off = mapped_off;
		return 0;
	}

	off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
	if (off < 0)
		return off;

	/* Remember string mapping from src to dst.  It avoids
	 * performing expensive string comparisons.
	 */
	if (p->str_off_map) {
		err = hashmap__append(p->str_off_map, *str_off, off);
		if (err)
			return err;
	}

	*str_off = off;
	return 0;
}
1864 
/* Copy a single type *src_type* from *src_btf* into *btf*, remapping its
 * string offsets. Referenced type IDs are copied as-is (not remapped).
 * Returns the new type's ID in *btf*, or negative error.
 */
int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	struct btf_type *t;
	int sz, err;

	sz = btf_type_size(src_type);
	if (sz < 0)
		return libbpf_err(sz);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	memcpy(t, src_type, sz);

	/* translate all string references into dst's string section */
	err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
	if (err)
		return libbpf_err(err);

	return btf_commit_type(btf, sz);
}
1891 
/* Visitor callback: shift a referenced type ID by the ID offset at which
 * the types being appended will land in the destination BTF.
 */
static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
{
	struct btf *btf = ctx;

	if (!*type_id) /* nothing to do for VOID references */
		return 0;

	/* we haven't updated btf's type count yet, so
	 * btf->start_id + btf->nr_types - 1 is the type ID offset we should
	 * add to all newly added BTF types
	 */
	*type_id += btf->start_id + btf->nr_types - 1;
	return 0;
}
1906 
1907 static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
1908 static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
1909 
/* Append all types from *src_btf* to *btf*, deduplicating/remapping strings
 * and shifting all referenced type IDs. On any failure the partial changes
 * are rolled back, leaving *btf* logically unmodified. Returns the type ID
 * of the first appended type, or negative error.
 */
int btf__add_btf(struct btf *btf, const struct btf *src_btf)
{
	struct btf_pipe p = { .src = src_btf, .dst = btf };
	int data_sz, sz, cnt, i, err, old_strs_len;
	__u32 *off;
	void *t;

	/* appending split BTF isn't supported yet */
	if (src_btf->base_btf)
		return libbpf_err(-ENOTSUP);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* remember original strings section size if we have to roll back
	 * partial strings section changes
	 */
	old_strs_len = btf->hdr->str_len;

	data_sz = src_btf->hdr->type_len;
	cnt = btf__type_cnt(src_btf) - 1;

	/* pre-allocate enough memory for new types */
	t = btf_add_type_mem(btf, data_sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* pre-allocate enough memory for type offset index for new types */
	off = btf_add_type_offs_mem(btf, cnt);
	if (!off)
		return libbpf_err(-ENOMEM);

	/* Map the string offsets from src_btf to the offsets from btf to improve performance */
	p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
	if (IS_ERR(p.str_off_map))
		return libbpf_err(-ENOMEM);

	/* bulk copy types data for all types from src_btf */
	memcpy(t, src_btf->types_data, data_sz);

	for (i = 0; i < cnt; i++) {
		sz = btf_type_size(t);
		if (sz < 0) {
			/* unlikely, has to be corrupted src_btf */
			err = sz;
			goto err_out;
		}

		/* fill out type ID to type offset mapping for lookups by type ID */
		*off = t - btf->types_data;

		/* add, dedup, and remap strings referenced by this BTF type */
		err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
		if (err)
			goto err_out;

		/* remap all type IDs referenced from this BTF type */
		err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
		if (err)
			goto err_out;

		/* go to next type data and type offset index entry */
		t += sz;
		off++;
	}

	/* Up until now any of the copied type data was effectively invisible,
	 * so if we exited early before this point due to error, BTF would be
	 * effectively unmodified. There would be extra internal memory
	 * pre-allocated, but it would not be available for querying.  But now
	 * that we've copied and rewritten all the data successfully, we can
	 * update type count and various internal offsets and sizes to
	 * "commit" the changes and made them visible to the outside world.
	 */
	btf->hdr->type_len += data_sz;
	btf->hdr->str_off += data_sz;
	btf->nr_types += cnt;

	hashmap__free(p.str_off_map);

	/* return type ID of the first added BTF type */
	return btf->start_id + btf->nr_types - cnt;
err_out:
	/* zero out preallocated memory as if it was just allocated with
	 * libbpf_add_mem()
	 */
	memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
	memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);

	/* and now restore original strings section size; types data size
	 * wasn't modified, so doesn't need restoring, see big comment above
	 */
	btf->hdr->str_len = old_strs_len;

	hashmap__free(p.str_off_map);

	return libbpf_err(err);
}
2009 
/*
 * Append new BTF_KIND_INT type with:
 *   - *name* - non-empty, non-NULL type name;
 *   - *sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
 *   - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
 * Returns:
 *   - >0, type ID of newly added BTF type;
 *   - <0, on error.
 */
int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* byte_sz must be power of 2 */
	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
		return libbpf_err(-EINVAL);
	if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
		return libbpf_err(-EINVAL);

	/* deconstruct BTF, if necessary, and invalidate raw_data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	/* INT carries one extra __u32 of encoding info after btf_type */
	sz = sizeof(struct btf_type) + sizeof(int);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* if something goes wrong later, we might end up with an extra string,
	 * but that shouldn't be a problem, because BTF can't be constructed
	 * completely anyway and will most probably be just discarded
	 */
	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_INT, 0, 0);
	t->size = byte_sz;
	/* set INT info, we don't allow setting legacy bit offset/size */
	*(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);

	return btf_commit_type(btf, sz);
}
2058 
2059 /*
2060  * Append new BTF_KIND_FLOAT type with:
2061  *   - *name* - non-empty, non-NULL type name;
2062  *   - *sz* - size of the type, in bytes;
2063  * Returns:
2064  *   - >0, type ID of newly added BTF type;
2065  *   - <0, on error.
2066  */
2067 int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
2068 {
2069 	struct btf_type *t;
2070 	int sz, name_off;
2071 
2072 	/* non-empty name */
2073 	if (!name || !name[0])
2074 		return libbpf_err(-EINVAL);
2075 
2076 	/* byte_sz must be one of the explicitly allowed values */
2077 	if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
2078 	    byte_sz != 16)
2079 		return libbpf_err(-EINVAL);
2080 
2081 	if (btf_ensure_modifiable(btf))
2082 		return libbpf_err(-ENOMEM);
2083 
2084 	sz = sizeof(struct btf_type);
2085 	t = btf_add_type_mem(btf, sz);
2086 	if (!t)
2087 		return libbpf_err(-ENOMEM);
2088 
2089 	name_off = btf__add_str(btf, name);
2090 	if (name_off < 0)
2091 		return name_off;
2092 
2093 	t->name_off = name_off;
2094 	t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
2095 	t->size = byte_sz;
2096 
2097 	return btf_commit_type(btf, sz);
2098 }
2099 
2100 /* it's completely legal to append BTF types with type IDs pointing forward to
2101  * types that haven't been appended yet, so we only make sure that id looks
2102  * sane, we can't guarantee that ID will always be valid
2103  */
2104 static int validate_type_id(int id)
2105 {
2106 	if (id < 0 || id > BTF_MAX_NR_TYPES)
2107 		return -EINVAL;
2108 	return 0;
2109 }
2110 
/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT:
 * all of them are a (possibly named) reference to *ref_type_id*.
 * Returns new type ID or negative error.
 */
static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
{
	struct btf_type *t;
	int sz, name_off = 0;

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* name is optional for reference kinds (e.g., PTR has none) */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	t->name_off = name_off;
	t->info = btf_type_info(kind, 0, 0);
	t->type = ref_type_id;

	return btf_commit_type(btf, sz);
}
2140 
2141 /*
2142  * Append new BTF_KIND_PTR type with:
2143  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2144  * Returns:
2145  *   - >0, type ID of newly added BTF type;
2146  *   - <0, on error.
2147  */
2148 int btf__add_ptr(struct btf *btf, int ref_type_id)
2149 {
2150 	return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
2151 }
2152 
2153 /*
2154  * Append new BTF_KIND_ARRAY type with:
2155  *   - *index_type_id* - type ID of the type describing array index;
2156  *   - *elem_type_id* - type ID of the type describing array element;
2157  *   - *nr_elems* - the size of the array;
2158  * Returns:
2159  *   - >0, type ID of newly added BTF type;
2160  *   - <0, on error.
2161  */
2162 int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
2163 {
2164 	struct btf_type *t;
2165 	struct btf_array *a;
2166 	int sz;
2167 
2168 	if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
2169 		return libbpf_err(-EINVAL);
2170 
2171 	if (btf_ensure_modifiable(btf))
2172 		return libbpf_err(-ENOMEM);
2173 
2174 	sz = sizeof(struct btf_type) + sizeof(struct btf_array);
2175 	t = btf_add_type_mem(btf, sz);
2176 	if (!t)
2177 		return libbpf_err(-ENOMEM);
2178 
2179 	t->name_off = 0;
2180 	t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
2181 	t->size = 0;
2182 
2183 	a = btf_array(t);
2184 	a->type = elem_type_id;
2185 	a->index_type = index_type_id;
2186 	a->nelems = nr_elems;
2187 
2188 	return btf_commit_type(btf, sz);
2189 }
2190 
2191 /* generic STRUCT/UNION append function */
2192 static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
2193 {
2194 	struct btf_type *t;
2195 	int sz, name_off = 0;
2196 
2197 	if (btf_ensure_modifiable(btf))
2198 		return libbpf_err(-ENOMEM);
2199 
2200 	sz = sizeof(struct btf_type);
2201 	t = btf_add_type_mem(btf, sz);
2202 	if (!t)
2203 		return libbpf_err(-ENOMEM);
2204 
2205 	if (name && name[0]) {
2206 		name_off = btf__add_str(btf, name);
2207 		if (name_off < 0)
2208 			return name_off;
2209 	}
2210 
2211 	/* start out with vlen=0 and no kflag; this will be adjusted when
2212 	 * adding each member
2213 	 */
2214 	t->name_off = name_off;
2215 	t->info = btf_type_info(kind, 0, 0);
2216 	t->size = bytes_sz;
2217 
2218 	return btf_commit_type(btf, sz);
2219 }
2220 
2221 /*
2222  * Append new BTF_KIND_STRUCT type with:
2223  *   - *name* - name of the struct, can be NULL or empty for anonymous structs;
2224  *   - *byte_sz* - size of the struct, in bytes;
2225  *
2226  * Struct initially has no fields in it. Fields can be added by
2227  * btf__add_field() right after btf__add_struct() succeeds.
2228  *
2229  * Returns:
2230  *   - >0, type ID of newly added BTF type;
2231  *   - <0, on error.
2232  */
2233 int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
2234 {
2235 	return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
2236 }
2237 
2238 /*
2239  * Append new BTF_KIND_UNION type with:
2240  *   - *name* - name of the union, can be NULL or empty for anonymous union;
2241  *   - *byte_sz* - size of the union, in bytes;
2242  *
2243  * Union initially has no fields in it. Fields can be added by
2244  * btf__add_field() right after btf__add_union() succeeds. All fields
2245  * should have *bit_offset* of 0.
2246  *
2247  * Returns:
2248  *   - >0, type ID of newly added BTF type;
2249  *   - <0, on error.
2250  */
2251 int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
2252 {
2253 	return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
2254 }
2255 
static struct btf_type *btf_last_type(struct btf *btf)
{
	/* the most recently appended type has ID type_cnt - 1 */
	int last_id = btf__type_cnt(btf) - 1;

	return btf_type_by_id(btf, last_id);
}
2260 
2261 /*
2262  * Append new field for the current STRUCT/UNION type with:
2263  *   - *name* - name of the field, can be NULL or empty for anonymous field;
2264  *   - *type_id* - type ID for the type describing field type;
2265  *   - *bit_offset* - bit offset of the start of the field within struct/union;
2266  *   - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
2267  * Returns:
2268  *   -  0, on success;
2269  *   - <0, on error.
2270  */
int btf__add_field(struct btf *btf, const char *name, int type_id,
		   __u32 bit_offset, __u32 bit_size)
{
	struct btf_type *t;
	struct btf_member *m;
	bool is_bitfield;
	int sz, name_off = 0;

	/* last type should be union/struct */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_composite(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);
	/* best-effort bit field offset/size enforcement */
	is_bitfield = bit_size || (bit_offset % 8 != 0);
	/* bit_size must fit in 8 bits and bit_offset in 24 bits, matching
	 * the packing into m->offset below
	 */
	if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
		return libbpf_err(-EINVAL);

	/* only offset 0 is allowed for unions */
	if (btf_is_union(t) && bit_offset)
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_member);
	m = btf_add_type_mem(btf, sz);
	if (!m)
		return libbpf_err(-ENOMEM);

	/* anonymous members are allowed */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	m->name_off = name_off;
	m->type = type_id;
	/* bitfield size occupies the top byte of the member offset */
	m->offset = bit_offset | (bit_size << 24);

	/* btf_add_type_mem can invalidate t pointer */
	t = btf_last_type(btf);
	/* update parent type's vlen and kflag */
	t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;	/* strings follow types, so they shift by sz */
	return 0;
}
2325 
2326 static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
2327 			       bool is_signed, __u8 kind)
2328 {
2329 	struct btf_type *t;
2330 	int sz, name_off = 0;
2331 
2332 	/* byte_sz must be power of 2 */
2333 	if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
2334 		return libbpf_err(-EINVAL);
2335 
2336 	if (btf_ensure_modifiable(btf))
2337 		return libbpf_err(-ENOMEM);
2338 
2339 	sz = sizeof(struct btf_type);
2340 	t = btf_add_type_mem(btf, sz);
2341 	if (!t)
2342 		return libbpf_err(-ENOMEM);
2343 
2344 	if (name && name[0]) {
2345 		name_off = btf__add_str(btf, name);
2346 		if (name_off < 0)
2347 			return name_off;
2348 	}
2349 
2350 	/* start out with vlen=0; it will be adjusted when adding enum values */
2351 	t->name_off = name_off;
2352 	t->info = btf_type_info(kind, 0, is_signed);
2353 	t->size = byte_sz;
2354 
2355 	return btf_commit_type(btf, sz);
2356 }
2357 
2358 /*
2359  * Append new BTF_KIND_ENUM type with:
2360  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2361  *   - *byte_sz* - size of the enum, in bytes.
2362  *
2363  * Enum initially has no enum values in it (and corresponds to enum forward
2364  * declaration). Enumerator values can be added by btf__add_enum_value()
2365  * immediately after btf__add_enum() succeeds.
2366  *
2367  * Returns:
2368  *   - >0, type ID of newly added BTF type;
2369  *   - <0, on error.
2370  */
2371 int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
2372 {
2373 	/*
2374 	 * set the signedness to be unsigned, it will change to signed
2375 	 * if any later enumerator is negative.
2376 	 */
2377 	return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
2378 }
2379 
2380 /*
2381  * Append new enum value for the current ENUM type with:
2382  *   - *name* - name of the enumerator value, can't be NULL or empty;
2383  *   - *value* - integer value corresponding to enum value *name*;
2384  * Returns:
2385  *   -  0, on success;
2386  *   - <0, on error.
2387  */
int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
{
	struct btf_type *t;
	struct btf_enum *v;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);
	/* 32-bit enum can only hold values representable in 32 bits,
	 * signed or unsigned
	 */
	if (value < INT_MIN || value > UINT_MAX)
		return libbpf_err(-E2BIG);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	v->name_off = name_off;
	v->val = value;

	/* update parent type's vlen */
	t = btf_last_type(btf);	/* re-fetch: btf_add_type_mem can move type data */
	btf_type_inc_vlen(t);

	/* if negative value, set signedness to signed */
	if (value < 0)
		t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;	/* strings follow types, so they shift by sz */
	return 0;
}
2435 
2436 /*
2437  * Append new BTF_KIND_ENUM64 type with:
2438  *   - *name* - name of the enum, can be NULL or empty for anonymous enums;
2439  *   - *byte_sz* - size of the enum, in bytes.
2440  *   - *is_signed* - whether the enum values are signed or not;
2441  *
2442  * Enum initially has no enum values in it (and corresponds to enum forward
2443  * declaration). Enumerator values can be added by btf__add_enum64_value()
2444  * immediately after btf__add_enum64() succeeds.
2445  *
2446  * Returns:
2447  *   - >0, type ID of newly added BTF type;
2448  *   - <0, on error.
2449  */
2450 int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
2451 		    bool is_signed)
2452 {
2453 	return btf_add_enum_common(btf, name, byte_sz, is_signed,
2454 				   BTF_KIND_ENUM64);
2455 }
2456 
2457 /*
2458  * Append new enum value for the current ENUM64 type with:
2459  *   - *name* - name of the enumerator value, can't be NULL or empty;
2460  *   - *value* - integer value corresponding to enum value *name*;
2461  * Returns:
2462  *   -  0, on success;
2463  *   - <0, on error.
2464  */
int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
{
	struct btf_enum64 *v;
	struct btf_type *t;
	int sz, name_off;

	/* last type should be BTF_KIND_ENUM64 */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_enum64(t))
		return libbpf_err(-EINVAL);

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_enum64);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* 64-bit value is stored split into two 32-bit halves */
	v->name_off = name_off;
	v->val_lo32 = (__u32)value;
	v->val_hi32 = value >> 32;

	/* update parent type's vlen */
	t = btf_last_type(btf);	/* re-fetch: btf_add_type_mem can move type data */
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;	/* strings follow types, so they shift by sz */
	return 0;
}
2507 
2508 /*
2509  * Append new BTF_KIND_FWD type with:
2510  *   - *name*, non-empty/non-NULL name;
2511  *   - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
2512  *     BTF_FWD_UNION, or BTF_FWD_ENUM;
2513  * Returns:
2514  *   - >0, type ID of newly added BTF type;
2515  *   - <0, on error.
2516  */
2517 int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
2518 {
2519 	if (!name || !name[0])
2520 		return libbpf_err(-EINVAL);
2521 
2522 	switch (fwd_kind) {
2523 	case BTF_FWD_STRUCT:
2524 	case BTF_FWD_UNION: {
2525 		struct btf_type *t;
2526 		int id;
2527 
2528 		id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
2529 		if (id <= 0)
2530 			return id;
2531 		t = btf_type_by_id(btf, id);
2532 		t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
2533 		return id;
2534 	}
2535 	case BTF_FWD_ENUM:
2536 		/* enum forward in BTF currently is just an enum with no enum
2537 		 * values; we also assume a standard 4-byte size for it
2538 		 */
2539 		return btf__add_enum(btf, name, sizeof(int));
2540 	default:
2541 		return libbpf_err(-EINVAL);
2542 	}
2543 }
2544 
2545 /*
 * Append new BTF_KIND_TYPEDEF type with:
2547  *   - *name*, non-empty/non-NULL name;
2548  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2549  * Returns:
2550  *   - >0, type ID of newly added BTF type;
2551  *   - <0, on error.
2552  */
2553 int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
2554 {
2555 	if (!name || !name[0])
2556 		return libbpf_err(-EINVAL);
2557 
2558 	return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
2559 }
2560 
2561 /*
2562  * Append new BTF_KIND_VOLATILE type with:
2563  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2564  * Returns:
2565  *   - >0, type ID of newly added BTF type;
2566  *   - <0, on error.
2567  */
2568 int btf__add_volatile(struct btf *btf, int ref_type_id)
2569 {
2570 	return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
2571 }
2572 
2573 /*
2574  * Append new BTF_KIND_CONST type with:
2575  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2576  * Returns:
2577  *   - >0, type ID of newly added BTF type;
2578  *   - <0, on error.
2579  */
2580 int btf__add_const(struct btf *btf, int ref_type_id)
2581 {
2582 	return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
2583 }
2584 
2585 /*
2586  * Append new BTF_KIND_RESTRICT type with:
2587  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2588  * Returns:
2589  *   - >0, type ID of newly added BTF type;
2590  *   - <0, on error.
2591  */
2592 int btf__add_restrict(struct btf *btf, int ref_type_id)
2593 {
2594 	return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
2595 }
2596 
2597 /*
2598  * Append new BTF_KIND_TYPE_TAG type with:
2599  *   - *value*, non-empty/non-NULL tag value;
2600  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2601  * Returns:
2602  *   - >0, type ID of newly added BTF type;
2603  *   - <0, on error.
2604  */
2605 int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
2606 {
2607 	if (!value || !value[0])
2608 		return libbpf_err(-EINVAL);
2609 
2610 	return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
2611 }
2612 
2613 /*
2614  * Append new BTF_KIND_FUNC type with:
2615  *   - *name*, non-empty/non-NULL name;
2616  *   - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
2617  * Returns:
2618  *   - >0, type ID of newly added BTF type;
2619  *   - <0, on error.
2620  */
2621 int btf__add_func(struct btf *btf, const char *name,
2622 		  enum btf_func_linkage linkage, int proto_type_id)
2623 {
2624 	int id;
2625 
2626 	if (!name || !name[0])
2627 		return libbpf_err(-EINVAL);
2628 	if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
2629 	    linkage != BTF_FUNC_EXTERN)
2630 		return libbpf_err(-EINVAL);
2631 
2632 	id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
2633 	if (id > 0) {
2634 		struct btf_type *t = btf_type_by_id(btf, id);
2635 
2636 		t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
2637 	}
2638 	return libbpf_err(id);
2639 }
2640 
2641 /*
2642  * Append new BTF_KIND_FUNC_PROTO with:
2643  *   - *ret_type_id* - type ID for return result of a function.
2644  *
2645  * Function prototype initially has no arguments, but they can be added by
2646  * btf__add_func_param() one by one, immediately after
2647  * btf__add_func_proto() succeeded.
2648  *
2649  * Returns:
2650  *   - >0, type ID of newly added BTF type;
2651  *   - <0, on error.
2652  */
int btf__add_func_proto(struct btf *btf, int ret_type_id)
{
	struct btf_type *t;
	int sz;

	if (validate_type_id(ret_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	/* start out with vlen=0; this will be adjusted when adding
	 * function parameters, if necessary
	 */
	t->name_off = 0;
	t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
	t->type = ret_type_id;

	return btf_commit_type(btf, sz);
}
2678 
2679 /*
2680  * Append new function parameter for current FUNC_PROTO type with:
2681  *   - *name* - parameter name, can be NULL or empty;
2682  *   - *type_id* - type ID describing the type of the parameter.
2683  * Returns:
2684  *   -  0, on success;
2685  *   - <0, on error.
2686  */
int btf__add_func_param(struct btf *btf, const char *name, int type_id)
{
	struct btf_type *t;
	struct btf_param *p;
	int sz, name_off = 0;

	if (validate_type_id(type_id))
		return libbpf_err(-EINVAL);

	/* last type should be BTF_KIND_FUNC_PROTO */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_func_proto(t))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_param);
	p = btf_add_type_mem(btf, sz);
	if (!p)
		return libbpf_err(-ENOMEM);

	/* anonymous parameters are allowed */
	if (name && name[0]) {
		name_off = btf__add_str(btf, name);
		if (name_off < 0)
			return name_off;
	}

	p->name_off = name_off;
	p->type = type_id;

	/* update parent type's vlen */
	t = btf_last_type(btf);	/* re-fetch: btf_add_type_mem can move type data */
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;	/* strings follow types, so they shift by sz */
	return 0;
}
2729 
2730 /*
2731  * Append new BTF_KIND_VAR type with:
2732  *   - *name* - non-empty/non-NULL name;
2733  *   - *linkage* - variable linkage, one of BTF_VAR_STATIC,
2734  *     BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
2735  *   - *type_id* - type ID of the type describing the type of the variable.
2736  * Returns:
2737  *   - >0, type ID of newly added BTF type;
2738  *   - <0, on error.
2739  */
2740 int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
2741 {
2742 	struct btf_type *t;
2743 	struct btf_var *v;
2744 	int sz, name_off;
2745 
2746 	/* non-empty name */
2747 	if (!name || !name[0])
2748 		return libbpf_err(-EINVAL);
2749 	if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2750 	    linkage != BTF_VAR_GLOBAL_EXTERN)
2751 		return libbpf_err(-EINVAL);
2752 	if (validate_type_id(type_id))
2753 		return libbpf_err(-EINVAL);
2754 
2755 	/* deconstruct BTF, if necessary, and invalidate raw_data */
2756 	if (btf_ensure_modifiable(btf))
2757 		return libbpf_err(-ENOMEM);
2758 
2759 	sz = sizeof(struct btf_type) + sizeof(struct btf_var);
2760 	t = btf_add_type_mem(btf, sz);
2761 	if (!t)
2762 		return libbpf_err(-ENOMEM);
2763 
2764 	name_off = btf__add_str(btf, name);
2765 	if (name_off < 0)
2766 		return name_off;
2767 
2768 	t->name_off = name_off;
2769 	t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
2770 	t->type = type_id;
2771 
2772 	v = btf_var(t);
2773 	v->linkage = linkage;
2774 
2775 	return btf_commit_type(btf, sz);
2776 }
2777 
2778 /*
2779  * Append new BTF_KIND_DATASEC type with:
2780  *   - *name* - non-empty/non-NULL name;
2781  *   - *byte_sz* - data section size, in bytes.
2782  *
2783  * Data section is initially empty. Variables info can be added with
2784  * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
2785  *
2786  * Returns:
2787  *   - >0, type ID of newly added BTF type;
2788  *   - <0, on error.
2789  */
int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
{
	struct btf_type *t;
	int sz, name_off;

	/* non-empty name */
	if (!name || !name[0])
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	name_off = btf__add_str(btf, name);
	if (name_off < 0)
		return name_off;

	/* start with vlen=0, which will be updated as var_secinfos are added */
	t->name_off = name_off;
	t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
	t->size = byte_sz;

	return btf_commit_type(btf, sz);
}
2818 
2819 /*
2820  * Append new data section variable information entry for current DATASEC type:
2821  *   - *var_type_id* - type ID, describing type of the variable;
2822  *   - *offset* - variable offset within data section, in bytes;
2823  *   - *byte_sz* - variable size, in bytes.
2824  *
2825  * Returns:
2826  *   -  0, on success;
2827  *   - <0, on error.
2828  */
int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
{
	struct btf_type *t;
	struct btf_var_secinfo *v;
	int sz;

	/* last type should be BTF_KIND_DATASEC */
	if (btf->nr_types == 0)
		return libbpf_err(-EINVAL);
	t = btf_last_type(btf);
	if (!btf_is_datasec(t))
		return libbpf_err(-EINVAL);

	if (validate_type_id(var_type_id))
		return libbpf_err(-EINVAL);

	/* decompose and invalidate raw data */
	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_var_secinfo);
	v = btf_add_type_mem(btf, sz);
	if (!v)
		return libbpf_err(-ENOMEM);

	/* var_secinfo has no name of its own; it references a VAR type */
	v->type = var_type_id;
	v->offset = offset;
	v->size = byte_sz;

	/* update parent type's vlen */
	t = btf_last_type(btf);	/* re-fetch: btf_add_type_mem can move type data */
	btf_type_inc_vlen(t);

	btf->hdr->type_len += sz;
	btf->hdr->str_off += sz;	/* strings follow types, so they shift by sz */
	return 0;
}
2866 
2867 /*
2868  * Append new BTF_KIND_DECL_TAG type with:
2869  *   - *value* - non-empty/non-NULL string;
2870  *   - *ref_type_id* - referenced type ID, it might not exist yet;
2871  *   - *component_idx* - -1 for tagging reference type, otherwise struct/union
2872  *     member or function argument index;
2873  * Returns:
2874  *   - >0, type ID of newly added BTF type;
2875  *   - <0, on error.
2876  */
int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
		 int component_idx)
{
	struct btf_type *t;
	int sz, value_off;

	/* tag string is mandatory; component_idx of -1 tags the type itself */
	if (!value || !value[0] || component_idx < -1)
		return libbpf_err(-EINVAL);

	if (validate_type_id(ref_type_id))
		return libbpf_err(-EINVAL);

	if (btf_ensure_modifiable(btf))
		return libbpf_err(-ENOMEM);

	sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
	t = btf_add_type_mem(btf, sz);
	if (!t)
		return libbpf_err(-ENOMEM);

	value_off = btf__add_str(btf, value);
	if (value_off < 0)
		return value_off;

	/* the tag string is stored as the type's name */
	t->name_off = value_off;
	t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
	t->type = ref_type_id;
	btf_decl_tag(t)->component_idx = component_idx;

	return btf_commit_type(btf, sz);
}
2908 
/* Describes one .BTF.ext info section for btf_ext_setup_info() parsing */
struct btf_ext_sec_setup_param {
	__u32 off;			/* section offset past the .BTF.ext header */
	__u32 len;			/* section length, in bytes */
	__u32 min_rec_size;		/* minimal acceptable per-record size */
	struct btf_ext_info *ext_info;	/* destination for parsed section info */
	const char *desc;		/* section name used in log messages */
};
2916 
2917 static int btf_ext_setup_info(struct btf_ext *btf_ext,
2918 			      struct btf_ext_sec_setup_param *ext_sec)
2919 {
2920 	const struct btf_ext_info_sec *sinfo;
2921 	struct btf_ext_info *ext_info;
2922 	__u32 info_left, record_size;
2923 	size_t sec_cnt = 0;
2924 	/* The start of the info sec (including the __u32 record_size). */
2925 	void *info;
2926 
2927 	if (ext_sec->len == 0)
2928 		return 0;
2929 
2930 	if (ext_sec->off & 0x03) {
2931 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
2932 		     ext_sec->desc);
2933 		return -EINVAL;
2934 	}
2935 
2936 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
2937 	info_left = ext_sec->len;
2938 
2939 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
2940 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
2941 			 ext_sec->desc, ext_sec->off, ext_sec->len);
2942 		return -EINVAL;
2943 	}
2944 
2945 	/* At least a record size */
2946 	if (info_left < sizeof(__u32)) {
2947 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
2948 		return -EINVAL;
2949 	}
2950 
2951 	/* The record size needs to meet the minimum standard */
2952 	record_size = *(__u32 *)info;
2953 	if (record_size < ext_sec->min_rec_size ||
2954 	    record_size & 0x03) {
2955 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
2956 			 ext_sec->desc, record_size);
2957 		return -EINVAL;
2958 	}
2959 
2960 	sinfo = info + sizeof(__u32);
2961 	info_left -= sizeof(__u32);
2962 
2963 	/* If no records, return failure now so .BTF.ext won't be used. */
2964 	if (!info_left) {
2965 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
2966 		return -EINVAL;
2967 	}
2968 
2969 	while (info_left) {
2970 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
2971 		__u64 total_record_size;
2972 		__u32 num_records;
2973 
2974 		if (info_left < sec_hdrlen) {
2975 			pr_debug("%s section header is not found in .BTF.ext\n",
2976 			     ext_sec->desc);
2977 			return -EINVAL;
2978 		}
2979 
2980 		num_records = sinfo->num_info;
2981 		if (num_records == 0) {
2982 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2983 			     ext_sec->desc);
2984 			return -EINVAL;
2985 		}
2986 
2987 		total_record_size = sec_hdrlen + (__u64)num_records * record_size;
2988 		if (info_left < total_record_size) {
2989 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
2990 			     ext_sec->desc);
2991 			return -EINVAL;
2992 		}
2993 
2994 		info_left -= total_record_size;
2995 		sinfo = (void *)sinfo + total_record_size;
2996 		sec_cnt++;
2997 	}
2998 
2999 	ext_info = ext_sec->ext_info;
3000 	ext_info->len = ext_sec->len - sizeof(__u32);
3001 	ext_info->rec_size = record_size;
3002 	ext_info->info = info + sizeof(__u32);
3003 	ext_info->sec_cnt = sec_cnt;
3004 
3005 	return 0;
3006 }
3007 
3008 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
3009 {
3010 	struct btf_ext_sec_setup_param param = {
3011 		.off = btf_ext->hdr->func_info_off,
3012 		.len = btf_ext->hdr->func_info_len,
3013 		.min_rec_size = sizeof(struct bpf_func_info_min),
3014 		.ext_info = &btf_ext->func_info,
3015 		.desc = "func_info"
3016 	};
3017 
3018 	return btf_ext_setup_info(btf_ext, &param);
3019 }
3020 
3021 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
3022 {
3023 	struct btf_ext_sec_setup_param param = {
3024 		.off = btf_ext->hdr->line_info_off,
3025 		.len = btf_ext->hdr->line_info_len,
3026 		.min_rec_size = sizeof(struct bpf_line_info_min),
3027 		.ext_info = &btf_ext->line_info,
3028 		.desc = "line_info",
3029 	};
3030 
3031 	return btf_ext_setup_info(btf_ext, &param);
3032 }
3033 
3034 static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
3035 {
3036 	struct btf_ext_sec_setup_param param = {
3037 		.off = btf_ext->hdr->core_relo_off,
3038 		.len = btf_ext->hdr->core_relo_len,
3039 		.min_rec_size = sizeof(struct bpf_core_relo),
3040 		.ext_info = &btf_ext->core_relo_info,
3041 		.desc = "core_relo",
3042 	};
3043 
3044 	return btf_ext_setup_info(btf_ext, &param);
3045 }
3046 
3047 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
3048 {
3049 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
3050 
3051 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
3052 	    data_size < hdr->hdr_len) {
3053 		pr_debug("BTF.ext header not found");
3054 		return -EINVAL;
3055 	}
3056 
3057 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
3058 		pr_warn("BTF.ext in non-native endianness is not supported\n");
3059 		return -ENOTSUP;
3060 	} else if (hdr->magic != BTF_MAGIC) {
3061 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
3062 		return -EINVAL;
3063 	}
3064 
3065 	if (hdr->version != BTF_VERSION) {
3066 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
3067 		return -ENOTSUP;
3068 	}
3069 
3070 	if (hdr->flags) {
3071 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
3072 		return -ENOTSUP;
3073 	}
3074 
3075 	if (data_size == hdr->hdr_len) {
3076 		pr_debug("BTF.ext has no data\n");
3077 		return -EINVAL;
3078 	}
3079 
3080 	return 0;
3081 }
3082 
3083 void btf_ext__free(struct btf_ext *btf_ext)
3084 {
3085 	if (IS_ERR_OR_NULL(btf_ext))
3086 		return;
3087 	free(btf_ext->func_info.sec_idxs);
3088 	free(btf_ext->line_info.sec_idxs);
3089 	free(btf_ext->core_relo_info.sec_idxs);
3090 	free(btf_ext->data);
3091 	free(btf_ext);
3092 }
3093 
struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
{
	struct btf_ext *btf_ext;
	int err;

	btf_ext = calloc(1, sizeof(struct btf_ext));
	if (!btf_ext)
		return libbpf_err_ptr(-ENOMEM);

	/* keep a private copy of the raw .BTF.ext blob */
	btf_ext->data_size = size;
	btf_ext->data = malloc(size);
	if (!btf_ext->data) {
		err = -ENOMEM;
		goto done;
	}
	memcpy(btf_ext->data, data, size);

	err = btf_ext_parse_hdr(btf_ext->data, size);
	if (err)
		goto done;

	/* NOTE(review): btf_ext->hdr is read below without a visible
	 * assignment; presumably it aliases btf_ext->data (e.g., via a
	 * union in struct btf_ext) -- confirm against the struct definition
	 */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
		err = -EINVAL;
		goto done;
	}

	err = btf_ext_setup_func_info(btf_ext);
	if (err)
		goto done;

	err = btf_ext_setup_line_info(btf_ext);
	if (err)
		goto done;

	/* older .BTF.ext headers are too short to describe core_relo;
	 * that is not an error, just nothing more to parse
	 */
	if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
		goto done; /* skip core relos parsing */

	err = btf_ext_setup_core_relos(btf_ext);
	if (err)
		goto done;

done:
	/* on any failure, release everything allocated so far */
	if (err) {
		btf_ext__free(btf_ext);
		return libbpf_err_ptr(err);
	}

	return btf_ext;
}
3143 
3144 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
3145 {
3146 	*size = btf_ext->data_size;
3147 	return btf_ext->data;
3148 }
3149 
3150 struct btf_dedup;
3151 
3152 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
3153 static void btf_dedup_free(struct btf_dedup *d);
3154 static int btf_dedup_prep(struct btf_dedup *d);
3155 static int btf_dedup_strings(struct btf_dedup *d);
3156 static int btf_dedup_prim_types(struct btf_dedup *d);
3157 static int btf_dedup_struct_types(struct btf_dedup *d);
3158 static int btf_dedup_ref_types(struct btf_dedup *d);
3159 static int btf_dedup_resolve_fwds(struct btf_dedup *d);
3160 static int btf_dedup_compact_types(struct btf_dedup *d);
3161 static int btf_dedup_remap_types(struct btf_dedup *d);
3162 
3163 /*
3164  * Deduplicate BTF types and strings.
3165  *
3166  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
3167  * section with all BTF type descriptors and string data. It overwrites that
3168  * memory in-place with deduplicated types and strings without any loss of
3169  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
3170  * is provided, all the strings referenced from .BTF.ext section are honored
3171  * and updated to point to the right offsets after deduplication.
3172  *
3173  * If function returns with error, type/string data might be garbled and should
3174  * be discarded.
3175  *
 * A more verbose and detailed description of both the problem btf_dedup is
 * solving and the solution can be found at:
3178  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
3179  *
3180  * Problem description and justification
3181  * =====================================
3182  *
3183  * BTF type information is typically emitted either as a result of conversion
3184  * from DWARF to BTF or directly by compiler. In both cases, each compilation
3185  * unit contains information about a subset of all the types that are used
3186  * in an application. These subsets are frequently overlapping and contain a lot
3187  * of duplicated information when later concatenated together into a single
3188  * binary. This algorithm ensures that each unique type is represented by single
3189  * BTF type descriptor, greatly reducing resulting size of BTF data.
3190  *
3191  * Compilation unit isolation and subsequent duplication of data is not the only
3192  * problem. The same type hierarchy (e.g., struct and all the type that struct
3193  * references) in different compilation units can be represented in BTF to
3194  * various degrees of completeness (or, rather, incompleteness) due to
3195  * struct/union forward declarations.
3196  *
3197  * Let's take a look at an example, that we'll use to better understand the
3198  * problem (and solution). Suppose we have two compilation units, each using
3199  * same `struct S`, but each of them having incomplete type information about
3200  * struct's fields:
3201  *
3202  * // CU #1:
3203  * struct S;
3204  * struct A {
3205  *	int a;
3206  *	struct A* self;
3207  *	struct S* parent;
3208  * };
3209  * struct B;
3210  * struct S {
3211  *	struct A* a_ptr;
3212  *	struct B* b_ptr;
3213  * };
3214  *
3215  * // CU #2:
3216  * struct S;
3217  * struct A;
3218  * struct B {
3219  *	int b;
3220  *	struct B* self;
3221  *	struct S* parent;
3222  * };
3223  * struct S {
3224  *	struct A* a_ptr;
3225  *	struct B* b_ptr;
3226  * };
3227  *
3228  * In case of CU #1, BTF data will know only that `struct B` exist (but no
3229  * more), but will know the complete type information about `struct A`. While
3230  * for CU #2, it will know full type information about `struct B`, but will
3231  * only know about forward declaration of `struct A` (in BTF terms, it will
 * have `BTF_KIND_FWD` type descriptor with name `A`).
3233  *
3234  * This compilation unit isolation means that it's possible that there is no
3235  * single CU with complete type information describing structs `S`, `A`, and
3236  * `B`. Also, we might get tons of duplicated and redundant type information.
3237  *
3238  * Additional complication we need to keep in mind comes from the fact that
3239  * types, in general, can form graphs containing cycles, not just DAGs.
3240  *
3241  * While algorithm does deduplication, it also merges and resolves type
 * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
3243  * E.g., in the example above with two compilation units having partial type
3244  * information for structs `A` and `B`, the output of algorithm will emit
3245  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
3246  * (as well as type information for `int` and pointers), as if they were defined
3247  * in a single compilation unit as:
3248  *
3249  * struct A {
3250  *	int a;
3251  *	struct A* self;
3252  *	struct S* parent;
3253  * };
3254  * struct B {
3255  *	int b;
3256  *	struct B* self;
3257  *	struct S* parent;
3258  * };
3259  * struct S {
3260  *	struct A* a_ptr;
3261  *	struct B* b_ptr;
3262  * };
3263  *
3264  * Algorithm summary
3265  * =================
3266  *
3267  * Algorithm completes its work in 7 separate passes:
3268  *
3269  * 1. Strings deduplication.
3270  * 2. Primitive types deduplication (int, enum, fwd).
3271  * 3. Struct/union types deduplication.
3272  * 4. Resolve unambiguous forward declarations.
3273  * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
3274  *    protos, and const/volatile/restrict modifiers).
3275  * 6. Types compaction.
3276  * 7. Types remapping.
3277  *
3278  * Algorithm determines canonical type descriptor, which is a single
3279  * representative type for each truly unique type. This canonical type is the
3280  * one that will go into final deduplicated BTF type information. For
3281  * struct/unions, it is also the type that algorithm will merge additional type
3282  * information into (while resolving FWDs), as it discovers it from data in
3283  * other CUs. Each input BTF type eventually gets either mapped to itself, if
3284  * that type is canonical, or to some other type, if that type is equivalent
3285  * and was chosen as canonical representative. This mapping is stored in
3286  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
3287  * FWD type got resolved to.
3288  *
3289  * To facilitate fast discovery of canonical types, we also maintain canonical
3290  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
3291  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
3292  * that match that signature. With sufficiently good choice of type signature
3293  * hashing function, we can limit number of canonical types for each unique type
3294  * signature to a very small number, allowing to find canonical type for any
3295  * duplicated type very quickly.
3296  *
3297  * Struct/union deduplication is the most critical part and algorithm for
3298  * deduplicating structs/unions is described in greater details in comments for
3299  * `btf_dedup_is_equiv` function.
3300  */
3301 int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
3302 {
3303 	struct btf_dedup *d;
3304 	int err;
3305 
3306 	if (!OPTS_VALID(opts, btf_dedup_opts))
3307 		return libbpf_err(-EINVAL);
3308 
3309 	d = btf_dedup_new(btf, opts);
3310 	if (IS_ERR(d)) {
3311 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
3312 		return libbpf_err(-EINVAL);
3313 	}
3314 
3315 	if (btf_ensure_modifiable(btf)) {
3316 		err = -ENOMEM;
3317 		goto done;
3318 	}
3319 
3320 	err = btf_dedup_prep(d);
3321 	if (err) {
3322 		pr_debug("btf_dedup_prep failed:%d\n", err);
3323 		goto done;
3324 	}
3325 	err = btf_dedup_strings(d);
3326 	if (err < 0) {
3327 		pr_debug("btf_dedup_strings failed:%d\n", err);
3328 		goto done;
3329 	}
3330 	err = btf_dedup_prim_types(d);
3331 	if (err < 0) {
3332 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
3333 		goto done;
3334 	}
3335 	err = btf_dedup_struct_types(d);
3336 	if (err < 0) {
3337 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
3338 		goto done;
3339 	}
3340 	err = btf_dedup_resolve_fwds(d);
3341 	if (err < 0) {
3342 		pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
3343 		goto done;
3344 	}
3345 	err = btf_dedup_ref_types(d);
3346 	if (err < 0) {
3347 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
3348 		goto done;
3349 	}
3350 	err = btf_dedup_compact_types(d);
3351 	if (err < 0) {
3352 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
3353 		goto done;
3354 	}
3355 	err = btf_dedup_remap_types(d);
3356 	if (err < 0) {
3357 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
3358 		goto done;
3359 	}
3360 
3361 done:
3362 	btf_dedup_free(d);
3363 	return libbpf_err(err);
3364 }
3365 
3366 #define BTF_UNPROCESSED_ID ((__u32)-1)
3367 #define BTF_IN_PROGRESS_ID ((__u32)-2)
3368 
struct btf_dedup {
	/* .BTF section to be deduped in-place */
	struct btf *btf;
	/*
	 * Optional .BTF.ext section. When provided, any strings referenced
	 * from it will be taken into account when deduping strings
	 */
	struct btf_ext *btf_ext;
	/*
	 * This is a map from any type's signature hash to a list of possible
	 * canonical representative type candidates. Hash collisions are
	 * ignored, so even types of various kinds can share same list of
	 * candidates, which is fine because we rely on subsequent
	 * btf_xxx_equal() checks to authoritatively verify type equality.
	 */
	struct hashmap *dedup_table;
	/* Canonical types map: type ID -> canonical type ID (or a special
	 * BTF_UNPROCESSED_ID/BTF_IN_PROGRESS_ID marker while deduping)
	 */
	__u32 *map;
	/* Hypothetical mapping, used during type graph equivalence checks */
	__u32 *hypot_map;
	/* type IDs touched in hypot_map, for fast clearing between checks */
	__u32 *hypot_list;
	/* number of entries used in hypot_list */
	size_t hypot_cnt;
	/* allocated capacity of hypot_list */
	size_t hypot_cap;
	/* Whether hypothetical mapping, if successful, would need to adjust
	 * already canonicalized types (due to a new forward declaration to
	 * concrete type resolution). In such case, during split BTF dedup
	 * candidate type would still be considered as different, because base
	 * BTF is considered to be immutable.
	 */
	bool hypot_adjust_canon;
	/* Various option modifying behavior of algorithm */
	struct btf_dedup_opts opts;
	/* temporary strings deduplication state */
	struct strset *strs_set;
};
3404 
/* Fold @value into running hash @h using the classic multiply-by-31 scheme. */
static long hash_combine(long h, long value)
{
	long scaled = h * 31;

	return scaled + value;
}
3409 
3410 #define for_each_dedup_cand(d, node, hash) \
3411 	hashmap__for_each_key_entry(d->dedup_table, node, hash)
3412 
/* Register @type_id as a canonical-type candidate under signature @hash.
 * Returns 0 on success, negative error on allocation failure.
 */
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
	return hashmap__append(d->dedup_table, hash, type_id);
}
3417 
3418 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
3419 				   __u32 from_id, __u32 to_id)
3420 {
3421 	if (d->hypot_cnt == d->hypot_cap) {
3422 		__u32 *new_list;
3423 
3424 		d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
3425 		new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
3426 		if (!new_list)
3427 			return -ENOMEM;
3428 		d->hypot_list = new_list;
3429 	}
3430 	d->hypot_list[d->hypot_cnt++] = from_id;
3431 	d->hypot_map[from_id] = to_id;
3432 	return 0;
3433 }
3434 
3435 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
3436 {
3437 	int i;
3438 
3439 	for (i = 0; i < d->hypot_cnt; i++)
3440 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
3441 	d->hypot_cnt = 0;
3442 	d->hypot_adjust_canon = false;
3443 }
3444 
3445 static void btf_dedup_free(struct btf_dedup *d)
3446 {
3447 	hashmap__free(d->dedup_table);
3448 	d->dedup_table = NULL;
3449 
3450 	free(d->map);
3451 	d->map = NULL;
3452 
3453 	free(d->hypot_map);
3454 	d->hypot_map = NULL;
3455 
3456 	free(d->hypot_list);
3457 	d->hypot_list = NULL;
3458 
3459 	free(d);
3460 }
3461 
/* Keys are pre-computed type signature hashes, so use them verbatim. */
static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
	(void)ctx;
	return (size_t)key;
}
3466 
/* Degenerate hash mapping every key to bucket 0, forcing all keys to
 * collide; selected when the force_collisions dedup option is set.
 */
static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
	(void)key;
	(void)ctx;
	return 0;
}
3471 
/* Keys are full hash values; two entries match iff their hashes are equal. */
static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
	(void)ctx;
	return k1 == k2;
}
3476 
3477 static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
3478 {
3479 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
3480 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
3481 	int i, err = 0, type_cnt;
3482 
3483 	if (!d)
3484 		return ERR_PTR(-ENOMEM);
3485 
3486 	if (OPTS_GET(opts, force_collisions, false))
3487 		hash_fn = btf_dedup_collision_hash_fn;
3488 
3489 	d->btf = btf;
3490 	d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
3491 
3492 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
3493 	if (IS_ERR(d->dedup_table)) {
3494 		err = PTR_ERR(d->dedup_table);
3495 		d->dedup_table = NULL;
3496 		goto done;
3497 	}
3498 
3499 	type_cnt = btf__type_cnt(btf);
3500 	d->map = malloc(sizeof(__u32) * type_cnt);
3501 	if (!d->map) {
3502 		err = -ENOMEM;
3503 		goto done;
3504 	}
3505 	/* special BTF "void" type is made canonical immediately */
3506 	d->map[0] = 0;
3507 	for (i = 1; i < type_cnt; i++) {
3508 		struct btf_type *t = btf_type_by_id(d->btf, i);
3509 
3510 		/* VAR and DATASEC are never deduped and are self-canonical */
3511 		if (btf_is_var(t) || btf_is_datasec(t))
3512 			d->map[i] = i;
3513 		else
3514 			d->map[i] = BTF_UNPROCESSED_ID;
3515 	}
3516 
3517 	d->hypot_map = malloc(sizeof(__u32) * type_cnt);
3518 	if (!d->hypot_map) {
3519 		err = -ENOMEM;
3520 		goto done;
3521 	}
3522 	for (i = 0; i < type_cnt; i++)
3523 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
3524 
3525 done:
3526 	if (err) {
3527 		btf_dedup_free(d);
3528 		return ERR_PTR(err);
3529 	}
3530 
3531 	return d;
3532 }
3533 
3534 /*
3535  * Iterate over all possible places in .BTF and .BTF.ext that can reference
3536  * string and pass pointer to it to a provided callback `fn`.
3537  */
3538 static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
3539 {
3540 	int i, r;
3541 
3542 	for (i = 0; i < d->btf->nr_types; i++) {
3543 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
3544 
3545 		r = btf_type_visit_str_offs(t, fn, ctx);
3546 		if (r)
3547 			return r;
3548 	}
3549 
3550 	if (!d->btf_ext)
3551 		return 0;
3552 
3553 	r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
3554 	if (r)
3555 		return r;
3556 
3557 	return 0;
3558 }
3559 
3560 static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
3561 {
3562 	struct btf_dedup *d = ctx;
3563 	__u32 str_off = *str_off_ptr;
3564 	const char *s;
3565 	int off, err;
3566 
3567 	/* don't touch empty string or string in main BTF */
3568 	if (str_off == 0 || str_off < d->btf->start_str_off)
3569 		return 0;
3570 
3571 	s = btf__str_by_offset(d->btf, str_off);
3572 	if (d->btf->base_btf) {
3573 		err = btf__find_str(d->btf->base_btf, s);
3574 		if (err >= 0) {
3575 			*str_off_ptr = err;
3576 			return 0;
3577 		}
3578 		if (err != -ENOENT)
3579 			return err;
3580 	}
3581 
3582 	off = strset__add_str(d->strs_set, s);
3583 	if (off < 0)
3584 		return off;
3585 
3586 	*str_off_ptr = d->btf->start_str_off + off;
3587 	return 0;
3588 }
3589 
3590 /*
3591  * Dedup string and filter out those that are not referenced from either .BTF
3592  * or .BTF.ext (if provided) sections.
3593  *
3594  * This is done by building index of all strings in BTF's string section,
3595  * then iterating over all entities that can reference strings (e.g., type
3596  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
3597  * strings as used. After that all used strings are deduped and compacted into
3598  * sequential blob of memory and new offsets are calculated. Then all the string
3599  * references are iterated again and rewritten using new offsets.
3600  */
static int btf_dedup_strings(struct btf_dedup *d)
{
	int err;

	/* strings can only be deduped once; subsequent calls are no-ops */
	if (d->btf->strs_deduped)
		return 0;

	d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
	if (IS_ERR(d->strs_set)) {
		err = PTR_ERR(d->strs_set);
		goto err_out;
	}

	if (!d->btf->base_btf) {
		/* insert empty string; we won't be looking it up during strings
		 * dedup, but it's good to have it for generic BTF string lookups
		 */
		err = strset__add_str(d->strs_set, "");
		if (err < 0)
			goto err_out;
	}

	/* remap string offsets; this both populates d->strs_set with every
	 * referenced string and rewrites each reference to its new offset
	 */
	err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
	if (err)
		goto err_out;

	/* replace BTF string data and hash with deduped ones; ownership of
	 * d->strs_set transfers to d->btf here
	 */
	strset__free(d->btf->strs_set);
	d->btf->hdr->str_len = strset__data_size(d->strs_set);
	d->btf->strs_set = d->strs_set;
	d->strs_set = NULL;
	d->btf->strs_deduped = true;
	return 0;

err_out:
	strset__free(d->strs_set);
	d->strs_set = NULL;

	return err;
}
3642 
/* Compute signature hash over the fields shared by all BTF kinds:
 * name offset, info word and size/type word.
 */
static long btf_hash_common(struct btf_type *t)
{
	long h = hash_combine(0, t->name_off);

	h = hash_combine(h, t->info);
	return hash_combine(h, t->size);
}
3652 
/* Check that name, info and size/type words of two types match exactly. */
static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
{
	if (t1->name_off != t2->name_off)
		return false;
	if (t1->info != t2->info)
		return false;
	return t1->size == t2->size;
}
3659 
/* Calculate type signature hash of INT or DECL_TAG: common fields plus the
 * single extra __u32 that immediately follows struct btf_type.
 */
static long btf_hash_int_decl_tag(struct btf_type *t)
{
	__u32 extra = *(__u32 *)(t + 1);

	return hash_combine(btf_hash_common(t), extra);
}
3670 
/* Check structural equality of two INTs or TAGs: common fields plus the
 * extra __u32 payload following each type descriptor.
 */
static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_equal_common(t1, t2))
		return false;

	return *(__u32 *)(t1 + 1) == *(__u32 *)(t2 + 1);
}
3682 
/* Calculate type signature hash of ENUM/ENUM64. Deliberately hashes only the
 * name (not vlen, members or size) so that an enum forward declaration lands
 * in the same bucket as its full definition, enabling fwd resolution.
 */
static long btf_hash_enum(struct btf_type *t)
{
	return hash_combine(0, t->name_off);
}
3692 
/* Compare the member lists of two ENUMs, entry by entry (names and values),
 * iterating over t1's vlen members.
 */
static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
{
	const struct btf_enum *m1 = btf_enum(t1);
	const struct btf_enum *m2 = btf_enum(t2);
	int i, n = btf_vlen(t1);

	for (i = 0; i < n; i++, m1++, m2++) {
		if (m1->name_off != m2->name_off)
			return false;
		if (m1->val != m2->val)
			return false;
	}
	return true;
}
3710 
3711 static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
3712 {
3713 	const struct btf_enum64 *m1, *m2;
3714 	__u16 vlen;
3715 	int i;
3716 
3717 	vlen = btf_vlen(t1);
3718 	m1 = btf_enum64(t1);
3719 	m2 = btf_enum64(t2);
3720 	for (i = 0; i < vlen; i++) {
3721 		if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
3722 		    m1->val_hi32 != m2->val_hi32)
3723 			return false;
3724 		m1++;
3725 		m2++;
3726 	}
3727 	return true;
3728 }
3729 
/* Check structural equality of two ENUMs or ENUM64s. */
static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
{
	if (!btf_equal_common(t1, t2))
		return false;

	/* t1 & t2 kinds are identical because of btf_equal_common */
	return btf_kind(t1) == BTF_KIND_ENUM ? btf_equal_enum_members(t1, t2)
					     : btf_equal_enum64_members(t1, t2);
}
3742 
/* An enum forward declaration is any ENUM/ENUM64 with zero members. */
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
	if (!btf_is_any_enum(t))
		return false;
	return btf_vlen(t) == 0;
}
3747 
/* Check compatibility of two enums, treating forward declarations (zero
 * members) as compatible with any same-named enum/enum64.
 */
static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
{
	bool fwd1 = btf_is_enum_fwd(t1), fwd2 = btf_is_enum_fwd(t2);

	if (!fwd1 && !fwd2)
		return btf_equal_enum(t1, t2);
	/* At this point either t1 or t2 or both are forward declarations, thus:
	 * - skip comparing vlen because it is zero for forward declarations;
	 * - skip comparing size to allow enum forward declarations
	 *   to be compatible with enum64 full declarations;
	 * - skip comparing kind for the same reason.
	 */
	if (t1->name_off != t2->name_off)
		return false;
	return btf_is_any_enum(t1) && btf_is_any_enum(t2);
}
3761 
3762 /*
3763  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
3764  * as referenced type IDs equivalence is established separately during type
3765  * graph equivalence check algorithm.
3766  */
3767 static long btf_hash_struct(struct btf_type *t)
3768 {
3769 	const struct btf_member *member = btf_members(t);
3770 	__u32 vlen = btf_vlen(t);
3771 	long h = btf_hash_common(t);
3772 	int i;
3773 
3774 	for (i = 0; i < vlen; i++) {
3775 		h = hash_combine(h, member->name_off);
3776 		h = hash_combine(h, member->offset);
3777 		/* no hashing of referenced type ID, it can be unresolved yet */
3778 		member++;
3779 	}
3780 	return h;
3781 }
3782 
3783 /*
3784  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
3785  * type IDs. This check is performed during type graph equivalence check and
3786  * referenced types equivalence is checked separately.
3787  */
3788 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
3789 {
3790 	const struct btf_member *m1, *m2;
3791 	__u16 vlen;
3792 	int i;
3793 
3794 	if (!btf_equal_common(t1, t2))
3795 		return false;
3796 
3797 	vlen = btf_vlen(t1);
3798 	m1 = btf_members(t1);
3799 	m2 = btf_members(t2);
3800 	for (i = 0; i < vlen; i++) {
3801 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
3802 			return false;
3803 		m1++;
3804 		m2++;
3805 	}
3806 	return true;
3807 }
3808 
3809 /*
3810  * Calculate type signature hash of ARRAY, including referenced type IDs,
3811  * under assumption that they were already resolved to canonical type IDs and
3812  * are not going to change.
3813  */
3814 static long btf_hash_array(struct btf_type *t)
3815 {
3816 	const struct btf_array *info = btf_array(t);
3817 	long h = btf_hash_common(t);
3818 
3819 	h = hash_combine(h, info->type);
3820 	h = hash_combine(h, info->index_type);
3821 	h = hash_combine(h, info->nelems);
3822 	return h;
3823 }
3824 
3825 /*
3826  * Check exact equality of two ARRAYs, taking into account referenced
3827  * type IDs, under assumption that they were already resolved to canonical
3828  * type IDs and are not going to change.
3829  * This function is called during reference types deduplication to compare
3830  * ARRAY to potential canonical representative.
3831  */
3832 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
3833 {
3834 	const struct btf_array *info1, *info2;
3835 
3836 	if (!btf_equal_common(t1, t2))
3837 		return false;
3838 
3839 	info1 = btf_array(t1);
3840 	info2 = btf_array(t2);
3841 	return info1->type == info2->type &&
3842 	       info1->index_type == info2->index_type &&
3843 	       info1->nelems == info2->nelems;
3844 }
3845 
3846 /*
3847  * Check structural compatibility of two ARRAYs, ignoring referenced type
3848  * IDs. This check is performed during type graph equivalence check and
3849  * referenced types equivalence is checked separately.
3850  */
3851 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
3852 {
3853 	if (!btf_equal_common(t1, t2))
3854 		return false;
3855 
3856 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
3857 }
3858 
3859 /*
3860  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
3861  * under assumption that they were already resolved to canonical type IDs and
3862  * are not going to change.
3863  */
3864 static long btf_hash_fnproto(struct btf_type *t)
3865 {
3866 	const struct btf_param *member = btf_params(t);
3867 	__u16 vlen = btf_vlen(t);
3868 	long h = btf_hash_common(t);
3869 	int i;
3870 
3871 	for (i = 0; i < vlen; i++) {
3872 		h = hash_combine(h, member->name_off);
3873 		h = hash_combine(h, member->type);
3874 		member++;
3875 	}
3876 	return h;
3877 }
3878 
3879 /*
3880  * Check exact equality of two FUNC_PROTOs, taking into account referenced
3881  * type IDs, under assumption that they were already resolved to canonical
3882  * type IDs and are not going to change.
3883  * This function is called during reference types deduplication to compare
3884  * FUNC_PROTO to potential canonical representative.
3885  */
3886 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
3887 {
3888 	const struct btf_param *m1, *m2;
3889 	__u16 vlen;
3890 	int i;
3891 
3892 	if (!btf_equal_common(t1, t2))
3893 		return false;
3894 
3895 	vlen = btf_vlen(t1);
3896 	m1 = btf_params(t1);
3897 	m2 = btf_params(t2);
3898 	for (i = 0; i < vlen; i++) {
3899 		if (m1->name_off != m2->name_off || m1->type != m2->type)
3900 			return false;
3901 		m1++;
3902 		m2++;
3903 	}
3904 	return true;
3905 }
3906 
3907 /*
3908  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
3909  * IDs. This check is performed during type graph equivalence check and
3910  * referenced types equivalence is checked separately.
3911  */
3912 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
3913 {
3914 	const struct btf_param *m1, *m2;
3915 	__u16 vlen;
3916 	int i;
3917 
3918 	/* skip return type ID */
3919 	if (t1->name_off != t2->name_off || t1->info != t2->info)
3920 		return false;
3921 
3922 	vlen = btf_vlen(t1);
3923 	m1 = btf_params(t1);
3924 	m2 = btf_params(t2);
3925 	for (i = 0; i < vlen; i++) {
3926 		if (m1->name_off != m2->name_off)
3927 			return false;
3928 		m1++;
3929 		m2++;
3930 	}
3931 	return true;
3932 }
3933 
/* Prepare split BTF for deduplication by calculating hashes of base BTF's
 * types and initializing the rest of the state (canonical type mapping) for
 * the fixed base BTF part.
 *
 * Every base BTF type is self-canonical (base BTF is considered immutable
 * during split BTF dedup), but is still registered in the candidate table so
 * split BTF types can dedup against it.
 *
 * Returns 0 on success, -EINVAL on unknown type kind, -ENOMEM on allocation
 * failure.
 */
static int btf_dedup_prep(struct btf_dedup *d)
{
	struct btf_type *t;
	int type_id;
	long h;

	/* nothing to do unless this is split BTF with an immutable base part */
	if (!d->btf->base_btf)
		return 0;

	for (type_id = 1; type_id < d->btf->start_id; type_id++) {
		t = btf_type_by_id(d->btf, type_id);

		/* all base BTF types are self-canonical by definition */
		d->map[type_id] = type_id;

		switch (btf_kind(t)) {
		case BTF_KIND_VAR:
		case BTF_KIND_DATASEC:
			/* VAR and DATASEC are never hash/deduplicated */
			continue;
		/* kinds with no extra payload relevant to hashing */
		case BTF_KIND_CONST:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_RESTRICT:
		case BTF_KIND_PTR:
		case BTF_KIND_FWD:
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_FUNC:
		case BTF_KIND_FLOAT:
		case BTF_KIND_TYPE_TAG:
			h = btf_hash_common(t);
			break;
		case BTF_KIND_INT:
		case BTF_KIND_DECL_TAG:
			h = btf_hash_int_decl_tag(t);
			break;
		case BTF_KIND_ENUM:
		case BTF_KIND_ENUM64:
			h = btf_hash_enum(t);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			h = btf_hash_struct(t);
			break;
		case BTF_KIND_ARRAY:
			h = btf_hash_array(t);
			break;
		case BTF_KIND_FUNC_PROTO:
			h = btf_hash_fnproto(t);
			break;
		default:
			pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
			return -EINVAL;
		}
		/* register base type as a canonical candidate under its hash */
		if (btf_dedup_table_add(d, h, type_id))
			return -ENOMEM;
	}

	return 0;
}
3997 
3998 /*
3999  * Deduplicate primitive types, that can't reference other types, by calculating
4000  * their type signature hash and comparing them with any possible canonical
4001  * candidate. If no canonical candidate matches, type itself is marked as
4002  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
4003  */
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *t = btf_type_by_id(d->btf, type_id);
	struct hashmap_entry *hash_entry;
	struct btf_type *cand;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u32 cand_id;
	long h;

	switch (btf_kind(t)) {
	/* reference/composite kinds and VAR/DATASEC are handled by other
	 * dedup passes (or never deduped at all), so skip them here
	 */
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_ARRAY:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_FUNC:
	case BTF_KIND_FUNC_PROTO:
	case BTF_KIND_VAR:
	case BTF_KIND_DATASEC:
	case BTF_KIND_DECL_TAG:
	case BTF_KIND_TYPE_TAG:
		return 0;

	case BTF_KIND_INT:
		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		h = btf_hash_enum(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			/* exact match: candidate becomes our canonical type */
			if (btf_equal_enum(t, cand)) {
				new_id = cand_id;
				break;
			}
			if (btf_compat_enum(t, cand)) {
				if (btf_is_enum_fwd(t)) {
					/* resolve fwd to full enum */
					new_id = cand_id;
					break;
				}
				/* resolve canonical enum fwd to full enum;
				 * note: this remaps an already-registered
				 * candidate, keeping iteration going in case
				 * an exact full-enum match comes later
				 */
				d->map[cand_id] = type_id;
			}
		}
		break;

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	default:
		return -EINVAL;
	}

	/* record mapping; if we are our own canonical type, also register
	 * ourselves as a candidate for subsequent types
	 */
	d->map[type_id] = new_id;
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
4088 
4089 static int btf_dedup_prim_types(struct btf_dedup *d)
4090 {
4091 	int i, err;
4092 
4093 	for (i = 0; i < d->btf->nr_types; i++) {
4094 		err = btf_dedup_prim_type(d, d->btf->start_id + i);
4095 		if (err)
4096 			return err;
4097 	}
4098 	return 0;
4099 }
4100 
4101 /*
4102  * Check whether type is already mapped into canonical one (could be to itself).
4103  */
4104 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
4105 {
4106 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
4107 }
4108 
4109 /*
4110  * Resolve type ID into its canonical type ID, if any; otherwise return original
4111  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
4112  * STRUCT/UNION link and resolve it into canonical type ID as well.
4113  */
4114 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
4115 {
4116 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4117 		type_id = d->map[type_id];
4118 	return type_id;
4119 }
4120 
4121 /*
4122  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
4123  * type ID.
4124  */
4125 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
4126 {
4127 	__u32 orig_type_id = type_id;
4128 
4129 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4130 		return type_id;
4131 
4132 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
4133 		type_id = d->map[type_id];
4134 
4135 	if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
4136 		return type_id;
4137 
4138 	return orig_type_id;
4139 }
4140 
4141 
/* Kind of STRUCT/UNION a FWD stands for: kflag set means union fwd. */
static inline __u16 btf_fwd_kind(struct btf_type *t)
{
	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
}
4146 
4147 /* Check if given two types are identical ARRAY definitions */
4148 static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
4149 {
4150 	struct btf_type *t1, *t2;
4151 
4152 	t1 = btf_type_by_id(d->btf, id1);
4153 	t2 = btf_type_by_id(d->btf, id2);
4154 	if (!btf_is_array(t1) || !btf_is_array(t2))
4155 		return false;
4156 
4157 	return btf_equal_array(t1, t2);
4158 }
4159 
4160 /* Check if given two types are identical STRUCT/UNION definitions */
4161 static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
4162 {
4163 	const struct btf_member *m1, *m2;
4164 	struct btf_type *t1, *t2;
4165 	int n, i;
4166 
4167 	t1 = btf_type_by_id(d->btf, id1);
4168 	t2 = btf_type_by_id(d->btf, id2);
4169 
4170 	if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
4171 		return false;
4172 
4173 	if (!btf_shallow_equal_struct(t1, t2))
4174 		return false;
4175 
4176 	m1 = btf_members(t1);
4177 	m2 = btf_members(t2);
4178 	for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
4179 		if (m1->type != m2->type &&
4180 		    !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
4181 		    !btf_dedup_identical_structs(d, m1->type, m2->type))
4182 			return false;
4183 	}
4184 	return true;
4185 }
4186 
4187 /*
4188  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
4189  * call it "candidate graph" in this description for brevity) to a type graph
4190  * formed by (potential) canonical struct/union ("canonical graph" for brevity
4191  * here, though keep in mind that not all types in canonical graph are
4192  * necessarily canonical representatives themselves, some of them might be
4193  * duplicates or its uniqueness might not have been established yet).
4194  * Returns:
4195  *  - >0, if type graphs are equivalent;
4196  *  -  0, if not equivalent;
4197  *  - <0, on error.
4198  *
4199  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
4200  * equivalence of BTF types at each step. If at any point BTF types in candidate
4201  * and canonical graphs are not compatible structurally, whole graphs are
4202  * incompatible. If types are structurally equivalent (i.e., all information
4203  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
4205  * If a type references other types, then those referenced types are checked
4206  * for equivalence recursively.
4207  *
4208  * During DFS traversal, if we find that for current `canon_id` type we
4209  * already have some mapping in hypothetical map, we check for two possible
4210  * situations:
4211  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
4212  *     happen when type graphs have cycles. In this case we assume those two
4213  *     types are equivalent.
4214  *   - `canon_id` is mapped to different type. This is contradiction in our
4215  *     hypothetical mapping, because same graph in canonical graph corresponds
4216  *     to two different types in candidate graph, which for equivalent type
4217  *     graphs shouldn't happen. This condition terminates equivalence check
4218  *     with negative result.
4219  *
4220  * If type graphs traversal exhausts types to check and find no contradiction,
4221  * then type graphs are equivalent.
4222  *
4223  * When checking types for equivalence, there is one special case: FWD types.
4224  * If FWD type resolution is allowed and one of the types (either from canonical
4225  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
4226  * flag) and their names match, hypothetical mapping is updated to point from
4227  * FWD to STRUCT/UNION. If graphs will be determined as equivalent successfully,
4228  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
4229  *
4230  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
4231  * if there are two exactly named (or anonymous) structs/unions that are
4232  * compatible structurally, one of which has FWD field, while other is concrete
4233  * STRUCT/UNION, but according to C sources they are different structs/unions
4234  * that are referencing different types with the same name. This is extremely
4235  * unlikely to happen, but btf_dedup API allows to disable FWD resolution if
4236  * this logic is causing problems.
4237  *
4238  * Doing FWD resolution means that both candidate and/or canonical graphs can
 * consist of portions of the graph that come from multiple compilation units.
4240  * This is due to the fact that types within single compilation unit are always
4241  * deduplicated and FWDs are already resolved, if referenced struct/union
 * definition is available. So, if we had unresolved FWD and found corresponding
4243  * STRUCT/UNION, they will be from different compilation units. This
4244  * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
4245  * type graph will likely have at least two different BTF types that describe
4246  * same type (e.g., most probably there will be two different BTF types for the
4247  * same 'int' primitive type) and could even have "overlapping" parts of type
4248  * graph that describe same subset of types.
4249  *
4250  * This in turn means that our assumption that each type in canonical graph
4251  * must correspond to exactly one type in candidate graph might not hold
4252  * anymore and will make it harder to detect contradictions using hypothetical
4253  * map. To handle this problem, we allow to follow FWD -> STRUCT/UNION
4254  * resolution only in canonical graph. FWDs in candidate graphs are never
4255  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
4256  * that can occur:
4257  *   - Both types in canonical and candidate graphs are FWDs. If they are
4258  *     structurally equivalent, then they can either be both resolved to the
4259  *     same STRUCT/UNION or not resolved at all. In both cases they are
4260  *     equivalent and there is no need to resolve FWD on candidate side.
4261  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
4262  *     so nothing to resolve as well, algorithm will check equivalence anyway.
4263  *   - Type in canonical graph is FWD, while type in candidate is concrete
4264  *     STRUCT/UNION. In this case candidate graph comes from single compilation
4265  *     unit, so there is exactly one BTF type for each unique C type. After
4266  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
4267  *     in canonical graph mapping to single BTF type in candidate graph, but
4268  *     because hypothetical mapping maps from canonical to candidate types, it's
4269  *     alright, and we still maintain the property of having single `canon_id`
4270  *     mapping to single `cand_id` (there could be two different `canon_id`
4271  *     mapped to the same `cand_id`, but it's not contradictory).
4272  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
4273  *     graph is FWD. In this case we are just going to check compatibility of
4274  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
4275  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
4276  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
4277  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
4278  *     canonical graph.
4279  */
static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
			      __u32 canon_id)
{
	struct btf_type *cand_type;
	struct btf_type *canon_type;
	__u32 hypot_type_id;
	__u16 cand_kind;
	__u16 canon_kind;
	int i, eq;

	/* if both resolve to the same canonical, they must be equivalent */
	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
		return 1;

	/* FWD resolution is followed on the canonical side only; see the
	 * comment above this function for why that's sufficient
	 */
	canon_id = resolve_fwd_id(d, canon_id);

	hypot_type_id = d->hypot_map[canon_id];
	if (hypot_type_id <= BTF_MAX_NR_TYPES) {
		/* canon_id was already visited in this DFS; it's only
		 * equivalent if it was hypothetically mapped to this very
		 * cand_id (type graph cycle), otherwise it's a contradiction
		 */
		if (hypot_type_id == cand_id)
			return 1;
		/* In some cases compiler will generate different DWARF types
		 * for *identical* array type definitions and use them for
		 * different fields within the *same* struct. This breaks type
		 * equivalence check, which makes an assumption that candidate
		 * types sub-graph has a consistent and deduped-by-compiler
		 * types within a single CU. So work around that by explicitly
		 * allowing identical array types here.
		 */
		if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
			return 1;
		/* It turns out that similar situation can happen with
		 * struct/union sometimes, sigh... Handle the case where
		 * structs/unions are exactly the same, down to the referenced
		 * type IDs. Anything more complicated (e.g., if referenced
		 * types are different, but equivalent) is *way more*
		 * complicated and requires a many-to-many equivalence mapping.
		 */
		if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
			return 1;
		return 0;
	}

	/* record hypothesis canon_id -> cand_id before recursing */
	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
		return -ENOMEM;

	cand_type = btf_type_by_id(d->btf, cand_id);
	canon_type = btf_type_by_id(d->btf, canon_id);
	cand_kind = btf_kind(cand_type);
	canon_kind = btf_kind(canon_type);

	/* names must match for any kind of equivalence */
	if (cand_type->name_off != canon_type->name_off)
		return 0;

	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
	if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
	    && cand_kind != canon_kind) {
		__u16 real_kind;
		__u16 fwd_kind;

		if (cand_kind == BTF_KIND_FWD) {
			real_kind = canon_kind;
			fwd_kind = btf_fwd_kind(cand_type);
		} else {
			real_kind = cand_kind;
			fwd_kind = btf_fwd_kind(canon_type);
			/* we'd need to resolve base FWD to STRUCT/UNION */
			if (fwd_kind == real_kind && canon_id < d->btf->start_id)
				d->hypot_adjust_canon = true;
		}
		/* equivalent iff FWD's flavor (struct vs union) matches */
		return fwd_kind == real_kind;
	}

	if (cand_kind != canon_kind)
		return 0;

	switch (cand_kind) {
	case BTF_KIND_INT:
		return btf_equal_int_tag(cand_type, canon_type);

	case BTF_KIND_ENUM:
	case BTF_KIND_ENUM64:
		return btf_compat_enum(cand_type, canon_type);

	case BTF_KIND_FWD:
	case BTF_KIND_FLOAT:
		return btf_equal_common(cand_type, canon_type);

	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		if (cand_type->info != canon_type->info)
			return 0;
		/* reference types: recurse into the referenced type */
		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);

	case BTF_KIND_ARRAY: {
		const struct btf_array *cand_arr, *canon_arr;

		if (!btf_compat_array(cand_type, canon_type))
			return 0;
		/* check index type, then element type, recursively */
		cand_arr = btf_array(cand_type);
		canon_arr = btf_array(canon_type);
		eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
		if (eq <= 0)
			return eq;
		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
	}

	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION: {
		const struct btf_member *cand_m, *canon_m;
		__u16 vlen;

		if (!btf_shallow_equal_struct(cand_type, canon_type))
			return 0;
		/* members' referenced types are checked recursively */
		vlen = btf_vlen(cand_type);
		cand_m = btf_members(cand_type);
		canon_m = btf_members(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
			if (eq <= 0)
				return eq;
			cand_m++;
			canon_m++;
		}

		return 1;
	}

	case BTF_KIND_FUNC_PROTO: {
		const struct btf_param *cand_p, *canon_p;
		__u16 vlen;

		if (!btf_compat_fnproto(cand_type, canon_type))
			return 0;
		/* check return type, then each parameter type, recursively */
		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
		if (eq <= 0)
			return eq;
		vlen = btf_vlen(cand_type);
		cand_p = btf_params(cand_type);
		canon_p = btf_params(canon_type);
		for (i = 0; i < vlen; i++) {
			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
			if (eq <= 0)
				return eq;
			cand_p++;
			canon_p++;
		}
		return 1;
	}

	default:
		return -EINVAL;
	}
	return 0;
}
4439 
4440 /*
4441  * Use hypothetical mapping, produced by successful type graph equivalence
4442  * check, to augment existing struct/union canonical mapping, where possible.
4443  *
4444  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
4445  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
4446  * it doesn't matter if FWD type was part of canonical graph or candidate one,
4447  * we are recording the mapping anyway. As opposed to carefulness required
4448  * for struct/union correspondence mapping (described below), for FWD resolution
4449  * it's not important, as by the time that FWD type (reference type) will be
4450  * deduplicated all structs/unions will be deduped already anyway.
4451  *
4452  * Recording STRUCT/UNION mapping is purely a performance optimization and is
4453  * not required for correctness. It needs to be done carefully to ensure that
4454  * struct/union from candidate's type graph is not mapped into corresponding
4455  * struct/union from canonical type graph that itself hasn't been resolved into
4456  * canonical representative. The only guarantee we have is that canonical
4457  * struct/union was determined as canonical and that won't change. But any
4458  * types referenced through that struct/union fields could have been not yet
4459  * resolved, so in case like that it's too early to establish any kind of
4460  * correspondence between structs/unions.
4461  *
4462  * No canonical correspondence is derived for primitive types (they are already
 * deduplicated completely anyway) or reference types (they rely on
4464  * stability of struct/union canonical relationship for equivalence checks).
4465  */
static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
{
	__u32 canon_type_id, targ_type_id;
	__u16 t_kind, c_kind;
	__u32 t_id, c_id;
	int i;

	/* hypot_list holds every canon_id recorded during the last
	 * successful equivalence check; hypot_map maps it to its candidate
	 */
	for (i = 0; i < d->hypot_cnt; i++) {
		canon_type_id = d->hypot_list[i];
		targ_type_id = d->hypot_map[canon_type_id];
		/* t_* is the candidate (target) side, c_* the canonical side */
		t_id = resolve_type_id(d, targ_type_id);
		c_id = resolve_type_id(d, canon_type_id);
		t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
		c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
		/*
		 * Resolve FWD into STRUCT/UNION.
		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
		 * mapped to canonical representative (as opposed to
		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
		 * eventually that struct is going to be mapped and all resolved
		 * FWDs will automatically resolve to correct canonical
		 * representative. This will happen before ref type deduping,
		 * which critically depends on stability of these mappings. This
		 * stability is not a requirement for STRUCT/UNION equivalence
		 * checks, though.
		 */

		/* if it's the split BTF case, we still need to point base FWD
		 * to STRUCT/UNION in a split BTF, because FWDs from split BTF
		 * will be resolved against base FWD. If we don't point base
		 * canonical FWD to the resolved STRUCT/UNION, then all the
		 * FWDs in split BTF won't be correctly resolved to a proper
		 * STRUCT/UNION.
		 */
		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
			d->map[c_id] = t_id;

		/* if graph equivalence determined that we'd need to adjust
		 * base canonical types, then we need to only point base FWDs
		 * to STRUCTs/UNIONs and do no more modifications. For all
		 * other purposes the type graphs were not equivalent.
		 */
		if (d->hypot_adjust_canon)
			continue;

		/* candidate-side FWD resolves to canonical STRUCT/UNION */
		if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
			d->map[t_id] = c_id;

		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
		    c_kind != BTF_KIND_FWD &&
		    is_type_mapped(d, c_id) &&
		    !is_type_mapped(d, t_id)) {
			/*
			 * as a perf optimization, we can map struct/union
			 * that's part of type graph we just verified for
			 * equivalence. We can do that for struct/union that has
			 * canonical representative only, though.
			 */
			d->map[t_id] = c_id;
		}
	}
}
4528 
4529 /*
4530  * Deduplicate struct/union types.
4531  *
4532  * For each struct/union type its type signature hash is calculated, taking
4533  * into account type's name, size, number, order and names of fields, but
4534  * ignoring type ID's referenced from fields, because they might not be deduped
4535  * completely until after reference types deduplication phase. This type hash
4536  * is used to iterate over all potential canonical types, sharing same hash.
4537  * For each canonical candidate we check whether type graphs that they form
4538  * (through referenced types in fields and so on) are equivalent using algorithm
4539  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
4540  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
4541  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
4542  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
4543  * potentially map other structs/unions to their canonical representatives,
4544  * if such relationship hasn't yet been established. This speeds up algorithm
4545  * by eliminating some of the duplicate work.
4546  *
4547  * If no matching canonical representative was found, struct/union is marked
4548  * as canonical for itself and is added into btf_dedup->dedup_table hash map
4549  * for further look ups.
4550  */
static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
{
	struct btf_type *cand_type, *t;
	struct hashmap_entry *hash_entry;
	/* if we don't find equivalent type, then we are canonical */
	__u32 new_id = type_id;
	__u16 kind;
	long h;

	/* already deduped or is in process of deduping (loop detected) */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return 0;

	t = btf_type_by_id(d->btf, type_id);
	kind = btf_kind(t);

	/* this phase handles only structs and unions */
	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
		return 0;

	/* hash ignores referenced type IDs; see comment above the function */
	h = btf_hash_struct(t);
	for_each_dedup_cand(d, hash_entry, h) {
		__u32 cand_id = hash_entry->value;
		int eq;

		/*
		 * Even though btf_dedup_is_equiv() checks for
		 * btf_shallow_equal_struct() internally when checking two
		 * structs (unions) for equivalence, we need to guard here
		 * from picking matching FWD type as a dedup candidate.
		 * This can happen due to hash collision. In such case just
		 * relying on btf_dedup_is_equiv() would lead to potentially
		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
		 * FWD and compatible STRUCT/UNION are considered equivalent.
		 */
		cand_type = btf_type_by_id(d->btf, cand_id);
		if (!btf_shallow_equal_struct(t, cand_type))
			continue;

		/* hypot_map must start clean for each candidate's DFS */
		btf_dedup_clear_hypot_map(d);
		eq = btf_dedup_is_equiv(d, type_id, cand_id);
		if (eq < 0)
			return eq;
		if (!eq)
			continue;
		/* commit FWD resolutions and struct/union mappings */
		btf_dedup_merge_hypot_map(d);
		if (d->hypot_adjust_canon) /* not really equivalent */
			continue;
		new_id = cand_id;
		break;
	}

	d->map[type_id] = new_id;
	/* self-canonical types become candidates for later look-ups */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return 0;
}
4608 
4609 static int btf_dedup_struct_types(struct btf_dedup *d)
4610 {
4611 	int i, err;
4612 
4613 	for (i = 0; i < d->btf->nr_types; i++) {
4614 		err = btf_dedup_struct_type(d, d->btf->start_id + i);
4615 		if (err)
4616 			return err;
4617 	}
4618 	return 0;
4619 }
4620 
4621 /*
4622  * Deduplicate reference type.
4623  *
4624  * Once all primitive and struct/union types got deduplicated, we can easily
4625  * deduplicate all other (reference) BTF types. This is done in two steps:
4626  *
4627  * 1. Resolve all referenced type IDs into their canonical type IDs. This
4628  * resolution can be done either immediately for primitive or struct/union types
4629  * (because they were deduped in previous two phases) or recursively for
4630  * reference types. Recursion will always terminate at either primitive or
4631  * struct/union type, at which point we can "unwind" chain of reference types
4632  * one by one. There is no danger of encountering cycles because in C type
4633  * system the only way to form type cycle is through struct/union, so any chain
4634  * of reference types, even those taking part in a type cycle, will inevitably
4635  * reach struct/union at some point.
4636  *
4637  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
4638  * becomes "stable", in the sense that no further deduplication will cause
4639  * any changes to it. With that, it's now possible to calculate type's signature
4640  * hash (this time taking into account referenced type IDs) and loop over all
4641  * potential canonical representatives. If no match was found, current type
4642  * will become canonical representative of itself and will be added into
4643  * btf_dedup->dedup_table as another possible canonical representative.
4644  */
static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
{
	struct hashmap_entry *hash_entry;
	__u32 new_id = type_id, cand_id;
	struct btf_type *t, *cand;
	/* if we don't find equivalent type, then we are representative type */
	int ref_type_id;
	long h;

	/* a cycle made purely of reference types - invalid BTF */
	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
		return -ELOOP;
	/* already deduped - return final (canonical) type ID */
	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
		return resolve_type_id(d, type_id);

	t = btf_type_by_id(d->btf, type_id);
	/* mark in-progress to detect reference-type cycles (see above) */
	d->map[type_id] = BTF_IN_PROGRESS_ID;

	switch (btf_kind(t)) {
	case BTF_KIND_CONST:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_PTR:
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_FUNC:
	case BTF_KIND_TYPE_TAG:
		/* resolve referenced type first, then dedup self */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_common(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_common(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_DECL_TAG:
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		h = btf_hash_int_decl_tag(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_int_tag(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;

	case BTF_KIND_ARRAY: {
		struct btf_array *info = btf_array(t);

		/* resolve both element and index types before hashing */
		ref_type_id = btf_dedup_ref_type(d, info->type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->type = ref_type_id;

		ref_type_id = btf_dedup_ref_type(d, info->index_type);
		if (ref_type_id < 0)
			return ref_type_id;
		info->index_type = ref_type_id;

		h = btf_hash_array(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_array(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *param;
		__u16 vlen;
		int i;

		/* resolve return type, then every parameter type */
		ref_type_id = btf_dedup_ref_type(d, t->type);
		if (ref_type_id < 0)
			return ref_type_id;
		t->type = ref_type_id;

		vlen = btf_vlen(t);
		param = btf_params(t);
		for (i = 0; i < vlen; i++) {
			ref_type_id = btf_dedup_ref_type(d, param->type);
			if (ref_type_id < 0)
				return ref_type_id;
			param->type = ref_type_id;
			param++;
		}

		h = btf_hash_fnproto(t);
		for_each_dedup_cand(d, hash_entry, h) {
			cand_id = hash_entry->value;
			cand = btf_type_by_id(d->btf, cand_id);
			if (btf_equal_fnproto(t, cand)) {
				new_id = cand_id;
				break;
			}
		}
		break;
	}

	default:
		return -EINVAL;
	}

	d->map[type_id] = new_id;
	/* self-canonical types are registered as future dedup candidates */
	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
		return -ENOMEM;

	return new_id;
}
4770 
4771 static int btf_dedup_ref_types(struct btf_dedup *d)
4772 {
4773 	int i, err;
4774 
4775 	for (i = 0; i < d->btf->nr_types; i++) {
4776 		err = btf_dedup_ref_type(d, d->btf->start_id + i);
4777 		if (err < 0)
4778 			return err;
4779 	}
4780 	/* we won't need d->dedup_table anymore */
4781 	hashmap__free(d->dedup_table);
4782 	d->dedup_table = NULL;
4783 	return 0;
4784 }
4785 
4786 /*
4787  * Collect a map from type names to type ids for all canonical structs
4788  * and unions. If the same name is shared by several canonical types
4789  * use a special value 0 to indicate this fact.
4790  */
4791 static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
4792 {
4793 	__u32 nr_types = btf__type_cnt(d->btf);
4794 	struct btf_type *t;
4795 	__u32 type_id;
4796 	__u16 kind;
4797 	int err;
4798 
4799 	/*
4800 	 * Iterate over base and split module ids in order to get all
4801 	 * available structs in the map.
4802 	 */
4803 	for (type_id = 1; type_id < nr_types; ++type_id) {
4804 		t = btf_type_by_id(d->btf, type_id);
4805 		kind = btf_kind(t);
4806 
4807 		if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
4808 			continue;
4809 
4810 		/* Skip non-canonical types */
4811 		if (type_id != d->map[type_id])
4812 			continue;
4813 
4814 		err = hashmap__add(names_map, t->name_off, type_id);
4815 		if (err == -EEXIST)
4816 			err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
4817 
4818 		if (err)
4819 			return err;
4820 	}
4821 
4822 	return 0;
4823 }
4824 
4825 static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
4826 {
4827 	struct btf_type *t = btf_type_by_id(d->btf, type_id);
4828 	enum btf_fwd_kind fwd_kind = btf_kflag(t);
4829 	__u16 cand_kind, kind = btf_kind(t);
4830 	struct btf_type *cand_t;
4831 	uintptr_t cand_id;
4832 
4833 	if (kind != BTF_KIND_FWD)
4834 		return 0;
4835 
4836 	/* Skip if this FWD already has a mapping */
4837 	if (type_id != d->map[type_id])
4838 		return 0;
4839 
4840 	if (!hashmap__find(names_map, t->name_off, &cand_id))
4841 		return 0;
4842 
4843 	/* Zero is a special value indicating that name is not unique */
4844 	if (!cand_id)
4845 		return 0;
4846 
4847 	cand_t = btf_type_by_id(d->btf, cand_id);
4848 	cand_kind = btf_kind(cand_t);
4849 	if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
4850 	    (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
4851 		return 0;
4852 
4853 	d->map[type_id] = cand_id;
4854 
4855 	return 0;
4856 }
4857 
4858 /*
4859  * Resolve unambiguous forward declarations.
4860  *
4861  * The lion's share of all FWD declarations is resolved during
4862  * `btf_dedup_struct_types` phase when different type graphs are
4863  * compared against each other. However, if in some compilation unit a
4864  * FWD declaration is not a part of a type graph compared against
4865  * another type graph that declaration's canonical type would not be
4866  * changed. Example:
4867  *
4868  * CU #1:
4869  *
4870  * struct foo;
4871  * struct foo *some_global;
4872  *
4873  * CU #2:
4874  *
4875  * struct foo { int u; };
4876  * struct foo *another_global;
4877  *
4878  * After `btf_dedup_struct_types` the BTF looks as follows:
4879  *
4880  * [1] STRUCT 'foo' size=4 vlen=1 ...
4881  * [2] INT 'int' size=4 ...
4882  * [3] PTR '(anon)' type_id=1
4883  * [4] FWD 'foo' fwd_kind=struct
4884  * [5] PTR '(anon)' type_id=4
4885  *
4886  * This pass assumes that such FWD declarations should be mapped to
4887  * structs or unions with identical name in case if the name is not
4888  * ambiguous.
4889  */
4890 static int btf_dedup_resolve_fwds(struct btf_dedup *d)
4891 {
4892 	int i, err;
4893 	struct hashmap *names_map;
4894 
4895 	names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
4896 	if (IS_ERR(names_map))
4897 		return PTR_ERR(names_map);
4898 
4899 	err = btf_dedup_fill_unique_names_map(d, names_map);
4900 	if (err < 0)
4901 		goto exit;
4902 
4903 	for (i = 0; i < d->btf->nr_types; i++) {
4904 		err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
4905 		if (err < 0)
4906 			break;
4907 	}
4908 
4909 exit:
4910 	hashmap__free(names_map);
4911 	return err;
4912 }
4913 
4914 /*
4915  * Compact types.
4916  *
4917  * After we established for each type its corresponding canonical representative
4918  * type, we now can eliminate types that are not canonical and leave only
 * canonical ones laid out sequentially in memory by copying them over
4920  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
4921  * a map from original type ID to a new compacted type ID, which will be used
4922  * during next phase to "fix up" type IDs, referenced from struct/union and
4923  * reference types.
4924  */
static int btf_dedup_compact_types(struct btf_dedup *d)
{
	__u32 *new_offs;
	__u32 next_type_id = d->btf->start_id;
	const struct btf_type *t;
	void *p;
	int i, id, len;

	/* we are going to reuse hypot_map to store compaction remapping */
	d->hypot_map[0] = 0;
	/* base BTF types are not renumbered */
	for (id = 1; id < d->btf->start_id; id++)
		d->hypot_map[id] = id;
	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
		d->hypot_map[id] = BTF_UNPROCESSED_ID;

	/* write pointer: compacted types are packed from the start */
	p = d->btf->types_data;

	for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
		/* only canonical types survive compaction */
		if (d->map[id] != id)
			continue;

		t = btf__type_by_id(d->btf, id);
		len = btf_type_size(t);
		if (len < 0)
			return len;

		/* slide type data over eliminated duplicates (may overlap) */
		memmove(p, t, len);
		d->hypot_map[id] = next_type_id;
		d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
		p += len;
		next_type_id++;
	}

	/* shrink struct btf's internal types index and update btf_header */
	d->btf->nr_types = next_type_id - d->btf->start_id;
	d->btf->type_offs_cap = d->btf->nr_types;
	d->btf->hdr->type_len = p - d->btf->types_data;
	new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
				       sizeof(*new_offs));
	/* shrinking realloc shouldn't fail, but handle it anyway */
	if (d->btf->type_offs_cap && !new_offs)
		return -ENOMEM;
	d->btf->type_offs = new_offs;
	/* strings section now immediately follows the compacted types */
	d->btf->hdr->str_off = d->btf->hdr->type_len;
	d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
	return 0;
}
4972 
4973 /*
4974  * Figure out final (deduplicated and compacted) type ID for provided original
4975  * `type_id` by first resolving it into corresponding canonical type ID and
4976  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
4977  * which is populated during compaction phase.
4978  */
4979 static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
4980 {
4981 	struct btf_dedup *d = ctx;
4982 	__u32 resolved_type_id, new_type_id;
4983 
4984 	resolved_type_id = resolve_type_id(d, *type_id);
4985 	new_type_id = d->hypot_map[resolved_type_id];
4986 	if (new_type_id > BTF_MAX_NR_TYPES)
4987 		return -EINVAL;
4988 
4989 	*type_id = new_type_id;
4990 	return 0;
4991 }
4992 
4993 /*
4994  * Remap referenced type IDs into deduped type IDs.
4995  *
4996  * After BTF types are deduplicated and compacted, their final type IDs may
4997  * differ from original ones. The map from original to a corresponding
4998  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
4999  * compaction phase. During remapping phase we are rewriting all type IDs
5000  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
5001  * their final deduped type IDs.
5002  */
5003 static int btf_dedup_remap_types(struct btf_dedup *d)
5004 {
5005 	int i, r;
5006 
5007 	for (i = 0; i < d->btf->nr_types; i++) {
5008 		struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
5009 
5010 		r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
5011 		if (r)
5012 			return r;
5013 	}
5014 
5015 	if (!d->btf_ext)
5016 		return 0;
5017 
5018 	r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
5019 	if (r)
5020 		return r;
5021 
5022 	return 0;
5023 }
5024 
5025 /*
5026  * Probe few well-known locations for vmlinux kernel image and try to load BTF
5027  * data out of it to use for target BTF.
5028  */
5029 struct btf *btf__load_vmlinux_btf(void)
5030 {
5031 	const char *locations[] = {
5032 		/* try canonical vmlinux BTF through sysfs first */
5033 		"/sys/kernel/btf/vmlinux",
5034 		/* fall back to trying to find vmlinux on disk otherwise */
5035 		"/boot/vmlinux-%1$s",
5036 		"/lib/modules/%1$s/vmlinux-%1$s",
5037 		"/lib/modules/%1$s/build/vmlinux",
5038 		"/usr/lib/modules/%1$s/kernel/vmlinux",
5039 		"/usr/lib/debug/boot/vmlinux-%1$s",
5040 		"/usr/lib/debug/boot/vmlinux-%1$s.debug",
5041 		"/usr/lib/debug/lib/modules/%1$s/vmlinux",
5042 	};
5043 	char path[PATH_MAX + 1];
5044 	struct utsname buf;
5045 	struct btf *btf;
5046 	int i, err;
5047 
5048 	uname(&buf);
5049 
5050 	for (i = 0; i < ARRAY_SIZE(locations); i++) {
5051 		snprintf(path, PATH_MAX, locations[i], buf.release);
5052 
5053 		if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
5054 			continue;
5055 
5056 		btf = btf__parse(path, NULL);
5057 		err = libbpf_get_error(btf);
5058 		pr_debug("loading kernel BTF '%s': %d\n", path, err);
5059 		if (err)
5060 			continue;
5061 
5062 		return btf;
5063 	}
5064 
5065 	pr_warn("failed to find valid kernel BTF\n");
5066 	return libbpf_err_ptr(-ESRCH);
5067 }
5068 
/* Backward-compatibility alias: older callers use libbpf_find_kernel_btf(),
 * which resolves to btf__load_vmlinux_btf() via a linker-level alias.
 */
struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
5070 
/*
 * Load BTF for the named kernel module from sysfs, parsed as split BTF on
 * top of vmlinux_btf (module BTF is assumed to reference base vmlinux
 * types — see btf__parse_split()).
 */
struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
{
	char sysfs_path[80];

	snprintf(sysfs_path, sizeof(sysfs_path), "/sys/kernel/btf/%s", module_name);
	return btf__parse_split(sysfs_path, vmlinux_btf);
}
5078 
5079 int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
5080 {
5081 	int i, n, err;
5082 
5083 	switch (btf_kind(t)) {
5084 	case BTF_KIND_INT:
5085 	case BTF_KIND_FLOAT:
5086 	case BTF_KIND_ENUM:
5087 	case BTF_KIND_ENUM64:
5088 		return 0;
5089 
5090 	case BTF_KIND_FWD:
5091 	case BTF_KIND_CONST:
5092 	case BTF_KIND_VOLATILE:
5093 	case BTF_KIND_RESTRICT:
5094 	case BTF_KIND_PTR:
5095 	case BTF_KIND_TYPEDEF:
5096 	case BTF_KIND_FUNC:
5097 	case BTF_KIND_VAR:
5098 	case BTF_KIND_DECL_TAG:
5099 	case BTF_KIND_TYPE_TAG:
5100 		return visit(&t->type, ctx);
5101 
5102 	case BTF_KIND_ARRAY: {
5103 		struct btf_array *a = btf_array(t);
5104 
5105 		err = visit(&a->type, ctx);
5106 		err = err ?: visit(&a->index_type, ctx);
5107 		return err;
5108 	}
5109 
5110 	case BTF_KIND_STRUCT:
5111 	case BTF_KIND_UNION: {
5112 		struct btf_member *m = btf_members(t);
5113 
5114 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5115 			err = visit(&m->type, ctx);
5116 			if (err)
5117 				return err;
5118 		}
5119 		return 0;
5120 	}
5121 
5122 	case BTF_KIND_FUNC_PROTO: {
5123 		struct btf_param *m = btf_params(t);
5124 
5125 		err = visit(&t->type, ctx);
5126 		if (err)
5127 			return err;
5128 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5129 			err = visit(&m->type, ctx);
5130 			if (err)
5131 				return err;
5132 		}
5133 		return 0;
5134 	}
5135 
5136 	case BTF_KIND_DATASEC: {
5137 		struct btf_var_secinfo *m = btf_var_secinfos(t);
5138 
5139 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5140 			err = visit(&m->type, ctx);
5141 			if (err)
5142 				return err;
5143 		}
5144 		return 0;
5145 	}
5146 
5147 	default:
5148 		return -EINVAL;
5149 	}
5150 }
5151 
5152 int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
5153 {
5154 	int i, n, err;
5155 
5156 	err = visit(&t->name_off, ctx);
5157 	if (err)
5158 		return err;
5159 
5160 	switch (btf_kind(t)) {
5161 	case BTF_KIND_STRUCT:
5162 	case BTF_KIND_UNION: {
5163 		struct btf_member *m = btf_members(t);
5164 
5165 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5166 			err = visit(&m->name_off, ctx);
5167 			if (err)
5168 				return err;
5169 		}
5170 		break;
5171 	}
5172 	case BTF_KIND_ENUM: {
5173 		struct btf_enum *m = btf_enum(t);
5174 
5175 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5176 			err = visit(&m->name_off, ctx);
5177 			if (err)
5178 				return err;
5179 		}
5180 		break;
5181 	}
5182 	case BTF_KIND_ENUM64: {
5183 		struct btf_enum64 *m = btf_enum64(t);
5184 
5185 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5186 			err = visit(&m->name_off, ctx);
5187 			if (err)
5188 				return err;
5189 		}
5190 		break;
5191 	}
5192 	case BTF_KIND_FUNC_PROTO: {
5193 		struct btf_param *m = btf_params(t);
5194 
5195 		for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
5196 			err = visit(&m->name_off, ctx);
5197 			if (err)
5198 				return err;
5199 		}
5200 		break;
5201 	}
5202 	default:
5203 		break;
5204 	}
5205 
5206 	return 0;
5207 }
5208 
5209 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
5210 {
5211 	const struct btf_ext_info *seg;
5212 	struct btf_ext_info_sec *sec;
5213 	int i, err;
5214 
5215 	seg = &btf_ext->func_info;
5216 	for_each_btf_ext_sec(seg, sec) {
5217 		struct bpf_func_info_min *rec;
5218 
5219 		for_each_btf_ext_rec(seg, sec, i, rec) {
5220 			err = visit(&rec->type_id, ctx);
5221 			if (err < 0)
5222 				return err;
5223 		}
5224 	}
5225 
5226 	seg = &btf_ext->core_relo_info;
5227 	for_each_btf_ext_sec(seg, sec) {
5228 		struct bpf_core_relo *rec;
5229 
5230 		for_each_btf_ext_rec(seg, sec, i, rec) {
5231 			err = visit(&rec->type_id, ctx);
5232 			if (err < 0)
5233 				return err;
5234 		}
5235 	}
5236 
5237 	return 0;
5238 }
5239 
5240 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
5241 {
5242 	const struct btf_ext_info *seg;
5243 	struct btf_ext_info_sec *sec;
5244 	int i, err;
5245 
5246 	seg = &btf_ext->func_info;
5247 	for_each_btf_ext_sec(seg, sec) {
5248 		err = visit(&sec->sec_name_off, ctx);
5249 		if (err)
5250 			return err;
5251 	}
5252 
5253 	seg = &btf_ext->line_info;
5254 	for_each_btf_ext_sec(seg, sec) {
5255 		struct bpf_line_info_min *rec;
5256 
5257 		err = visit(&sec->sec_name_off, ctx);
5258 		if (err)
5259 			return err;
5260 
5261 		for_each_btf_ext_rec(seg, sec, i, rec) {
5262 			err = visit(&rec->file_name_off, ctx);
5263 			if (err)
5264 				return err;
5265 			err = visit(&rec->line_off, ctx);
5266 			if (err)
5267 				return err;
5268 		}
5269 	}
5270 
5271 	seg = &btf_ext->core_relo_info;
5272 	for_each_btf_ext_sec(seg, sec) {
5273 		struct bpf_core_relo *rec;
5274 
5275 		err = visit(&sec->sec_name_off, ctx);
5276 		if (err)
5277 			return err;
5278 
5279 		for_each_btf_ext_rec(seg, sec, i, rec) {
5280 			err = visit(&rec->access_str_off, ctx);
5281 			if (err)
5282 				return err;
5283 		}
5284 	}
5285 
5286 	return 0;
5287 }
5288