1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3
4 #include <endian.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <errno.h>
11 #include <linux/err.h>
12 #include <linux/btf.h>
13 #include <gelf.h>
14 #include "btf.h"
15 #include "bpf.h"
16 #include "libbpf.h"
17 #include "libbpf_internal.h"
18 #include "hashmap.h"
19
20 #define BTF_MAX_NR_TYPES 0x7fffffff
21 #define BTF_MAX_STR_OFFSET 0x7fffffff
22
23 static struct btf_type btf_void;
24
25 struct btf {
26 union {
27 struct btf_header *hdr;
28 void *data;
29 };
30 struct btf_type **types;
31 const char *strings;
32 void *nohdr_data;
33 __u32 nr_types;
34 __u32 types_size;
35 __u32 data_size;
36 int fd;
37 };
38
39 static inline __u64 ptr_to_u64(const void *ptr)
40 {
41 return (__u64) (unsigned long) ptr;
42 }
43
44 static int btf_add_type(struct btf *btf, struct btf_type *t)
45 {
46 if (btf->types_size - btf->nr_types < 2) {
47 struct btf_type **new_types;
48 __u32 expand_by, new_size;
49
50 if (btf->types_size == BTF_MAX_NR_TYPES)
51 return -E2BIG;
52
53 expand_by = max(btf->types_size >> 2, 16);
54 new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
55
56 new_types = realloc(btf->types, sizeof(*new_types) * new_size);
57 if (!new_types)
58 return -ENOMEM;
59
60 if (btf->nr_types == 0)
61 new_types[0] = &btf_void;
62
63 btf->types = new_types;
64 btf->types_size = new_size;
65 }
66
67 btf->types[++(btf->nr_types)] = t;
68
69 return 0;
70 }
71
72 static int btf_parse_hdr(struct btf *btf)
73 {
74 const struct btf_header *hdr = btf->hdr;
75 __u32 meta_left;
76
77 if (btf->data_size < sizeof(struct btf_header)) {
78 pr_debug("BTF header not found\n");
79 return -EINVAL;
80 }
81
82 if (hdr->magic != BTF_MAGIC) {
83 pr_debug("Invalid BTF magic:%x\n", hdr->magic);
84 return -EINVAL;
85 }
86
87 if (hdr->version != BTF_VERSION) {
88 pr_debug("Unsupported BTF version:%u\n", hdr->version);
89 return -ENOTSUP;
90 }
91
92 if (hdr->flags) {
93 pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
94 return -ENOTSUP;
95 }
96
97 meta_left = btf->data_size - sizeof(*hdr);
98 if (!meta_left) {
99 pr_debug("BTF has no data\n");
100 return -EINVAL;
101 }
102
103 if (meta_left < hdr->str_off + hdr->str_len) {
104 pr_debug("Invalid BTF total size:%u\n", btf->data_size);
105 return -EINVAL;
106 }
107
108 if (hdr->type_off + hdr->type_len > hdr->str_off) {
109 pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
110 hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
111 return -EINVAL;
112 }
113
114 if (hdr->type_off % 4) {
115 pr_debug("BTF type section is not aligned to 4 bytes\n");
116 return -EINVAL;
117 }
118
119 btf->nohdr_data = btf->hdr + 1;
120
121 return 0;
122 }
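/*
 * Illustrative note (added for clarity, not part of the original source): the
 * raw blob validated above is laid out, per the canonical struct btf_header
 * from <linux/btf.h>, roughly as:
 *
 *	struct btf_header hdr;          // magic, version, flags, hdr_len,
 *	                                // type_off/type_len, str_off/str_len
 *	char type_data[hdr.type_len];   // struct btf_type records, 4-byte aligned
 *	char str_data[hdr.str_len];     // '\0'-separated strings
 *
 * type_off and str_off are relative to the end of the header, which is what
 * btf->nohdr_data points at once this function succeeds.
 */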
123
124 static int btf_parse_str_sec(struct btf *btf)
125 {
126 const struct btf_header *hdr = btf->hdr;
127 const char *start = btf->nohdr_data + hdr->str_off;
128 const char *end = start + btf->hdr->str_len;
129
130 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
131 start[0] || end[-1]) {
132 pr_debug("Invalid BTF string section\n");
133 return -EINVAL;
134 }
135
136 btf->strings = start;
137
138 return 0;
139 }
140
141 static int btf_type_size(struct btf_type *t)
142 {
143 int base_size = sizeof(struct btf_type);
144 __u16 vlen = btf_vlen(t);
145
146 switch (btf_kind(t)) {
147 case BTF_KIND_FWD:
148 case BTF_KIND_CONST:
149 case BTF_KIND_VOLATILE:
150 case BTF_KIND_RESTRICT:
151 case BTF_KIND_PTR:
152 case BTF_KIND_TYPEDEF:
153 case BTF_KIND_FUNC:
154 return base_size;
155 case BTF_KIND_INT:
156 return base_size + sizeof(__u32);
157 case BTF_KIND_ENUM:
158 return base_size + vlen * sizeof(struct btf_enum);
159 case BTF_KIND_ARRAY:
160 return base_size + sizeof(struct btf_array);
161 case BTF_KIND_STRUCT:
162 case BTF_KIND_UNION:
163 return base_size + vlen * sizeof(struct btf_member);
164 case BTF_KIND_FUNC_PROTO:
165 return base_size + vlen * sizeof(struct btf_param);
166 case BTF_KIND_VAR:
167 return base_size + sizeof(struct btf_var);
168 case BTF_KIND_DATASEC:
169 return base_size + vlen * sizeof(struct btf_var_secinfo);
170 default:
171 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
172 return -EINVAL;
173 }
174 }
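/*
 * Worked example (illustrative only): a BTF_KIND_STRUCT descriptor with
 * vlen == 3 occupies sizeof(struct btf_type) + 3 * sizeof(struct btf_member),
 * i.e. 12 + 3 * 12 = 48 bytes with the canonical UAPI layout, so the next
 * type record starts 48 bytes further into the type section.
 */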
175
176 static int btf_parse_type_sec(struct btf *btf)
177 {
178 struct btf_header *hdr = btf->hdr;
179 void *nohdr_data = btf->nohdr_data;
180 void *next_type = nohdr_data + hdr->type_off;
181 void *end_type = nohdr_data + hdr->str_off;
182
183 while (next_type < end_type) {
184 struct btf_type *t = next_type;
185 int type_size;
186 int err;
187
188 type_size = btf_type_size(t);
189 if (type_size < 0)
190 return type_size;
191 next_type += type_size;
192 err = btf_add_type(btf, t);
193 if (err)
194 return err;
195 }
196
197 return 0;
198 }
199
200 __u32 btf__get_nr_types(const struct btf *btf)
201 {
202 return btf->nr_types;
203 }
204
205 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
206 {
207 if (type_id > btf->nr_types)
208 return NULL;
209
210 return btf->types[type_id];
211 }
212
213 static bool btf_type_is_void(const struct btf_type *t)
214 {
215 return t == &btf_void || btf_is_fwd(t);
216 }
217
218 static bool btf_type_is_void_or_null(const struct btf_type *t)
219 {
220 return !t || btf_type_is_void(t);
221 }
222
223 #define MAX_RESOLVE_DEPTH 32
224
225 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
226 {
227 const struct btf_array *array;
228 const struct btf_type *t;
229 __u32 nelems = 1;
230 __s64 size = -1;
231 int i;
232
233 t = btf__type_by_id(btf, type_id);
234 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
235 i++) {
236 switch (btf_kind(t)) {
237 case BTF_KIND_INT:
238 case BTF_KIND_STRUCT:
239 case BTF_KIND_UNION:
240 case BTF_KIND_ENUM:
241 case BTF_KIND_DATASEC:
242 size = t->size;
243 goto done;
244 case BTF_KIND_PTR:
245 size = sizeof(void *);
246 goto done;
247 case BTF_KIND_TYPEDEF:
248 case BTF_KIND_VOLATILE:
249 case BTF_KIND_CONST:
250 case BTF_KIND_RESTRICT:
251 case BTF_KIND_VAR:
252 type_id = t->type;
253 break;
254 case BTF_KIND_ARRAY:
255 array = btf_array(t);
256 if (nelems && array->nelems > UINT32_MAX / nelems)
257 return -E2BIG;
258 nelems *= array->nelems;
259 type_id = array->type;
260 break;
261 default:
262 return -EINVAL;
263 }
264
265 t = btf__type_by_id(btf, type_id);
266 }
267
268 done:
269 if (size < 0)
270 return -EINVAL;
271 if (nelems && size > UINT32_MAX / nelems)
272 return -E2BIG;
273
274 return nelems * size;
275 }
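/*
 * Worked example (illustrative only): for a hypothetical
 *
 *	typedef int row_t[10];
 *
 * resolving the size of row_t walks TYPEDEF -> ARRAY (nelems == 10) ->
 * INT (size == 4), so btf__resolve_size() returns 10 * 4 = 40 bytes.
 */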
276
277 int btf__resolve_type(const struct btf *btf, __u32 type_id)
278 {
279 const struct btf_type *t;
280 int depth = 0;
281
282 t = btf__type_by_id(btf, type_id);
283 while (depth < MAX_RESOLVE_DEPTH &&
284 !btf_type_is_void_or_null(t) &&
285 (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
286 type_id = t->type;
287 t = btf__type_by_id(btf, type_id);
288 depth++;
289 }
290
291 if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
292 return -EINVAL;
293
294 return type_id;
295 }
296
297 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
298 {
299 __u32 i;
300
301 if (!strcmp(type_name, "void"))
302 return 0;
303
304 for (i = 1; i <= btf->nr_types; i++) {
305 const struct btf_type *t = btf->types[i];
306 const char *name = btf__name_by_offset(btf, t->name_off);
307
308 if (name && !strcmp(type_name, name))
309 return i;
310 }
311
312 return -ENOENT;
313 }
314
315 void btf__free(struct btf *btf)
316 {
317 if (!btf)
318 return;
319
320 if (btf->fd != -1)
321 close(btf->fd);
322
323 free(btf->data);
324 free(btf->types);
325 free(btf);
326 }
327
328 struct btf *btf__new(__u8 *data, __u32 size)
329 {
330 struct btf *btf;
331 int err;
332
333 btf = calloc(1, sizeof(struct btf));
334 if (!btf)
335 return ERR_PTR(-ENOMEM);
336
337 btf->fd = -1;
338
339 btf->data = malloc(size);
340 if (!btf->data) {
341 err = -ENOMEM;
342 goto done;
343 }
344
345 memcpy(btf->data, data, size);
346 btf->data_size = size;
347
348 err = btf_parse_hdr(btf);
349 if (err)
350 goto done;
351
352 err = btf_parse_str_sec(btf);
353 if (err)
354 goto done;
355
356 err = btf_parse_type_sec(btf);
357
358 done:
359 if (err) {
360 btf__free(btf);
361 return ERR_PTR(err);
362 }
363
364 return btf;
365 }
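/*
 * Hedged usage sketch (not part of the original source): a caller that already
 * holds the raw contents of a .BTF section in memory could do something like:
 *
 *	struct btf *btf = btf__new(raw_data, raw_size);
 *
 *	if (IS_ERR(btf))
 *		return PTR_ERR(btf);
 *	...
 *	btf__free(btf);
 *
 * where raw_data and raw_size are hypothetical names for the caller's buffer
 * and its length; errors are reported via ERR_PTR(), not NULL.
 */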
366
367 static bool btf_check_endianness(const GElf_Ehdr *ehdr)
368 {
369 #if __BYTE_ORDER == __LITTLE_ENDIAN
370 return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
371 #elif __BYTE_ORDER == __BIG_ENDIAN
372 return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
373 #else
374 # error "Unrecognized __BYTE_ORDER__"
375 #endif
376 }
377
378 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
379 {
380 Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
381 int err = 0, fd = -1, idx = 0;
382 struct btf *btf = NULL;
383 Elf_Scn *scn = NULL;
384 Elf *elf = NULL;
385 GElf_Ehdr ehdr;
386
387 if (elf_version(EV_CURRENT) == EV_NONE) {
388 pr_warning("failed to init libelf for %s\n", path);
389 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
390 }
391
392 fd = open(path, O_RDONLY);
393 if (fd < 0) {
394 err = -errno;
395 pr_warning("failed to open %s: %s\n", path, strerror(errno));
396 return ERR_PTR(err);
397 }
398
399 err = -LIBBPF_ERRNO__FORMAT;
400
401 elf = elf_begin(fd, ELF_C_READ, NULL);
402 if (!elf) {
403 pr_warning("failed to open %s as ELF file\n", path);
404 goto done;
405 }
406 if (!gelf_getehdr(elf, &ehdr)) {
407 pr_warning("failed to get EHDR from %s\n", path);
408 goto done;
409 }
410 if (!btf_check_endianness(&ehdr)) {
411 pr_warning("non-native ELF endianness is not supported\n");
412 goto done;
413 }
414 if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
415 pr_warning("failed to get e_shstrndx from %s\n", path);
416 goto done;
417 }
418
419 while ((scn = elf_nextscn(elf, scn)) != NULL) {
420 GElf_Shdr sh;
421 char *name;
422
423 idx++;
424 if (gelf_getshdr(scn, &sh) != &sh) {
425 pr_warning("failed to get section(%d) header from %s\n",
426 idx, path);
427 goto done;
428 }
429 name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
430 if (!name) {
431 pr_warning("failed to get section(%d) name from %s\n",
432 idx, path);
433 goto done;
434 }
435 if (strcmp(name, BTF_ELF_SEC) == 0) {
436 btf_data = elf_getdata(scn, 0);
437 if (!btf_data) {
438 pr_warning("failed to get section(%d, %s) data from %s\n",
439 idx, name, path);
440 goto done;
441 }
442 continue;
443 } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
444 btf_ext_data = elf_getdata(scn, 0);
445 if (!btf_ext_data) {
446 pr_warning("failed to get section(%d, %s) data from %s\n",
447 idx, name, path);
448 goto done;
449 }
450 continue;
451 }
452 }
453
454 err = 0;
455
456 if (!btf_data) {
457 err = -ENOENT;
458 goto done;
459 }
460 btf = btf__new(btf_data->d_buf, btf_data->d_size);
461 if (IS_ERR(btf))
462 goto done;
463
464 if (btf_ext && btf_ext_data) {
465 *btf_ext = btf_ext__new(btf_ext_data->d_buf,
466 btf_ext_data->d_size);
467 if (IS_ERR(*btf_ext))
468 goto done;
469 } else if (btf_ext) {
470 *btf_ext = NULL;
471 }
472 done:
473 if (elf)
474 elf_end(elf);
475 close(fd);
476
477 if (err)
478 return ERR_PTR(err);
479 /*
480 * btf is always parsed before btf_ext, so no need to clean up
481 * btf_ext, if btf loading failed
482 */
483 if (IS_ERR(btf))
484 return btf;
485 if (btf_ext && IS_ERR(*btf_ext)) {
486 btf__free(btf);
487 err = PTR_ERR(*btf_ext);
488 return ERR_PTR(err);
489 }
490 return btf;
491 }
492
493 static int compare_vsi_off(const void *_a, const void *_b)
494 {
495 const struct btf_var_secinfo *a = _a;
496 const struct btf_var_secinfo *b = _b;
497
498 return a->offset - b->offset;
499 }
500
501 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
502 struct btf_type *t)
503 {
504 __u32 size = 0, off = 0, i, vars = btf_vlen(t);
505 const char *name = btf__name_by_offset(btf, t->name_off);
506 const struct btf_type *t_var;
507 struct btf_var_secinfo *vsi;
508 const struct btf_var *var;
509 int ret;
510
511 if (!name) {
512 pr_debug("No name found in string section for DATASEC kind.\n");
513 return -ENOENT;
514 }
515
516 ret = bpf_object__section_size(obj, name, &size);
517 if (ret || !size || (t->size && t->size != size)) {
518 pr_debug("Invalid size for section %s: %u bytes\n", name, size);
519 return -ENOENT;
520 }
521
522 t->size = size;
523
524 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
525 t_var = btf__type_by_id(btf, vsi->type);
526 var = btf_var(t_var);
527
528 if (!btf_is_var(t_var)) {
529 pr_debug("Non-VAR type seen in section %s\n", name);
530 return -EINVAL;
531 }
532
533 if (var->linkage == BTF_VAR_STATIC)
534 continue;
535
536 name = btf__name_by_offset(btf, t_var->name_off);
537 if (!name) {
538 pr_debug("No name found in string section for VAR kind\n");
539 return -ENOENT;
540 }
541
542 ret = bpf_object__variable_offset(obj, name, &off);
543 if (ret) {
544 pr_debug("No offset found in symbol table for VAR %s\n",
545 name);
546 return -ENOENT;
547 }
548
549 vsi->offset = off;
550 }
551
552 qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off);
553 return 0;
554 }
555
556 int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
557 {
558 int err = 0;
559 __u32 i;
560
561 for (i = 1; i <= btf->nr_types; i++) {
562 struct btf_type *t = btf->types[i];
563
564 /* Loader needs to fix up some of the things compiler
565 * couldn't get its hands on while emitting BTF. This
566 * is section size and global variable offset. We use
567 * the info from the ELF itself for this purpose.
568 */
569 if (btf_is_datasec(t)) {
570 err = btf_fixup_datasec(obj, btf, t);
571 if (err)
572 break;
573 }
574 }
575
576 return err;
577 }
578
579 int btf__load(struct btf *btf)
580 {
581 __u32 log_buf_size = BPF_LOG_BUF_SIZE;
582 char *log_buf = NULL;
583 int err = 0;
584
585 if (btf->fd >= 0)
586 return -EEXIST;
587
588 log_buf = malloc(log_buf_size);
589 if (!log_buf)
590 return -ENOMEM;
591
592 *log_buf = 0;
593
594 btf->fd = bpf_load_btf(btf->data, btf->data_size,
595 log_buf, log_buf_size, false);
596 if (btf->fd < 0) {
597 err = -errno;
598 pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
599 if (*log_buf)
600 pr_warning("%s\n", log_buf);
601 goto done;
602 }
603
604 done:
605 free(log_buf);
606 return err;
607 }
608
609 int btf__fd(const struct btf *btf)
610 {
611 return btf->fd;
612 }
613
614 const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
615 {
616 *size = btf->data_size;
617 return btf->data;
618 }
619
620 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
621 {
622 if (offset < btf->hdr->str_len)
623 return &btf->strings[offset];
624 else
625 return NULL;
626 }
627
628 int btf__get_from_id(__u32 id, struct btf **btf)
629 {
630 struct bpf_btf_info btf_info = { 0 };
631 __u32 len = sizeof(btf_info);
632 __u32 last_size;
633 int btf_fd;
634 void *ptr;
635 int err;
636
637 err = 0;
638 *btf = NULL;
639 btf_fd = bpf_btf_get_fd_by_id(id);
640 if (btf_fd < 0)
641 return 0;
642
643 /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
644 * let's start with a sane default - 4KiB here - and resize it only if
645 * bpf_obj_get_info_by_fd() needs a bigger buffer.
646 */
647 btf_info.btf_size = 4096;
648 last_size = btf_info.btf_size;
649 ptr = malloc(last_size);
650 if (!ptr) {
651 err = -ENOMEM;
652 goto exit_free;
653 }
654
655 memset(ptr, 0, last_size);
656 btf_info.btf = ptr_to_u64(ptr);
657 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
658
659 if (!err && btf_info.btf_size > last_size) {
660 void *temp_ptr;
661
662 last_size = btf_info.btf_size;
663 temp_ptr = realloc(ptr, last_size);
664 if (!temp_ptr) {
665 err = -ENOMEM;
666 goto exit_free;
667 }
668 ptr = temp_ptr;
669 memset(ptr, 0, last_size);
670 btf_info.btf = ptr_to_u64(ptr);
671 err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
672 }
673
674 if (err || btf_info.btf_size > last_size) {
675 err = errno;
676 goto exit_free;
677 }
678
679 *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
680 if (IS_ERR(*btf)) {
681 err = PTR_ERR(*btf);
682 *btf = NULL;
683 }
684
685 exit_free:
686 close(btf_fd);
687 free(ptr);
688
689 return err;
690 }
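/*
 * Hedged usage sketch (illustrative only): given a kernel BTF object id, e.g.
 * the btf_id reported by bpf_obj_get_info_by_fd() for a loaded program, the
 * type data can be pulled into userspace roughly as:
 *
 *	struct btf *btf = NULL;
 *	int err = btf__get_from_id(btf_id, &btf);
 *
 *	if (!err && btf) {
 *		... inspect types ...
 *		btf__free(btf);
 *	}
 *
 * btf_id is a hypothetical variable; note that an unknown/stale id results in
 * err == 0 with *btf left NULL rather than an error.
 */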
691
692 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
693 __u32 expected_key_size, __u32 expected_value_size,
694 __u32 *key_type_id, __u32 *value_type_id)
695 {
696 const struct btf_type *container_type;
697 const struct btf_member *key, *value;
698 const size_t max_name = 256;
699 char container_name[max_name];
700 __s64 key_size, value_size;
701 __s32 container_id;
702
703 if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
704 max_name) {
705 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
706 map_name, map_name);
707 return -EINVAL;
708 }
709
710 container_id = btf__find_by_name(btf, container_name);
711 if (container_id < 0) {
712 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
713 map_name, container_name);
714 return container_id;
715 }
716
717 container_type = btf__type_by_id(btf, container_id);
718 if (!container_type) {
719 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
720 map_name, container_id);
721 return -EINVAL;
722 }
723
724 if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
725 pr_warning("map:%s container_name:%s is an invalid container struct\n",
726 map_name, container_name);
727 return -EINVAL;
728 }
729
730 key = btf_members(container_type);
731 value = key + 1;
732
733 key_size = btf__resolve_size(btf, key->type);
734 if (key_size < 0) {
735 pr_warning("map:%s invalid BTF key_type_size\n", map_name);
736 return key_size;
737 }
738
739 if (expected_key_size != key_size) {
740 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
741 map_name, (__u32)key_size, expected_key_size);
742 return -EINVAL;
743 }
744
745 value_size = btf__resolve_size(btf, value->type);
746 if (value_size < 0) {
747 pr_warning("map:%s invalid BTF value_type_size\n", map_name);
748 return value_size;
749 }
750
751 if (expected_value_size != value_size) {
752 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
753 map_name, (__u32)value_size, expected_value_size);
754 return -EINVAL;
755 }
756
757 *key_type_id = key->type;
758 *value_type_id = value->type;
759
760 return 0;
761 }
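/*
 * For illustration (not from the original source): the "____btf_map_<name>"
 * container looked up above is, by convention (e.g. as emitted by the
 * BPF_ANNOTATE_KV_PAIR macro used in selftests headers), a two-member struct
 * of roughly this shape:
 *
 *	struct ____btf_map_my_map {
 *		struct my_key key;
 *		struct my_value value;
 *	};
 *
 * my_map, my_key and my_value are hypothetical names; the only thing this
 * function relies on is the key-then-value member order.
 */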
762
763 struct btf_ext_sec_setup_param {
764 __u32 off;
765 __u32 len;
766 __u32 min_rec_size;
767 struct btf_ext_info *ext_info;
768 const char *desc;
769 };
770
771 static int btf_ext_setup_info(struct btf_ext *btf_ext,
772 struct btf_ext_sec_setup_param *ext_sec)
773 {
774 const struct btf_ext_info_sec *sinfo;
775 struct btf_ext_info *ext_info;
776 __u32 info_left, record_size;
777 /* The start of the info sec (including the __u32 record_size). */
778 void *info;
779
780 if (ext_sec->len == 0)
781 return 0;
782
783 if (ext_sec->off & 0x03) {
784 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
785 ext_sec->desc);
786 return -EINVAL;
787 }
788
789 info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
790 info_left = ext_sec->len;
791
792 if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
793 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
794 ext_sec->desc, ext_sec->off, ext_sec->len);
795 return -EINVAL;
796 }
797
798 /* At least a record size */
799 if (info_left < sizeof(__u32)) {
800 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
801 return -EINVAL;
802 }
803
804 /* The record size needs to meet the minimum standard */
805 record_size = *(__u32 *)info;
806 if (record_size < ext_sec->min_rec_size ||
807 record_size & 0x03) {
808 pr_debug("%s section in .BTF.ext has invalid record size %u\n",
809 ext_sec->desc, record_size);
810 return -EINVAL;
811 }
812
813 sinfo = info + sizeof(__u32);
814 info_left -= sizeof(__u32);
815
816 /* If no records, return failure now so .BTF.ext won't be used. */
817 if (!info_left) {
818 pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
819 return -EINVAL;
820 }
821
822 while (info_left) {
823 unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
824 __u64 total_record_size;
825 __u32 num_records;
826
827 if (info_left < sec_hdrlen) {
828 pr_debug("%s section header is not found in .BTF.ext\n",
829 ext_sec->desc);
830 return -EINVAL;
831 }
832
833 num_records = sinfo->num_info;
834 if (num_records == 0) {
835 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
836 ext_sec->desc);
837 return -EINVAL;
838 }
839
840 total_record_size = sec_hdrlen +
841 (__u64)num_records * record_size;
842 if (info_left < total_record_size) {
843 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
844 ext_sec->desc);
845 return -EINVAL;
846 }
847
848 info_left -= total_record_size;
849 sinfo = (void *)sinfo + total_record_size;
850 }
851
852 ext_info = ext_sec->ext_info;
853 ext_info->len = ext_sec->len - sizeof(__u32);
854 ext_info->rec_size = record_size;
855 ext_info->info = info + sizeof(__u32);
856
857 return 0;
858 }
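/*
 * Illustrative layout (added for clarity): each info section in .BTF.ext that
 * this function parses looks roughly like:
 *
 *	__u32 record_size;               // size of each record, >= min_rec_size
 *	struct btf_ext_info_sec {
 *		__u32 sec_name_off;      // offset into the .BTF string section
 *		__u32 num_info;          // number of records that follow
 *		__u8  data[];            // num_info * record_size bytes
 *	} secs[];                        // repeated until ext_sec->len is used up
 */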
859
860 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
861 {
862 struct btf_ext_sec_setup_param param = {
863 .off = btf_ext->hdr->func_info_off,
864 .len = btf_ext->hdr->func_info_len,
865 .min_rec_size = sizeof(struct bpf_func_info_min),
866 .ext_info = &btf_ext->func_info,
867 .desc = "func_info"
868 };
869
870 	return btf_ext_setup_info(btf_ext, &param);
871 }
872
873 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
874 {
875 struct btf_ext_sec_setup_param param = {
876 .off = btf_ext->hdr->line_info_off,
877 .len = btf_ext->hdr->line_info_len,
878 .min_rec_size = sizeof(struct bpf_line_info_min),
879 .ext_info = &btf_ext->line_info,
880 .desc = "line_info",
881 };
882
883 	return btf_ext_setup_info(btf_ext, &param);
884 }
885
886 static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext)
887 {
888 struct btf_ext_sec_setup_param param = {
889 .off = btf_ext->hdr->offset_reloc_off,
890 .len = btf_ext->hdr->offset_reloc_len,
891 .min_rec_size = sizeof(struct bpf_offset_reloc),
892 .ext_info = &btf_ext->offset_reloc_info,
893 .desc = "offset_reloc",
894 };
895
896 	return btf_ext_setup_info(btf_ext, &param);
897 }
898
899 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
900 {
901 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
902
903 if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
904 data_size < hdr->hdr_len) {
905 pr_debug("BTF.ext header not found");
906 return -EINVAL;
907 }
908
909 if (hdr->magic != BTF_MAGIC) {
910 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
911 return -EINVAL;
912 }
913
914 if (hdr->version != BTF_VERSION) {
915 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
916 return -ENOTSUP;
917 }
918
919 if (hdr->flags) {
920 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
921 return -ENOTSUP;
922 }
923
924 if (data_size == hdr->hdr_len) {
925 pr_debug("BTF.ext has no data\n");
926 return -EINVAL;
927 }
928
929 return 0;
930 }
931
932 void btf_ext__free(struct btf_ext *btf_ext)
933 {
934 if (!btf_ext)
935 return;
936 free(btf_ext->data);
937 free(btf_ext);
938 }
939
940 struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
941 {
942 struct btf_ext *btf_ext;
943 int err;
944
945 err = btf_ext_parse_hdr(data, size);
946 if (err)
947 return ERR_PTR(err);
948
949 btf_ext = calloc(1, sizeof(struct btf_ext));
950 if (!btf_ext)
951 return ERR_PTR(-ENOMEM);
952
953 btf_ext->data_size = size;
954 btf_ext->data = malloc(size);
955 if (!btf_ext->data) {
956 err = -ENOMEM;
957 goto done;
958 }
959 memcpy(btf_ext->data, data, size);
960
961 if (btf_ext->hdr->hdr_len <
962 offsetofend(struct btf_ext_header, line_info_len))
963 goto done;
964 err = btf_ext_setup_func_info(btf_ext);
965 if (err)
966 goto done;
967
968 err = btf_ext_setup_line_info(btf_ext);
969 if (err)
970 goto done;
971
972 if (btf_ext->hdr->hdr_len <
973 offsetofend(struct btf_ext_header, offset_reloc_len))
974 goto done;
975 err = btf_ext_setup_offset_reloc(btf_ext);
976 if (err)
977 goto done;
978
979 done:
980 if (err) {
981 btf_ext__free(btf_ext);
982 return ERR_PTR(err);
983 }
984
985 return btf_ext;
986 }
987
988 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
989 {
990 *size = btf_ext->data_size;
991 return btf_ext->data;
992 }
993
994 static int btf_ext_reloc_info(const struct btf *btf,
995 const struct btf_ext_info *ext_info,
996 const char *sec_name, __u32 insns_cnt,
997 void **info, __u32 *cnt)
998 {
999 __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
1000 __u32 i, record_size, existing_len, records_len;
1001 struct btf_ext_info_sec *sinfo;
1002 const char *info_sec_name;
1003 __u64 remain_len;
1004 void *data;
1005
1006 record_size = ext_info->rec_size;
1007 sinfo = ext_info->info;
1008 remain_len = ext_info->len;
1009 while (remain_len > 0) {
1010 records_len = sinfo->num_info * record_size;
1011 info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
1012 if (strcmp(info_sec_name, sec_name)) {
1013 remain_len -= sec_hdrlen + records_len;
1014 sinfo = (void *)sinfo + sec_hdrlen + records_len;
1015 continue;
1016 }
1017
1018 existing_len = (*cnt) * record_size;
1019 data = realloc(*info, existing_len + records_len);
1020 if (!data)
1021 return -ENOMEM;
1022
1023 memcpy(data + existing_len, sinfo->data, records_len);
1024 /* adjust insn_off only, the rest data will be passed
1025 * to the kernel.
1026 */
1027 for (i = 0; i < sinfo->num_info; i++) {
1028 __u32 *insn_off;
1029
1030 insn_off = data + existing_len + (i * record_size);
1031 *insn_off = *insn_off / sizeof(struct bpf_insn) +
1032 insns_cnt;
1033 }
1034 *info = data;
1035 *cnt += sinfo->num_info;
1036 return 0;
1037 }
1038
1039 return -ENOENT;
1040 }
1041
1042 int btf_ext__reloc_func_info(const struct btf *btf,
1043 const struct btf_ext *btf_ext,
1044 const char *sec_name, __u32 insns_cnt,
1045 void **func_info, __u32 *cnt)
1046 {
1047 return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
1048 insns_cnt, func_info, cnt);
1049 }
1050
1051 int btf_ext__reloc_line_info(const struct btf *btf,
1052 const struct btf_ext *btf_ext,
1053 const char *sec_name, __u32 insns_cnt,
1054 void **line_info, __u32 *cnt)
1055 {
1056 return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
1057 insns_cnt, line_info, cnt);
1058 }
1059
1060 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
1061 {
1062 return btf_ext->func_info.rec_size;
1063 }
1064
1065 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
1066 {
1067 return btf_ext->line_info.rec_size;
1068 }
1069
1070 struct btf_dedup;
1071
1072 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1073 const struct btf_dedup_opts *opts);
1074 static void btf_dedup_free(struct btf_dedup *d);
1075 static int btf_dedup_strings(struct btf_dedup *d);
1076 static int btf_dedup_prim_types(struct btf_dedup *d);
1077 static int btf_dedup_struct_types(struct btf_dedup *d);
1078 static int btf_dedup_ref_types(struct btf_dedup *d);
1079 static int btf_dedup_compact_types(struct btf_dedup *d);
1080 static int btf_dedup_remap_types(struct btf_dedup *d);
1081
1082 /*
1083 * Deduplicate BTF types and strings.
1084 *
1085 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
1086 * section with all BTF type descriptors and string data. It overwrites that
1087 * memory in-place with deduplicated types and strings without any loss of
1088 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
1089 * is provided, all the strings referenced from .BTF.ext section are honored
1090 * and updated to point to the right offsets after deduplication.
1091 *
1092 * If function returns with error, type/string data might be garbled and should
1093 * be discarded.
1094 *
1095  * A more verbose and detailed description of both the problem btf_dedup is
1096  * solving and the solution can be found at:
1097 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
1098 *
1099 * Problem description and justification
1100 * =====================================
1101 *
1102 * BTF type information is typically emitted either as a result of conversion
1103 * from DWARF to BTF or directly by compiler. In both cases, each compilation
1104 * unit contains information about a subset of all the types that are used
1105 * in an application. These subsets are frequently overlapping and contain a lot
1106 * of duplicated information when later concatenated together into a single
1107  * binary. This algorithm ensures that each unique type is represented by a
1108  * single BTF type descriptor, greatly reducing the resulting size of BTF data.
1109 *
1110 * Compilation unit isolation and subsequent duplication of data is not the only
1111  * problem. The same type hierarchy (e.g., a struct and all the types that struct
1112 * references) in different compilation units can be represented in BTF to
1113 * various degrees of completeness (or, rather, incompleteness) due to
1114 * struct/union forward declarations.
1115 *
1116  * Let's take a look at an example that we'll use to better understand the
1117 * problem (and solution). Suppose we have two compilation units, each using
1118 * same `struct S`, but each of them having incomplete type information about
1119 * struct's fields:
1120 *
1121 * // CU #1:
1122 * struct S;
1123 * struct A {
1124 * int a;
1125 * struct A* self;
1126 * struct S* parent;
1127 * };
1128 * struct B;
1129 * struct S {
1130 * struct A* a_ptr;
1131 * struct B* b_ptr;
1132 * };
1133 *
1134 * // CU #2:
1135 * struct S;
1136 * struct A;
1137 * struct B {
1138 * int b;
1139 * struct B* self;
1140 * struct S* parent;
1141 * };
1142 * struct S {
1143 * struct A* a_ptr;
1144 * struct B* b_ptr;
1145 * };
1146 *
1147  * In case of CU #1, BTF data will know only that `struct B` exists (but no
1148  * more), but will know the complete type information about `struct A`. While
1149  * for CU #2, it will know full type information about `struct B`, but will
1150  * only know about the forward declaration of `struct A` (in BTF terms, it will
1151  * have a `BTF_KIND_FWD` type descriptor with name `A`).
1152 *
1153 * This compilation unit isolation means that it's possible that there is no
1154 * single CU with complete type information describing structs `S`, `A`, and
1155 * `B`. Also, we might get tons of duplicated and redundant type information.
1156 *
1157 * Additional complication we need to keep in mind comes from the fact that
1158 * types, in general, can form graphs containing cycles, not just DAGs.
1159 *
1160  * While the algorithm does deduplication, it also merges and resolves type
1161  * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
1162 * E.g., in the example above with two compilation units having partial type
1163 * information for structs `A` and `B`, the output of algorithm will emit
1164 * a single copy of each BTF type that describes structs `A`, `B`, and `S`
1165 * (as well as type information for `int` and pointers), as if they were defined
1166 * in a single compilation unit as:
1167 *
1168 * struct A {
1169 * int a;
1170 * struct A* self;
1171 * struct S* parent;
1172 * };
1173 * struct B {
1174 * int b;
1175 * struct B* self;
1176 * struct S* parent;
1177 * };
1178 * struct S {
1179 * struct A* a_ptr;
1180 * struct B* b_ptr;
1181 * };
1182 *
1183 * Algorithm summary
1184 * =================
1185 *
1186 * Algorithm completes its work in 6 separate passes:
1187 *
1188 * 1. Strings deduplication.
1189 * 2. Primitive types deduplication (int, enum, fwd).
1190 * 3. Struct/union types deduplication.
1191 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
1192 * protos, and const/volatile/restrict modifiers).
1193 * 5. Types compaction.
1194 * 6. Types remapping.
1195 *
1196 * Algorithm determines canonical type descriptor, which is a single
1197 * representative type for each truly unique type. This canonical type is the
1198 * one that will go into final deduplicated BTF type information. For
1199 * struct/unions, it is also the type that algorithm will merge additional type
1200 * information into (while resolving FWDs), as it discovers it from data in
1201 * other CUs. Each input BTF type eventually gets either mapped to itself, if
1202 * that type is canonical, or to some other type, if that type is equivalent
1203 * and was chosen as canonical representative. This mapping is stored in
1204 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
1205 * FWD type got resolved to.
1206 *
1207 * To facilitate fast discovery of canonical types, we also maintain canonical
1208 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
1209 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
1210 * that match that signature. With sufficiently good choice of type signature
1211 * hashing function, we can limit number of canonical types for each unique type
1212 * signature to a very small number, allowing to find canonical type for any
1213 * duplicated type very quickly.
1214 *
1215 * Struct/union deduplication is the most critical part and algorithm for
1216 * deduplicating structs/unions is described in greater details in comments for
1217 * `btf_dedup_is_equiv` function.
1218 */
1219 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
1220 const struct btf_dedup_opts *opts)
1221 {
1222 struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
1223 int err;
1224
1225 if (IS_ERR(d)) {
1226 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
1227 return -EINVAL;
1228 }
1229
1230 err = btf_dedup_strings(d);
1231 if (err < 0) {
1232 pr_debug("btf_dedup_strings failed:%d\n", err);
1233 goto done;
1234 }
1235 err = btf_dedup_prim_types(d);
1236 if (err < 0) {
1237 pr_debug("btf_dedup_prim_types failed:%d\n", err);
1238 goto done;
1239 }
1240 err = btf_dedup_struct_types(d);
1241 if (err < 0) {
1242 pr_debug("btf_dedup_struct_types failed:%d\n", err);
1243 goto done;
1244 }
1245 err = btf_dedup_ref_types(d);
1246 if (err < 0) {
1247 pr_debug("btf_dedup_ref_types failed:%d\n", err);
1248 goto done;
1249 }
1250 err = btf_dedup_compact_types(d);
1251 if (err < 0) {
1252 pr_debug("btf_dedup_compact_types failed:%d\n", err);
1253 goto done;
1254 }
1255 err = btf_dedup_remap_types(d);
1256 if (err < 0) {
1257 pr_debug("btf_dedup_remap_types failed:%d\n", err);
1258 goto done;
1259 }
1260
1261 done:
1262 btf_dedup_free(d);
1263 return err;
1264 }
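/*
 * Hedged usage sketch (illustrative, not from the original source): a typical
 * caller dedupes right after parsing an object file, e.g.:
 *
 *	struct btf_ext *btf_ext = NULL;
 *	struct btf *btf = btf__parse_elf("prog.o", &btf_ext);
 *
 *	if (!IS_ERR(btf) && btf__dedup(btf, btf_ext, NULL) == 0)
 *		... btf/btf_ext now reference deduplicated types and strings ...
 *
 * "prog.o" is a hypothetical object file path; passing NULL opts keeps the
 * defaults, i.e. struct/union forward declarations are resolved.
 */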
1265
1266 #define BTF_UNPROCESSED_ID ((__u32)-1)
1267 #define BTF_IN_PROGRESS_ID ((__u32)-2)
1268
1269 struct btf_dedup {
1270 /* .BTF section to be deduped in-place */
1271 struct btf *btf;
1272 /*
1273 * Optional .BTF.ext section. When provided, any strings referenced
1274 * from it will be taken into account when deduping strings
1275 */
1276 struct btf_ext *btf_ext;
1277 /*
1278 * This is a map from any type's signature hash to a list of possible
1279 * canonical representative type candidates. Hash collisions are
1280 * ignored, so even types of various kinds can share same list of
1281 * candidates, which is fine because we rely on subsequent
1282 * btf_xxx_equal() checks to authoritatively verify type equality.
1283 */
1284 struct hashmap *dedup_table;
1285 /* Canonical types map */
1286 __u32 *map;
1287 /* Hypothetical mapping, used during type graph equivalence checks */
1288 __u32 *hypot_map;
1289 __u32 *hypot_list;
1290 size_t hypot_cnt;
1291 size_t hypot_cap;
1292 /* Various option modifying behavior of algorithm */
1293 struct btf_dedup_opts opts;
1294 };
1295
1296 struct btf_str_ptr {
1297 const char *str;
1298 __u32 new_off;
1299 bool used;
1300 };
1301
1302 struct btf_str_ptrs {
1303 struct btf_str_ptr *ptrs;
1304 const char *data;
1305 __u32 cnt;
1306 __u32 cap;
1307 };
1308
1309 static long hash_combine(long h, long value)
1310 {
1311 return h * 31 + value;
1312 }
1313
1314 #define for_each_dedup_cand(d, node, hash) \
1315 hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
1316
1317 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
1318 {
1319 return hashmap__append(d->dedup_table,
1320 (void *)hash, (void *)(long)type_id);
1321 }
1322
1323 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
1324 __u32 from_id, __u32 to_id)
1325 {
1326 if (d->hypot_cnt == d->hypot_cap) {
1327 __u32 *new_list;
1328
1329 d->hypot_cap += max(16, d->hypot_cap / 2);
1330 new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
1331 if (!new_list)
1332 return -ENOMEM;
1333 d->hypot_list = new_list;
1334 }
1335 d->hypot_list[d->hypot_cnt++] = from_id;
1336 d->hypot_map[from_id] = to_id;
1337 return 0;
1338 }
1339
1340 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
1341 {
1342 int i;
1343
1344 for (i = 0; i < d->hypot_cnt; i++)
1345 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
1346 d->hypot_cnt = 0;
1347 }
1348
1349 static void btf_dedup_free(struct btf_dedup *d)
1350 {
1351 hashmap__free(d->dedup_table);
1352 d->dedup_table = NULL;
1353
1354 free(d->map);
1355 d->map = NULL;
1356
1357 free(d->hypot_map);
1358 d->hypot_map = NULL;
1359
1360 free(d->hypot_list);
1361 d->hypot_list = NULL;
1362
1363 free(d);
1364 }
1365
1366 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
1367 {
1368 return (size_t)key;
1369 }
1370
1371 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
1372 {
1373 return 0;
1374 }
1375
1376 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
1377 {
1378 return k1 == k2;
1379 }
1380
1381 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1382 const struct btf_dedup_opts *opts)
1383 {
1384 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
1385 hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
1386 int i, err = 0;
1387
1388 if (!d)
1389 return ERR_PTR(-ENOMEM);
1390
1391 d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
1392 /* dedup_table_size is now used only to force collisions in tests */
1393 if (opts && opts->dedup_table_size == 1)
1394 hash_fn = btf_dedup_collision_hash_fn;
1395
1396 d->btf = btf;
1397 d->btf_ext = btf_ext;
1398
1399 d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
1400 if (IS_ERR(d->dedup_table)) {
1401 err = PTR_ERR(d->dedup_table);
1402 d->dedup_table = NULL;
1403 goto done;
1404 }
1405
1406 d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1407 if (!d->map) {
1408 err = -ENOMEM;
1409 goto done;
1410 }
1411 /* special BTF "void" type is made canonical immediately */
1412 d->map[0] = 0;
1413 for (i = 1; i <= btf->nr_types; i++) {
1414 struct btf_type *t = d->btf->types[i];
1415
1416 /* VAR and DATASEC are never deduped and are self-canonical */
1417 if (btf_is_var(t) || btf_is_datasec(t))
1418 d->map[i] = i;
1419 else
1420 d->map[i] = BTF_UNPROCESSED_ID;
1421 }
1422
1423 d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1424 if (!d->hypot_map) {
1425 err = -ENOMEM;
1426 goto done;
1427 }
1428 for (i = 0; i <= btf->nr_types; i++)
1429 d->hypot_map[i] = BTF_UNPROCESSED_ID;
1430
1431 done:
1432 if (err) {
1433 btf_dedup_free(d);
1434 return ERR_PTR(err);
1435 }
1436
1437 return d;
1438 }
1439
1440 typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
1441
1442 /*
1443 * Iterate over all possible places in .BTF and .BTF.ext that can reference
1444 * string and pass pointer to it to a provided callback `fn`.
1445 */
1446 static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
1447 {
1448 void *line_data_cur, *line_data_end;
1449 int i, j, r, rec_size;
1450 struct btf_type *t;
1451
1452 for (i = 1; i <= d->btf->nr_types; i++) {
1453 t = d->btf->types[i];
1454 r = fn(&t->name_off, ctx);
1455 if (r)
1456 return r;
1457
1458 switch (btf_kind(t)) {
1459 case BTF_KIND_STRUCT:
1460 case BTF_KIND_UNION: {
1461 struct btf_member *m = btf_members(t);
1462 __u16 vlen = btf_vlen(t);
1463
1464 for (j = 0; j < vlen; j++) {
1465 r = fn(&m->name_off, ctx);
1466 if (r)
1467 return r;
1468 m++;
1469 }
1470 break;
1471 }
1472 case BTF_KIND_ENUM: {
1473 struct btf_enum *m = btf_enum(t);
1474 __u16 vlen = btf_vlen(t);
1475
1476 for (j = 0; j < vlen; j++) {
1477 r = fn(&m->name_off, ctx);
1478 if (r)
1479 return r;
1480 m++;
1481 }
1482 break;
1483 }
1484 case BTF_KIND_FUNC_PROTO: {
1485 struct btf_param *m = btf_params(t);
1486 __u16 vlen = btf_vlen(t);
1487
1488 for (j = 0; j < vlen; j++) {
1489 r = fn(&m->name_off, ctx);
1490 if (r)
1491 return r;
1492 m++;
1493 }
1494 break;
1495 }
1496 default:
1497 break;
1498 }
1499 }
1500
1501 if (!d->btf_ext)
1502 return 0;
1503
1504 line_data_cur = d->btf_ext->line_info.info;
1505 line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
1506 rec_size = d->btf_ext->line_info.rec_size;
1507
1508 while (line_data_cur < line_data_end) {
1509 struct btf_ext_info_sec *sec = line_data_cur;
1510 struct bpf_line_info_min *line_info;
1511 __u32 num_info = sec->num_info;
1512
1513 r = fn(&sec->sec_name_off, ctx);
1514 if (r)
1515 return r;
1516
1517 line_data_cur += sizeof(struct btf_ext_info_sec);
1518 for (i = 0; i < num_info; i++) {
1519 line_info = line_data_cur;
1520 r = fn(&line_info->file_name_off, ctx);
1521 if (r)
1522 return r;
1523 r = fn(&line_info->line_off, ctx);
1524 if (r)
1525 return r;
1526 line_data_cur += rec_size;
1527 }
1528 }
1529
1530 return 0;
1531 }
1532
1533 static int str_sort_by_content(const void *a1, const void *a2)
1534 {
1535 const struct btf_str_ptr *p1 = a1;
1536 const struct btf_str_ptr *p2 = a2;
1537
1538 return strcmp(p1->str, p2->str);
1539 }
1540
1541 static int str_sort_by_offset(const void *a1, const void *a2)
1542 {
1543 const struct btf_str_ptr *p1 = a1;
1544 const struct btf_str_ptr *p2 = a2;
1545
1546 if (p1->str != p2->str)
1547 return p1->str < p2->str ? -1 : 1;
1548 return 0;
1549 }
1550
1551 static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
1552 {
1553 const struct btf_str_ptr *p = pelem;
1554
1555 if (str_ptr != p->str)
1556 return (const char *)str_ptr < p->str ? -1 : 1;
1557 return 0;
1558 }
1559
1560 static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
1561 {
1562 struct btf_str_ptrs *strs;
1563 struct btf_str_ptr *s;
1564
1565 if (*str_off_ptr == 0)
1566 return 0;
1567
1568 strs = ctx;
1569 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1570 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1571 if (!s)
1572 return -EINVAL;
1573 s->used = true;
1574 return 0;
1575 }
1576
1577 static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
1578 {
1579 struct btf_str_ptrs *strs;
1580 struct btf_str_ptr *s;
1581
1582 if (*str_off_ptr == 0)
1583 return 0;
1584
1585 strs = ctx;
1586 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1587 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1588 if (!s)
1589 return -EINVAL;
1590 *str_off_ptr = s->new_off;
1591 return 0;
1592 }
1593
1594 /*
1595  * Dedup strings and filter out those that are not referenced from either .BTF
1596 * or .BTF.ext (if provided) sections.
1597 *
1598 * This is done by building index of all strings in BTF's string section,
1599 * then iterating over all entities that can reference strings (e.g., type
1600 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
1601 * strings as used. After that all used strings are deduped and compacted into
1602 * sequential blob of memory and new offsets are calculated. Then all the string
1603 * references are iterated again and rewritten using new offsets.
1604 */
1605 static int btf_dedup_strings(struct btf_dedup *d)
1606 {
1607 const struct btf_header *hdr = d->btf->hdr;
1608 char *start = (char *)d->btf->nohdr_data + hdr->str_off;
1609 char *end = start + d->btf->hdr->str_len;
1610 char *p = start, *tmp_strs = NULL;
1611 struct btf_str_ptrs strs = {
1612 .cnt = 0,
1613 .cap = 0,
1614 .ptrs = NULL,
1615 .data = start,
1616 };
1617 int i, j, err = 0, grp_idx;
1618 bool grp_used;
1619
1620 /* build index of all strings */
1621 while (p < end) {
1622 if (strs.cnt + 1 > strs.cap) {
1623 struct btf_str_ptr *new_ptrs;
1624
1625 strs.cap += max(strs.cnt / 2, 16);
1626 new_ptrs = realloc(strs.ptrs,
1627 sizeof(strs.ptrs[0]) * strs.cap);
1628 if (!new_ptrs) {
1629 err = -ENOMEM;
1630 goto done;
1631 }
1632 strs.ptrs = new_ptrs;
1633 }
1634
1635 strs.ptrs[strs.cnt].str = p;
1636 strs.ptrs[strs.cnt].used = false;
1637
1638 p += strlen(p) + 1;
1639 strs.cnt++;
1640 }
1641
1642 /* temporary storage for deduplicated strings */
1643 tmp_strs = malloc(d->btf->hdr->str_len);
1644 if (!tmp_strs) {
1645 err = -ENOMEM;
1646 goto done;
1647 }
1648
1649 /* mark all used strings */
1650 strs.ptrs[0].used = true;
1651 err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
1652 if (err)
1653 goto done;
1654
1655 	/* sort strings by content, so that we can identify duplicates */
1656 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
1657
1658 /*
1659 * iterate groups of equal strings and if any instance in a group was
1660 * referenced, emit single instance and remember new offset
1661 */
1662 p = tmp_strs;
1663 grp_idx = 0;
1664 grp_used = strs.ptrs[0].used;
1665 /* iterate past end to avoid code duplication after loop */
1666 for (i = 1; i <= strs.cnt; i++) {
1667 /*
1668 * when i == strs.cnt, we want to skip string comparison and go
1669 * straight to handling last group of strings (otherwise we'd
1670 * need to handle last group after the loop w/ duplicated code)
1671 */
1672 if (i < strs.cnt &&
1673 !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
1674 grp_used = grp_used || strs.ptrs[i].used;
1675 continue;
1676 }
1677
1678 /*
1679 * this check would have been required after the loop to handle
1680 * last group of strings, but due to <= condition in a loop
1681 * we avoid that duplication
1682 */
1683 if (grp_used) {
1684 int new_off = p - tmp_strs;
1685 __u32 len = strlen(strs.ptrs[grp_idx].str);
1686
1687 memmove(p, strs.ptrs[grp_idx].str, len + 1);
1688 for (j = grp_idx; j < i; j++)
1689 strs.ptrs[j].new_off = new_off;
1690 p += len + 1;
1691 }
1692
1693 if (i < strs.cnt) {
1694 grp_idx = i;
1695 grp_used = strs.ptrs[i].used;
1696 }
1697 }
1698
1699 /* replace original strings with deduped ones */
1700 d->btf->hdr->str_len = p - tmp_strs;
1701 memmove(start, tmp_strs, d->btf->hdr->str_len);
1702 end = start + d->btf->hdr->str_len;
1703
1704 /* restore original order for further binary search lookups */
1705 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
1706
1707 /* remap string offsets */
1708 err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
1709 if (err)
1710 goto done;
1711
1712 d->btf->hdr->str_len = end - start;
1713
1714 done:
1715 free(tmp_strs);
1716 free(strs.ptrs);
1717 return err;
1718 }
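/*
 * Worked example (illustrative only): given an input string section
 *
 *	"\0int\0foo\0int\0"
 *
 * with at least one "int" copy referenced from some name_off, the passes above
 * keep a single "int" and a single "foo" (unreferenced strings are dropped),
 * shrink hdr->str_len accordingly, and rewrite every referencing offset to
 * point at the surviving copy.
 */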
1719
1720 static long btf_hash_common(struct btf_type *t)
1721 {
1722 long h;
1723
1724 h = hash_combine(0, t->name_off);
1725 h = hash_combine(h, t->info);
1726 h = hash_combine(h, t->size);
1727 return h;
1728 }
1729
1730 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
1731 {
1732 return t1->name_off == t2->name_off &&
1733 t1->info == t2->info &&
1734 t1->size == t2->size;
1735 }
1736
1737 /* Calculate type signature hash of INT. */
1738 static long btf_hash_int(struct btf_type *t)
1739 {
1740 __u32 info = *(__u32 *)(t + 1);
1741 long h;
1742
1743 h = btf_hash_common(t);
1744 h = hash_combine(h, info);
1745 return h;
1746 }
1747
1748 /* Check structural equality of two INTs. */
1749 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
1750 {
1751 __u32 info1, info2;
1752
1753 if (!btf_equal_common(t1, t2))
1754 return false;
1755 info1 = *(__u32 *)(t1 + 1);
1756 info2 = *(__u32 *)(t2 + 1);
1757 return info1 == info2;
1758 }
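/*
 * For reference (illustrative, based on the canonical <linux/btf.h> encoding):
 * the extra __u32 following a BTF_KIND_INT descriptor that btf_hash_int() and
 * btf_equal_int() look at packs three fields:
 *
 *	BTF_INT_ENCODING(info);	// bits 24-27: SIGNED/CHAR/BOOL flags
 *	BTF_INT_OFFSET(info);	// bits 16-23: start bit for bitfield-like ints
 *	BTF_INT_BITS(info);	// bits  0-7 : width in bits
 *
 * so two INTs are considered equal only if name, size and this whole word match.
 */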
1759
1760 /* Calculate type signature hash of ENUM. */
1761 static long btf_hash_enum(struct btf_type *t)
1762 {
1763 long h;
1764
1765 /* don't hash vlen and enum members to support enum fwd resolving */
1766 h = hash_combine(0, t->name_off);
1767 h = hash_combine(h, t->info & ~0xffff);
1768 h = hash_combine(h, t->size);
1769 return h;
1770 }
1771
1772 /* Check structural equality of two ENUMs. */
1773 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
1774 {
1775 const struct btf_enum *m1, *m2;
1776 __u16 vlen;
1777 int i;
1778
1779 if (!btf_equal_common(t1, t2))
1780 return false;
1781
1782 vlen = btf_vlen(t1);
1783 m1 = btf_enum(t1);
1784 m2 = btf_enum(t2);
1785 for (i = 0; i < vlen; i++) {
1786 if (m1->name_off != m2->name_off || m1->val != m2->val)
1787 return false;
1788 m1++;
1789 m2++;
1790 }
1791 return true;
1792 }
1793
1794 static inline bool btf_is_enum_fwd(struct btf_type *t)
1795 {
1796 return btf_is_enum(t) && btf_vlen(t) == 0;
1797 }
1798
1799 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
1800 {
1801 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
1802 return btf_equal_enum(t1, t2);
1803 /* ignore vlen when comparing */
1804 return t1->name_off == t2->name_off &&
1805 (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
1806 t1->size == t2->size;
1807 }
1808
1809 /*
1810 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
1811 * as referenced type IDs equivalence is established separately during type
1812 * graph equivalence check algorithm.
1813 */
1814 static long btf_hash_struct(struct btf_type *t)
1815 {
1816 const struct btf_member *member = btf_members(t);
1817 __u32 vlen = btf_vlen(t);
1818 long h = btf_hash_common(t);
1819 int i;
1820
1821 for (i = 0; i < vlen; i++) {
1822 h = hash_combine(h, member->name_off);
1823 h = hash_combine(h, member->offset);
1824 /* no hashing of referenced type ID, it can be unresolved yet */
1825 member++;
1826 }
1827 return h;
1828 }
1829
1830 /*
1831  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
1832 * IDs. This check is performed during type graph equivalence check and
1833 * referenced types equivalence is checked separately.
1834 */
1835 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
1836 {
1837 const struct btf_member *m1, *m2;
1838 __u16 vlen;
1839 int i;
1840
1841 if (!btf_equal_common(t1, t2))
1842 return false;
1843
1844 vlen = btf_vlen(t1);
1845 m1 = btf_members(t1);
1846 m2 = btf_members(t2);
1847 for (i = 0; i < vlen; i++) {
1848 if (m1->name_off != m2->name_off || m1->offset != m2->offset)
1849 return false;
1850 m1++;
1851 m2++;
1852 }
1853 return true;
1854 }
1855
1856 /*
1857 * Calculate type signature hash of ARRAY, including referenced type IDs,
1858 * under assumption that they were already resolved to canonical type IDs and
1859 * are not going to change.
1860 */
1861 static long btf_hash_array(struct btf_type *t)
1862 {
1863 const struct btf_array *info = btf_array(t);
1864 long h = btf_hash_common(t);
1865
1866 h = hash_combine(h, info->type);
1867 h = hash_combine(h, info->index_type);
1868 h = hash_combine(h, info->nelems);
1869 return h;
1870 }
1871
1872 /*
1873 * Check exact equality of two ARRAYs, taking into account referenced
1874 * type IDs, under assumption that they were already resolved to canonical
1875 * type IDs and are not going to change.
1876 * This function is called during reference types deduplication to compare
1877 * ARRAY to potential canonical representative.
1878 */
1879 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
1880 {
1881 const struct btf_array *info1, *info2;
1882
1883 if (!btf_equal_common(t1, t2))
1884 return false;
1885
1886 info1 = btf_array(t1);
1887 info2 = btf_array(t2);
1888 return info1->type == info2->type &&
1889 info1->index_type == info2->index_type &&
1890 info1->nelems == info2->nelems;
1891 }
1892
1893 /*
1894 * Check structural compatibility of two ARRAYs, ignoring referenced type
1895 * IDs. This check is performed during type graph equivalence check and
1896 * referenced types equivalence is checked separately.
1897 */
1898 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
1899 {
1900 if (!btf_equal_common(t1, t2))
1901 return false;
1902
1903 return btf_array(t1)->nelems == btf_array(t2)->nelems;
1904 }
1905
1906 /*
1907 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
1908 * under assumption that they were already resolved to canonical type IDs and
1909 * are not going to change.
1910 */
1911 static long btf_hash_fnproto(struct btf_type *t)
1912 {
1913 const struct btf_param *member = btf_params(t);
1914 __u16 vlen = btf_vlen(t);
1915 long h = btf_hash_common(t);
1916 int i;
1917
1918 for (i = 0; i < vlen; i++) {
1919 h = hash_combine(h, member->name_off);
1920 h = hash_combine(h, member->type);
1921 member++;
1922 }
1923 return h;
1924 }
1925
1926 /*
1927 * Check exact equality of two FUNC_PROTOs, taking into account referenced
1928 * type IDs, under assumption that they were already resolved to canonical
1929 * type IDs and are not going to change.
1930 * This function is called during reference types deduplication to compare
1931 * FUNC_PROTO to potential canonical representative.
1932 */
1933 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
1934 {
1935 const struct btf_param *m1, *m2;
1936 __u16 vlen;
1937 int i;
1938
1939 if (!btf_equal_common(t1, t2))
1940 return false;
1941
1942 vlen = btf_vlen(t1);
1943 m1 = btf_params(t1);
1944 m2 = btf_params(t2);
1945 for (i = 0; i < vlen; i++) {
1946 if (m1->name_off != m2->name_off || m1->type != m2->type)
1947 return false;
1948 m1++;
1949 m2++;
1950 }
1951 return true;
1952 }
1953
1954 /*
1955 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
1956 * IDs. This check is performed during type graph equivalence check and
1957 * referenced types equivalence is checked separately.
1958 */
1959 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
1960 {
1961 const struct btf_param *m1, *m2;
1962 __u16 vlen;
1963 int i;
1964
1965 /* skip return type ID */
1966 if (t1->name_off != t2->name_off || t1->info != t2->info)
1967 return false;
1968
1969 vlen = btf_vlen(t1);
1970 m1 = btf_params(t1);
1971 m2 = btf_params(t2);
1972 for (i = 0; i < vlen; i++) {
1973 if (m1->name_off != m2->name_off)
1974 return false;
1975 m1++;
1976 m2++;
1977 }
1978 return true;
1979 }
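/*
 * As a hypothetical example: a callback type like
 *
 *	typedef int (*cmp_fn)(const void *a, const void *b);
 *
 * emitted by two compilation units produces FUNC_PROTOs whose parameter and
 * return type IDs differ, but whose vlen and parameter names match, so
 * btf_compat_fnproto() considers them compatible; the referenced types are
 * then checked recursively by btf_dedup_is_equiv().
 */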
1980
1981 /*
1982 * Deduplicate primitive types, that can't reference other types, by calculating
1983 * their type signature hash and comparing them with any possible canonical
1984 * candidate. If no canonical candidate matches, type itself is marked as
1985 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
1986 */
1987 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
1988 {
1989 struct btf_type *t = d->btf->types[type_id];
1990 struct hashmap_entry *hash_entry;
1991 struct btf_type *cand;
1992 /* if we don't find equivalent type, then we are canonical */
1993 __u32 new_id = type_id;
1994 __u32 cand_id;
1995 long h;
1996
1997 switch (btf_kind(t)) {
1998 case BTF_KIND_CONST:
1999 case BTF_KIND_VOLATILE:
2000 case BTF_KIND_RESTRICT:
2001 case BTF_KIND_PTR:
2002 case BTF_KIND_TYPEDEF:
2003 case BTF_KIND_ARRAY:
2004 case BTF_KIND_STRUCT:
2005 case BTF_KIND_UNION:
2006 case BTF_KIND_FUNC:
2007 case BTF_KIND_FUNC_PROTO:
2008 case BTF_KIND_VAR:
2009 case BTF_KIND_DATASEC:
2010 return 0;
2011
2012 case BTF_KIND_INT:
2013 h = btf_hash_int(t);
2014 for_each_dedup_cand(d, hash_entry, h) {
2015 cand_id = (__u32)(long)hash_entry->value;
2016 cand = d->btf->types[cand_id];
2017 if (btf_equal_int(t, cand)) {
2018 new_id = cand_id;
2019 break;
2020 }
2021 }
2022 break;
2023
2024 case BTF_KIND_ENUM:
2025 h = btf_hash_enum(t);
2026 for_each_dedup_cand(d, hash_entry, h) {
2027 cand_id = (__u32)(long)hash_entry->value;
2028 cand = d->btf->types[cand_id];
2029 if (btf_equal_enum(t, cand)) {
2030 new_id = cand_id;
2031 break;
2032 }
2033 if (d->opts.dont_resolve_fwds)
2034 continue;
2035 if (btf_compat_enum(t, cand)) {
2036 if (btf_is_enum_fwd(t)) {
2037 /* resolve fwd to full enum */
2038 new_id = cand_id;
2039 break;
2040 }
2041 /* resolve canonical enum fwd to full enum */
2042 d->map[cand_id] = type_id;
2043 }
2044 }
2045 break;
2046
2047 case BTF_KIND_FWD:
2048 h = btf_hash_common(t);
2049 for_each_dedup_cand(d, hash_entry, h) {
2050 cand_id = (__u32)(long)hash_entry->value;
2051 cand = d->btf->types[cand_id];
2052 if (btf_equal_common(t, cand)) {
2053 new_id = cand_id;
2054 break;
2055 }
2056 }
2057 break;
2058
2059 default:
2060 return -EINVAL;
2061 }
2062
2063 d->map[type_id] = new_id;
2064 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2065 return -ENOMEM;
2066
2067 return 0;
2068 }
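/*
 * A hypothetical walk-through of the above: each compilation unit typically
 * contributes its own BTF_KIND_INT for 'int'. All of them hash identically
 * via btf_hash_int() (name plus size/encoding info), so the first one
 * processed stays canonical (map[id] == id) and is added to the dedup table,
 * while every later duplicate just gets map[id] set to that canonical ID and
 * is never added to the table.
 */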
2069
2070 static int btf_dedup_prim_types(struct btf_dedup *d)
2071 {
2072 int i, err;
2073
2074 for (i = 1; i <= d->btf->nr_types; i++) {
2075 err = btf_dedup_prim_type(d, i);
2076 if (err)
2077 return err;
2078 }
2079 return 0;
2080 }
2081
2082 /*
2083 * Check whether type is already mapped into canonical one (could be to itself).
2084 */
2085 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
2086 {
2087 return d->map[type_id] <= BTF_MAX_NR_TYPES;
2088 }
2089
2090 /*
2091 * Resolve type ID into its canonical type ID, if any; otherwise return original
2092 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
2093 * STRUCT/UNION link and resolve it into canonical type ID as well.
2094 */
2095 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
2096 {
2097 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2098 type_id = d->map[type_id];
2099 return type_id;
2100 }
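/*
 * For example (with made-up IDs): if map[7] == 3 and map[3] == 3, then
 * resolve_type_id(d, 7) returns 3; if map[7] is still BTF_UNPROCESSED_ID
 * (i.e., not yet mapped), 7 is returned unchanged.
 */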
2101
2102 /*
2103 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
2104 * type ID.
2105 */
2106 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
2107 {
2108 __u32 orig_type_id = type_id;
2109
2110 if (!btf_is_fwd(d->btf->types[type_id]))
2111 return type_id;
2112
2113 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2114 type_id = d->map[type_id];
2115
2116 if (!btf_is_fwd(d->btf->types[type_id]))
2117 return type_id;
2118
2119 return orig_type_id;
2120 }
2121
2122
2123 static inline __u16 btf_fwd_kind(struct btf_type *t)
2124 {
2125 return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
2126 }
2127
2128 /*
2129 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
2130 * call it "candidate graph" in this description for brevity) to a type graph
2131 * formed by (potential) canonical struct/union ("canonical graph" for brevity
2132 * here, though keep in mind that not all types in canonical graph are
2133 * necessarily canonical representatives themselves, some of them might be
2134  * duplicates or their uniqueness might not have been established yet).
2135 * Returns:
2136 * - >0, if type graphs are equivalent;
2137 * - 0, if not equivalent;
2138 * - <0, on error.
2139 *
2140 * Algorithm performs side-by-side DFS traversal of both type graphs and checks
2141 * equivalence of BTF types at each step. If at any point BTF types in candidate
2142 * and canonical graphs are not compatible structurally, whole graphs are
2143 * incompatible. If types are structurally equivalent (i.e., all information
2144 * except referenced type IDs is exactly the same), a mapping from `canon_id` to
2145  * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`).
2146 * If a type references other types, then those referenced types are checked
2147 * for equivalence recursively.
2148 *
2149 * During DFS traversal, if we find that for current `canon_id` type we
2150 * already have some mapping in hypothetical map, we check for two possible
2151 * situations:
2152 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
2153 * happen when type graphs have cycles. In this case we assume those two
2154 * types are equivalent.
2155 * - `canon_id` is mapped to different type. This is contradiction in our
2156  *   hypothetical mapping, because the same type in the canonical graph
2157  *   corresponds to two different types in the candidate graph, which
2158  *   shouldn't happen for equivalent type graphs. This condition terminates
2159  *   the equivalence check with a negative result.
2160 *
2161  * If type graph traversal exhausts all types to check and finds no contradiction,
2162 * then type graphs are equivalent.
2163 *
2164 * When checking types for equivalence, there is one special case: FWD types.
2165 * If FWD type resolution is allowed and one of the types (either from canonical
2166 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
2167 * flag) and their names match, hypothetical mapping is updated to point from
2168  * FWD to STRUCT/UNION. If the graphs are successfully determined to be equivalent,
2169 * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
2170 *
2171 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
2172  * if there are two identically named (or anonymous) structs/unions that are
2173  * structurally compatible, one of which has a FWD field, while the other is a
2174  * concrete STRUCT/UNION, but according to C sources they are different
2175  * structs/unions that reference different types with the same name. This is
2176  * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
2177  * resolution if this logic causes problems.
2178 *
2179  * Doing FWD resolution means that both candidate and canonical graphs can
2180  * consist of portions of the graph that come from multiple compilation units.
2181  * This is because types within a single compilation unit are always
2182  * deduplicated and FWDs are already resolved, if the referenced struct/union
2183  * definition is available. So, if we had an unresolved FWD and found a corresponding
2184 * STRUCT/UNION, they will be from different compilation units. This
2185 * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
2186 * type graph will likely have at least two different BTF types that describe
2187 * same type (e.g., most probably there will be two different BTF types for the
2188 * same 'int' primitive type) and could even have "overlapping" parts of type
2189 * graph that describe same subset of types.
2190 *
2191 * This in turn means that our assumption that each type in canonical graph
2192 * must correspond to exactly one type in candidate graph might not hold
2193 * anymore and will make it harder to detect contradictions using hypothetical
2194  * map. To handle this problem, we allow following FWD -> STRUCT/UNION
2195  * resolution only in the canonical graph. FWDs in candidate graphs are never
2196 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
2197 * that can occur:
2198 * - Both types in canonical and candidate graphs are FWDs. If they are
2199 * structurally equivalent, then they can either be both resolved to the
2200 * same STRUCT/UNION or not resolved at all. In both cases they are
2201 * equivalent and there is no need to resolve FWD on candidate side.
2202 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
2203  *     so there is nothing to resolve either; the algorithm checks equivalence anyway.
2204 * - Type in canonical graph is FWD, while type in candidate is concrete
2205 * STRUCT/UNION. In this case candidate graph comes from single compilation
2206 * unit, so there is exactly one BTF type for each unique C type. After
2207 * resolving FWD into STRUCT/UNION, there might be more than one BTF type
2208 * in canonical graph mapping to single BTF type in candidate graph, but
2209 * because hypothetical mapping maps from canonical to candidate types, it's
2210 * alright, and we still maintain the property of having single `canon_id`
2211 * mapping to single `cand_id` (there could be two different `canon_id`
2212 * mapped to the same `cand_id`, but it's not contradictory).
2213 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
2214 * graph is FWD. In this case we are just going to check compatibility of
2215 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
2216 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
2217 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
2218 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
2219 * canonical graph.
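 *
 * As a hypothetical illustration of the FWD-in-canonical-graph case above:
 * CU #1 only uses "struct s" through a pointer, so it emits just a forward
 * declaration, while CU #2 has the full definition:
 *
 *	CU #1:	struct s;
 *		struct t { struct s *ptr; };
 *
 *	CU #2:	struct s { int x; };
 *		struct t { struct s *ptr; };
 *
 * If CU #1's "struct t" happened to be registered as the canonical candidate
 * first, then checking CU #2's "struct t" (candidate graph) against it
 * (canonical graph) reaches FWD "s" on the canonical side and concrete STRUCT
 * "s" on the candidate side; names and FWD kind match, so hypot_map records
 * FWD -> STRUCT, and, if the rest of both graphs turn out equivalent, that
 * mapping is made permanent by btf_dedup_merge_hypot_map().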
2220 */
2221 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2222 __u32 canon_id)
2223 {
2224 struct btf_type *cand_type;
2225 struct btf_type *canon_type;
2226 __u32 hypot_type_id;
2227 __u16 cand_kind;
2228 __u16 canon_kind;
2229 int i, eq;
2230
2231 /* if both resolve to the same canonical, they must be equivalent */
2232 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
2233 return 1;
2234
2235 canon_id = resolve_fwd_id(d, canon_id);
2236
2237 hypot_type_id = d->hypot_map[canon_id];
2238 if (hypot_type_id <= BTF_MAX_NR_TYPES)
2239 return hypot_type_id == cand_id;
2240
2241 if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
2242 return -ENOMEM;
2243
2244 cand_type = d->btf->types[cand_id];
2245 canon_type = d->btf->types[canon_id];
2246 cand_kind = btf_kind(cand_type);
2247 canon_kind = btf_kind(canon_type);
2248
2249 if (cand_type->name_off != canon_type->name_off)
2250 return 0;
2251
2252 /* FWD <--> STRUCT/UNION equivalence check, if enabled */
2253 if (!d->opts.dont_resolve_fwds
2254 && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
2255 && cand_kind != canon_kind) {
2256 __u16 real_kind;
2257 __u16 fwd_kind;
2258
2259 if (cand_kind == BTF_KIND_FWD) {
2260 real_kind = canon_kind;
2261 fwd_kind = btf_fwd_kind(cand_type);
2262 } else {
2263 real_kind = cand_kind;
2264 fwd_kind = btf_fwd_kind(canon_type);
2265 }
2266 return fwd_kind == real_kind;
2267 }
2268
2269 if (cand_kind != canon_kind)
2270 return 0;
2271
2272 switch (cand_kind) {
2273 case BTF_KIND_INT:
2274 return btf_equal_int(cand_type, canon_type);
2275
2276 case BTF_KIND_ENUM:
2277 if (d->opts.dont_resolve_fwds)
2278 return btf_equal_enum(cand_type, canon_type);
2279 else
2280 return btf_compat_enum(cand_type, canon_type);
2281
2282 case BTF_KIND_FWD:
2283 return btf_equal_common(cand_type, canon_type);
2284
2285 case BTF_KIND_CONST:
2286 case BTF_KIND_VOLATILE:
2287 case BTF_KIND_RESTRICT:
2288 case BTF_KIND_PTR:
2289 case BTF_KIND_TYPEDEF:
2290 case BTF_KIND_FUNC:
2291 if (cand_type->info != canon_type->info)
2292 return 0;
2293 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2294
2295 case BTF_KIND_ARRAY: {
2296 const struct btf_array *cand_arr, *canon_arr;
2297
2298 if (!btf_compat_array(cand_type, canon_type))
2299 return 0;
2300 cand_arr = btf_array(cand_type);
2301 canon_arr = btf_array(canon_type);
2302 eq = btf_dedup_is_equiv(d,
2303 cand_arr->index_type, canon_arr->index_type);
2304 if (eq <= 0)
2305 return eq;
2306 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
2307 }
2308
2309 case BTF_KIND_STRUCT:
2310 case BTF_KIND_UNION: {
2311 const struct btf_member *cand_m, *canon_m;
2312 __u16 vlen;
2313
2314 if (!btf_shallow_equal_struct(cand_type, canon_type))
2315 return 0;
2316 vlen = btf_vlen(cand_type);
2317 cand_m = btf_members(cand_type);
2318 canon_m = btf_members(canon_type);
2319 for (i = 0; i < vlen; i++) {
2320 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
2321 if (eq <= 0)
2322 return eq;
2323 cand_m++;
2324 canon_m++;
2325 }
2326
2327 return 1;
2328 }
2329
2330 case BTF_KIND_FUNC_PROTO: {
2331 const struct btf_param *cand_p, *canon_p;
2332 __u16 vlen;
2333
2334 if (!btf_compat_fnproto(cand_type, canon_type))
2335 return 0;
2336 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2337 if (eq <= 0)
2338 return eq;
2339 vlen = btf_vlen(cand_type);
2340 cand_p = btf_params(cand_type);
2341 canon_p = btf_params(canon_type);
2342 for (i = 0; i < vlen; i++) {
2343 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
2344 if (eq <= 0)
2345 return eq;
2346 cand_p++;
2347 canon_p++;
2348 }
2349 return 1;
2350 }
2351
2352 default:
2353 return -EINVAL;
2354 }
2355 return 0;
2356 }
2357
2358 /*
2359 * Use hypothetical mapping, produced by successful type graph equivalence
2360 * check, to augment existing struct/union canonical mapping, where possible.
2361 *
2362 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
2363 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
2364 * it doesn't matter if FWD type was part of canonical graph or candidate one,
2365  * we are recording the mapping anyway. Unlike the struct/union correspondence
2366  * mapping (described below), FWD resolution does not require such care: by the
2367  * time a FWD type (a reference type) is deduplicated, all structs/unions will
2368  * already have been deduped anyway.
2369 *
2370 * Recording STRUCT/UNION mapping is purely a performance optimization and is
2371 * not required for correctness. It needs to be done carefully to ensure that
2372 * struct/union from candidate's type graph is not mapped into corresponding
2373 * struct/union from canonical type graph that itself hasn't been resolved into
2374 * canonical representative. The only guarantee we have is that canonical
2375 * struct/union was determined as canonical and that won't change. But any
2376 * types referenced through that struct/union fields could have been not yet
2377 * resolved, so in case like that it's too early to establish any kind of
2378 * correspondence between structs/unions.
2379 *
2380  * No canonical correspondence is derived for primitive types (they are
2381  * already completely deduplicated anyway) or reference types (they rely on
2382  * stability of the struct/union canonical relationship for equivalence checks).
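 *
 * Continuing the hypothetical FWD example from btf_dedup_is_equiv()'s comment
 * (with made-up IDs): if hypot_map recorded canonical-graph FWD "s" (id 12)
 * -> candidate-graph STRUCT "s" (id 40), this pass sets map[12] =
 * resolve_type_id(d, 40), so the FWD now resolves to the concrete definition.
 * The STRUCT/UNION shortcut, in contrast, maps a struct from the just-verified
 * candidate graph onto its canonical-graph counterpart only if that
 * counterpart has already been resolved to a canonical representative.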
2383 */
2384 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
2385 {
2386 __u32 cand_type_id, targ_type_id;
2387 __u16 t_kind, c_kind;
2388 __u32 t_id, c_id;
2389 int i;
2390
2391 for (i = 0; i < d->hypot_cnt; i++) {
2392 cand_type_id = d->hypot_list[i];
2393 targ_type_id = d->hypot_map[cand_type_id];
2394 t_id = resolve_type_id(d, targ_type_id);
2395 c_id = resolve_type_id(d, cand_type_id);
2396 t_kind = btf_kind(d->btf->types[t_id]);
2397 c_kind = btf_kind(d->btf->types[c_id]);
2398 /*
2399 * Resolve FWD into STRUCT/UNION.
2400 * It's ok to resolve FWD into STRUCT/UNION that's not yet
2401 * mapped to canonical representative (as opposed to
2402 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
2403 * eventually that struct is going to be mapped and all resolved
2404 * FWDs will automatically resolve to correct canonical
2405 * representative. This will happen before ref type deduping,
2406 		 * which critically depends on stability of these mappings. This
2407 * stability is not a requirement for STRUCT/UNION equivalence
2408 * checks, though.
2409 */
2410 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
2411 d->map[c_id] = t_id;
2412 else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
2413 d->map[t_id] = c_id;
2414
2415 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
2416 c_kind != BTF_KIND_FWD &&
2417 is_type_mapped(d, c_id) &&
2418 !is_type_mapped(d, t_id)) {
2419 /*
2420 * as a perf optimization, we can map struct/union
2421 * that's part of type graph we just verified for
2422 * equivalence. We can do that for struct/union that has
2423 * canonical representative only, though.
2424 */
2425 d->map[t_id] = c_id;
2426 }
2427 }
2428 }
2429
2430 /*
2431 * Deduplicate struct/union types.
2432 *
2433 * For each struct/union type its type signature hash is calculated, taking
2434 * into account type's name, size, number, order and names of fields, but
2435  * ignoring type IDs referenced from fields, because they might not be deduped
2436 * completely until after reference types deduplication phase. This type hash
2437 * is used to iterate over all potential canonical types, sharing same hash.
2438 * For each canonical candidate we check whether type graphs that they form
2439 * (through referenced types in fields and so on) are equivalent using algorithm
2440 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
2441 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
2442 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
2443 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
2444 * potentially map other structs/unions to their canonical representatives,
2445 * if such relationship hasn't yet been established. This speeds up algorithm
2446 * by eliminating some of the duplicate work.
2447 *
2448 * If no matching canonical representative was found, struct/union is marked
2449 * as canonical for itself and is added into btf_dedup->dedup_table hash map
2450  * for further lookups.
2451 */
2452 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
2453 {
2454 struct btf_type *cand_type, *t;
2455 struct hashmap_entry *hash_entry;
2456 /* if we don't find equivalent type, then we are canonical */
2457 __u32 new_id = type_id;
2458 __u16 kind;
2459 long h;
2460
2461 /* already deduped or is in process of deduping (loop detected) */
2462 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2463 return 0;
2464
2465 t = d->btf->types[type_id];
2466 kind = btf_kind(t);
2467
2468 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
2469 return 0;
2470
2471 h = btf_hash_struct(t);
2472 for_each_dedup_cand(d, hash_entry, h) {
2473 __u32 cand_id = (__u32)(long)hash_entry->value;
2474 int eq;
2475
2476 /*
2477 * Even though btf_dedup_is_equiv() checks for
2478 * btf_shallow_equal_struct() internally when checking two
2479 * structs (unions) for equivalence, we need to guard here
2480 * from picking matching FWD type as a dedup candidate.
2481 * This can happen due to hash collision. In such case just
2482 * relying on btf_dedup_is_equiv() would lead to potentially
2483 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
2484 * FWD and compatible STRUCT/UNION are considered equivalent.
2485 */
2486 cand_type = d->btf->types[cand_id];
2487 if (!btf_shallow_equal_struct(t, cand_type))
2488 continue;
2489
2490 btf_dedup_clear_hypot_map(d);
2491 eq = btf_dedup_is_equiv(d, type_id, cand_id);
2492 if (eq < 0)
2493 return eq;
2494 if (!eq)
2495 continue;
2496 new_id = cand_id;
2497 btf_dedup_merge_hypot_map(d);
2498 break;
2499 }
2500
2501 d->map[type_id] = new_id;
2502 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2503 return -ENOMEM;
2504
2505 return 0;
2506 }
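/*
 * A hypothetical walk-through of the above: two compilation units each emit
 * an identical "struct point { int x; int y; };". Both STRUCTs hash to the
 * same bucket (btf_hash_struct() ignores member type IDs), so whichever is
 * processed first becomes canonical; the second one then passes
 * btf_shallow_equal_struct() and btf_dedup_is_equiv() against it and ends up
 * with map[second_id] = first_id instead of being added to the dedup table.
 */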
2507
2508 static int btf_dedup_struct_types(struct btf_dedup *d)
2509 {
2510 int i, err;
2511
2512 for (i = 1; i <= d->btf->nr_types; i++) {
2513 err = btf_dedup_struct_type(d, i);
2514 if (err)
2515 return err;
2516 }
2517 return 0;
2518 }
2519
2520 /*
2521 * Deduplicate reference type.
2522 *
2523  * Once all primitive and struct/union types have been deduplicated, we can easily
2524 * deduplicate all other (reference) BTF types. This is done in two steps:
2525 *
2526 * 1. Resolve all referenced type IDs into their canonical type IDs. This
2527 * resolution can be done either immediately for primitive or struct/union types
2528 * (because they were deduped in previous two phases) or recursively for
2529 * reference types. Recursion will always terminate at either primitive or
2530 * struct/union type, at which point we can "unwind" chain of reference types
2531 * one by one. There is no danger of encountering cycles because in C type
2532 * system the only way to form type cycle is through struct/union, so any chain
2533 * of reference types, even those taking part in a type cycle, will inevitably
2534 * reach struct/union at some point.
2535 *
2536 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
2537 * becomes "stable", in the sense that no further deduplication will cause
2538 * any changes to it. With that, it's now possible to calculate type's signature
2539 * hash (this time taking into account referenced type IDs) and loop over all
2540 * potential canonical representatives. If no match was found, current type
2541 * will become canonical representative of itself and will be added into
2542 * btf_dedup->dedup_table as another possible canonical representative.
2543 */
2544 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
2545 {
2546 struct hashmap_entry *hash_entry;
2547 	/* if we don't find equivalent type, then we are representative type */
2548 	__u32 new_id = type_id, cand_id;
2549 	struct btf_type *t, *cand;
2550 	int ref_type_id;
2551 long h;
2552
2553 if (d->map[type_id] == BTF_IN_PROGRESS_ID)
2554 return -ELOOP;
2555 if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2556 return resolve_type_id(d, type_id);
2557
2558 t = d->btf->types[type_id];
2559 d->map[type_id] = BTF_IN_PROGRESS_ID;
2560
2561 switch (btf_kind(t)) {
2562 case BTF_KIND_CONST:
2563 case BTF_KIND_VOLATILE:
2564 case BTF_KIND_RESTRICT:
2565 case BTF_KIND_PTR:
2566 case BTF_KIND_TYPEDEF:
2567 case BTF_KIND_FUNC:
2568 ref_type_id = btf_dedup_ref_type(d, t->type);
2569 if (ref_type_id < 0)
2570 return ref_type_id;
2571 t->type = ref_type_id;
2572
2573 h = btf_hash_common(t);
2574 for_each_dedup_cand(d, hash_entry, h) {
2575 cand_id = (__u32)(long)hash_entry->value;
2576 cand = d->btf->types[cand_id];
2577 if (btf_equal_common(t, cand)) {
2578 new_id = cand_id;
2579 break;
2580 }
2581 }
2582 break;
2583
2584 case BTF_KIND_ARRAY: {
2585 struct btf_array *info = btf_array(t);
2586
2587 ref_type_id = btf_dedup_ref_type(d, info->type);
2588 if (ref_type_id < 0)
2589 return ref_type_id;
2590 info->type = ref_type_id;
2591
2592 ref_type_id = btf_dedup_ref_type(d, info->index_type);
2593 if (ref_type_id < 0)
2594 return ref_type_id;
2595 info->index_type = ref_type_id;
2596
2597 h = btf_hash_array(t);
2598 for_each_dedup_cand(d, hash_entry, h) {
2599 cand_id = (__u32)(long)hash_entry->value;
2600 cand = d->btf->types[cand_id];
2601 if (btf_equal_array(t, cand)) {
2602 new_id = cand_id;
2603 break;
2604 }
2605 }
2606 break;
2607 }
2608
2609 case BTF_KIND_FUNC_PROTO: {
2610 struct btf_param *param;
2611 __u16 vlen;
2612 int i;
2613
2614 ref_type_id = btf_dedup_ref_type(d, t->type);
2615 if (ref_type_id < 0)
2616 return ref_type_id;
2617 t->type = ref_type_id;
2618
2619 vlen = btf_vlen(t);
2620 param = btf_params(t);
2621 for (i = 0; i < vlen; i++) {
2622 ref_type_id = btf_dedup_ref_type(d, param->type);
2623 if (ref_type_id < 0)
2624 return ref_type_id;
2625 param->type = ref_type_id;
2626 param++;
2627 }
2628
2629 h = btf_hash_fnproto(t);
2630 for_each_dedup_cand(d, hash_entry, h) {
2631 cand_id = (__u32)(long)hash_entry->value;
2632 cand = d->btf->types[cand_id];
2633 if (btf_equal_fnproto(t, cand)) {
2634 new_id = cand_id;
2635 break;
2636 }
2637 }
2638 break;
2639 }
2640
2641 default:
2642 return -EINVAL;
2643 }
2644
2645 d->map[type_id] = new_id;
2646 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2647 return -ENOMEM;
2648
2649 return new_id;
2650 }
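/*
 * A hypothetical walk-through of the above: for a chain like
 * "const struct s *" (PTR -> CONST -> STRUCT), deduplicating the PTR first
 * recurses into the CONST, which recurses into the STRUCT; the STRUCT already
 * has a canonical mapping from the struct dedup phase, so the recursion
 * unwinds, rewriting CONST's referenced type ID to the canonical STRUCT,
 * hashing and deduplicating the CONST, and then doing the same for the PTR.
 * The BTF_IN_PROGRESS_ID marker catches reference-type cycles, which cannot
 * occur in BTF generated from valid C.
 */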
2651
2652 static int btf_dedup_ref_types(struct btf_dedup *d)
2653 {
2654 int i, err;
2655
2656 for (i = 1; i <= d->btf->nr_types; i++) {
2657 err = btf_dedup_ref_type(d, i);
2658 if (err < 0)
2659 return err;
2660 }
2661 /* we won't need d->dedup_table anymore */
2662 hashmap__free(d->dedup_table);
2663 d->dedup_table = NULL;
2664 return 0;
2665 }
2666
2667 /*
2668 * Compact types.
2669 *
2670  * After we have established for each type its corresponding canonical
2671  * representative type, we can now eliminate types that are not canonical and
2672  * leave only canonical ones laid out sequentially in memory by copying them over
2673 * duplicates. During compaction btf_dedup->hypot_map array is reused to store
2674 * a map from original type ID to a new compacted type ID, which will be used
2675 * during next phase to "fix up" type IDs, referenced from struct/union and
2676 * reference types.
2677 */
2678 static int btf_dedup_compact_types(struct btf_dedup *d)
2679 {
2680 struct btf_type **new_types;
2681 __u32 next_type_id = 1;
2682 char *types_start, *p;
2683 int i, len;
2684
2685 /* we are going to reuse hypot_map to store compaction remapping */
2686 d->hypot_map[0] = 0;
2687 for (i = 1; i <= d->btf->nr_types; i++)
2688 d->hypot_map[i] = BTF_UNPROCESSED_ID;
2689
2690 types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
2691 p = types_start;
2692
2693 for (i = 1; i <= d->btf->nr_types; i++) {
2694 if (d->map[i] != i)
2695 continue;
2696
2697 len = btf_type_size(d->btf->types[i]);
2698 if (len < 0)
2699 return len;
2700
2701 memmove(p, d->btf->types[i], len);
2702 d->hypot_map[i] = next_type_id;
2703 d->btf->types[next_type_id] = (struct btf_type *)p;
2704 p += len;
2705 next_type_id++;
2706 }
2707
2708 /* shrink struct btf's internal types index and update btf_header */
2709 d->btf->nr_types = next_type_id - 1;
2710 d->btf->types_size = d->btf->nr_types;
2711 d->btf->hdr->type_len = p - types_start;
2712 new_types = realloc(d->btf->types,
2713 (1 + d->btf->nr_types) * sizeof(struct btf_type *));
2714 if (!new_types)
2715 return -ENOMEM;
2716 d->btf->types = new_types;
2717
2718 /* make sure string section follows type information without gaps */
2719 d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
2720 memmove(p, d->btf->strings, d->btf->hdr->str_len);
2721 d->btf->strings = p;
2722 p += d->btf->hdr->str_len;
2723
2724 d->btf->data_size = p - (char *)d->btf->data;
2725 return 0;
2726 }
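/*
 * A hypothetical illustration of compaction and the subsequent remap phase
 * (made-up IDs): with 5 types and map = {1:1, 2:1, 3:3, 4:3, 5:5}, only types
 * 1, 3 and 5 are copied and become new IDs 1, 2 and 3, recorded as
 * hypot_map[1] = 1, hypot_map[3] = 2, hypot_map[5] = 3. A field that used to
 * reference old ID 4 is later rewritten by btf_dedup_remap_type_id() to
 * hypot_map[resolve_type_id(4)] == hypot_map[3] == 2.
 */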
2727
2728 /*
2729 * Figure out final (deduplicated and compacted) type ID for provided original
2730 * `type_id` by first resolving it into corresponding canonical type ID and
2731 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
2732 * which is populated during compaction phase.
2733 */
2734 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
2735 {
2736 __u32 resolved_type_id, new_type_id;
2737
2738 resolved_type_id = resolve_type_id(d, type_id);
2739 new_type_id = d->hypot_map[resolved_type_id];
2740 if (new_type_id > BTF_MAX_NR_TYPES)
2741 return -EINVAL;
2742 return new_type_id;
2743 }
2744
2745 /*
2746 * Remap referenced type IDs into deduped type IDs.
2747 *
2748 * After BTF types are deduplicated and compacted, their final type IDs may
2749 * differ from original ones. The map from original to a corresponding
2750 * deduped type ID is stored in btf_dedup->hypot_map and is populated during
2751 * compaction phase. During remapping phase we are rewriting all type IDs
2752 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
2753 * their final deduped type IDs.
2754 */
2755 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
2756 {
2757 struct btf_type *t = d->btf->types[type_id];
2758 int i, r;
2759
2760 switch (btf_kind(t)) {
2761 case BTF_KIND_INT:
2762 case BTF_KIND_ENUM:
2763 break;
2764
2765 case BTF_KIND_FWD:
2766 case BTF_KIND_CONST:
2767 case BTF_KIND_VOLATILE:
2768 case BTF_KIND_RESTRICT:
2769 case BTF_KIND_PTR:
2770 case BTF_KIND_TYPEDEF:
2771 case BTF_KIND_FUNC:
2772 case BTF_KIND_VAR:
2773 r = btf_dedup_remap_type_id(d, t->type);
2774 if (r < 0)
2775 return r;
2776 t->type = r;
2777 break;
2778
2779 case BTF_KIND_ARRAY: {
2780 struct btf_array *arr_info = btf_array(t);
2781
2782 r = btf_dedup_remap_type_id(d, arr_info->type);
2783 if (r < 0)
2784 return r;
2785 arr_info->type = r;
2786 r = btf_dedup_remap_type_id(d, arr_info->index_type);
2787 if (r < 0)
2788 return r;
2789 arr_info->index_type = r;
2790 break;
2791 }
2792
2793 case BTF_KIND_STRUCT:
2794 case BTF_KIND_UNION: {
2795 struct btf_member *member = btf_members(t);
2796 __u16 vlen = btf_vlen(t);
2797
2798 for (i = 0; i < vlen; i++) {
2799 r = btf_dedup_remap_type_id(d, member->type);
2800 if (r < 0)
2801 return r;
2802 member->type = r;
2803 member++;
2804 }
2805 break;
2806 }
2807
2808 case BTF_KIND_FUNC_PROTO: {
2809 struct btf_param *param = btf_params(t);
2810 __u16 vlen = btf_vlen(t);
2811
2812 r = btf_dedup_remap_type_id(d, t->type);
2813 if (r < 0)
2814 return r;
2815 t->type = r;
2816
2817 for (i = 0; i < vlen; i++) {
2818 r = btf_dedup_remap_type_id(d, param->type);
2819 if (r < 0)
2820 return r;
2821 param->type = r;
2822 param++;
2823 }
2824 break;
2825 }
2826
2827 case BTF_KIND_DATASEC: {
2828 struct btf_var_secinfo *var = btf_var_secinfos(t);
2829 __u16 vlen = btf_vlen(t);
2830
2831 for (i = 0; i < vlen; i++) {
2832 r = btf_dedup_remap_type_id(d, var->type);
2833 if (r < 0)
2834 return r;
2835 var->type = r;
2836 var++;
2837 }
2838 break;
2839 }
2840
2841 default:
2842 return -EINVAL;
2843 }
2844
2845 return 0;
2846 }
2847
2848 static int btf_dedup_remap_types(struct btf_dedup *d)
2849 {
2850 int i, r;
2851
2852 for (i = 1; i <= d->btf->nr_types; i++) {
2853 r = btf_dedup_remap_type(d, i);
2854 if (r < 0)
2855 return r;
2856 }
2857 return 0;
2858 }
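/*
 * A minimal usage sketch (for illustration only; it relies on the public
 * btf__dedup() entry point defined elsewhere in this file, which drives the
 * phases above in order: strings, primitive types, structs/unions, reference
 * types, compaction, and type ID remapping):
 *
 *	struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
 *	int err = btf__dedup(btf, NULL, &opts);
 *	if (err)
 *		return err;
 */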
2859