1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 /* Copyright (c) 2018 Facebook */
3 
4 #include <endian.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <errno.h>
11 #include <linux/err.h>
12 #include <linux/btf.h>
13 #include <gelf.h>
14 #include "btf.h"
15 #include "bpf.h"
16 #include "libbpf.h"
17 #include "libbpf_internal.h"
18 #include "hashmap.h"
19 
20 #define BTF_MAX_NR_TYPES 0x7fffffff
21 #define BTF_MAX_STR_OFFSET 0x7fffffff
22 
23 static struct btf_type btf_void;
24 
25 struct btf {
26 	union {
27 		struct btf_header *hdr;
28 		void *data;
29 	};
30 	struct btf_type **types;
31 	const char *strings;
32 	void *nohdr_data;
33 	__u32 nr_types;
34 	__u32 types_size;
35 	__u32 data_size;
36 	int fd;
37 };
38 
39 static inline __u64 ptr_to_u64(const void *ptr)
40 {
41 	return (__u64) (unsigned long) ptr;
42 }
43 
44 static int btf_add_type(struct btf *btf, struct btf_type *t)
45 {
46 	if (btf->types_size - btf->nr_types < 2) {
47 		struct btf_type **new_types;
48 		__u32 expand_by, new_size;
49 
50 		if (btf->types_size == BTF_MAX_NR_TYPES)
51 			return -E2BIG;
52 
53 		expand_by = max(btf->types_size >> 2, 16);
54 		new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
55 
56 		new_types = realloc(btf->types, sizeof(*new_types) * new_size);
57 		if (!new_types)
58 			return -ENOMEM;
59 
60 		if (btf->nr_types == 0)
61 			new_types[0] = &btf_void;
62 
63 		btf->types = new_types;
64 		btf->types_size = new_size;
65 	}
66 
67 	btf->types[++(btf->nr_types)] = t;
68 
69 	return 0;
70 }
71 
72 static int btf_parse_hdr(struct btf *btf)
73 {
74 	const struct btf_header *hdr = btf->hdr;
75 	__u32 meta_left;
76 
77 	if (btf->data_size < sizeof(struct btf_header)) {
78 		pr_debug("BTF header not found\n");
79 		return -EINVAL;
80 	}
81 
82 	if (hdr->magic != BTF_MAGIC) {
83 		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
84 		return -EINVAL;
85 	}
86 
87 	if (hdr->version != BTF_VERSION) {
88 		pr_debug("Unsupported BTF version:%u\n", hdr->version);
89 		return -ENOTSUP;
90 	}
91 
92 	if (hdr->flags) {
93 		pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
94 		return -ENOTSUP;
95 	}
96 
97 	meta_left = btf->data_size - sizeof(*hdr);
98 	if (!meta_left) {
99 		pr_debug("BTF has no data\n");
100 		return -EINVAL;
101 	}
102 
103 	if (meta_left < hdr->type_off) {
104 		pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
105 		return -EINVAL;
106 	}
107 
108 	if (meta_left < hdr->str_off) {
109 		pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
110 		return -EINVAL;
111 	}
112 
113 	if (hdr->type_off >= hdr->str_off) {
114 		pr_debug("BTF type section offset >= string section offset. No type?\n");
115 		return -EINVAL;
116 	}
117 
118 	if (hdr->type_off & 0x02) {
119 		pr_debug("BTF type section is not aligned to 4 bytes\n");
120 		return -EINVAL;
121 	}
122 
123 	btf->nohdr_data = btf->hdr + 1;
124 
125 	return 0;
126 }
127 
128 static int btf_parse_str_sec(struct btf *btf)
129 {
130 	const struct btf_header *hdr = btf->hdr;
131 	const char *start = btf->nohdr_data + hdr->str_off;
132 	const char *end = start + btf->hdr->str_len;
133 
134 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
135 	    start[0] || end[-1]) {
136 		pr_debug("Invalid BTF string section\n");
137 		return -EINVAL;
138 	}
139 
140 	btf->strings = start;
141 
142 	return 0;
143 }
144 
145 static int btf_type_size(struct btf_type *t)
146 {
147 	int base_size = sizeof(struct btf_type);
148 	__u16 vlen = btf_vlen(t);
149 
150 	switch (btf_kind(t)) {
151 	case BTF_KIND_FWD:
152 	case BTF_KIND_CONST:
153 	case BTF_KIND_VOLATILE:
154 	case BTF_KIND_RESTRICT:
155 	case BTF_KIND_PTR:
156 	case BTF_KIND_TYPEDEF:
157 	case BTF_KIND_FUNC:
158 		return base_size;
159 	case BTF_KIND_INT:
160 		return base_size + sizeof(__u32);
161 	case BTF_KIND_ENUM:
162 		return base_size + vlen * sizeof(struct btf_enum);
163 	case BTF_KIND_ARRAY:
164 		return base_size + sizeof(struct btf_array);
165 	case BTF_KIND_STRUCT:
166 	case BTF_KIND_UNION:
167 		return base_size + vlen * sizeof(struct btf_member);
168 	case BTF_KIND_FUNC_PROTO:
169 		return base_size + vlen * sizeof(struct btf_param);
170 	case BTF_KIND_VAR:
171 		return base_size + sizeof(struct btf_var);
172 	case BTF_KIND_DATASEC:
173 		return base_size + vlen * sizeof(struct btf_var_secinfo);
174 	default:
175 		pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
176 		return -EINVAL;
177 	}
178 }
179 
180 static int btf_parse_type_sec(struct btf *btf)
181 {
182 	struct btf_header *hdr = btf->hdr;
183 	void *nohdr_data = btf->nohdr_data;
184 	void *next_type = nohdr_data + hdr->type_off;
185 	void *end_type = nohdr_data + hdr->str_off;
186 
187 	while (next_type < end_type) {
188 		struct btf_type *t = next_type;
189 		int type_size;
190 		int err;
191 
192 		type_size = btf_type_size(t);
193 		if (type_size < 0)
194 			return type_size;
195 		next_type += type_size;
196 		err = btf_add_type(btf, t);
197 		if (err)
198 			return err;
199 	}
200 
201 	return 0;
202 }
203 
204 __u32 btf__get_nr_types(const struct btf *btf)
205 {
206 	return btf->nr_types;
207 }
208 
209 const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
210 {
211 	if (type_id > btf->nr_types)
212 		return NULL;
213 
214 	return btf->types[type_id];
215 }
216 
217 static bool btf_type_is_void(const struct btf_type *t)
218 {
219 	return t == &btf_void || btf_is_fwd(t);
220 }
221 
222 static bool btf_type_is_void_or_null(const struct btf_type *t)
223 {
224 	return !t || btf_type_is_void(t);
225 }
226 
227 #define MAX_RESOLVE_DEPTH 32
228 
229 __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
230 {
231 	const struct btf_array *array;
232 	const struct btf_type *t;
233 	__u32 nelems = 1;
234 	__s64 size = -1;
235 	int i;
236 
237 	t = btf__type_by_id(btf, type_id);
238 	for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
239 	     i++) {
240 		switch (btf_kind(t)) {
241 		case BTF_KIND_INT:
242 		case BTF_KIND_STRUCT:
243 		case BTF_KIND_UNION:
244 		case BTF_KIND_ENUM:
245 		case BTF_KIND_DATASEC:
246 			size = t->size;
247 			goto done;
248 		case BTF_KIND_PTR:
249 			size = sizeof(void *);
250 			goto done;
251 		case BTF_KIND_TYPEDEF:
252 		case BTF_KIND_VOLATILE:
253 		case BTF_KIND_CONST:
254 		case BTF_KIND_RESTRICT:
255 		case BTF_KIND_VAR:
256 			type_id = t->type;
257 			break;
258 		case BTF_KIND_ARRAY:
259 			array = btf_array(t);
260 			if (nelems && array->nelems > UINT32_MAX / nelems)
261 				return -E2BIG;
262 			nelems *= array->nelems;
263 			type_id = array->type;
264 			break;
265 		default:
266 			return -EINVAL;
267 		}
268 
269 		t = btf__type_by_id(btf, type_id);
270 	}
271 
272 done:
273 	if (size < 0)
274 		return -EINVAL;
275 	if (nelems && size > UINT32_MAX / nelems)
276 		return -E2BIG;
277 
278 	return nelems * size;
279 }
280 
281 int btf__resolve_type(const struct btf *btf, __u32 type_id)
282 {
283 	const struct btf_type *t;
284 	int depth = 0;
285 
286 	t = btf__type_by_id(btf, type_id);
287 	while (depth < MAX_RESOLVE_DEPTH &&
288 	       !btf_type_is_void_or_null(t) &&
289 	       (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
290 		type_id = t->type;
291 		t = btf__type_by_id(btf, type_id);
292 		depth++;
293 	}
294 
295 	if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
296 		return -EINVAL;
297 
298 	return type_id;
299 }
300 
301 __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
302 {
303 	__u32 i;
304 
305 	if (!strcmp(type_name, "void"))
306 		return 0;
307 
308 	for (i = 1; i <= btf->nr_types; i++) {
309 		const struct btf_type *t = btf->types[i];
310 		const char *name = btf__name_by_offset(btf, t->name_off);
311 
312 		if (name && !strcmp(type_name, name))
313 			return i;
314 	}
315 
316 	return -ENOENT;
317 }
318 
319 void btf__free(struct btf *btf)
320 {
321 	if (!btf)
322 		return;
323 
324 	if (btf->fd != -1)
325 		close(btf->fd);
326 
327 	free(btf->data);
328 	free(btf->types);
329 	free(btf);
330 }
331 
332 struct btf *btf__new(__u8 *data, __u32 size)
333 {
334 	struct btf *btf;
335 	int err;
336 
337 	btf = calloc(1, sizeof(struct btf));
338 	if (!btf)
339 		return ERR_PTR(-ENOMEM);
340 
341 	btf->fd = -1;
342 
343 	btf->data = malloc(size);
344 	if (!btf->data) {
345 		err = -ENOMEM;
346 		goto done;
347 	}
348 
349 	memcpy(btf->data, data, size);
350 	btf->data_size = size;
351 
352 	err = btf_parse_hdr(btf);
353 	if (err)
354 		goto done;
355 
356 	err = btf_parse_str_sec(btf);
357 	if (err)
358 		goto done;
359 
360 	err = btf_parse_type_sec(btf);
361 
362 done:
363 	if (err) {
364 		btf__free(btf);
365 		return ERR_PTR(err);
366 	}
367 
368 	return btf;
369 }
370 
371 static bool btf_check_endianness(const GElf_Ehdr *ehdr)
372 {
373 #if __BYTE_ORDER == __LITTLE_ENDIAN
374 	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
375 #elif __BYTE_ORDER == __BIG_ENDIAN
376 	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
377 #else
378 # error "Unrecognized __BYTE_ORDER__"
379 #endif
380 }
381 
382 struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
383 {
384 	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
385 	int err = 0, fd = -1, idx = 0;
386 	struct btf *btf = NULL;
387 	Elf_Scn *scn = NULL;
388 	Elf *elf = NULL;
389 	GElf_Ehdr ehdr;
390 
391 	if (elf_version(EV_CURRENT) == EV_NONE) {
392 		pr_warning("failed to init libelf for %s\n", path);
393 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
394 	}
395 
396 	fd = open(path, O_RDONLY);
397 	if (fd < 0) {
398 		err = -errno;
399 		pr_warning("failed to open %s: %s\n", path, strerror(errno));
400 		return ERR_PTR(err);
401 	}
402 
403 	err = -LIBBPF_ERRNO__FORMAT;
404 
405 	elf = elf_begin(fd, ELF_C_READ, NULL);
406 	if (!elf) {
407 		pr_warning("failed to open %s as ELF file\n", path);
408 		goto done;
409 	}
410 	if (!gelf_getehdr(elf, &ehdr)) {
411 		pr_warning("failed to get EHDR from %s\n", path);
412 		goto done;
413 	}
414 	if (!btf_check_endianness(&ehdr)) {
415 		pr_warning("non-native ELF endianness is not supported\n");
416 		goto done;
417 	}
418 	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
419 		pr_warning("failed to get e_shstrndx from %s\n", path);
420 		goto done;
421 	}
422 
423 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
424 		GElf_Shdr sh;
425 		char *name;
426 
427 		idx++;
428 		if (gelf_getshdr(scn, &sh) != &sh) {
429 			pr_warning("failed to get section(%d) header from %s\n",
430 				   idx, path);
431 			goto done;
432 		}
433 		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
434 		if (!name) {
435 			pr_warning("failed to get section(%d) name from %s\n",
436 				   idx, path);
437 			goto done;
438 		}
439 		if (strcmp(name, BTF_ELF_SEC) == 0) {
440 			btf_data = elf_getdata(scn, 0);
441 			if (!btf_data) {
442 				pr_warning("failed to get section(%d, %s) data from %s\n",
443 					   idx, name, path);
444 				goto done;
445 			}
446 			continue;
447 		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
448 			btf_ext_data = elf_getdata(scn, 0);
449 			if (!btf_ext_data) {
450 				pr_warning("failed to get section(%d, %s) data from %s\n",
451 					   idx, name, path);
452 				goto done;
453 			}
454 			continue;
455 		}
456 	}
457 
458 	err = 0;
459 
460 	if (!btf_data) {
461 		err = -ENOENT;
462 		goto done;
463 	}
464 	btf = btf__new(btf_data->d_buf, btf_data->d_size);
465 	if (IS_ERR(btf))
466 		goto done;
467 
468 	if (btf_ext && btf_ext_data) {
469 		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
470 					btf_ext_data->d_size);
471 		if (IS_ERR(*btf_ext))
472 			goto done;
473 	} else if (btf_ext) {
474 		*btf_ext = NULL;
475 	}
476 done:
477 	if (elf)
478 		elf_end(elf);
479 	close(fd);
480 
481 	if (err)
482 		return ERR_PTR(err);
483 	/*
484 	 * btf is always parsed before btf_ext, so no need to clean up
485 	 * btf_ext, if btf loading failed
486 	 */
487 	if (IS_ERR(btf))
488 		return btf;
489 	if (btf_ext && IS_ERR(*btf_ext)) {
490 		btf__free(btf);
491 		err = PTR_ERR(*btf_ext);
492 		return ERR_PTR(err);
493 	}
494 	return btf;
495 }
496 
497 static int compare_vsi_off(const void *_a, const void *_b)
498 {
499 	const struct btf_var_secinfo *a = _a;
500 	const struct btf_var_secinfo *b = _b;
501 
502 	return a->offset - b->offset;
503 }
504 
505 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
506 			     struct btf_type *t)
507 {
508 	__u32 size = 0, off = 0, i, vars = btf_vlen(t);
509 	const char *name = btf__name_by_offset(btf, t->name_off);
510 	const struct btf_type *t_var;
511 	struct btf_var_secinfo *vsi;
512 	const struct btf_var *var;
513 	int ret;
514 
515 	if (!name) {
516 		pr_debug("No name found in string section for DATASEC kind.\n");
517 		return -ENOENT;
518 	}
519 
520 	ret = bpf_object__section_size(obj, name, &size);
521 	if (ret || !size || (t->size && t->size != size)) {
522 		pr_debug("Invalid size for section %s: %u bytes\n", name, size);
523 		return -ENOENT;
524 	}
525 
526 	t->size = size;
527 
528 	for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
529 		t_var = btf__type_by_id(btf, vsi->type);
530 		var = btf_var(t_var);
531 
532 		if (!btf_is_var(t_var)) {
533 			pr_debug("Non-VAR type seen in section %s\n", name);
534 			return -EINVAL;
535 		}
536 
537 		if (var->linkage == BTF_VAR_STATIC)
538 			continue;
539 
540 		name = btf__name_by_offset(btf, t_var->name_off);
541 		if (!name) {
542 			pr_debug("No name found in string section for VAR kind\n");
543 			return -ENOENT;
544 		}
545 
546 		ret = bpf_object__variable_offset(obj, name, &off);
547 		if (ret) {
548 			pr_debug("No offset found in symbol table for VAR %s\n",
549 				 name);
550 			return -ENOENT;
551 		}
552 
553 		vsi->offset = off;
554 	}
555 
556 	qsort(t + 1, vars, sizeof(*vsi), compare_vsi_off);
557 	return 0;
558 }
559 
560 int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
561 {
562 	int err = 0;
563 	__u32 i;
564 
565 	for (i = 1; i <= btf->nr_types; i++) {
566 		struct btf_type *t = btf->types[i];
567 
568 		/* Loader needs to fix up some of the things compiler
569 		 * couldn't get its hands on while emitting BTF. This
570 		 * is section size and global variable offset. We use
571 		 * the info from the ELF itself for this purpose.
572 		 */
573 		if (btf_is_datasec(t)) {
574 			err = btf_fixup_datasec(obj, btf, t);
575 			if (err)
576 				break;
577 		}
578 	}
579 
580 	return err;
581 }
582 
583 int btf__load(struct btf *btf)
584 {
585 	__u32 log_buf_size = BPF_LOG_BUF_SIZE;
586 	char *log_buf = NULL;
587 	int err = 0;
588 
589 	if (btf->fd >= 0)
590 		return -EEXIST;
591 
592 	log_buf = malloc(log_buf_size);
593 	if (!log_buf)
594 		return -ENOMEM;
595 
596 	*log_buf = 0;
597 
598 	btf->fd = bpf_load_btf(btf->data, btf->data_size,
599 			       log_buf, log_buf_size, false);
600 	if (btf->fd < 0) {
601 		err = -errno;
602 		pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
603 		if (*log_buf)
604 			pr_warning("%s\n", log_buf);
605 		goto done;
606 	}
607 
608 done:
609 	free(log_buf);
610 	return err;
611 }
612 
613 int btf__fd(const struct btf *btf)
614 {
615 	return btf->fd;
616 }
617 
618 const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
619 {
620 	*size = btf->data_size;
621 	return btf->data;
622 }
623 
624 const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
625 {
626 	if (offset < btf->hdr->str_len)
627 		return &btf->strings[offset];
628 	else
629 		return NULL;
630 }
631 
632 int btf__get_from_id(__u32 id, struct btf **btf)
633 {
634 	struct bpf_btf_info btf_info = { 0 };
635 	__u32 len = sizeof(btf_info);
636 	__u32 last_size;
637 	int btf_fd;
638 	void *ptr;
639 	int err;
640 
641 	err = 0;
642 	*btf = NULL;
643 	btf_fd = bpf_btf_get_fd_by_id(id);
644 	if (btf_fd < 0)
645 		return 0;
646 
647 	/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
648 	 * let's start with a sane default - 4KiB here - and resize it only if
649 	 * bpf_obj_get_info_by_fd() needs a bigger buffer.
650 	 */
651 	btf_info.btf_size = 4096;
652 	last_size = btf_info.btf_size;
653 	ptr = malloc(last_size);
654 	if (!ptr) {
655 		err = -ENOMEM;
656 		goto exit_free;
657 	}
658 
659 	memset(ptr, 0, last_size);
660 	btf_info.btf = ptr_to_u64(ptr);
661 	err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
662 
663 	if (!err && btf_info.btf_size > last_size) {
664 		void *temp_ptr;
665 
666 		last_size = btf_info.btf_size;
667 		temp_ptr = realloc(ptr, last_size);
668 		if (!temp_ptr) {
669 			err = -ENOMEM;
670 			goto exit_free;
671 		}
672 		ptr = temp_ptr;
673 		memset(ptr, 0, last_size);
674 		btf_info.btf = ptr_to_u64(ptr);
675 		err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
676 	}
677 
678 	if (err || btf_info.btf_size > last_size) {
679 		err = errno;
680 		goto exit_free;
681 	}
682 
683 	*btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
684 	if (IS_ERR(*btf)) {
685 		err = PTR_ERR(*btf);
686 		*btf = NULL;
687 	}
688 
689 exit_free:
690 	close(btf_fd);
691 	free(ptr);
692 
693 	return err;
694 }
695 
696 int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
697 			 __u32 expected_key_size, __u32 expected_value_size,
698 			 __u32 *key_type_id, __u32 *value_type_id)
699 {
700 	const struct btf_type *container_type;
701 	const struct btf_member *key, *value;
702 	const size_t max_name = 256;
703 	char container_name[max_name];
704 	__s64 key_size, value_size;
705 	__s32 container_id;
706 
707 	if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
708 	    max_name) {
709 		pr_warning("map:%s length of '____btf_map_%s' is too long\n",
710 			   map_name, map_name);
711 		return -EINVAL;
712 	}
713 
714 	container_id = btf__find_by_name(btf, container_name);
715 	if (container_id < 0) {
716 		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
717 			 map_name, container_name);
718 		return container_id;
719 	}
720 
721 	container_type = btf__type_by_id(btf, container_id);
722 	if (!container_type) {
723 		pr_warning("map:%s cannot find BTF type for container_id:%u\n",
724 			   map_name, container_id);
725 		return -EINVAL;
726 	}
727 
728 	if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
729 		pr_warning("map:%s container_name:%s is an invalid container struct\n",
730 			   map_name, container_name);
731 		return -EINVAL;
732 	}
733 
734 	key = btf_members(container_type);
735 	value = key + 1;
736 
737 	key_size = btf__resolve_size(btf, key->type);
738 	if (key_size < 0) {
739 		pr_warning("map:%s invalid BTF key_type_size\n", map_name);
740 		return key_size;
741 	}
742 
743 	if (expected_key_size != key_size) {
744 		pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
745 			   map_name, (__u32)key_size, expected_key_size);
746 		return -EINVAL;
747 	}
748 
749 	value_size = btf__resolve_size(btf, value->type);
750 	if (value_size < 0) {
751 		pr_warning("map:%s invalid BTF value_type_size\n", map_name);
752 		return value_size;
753 	}
754 
755 	if (expected_value_size != value_size) {
756 		pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
757 			   map_name, (__u32)value_size, expected_value_size);
758 		return -EINVAL;
759 	}
760 
761 	*key_type_id = key->type;
762 	*value_type_id = value->type;
763 
764 	return 0;
765 }
766 
767 struct btf_ext_sec_setup_param {
768 	__u32 off;
769 	__u32 len;
770 	__u32 min_rec_size;
771 	struct btf_ext_info *ext_info;
772 	const char *desc;
773 };
774 
775 static int btf_ext_setup_info(struct btf_ext *btf_ext,
776 			      struct btf_ext_sec_setup_param *ext_sec)
777 {
778 	const struct btf_ext_info_sec *sinfo;
779 	struct btf_ext_info *ext_info;
780 	__u32 info_left, record_size;
781 	/* The start of the info sec (including the __u32 record_size). */
782 	void *info;
783 
784 	if (ext_sec->len == 0)
785 		return 0;
786 
787 	if (ext_sec->off & 0x03) {
788 		pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
789 		     ext_sec->desc);
790 		return -EINVAL;
791 	}
792 
793 	info = btf_ext->data + btf_ext->hdr->hdr_len + ext_sec->off;
794 	info_left = ext_sec->len;
795 
796 	if (btf_ext->data + btf_ext->data_size < info + ext_sec->len) {
797 		pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
798 			 ext_sec->desc, ext_sec->off, ext_sec->len);
799 		return -EINVAL;
800 	}
801 
802 	/* At least a record size */
803 	if (info_left < sizeof(__u32)) {
804 		pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
805 		return -EINVAL;
806 	}
807 
808 	/* The record size needs to meet the minimum standard */
809 	record_size = *(__u32 *)info;
810 	if (record_size < ext_sec->min_rec_size ||
811 	    record_size & 0x03) {
812 		pr_debug("%s section in .BTF.ext has invalid record size %u\n",
813 			 ext_sec->desc, record_size);
814 		return -EINVAL;
815 	}
816 
817 	sinfo = info + sizeof(__u32);
818 	info_left -= sizeof(__u32);
819 
820 	/* If no records, return failure now so .BTF.ext won't be used. */
821 	if (!info_left) {
822 		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
823 		return -EINVAL;
824 	}
825 
826 	while (info_left) {
827 		unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
828 		__u64 total_record_size;
829 		__u32 num_records;
830 
831 		if (info_left < sec_hdrlen) {
832 			pr_debug("%s section header is not found in .BTF.ext\n",
833 			     ext_sec->desc);
834 			return -EINVAL;
835 		}
836 
837 		num_records = sinfo->num_info;
838 		if (num_records == 0) {
839 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
840 			     ext_sec->desc);
841 			return -EINVAL;
842 		}
843 
844 		total_record_size = sec_hdrlen +
845 				    (__u64)num_records * record_size;
846 		if (info_left < total_record_size) {
847 			pr_debug("%s section has incorrect num_records in .BTF.ext\n",
848 			     ext_sec->desc);
849 			return -EINVAL;
850 		}
851 
852 		info_left -= total_record_size;
853 		sinfo = (void *)sinfo + total_record_size;
854 	}
855 
856 	ext_info = ext_sec->ext_info;
857 	ext_info->len = ext_sec->len - sizeof(__u32);
858 	ext_info->rec_size = record_size;
859 	ext_info->info = info + sizeof(__u32);
860 
861 	return 0;
862 }
863 
864 static int btf_ext_setup_func_info(struct btf_ext *btf_ext)
865 {
866 	struct btf_ext_sec_setup_param param = {
867 		.off = btf_ext->hdr->func_info_off,
868 		.len = btf_ext->hdr->func_info_len,
869 		.min_rec_size = sizeof(struct bpf_func_info_min),
870 		.ext_info = &btf_ext->func_info,
871 		.desc = "func_info"
872 	};
873 
874 	return btf_ext_setup_info(btf_ext, &param);
875 }
876 
877 static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
878 {
879 	struct btf_ext_sec_setup_param param = {
880 		.off = btf_ext->hdr->line_info_off,
881 		.len = btf_ext->hdr->line_info_len,
882 		.min_rec_size = sizeof(struct bpf_line_info_min),
883 		.ext_info = &btf_ext->line_info,
884 		.desc = "line_info",
885 	};
886 
887 	return btf_ext_setup_info(btf_ext, &param);
888 }
889 
890 static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext)
891 {
892 	struct btf_ext_sec_setup_param param = {
893 		.off = btf_ext->hdr->offset_reloc_off,
894 		.len = btf_ext->hdr->offset_reloc_len,
895 		.min_rec_size = sizeof(struct bpf_offset_reloc),
896 		.ext_info = &btf_ext->offset_reloc_info,
897 		.desc = "offset_reloc",
898 	};
899 
900 	return btf_ext_setup_info(btf_ext, &param);
901 }
902 
903 static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
904 {
905 	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
906 
907 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
908 	    data_size < hdr->hdr_len) {
909 		pr_debug("BTF.ext header not found");
910 		return -EINVAL;
911 	}
912 
913 	if (hdr->magic != BTF_MAGIC) {
914 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
915 		return -EINVAL;
916 	}
917 
918 	if (hdr->version != BTF_VERSION) {
919 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
920 		return -ENOTSUP;
921 	}
922 
923 	if (hdr->flags) {
924 		pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
925 		return -ENOTSUP;
926 	}
927 
928 	if (data_size == hdr->hdr_len) {
929 		pr_debug("BTF.ext has no data\n");
930 		return -EINVAL;
931 	}
932 
933 	return 0;
934 }
935 
936 void btf_ext__free(struct btf_ext *btf_ext)
937 {
938 	if (!btf_ext)
939 		return;
940 	free(btf_ext->data);
941 	free(btf_ext);
942 }
943 
944 struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
945 {
946 	struct btf_ext *btf_ext;
947 	int err;
948 
949 	err = btf_ext_parse_hdr(data, size);
950 	if (err)
951 		return ERR_PTR(err);
952 
953 	btf_ext = calloc(1, sizeof(struct btf_ext));
954 	if (!btf_ext)
955 		return ERR_PTR(-ENOMEM);
956 
957 	btf_ext->data_size = size;
958 	btf_ext->data = malloc(size);
959 	if (!btf_ext->data) {
960 		err = -ENOMEM;
961 		goto done;
962 	}
963 	memcpy(btf_ext->data, data, size);
964 
965 	if (btf_ext->hdr->hdr_len <
966 	    offsetofend(struct btf_ext_header, line_info_len))
967 		goto done;
968 	err = btf_ext_setup_func_info(btf_ext);
969 	if (err)
970 		goto done;
971 
972 	err = btf_ext_setup_line_info(btf_ext);
973 	if (err)
974 		goto done;
975 
976 	if (btf_ext->hdr->hdr_len <
977 	    offsetofend(struct btf_ext_header, offset_reloc_len))
978 		goto done;
979 	err = btf_ext_setup_offset_reloc(btf_ext);
980 	if (err)
981 		goto done;
982 
983 done:
984 	if (err) {
985 		btf_ext__free(btf_ext);
986 		return ERR_PTR(err);
987 	}
988 
989 	return btf_ext;
990 }
991 
992 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
993 {
994 	*size = btf_ext->data_size;
995 	return btf_ext->data;
996 }
997 
998 static int btf_ext_reloc_info(const struct btf *btf,
999 			      const struct btf_ext_info *ext_info,
1000 			      const char *sec_name, __u32 insns_cnt,
1001 			      void **info, __u32 *cnt)
1002 {
1003 	__u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
1004 	__u32 i, record_size, existing_len, records_len;
1005 	struct btf_ext_info_sec *sinfo;
1006 	const char *info_sec_name;
1007 	__u64 remain_len;
1008 	void *data;
1009 
1010 	record_size = ext_info->rec_size;
1011 	sinfo = ext_info->info;
1012 	remain_len = ext_info->len;
1013 	while (remain_len > 0) {
1014 		records_len = sinfo->num_info * record_size;
1015 		info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
1016 		if (strcmp(info_sec_name, sec_name)) {
1017 			remain_len -= sec_hdrlen + records_len;
1018 			sinfo = (void *)sinfo + sec_hdrlen + records_len;
1019 			continue;
1020 		}
1021 
1022 		existing_len = (*cnt) * record_size;
1023 		data = realloc(*info, existing_len + records_len);
1024 		if (!data)
1025 			return -ENOMEM;
1026 
1027 		memcpy(data + existing_len, sinfo->data, records_len);
1028 		/* adjust insn_off only, the rest data will be passed
1029 		 * to the kernel.
1030 		 */
1031 		for (i = 0; i < sinfo->num_info; i++) {
1032 			__u32 *insn_off;
1033 
1034 			insn_off = data + existing_len + (i * record_size);
1035 			*insn_off = *insn_off / sizeof(struct bpf_insn) +
1036 				insns_cnt;
1037 		}
1038 		*info = data;
1039 		*cnt += sinfo->num_info;
1040 		return 0;
1041 	}
1042 
1043 	return -ENOENT;
1044 }
1045 
1046 int btf_ext__reloc_func_info(const struct btf *btf,
1047 			     const struct btf_ext *btf_ext,
1048 			     const char *sec_name, __u32 insns_cnt,
1049 			     void **func_info, __u32 *cnt)
1050 {
1051 	return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
1052 				  insns_cnt, func_info, cnt);
1053 }
1054 
1055 int btf_ext__reloc_line_info(const struct btf *btf,
1056 			     const struct btf_ext *btf_ext,
1057 			     const char *sec_name, __u32 insns_cnt,
1058 			     void **line_info, __u32 *cnt)
1059 {
1060 	return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
1061 				  insns_cnt, line_info, cnt);
1062 }
1063 
1064 __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
1065 {
1066 	return btf_ext->func_info.rec_size;
1067 }
1068 
1069 __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
1070 {
1071 	return btf_ext->line_info.rec_size;
1072 }
1073 
1074 struct btf_dedup;
1075 
1076 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1077 				       const struct btf_dedup_opts *opts);
1078 static void btf_dedup_free(struct btf_dedup *d);
1079 static int btf_dedup_strings(struct btf_dedup *d);
1080 static int btf_dedup_prim_types(struct btf_dedup *d);
1081 static int btf_dedup_struct_types(struct btf_dedup *d);
1082 static int btf_dedup_ref_types(struct btf_dedup *d);
1083 static int btf_dedup_compact_types(struct btf_dedup *d);
1084 static int btf_dedup_remap_types(struct btf_dedup *d);
1085 
1086 /*
1087  * Deduplicate BTF types and strings.
1088  *
1089  * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
1090  * section with all BTF type descriptors and string data. It overwrites that
1091  * memory in-place with deduplicated types and strings without any loss of
1092  * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
1093  * is provided, all the strings referenced from .BTF.ext section are honored
1094  * and updated to point to the right offsets after deduplication.
1095  *
1096  * If function returns with error, type/string data might be garbled and should
1097  * be discarded.
1098  *
1099  * A more verbose and detailed description of both the problem btf_dedup is
1100  * solving and the solution can be found at:
1101  * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
1102  *
1103  * Problem description and justification
1104  * =====================================
1105  *
1106  * BTF type information is typically emitted either as a result of conversion
1107  * from DWARF to BTF or directly by compiler. In both cases, each compilation
1108  * unit contains information about a subset of all the types that are used
1109  * in an application. These subsets are frequently overlapping and contain a lot
1110  * of duplicated information when later concatenated together into a single
1111  * binary. This algorithm ensures that each unique type is represented by a
1112  * single BTF type descriptor, greatly reducing the resulting size of BTF data.
1113  *
1114  * Compilation unit isolation and subsequent duplication of data is not the only
1115  * problem. The same type hierarchy (e.g., a struct and all the types that struct
1116  * references) in different compilation units can be represented in BTF to
1117  * various degrees of completeness (or, rather, incompleteness) due to
1118  * struct/union forward declarations.
1119  *
1120  * Let's take a look at an example that we'll use to better understand the
1121  * problem (and solution). Suppose we have two compilation units, each using
1122  * same `struct S`, but each of them having incomplete type information about
1123  * struct's fields:
1124  *
1125  * // CU #1:
1126  * struct S;
1127  * struct A {
1128  *	int a;
1129  *	struct A* self;
1130  *	struct S* parent;
1131  * };
1132  * struct B;
1133  * struct S {
1134  *	struct A* a_ptr;
1135  *	struct B* b_ptr;
1136  * };
1137  *
1138  * // CU #2:
1139  * struct S;
1140  * struct A;
1141  * struct B {
1142  *	int b;
1143  *	struct B* self;
1144  *	struct S* parent;
1145  * };
1146  * struct S {
1147  *	struct A* a_ptr;
1148  *	struct B* b_ptr;
1149  * };
1150  *
1151  * In case of CU #1, BTF data will know only that `struct B` exists (but no
1152  * more), but will know the complete type information about `struct A`. While
1153  * for CU #2, it will know full type information about `struct B`, but will
1154  * only know about forward declaration of `struct A` (in BTF terms, it will
1155  * have a `BTF_KIND_FWD` type descriptor with name `A`).
1156  *
1157  * This compilation unit isolation means that it's possible that there is no
1158  * single CU with complete type information describing structs `S`, `A`, and
1159  * `B`. Also, we might get tons of duplicated and redundant type information.
1160  *
1161  * Additional complication we need to keep in mind comes from the fact that
1162  * types, in general, can form graphs containing cycles, not just DAGs.
1163  *
1164  * While the algorithm does deduplication, it also merges and resolves type
1165  * information (unless disabled through `struct btf_dedup_opts`), whenever possible.
1166  * E.g., in the example above with two compilation units having partial type
1167  * information for structs `A` and `B`, the output of algorithm will emit
1168  * a single copy of each BTF type that describes structs `A`, `B`, and `S`
1169  * (as well as type information for `int` and pointers), as if they were defined
1170  * in a single compilation unit as:
1171  *
1172  * struct A {
1173  *	int a;
1174  *	struct A* self;
1175  *	struct S* parent;
1176  * };
1177  * struct B {
1178  *	int b;
1179  *	struct B* self;
1180  *	struct S* parent;
1181  * };
1182  * struct S {
1183  *	struct A* a_ptr;
1184  *	struct B* b_ptr;
1185  * };
1186  *
1187  * Algorithm summary
1188  * =================
1189  *
1190  * Algorithm completes its work in 6 separate passes:
1191  *
1192  * 1. Strings deduplication.
1193  * 2. Primitive types deduplication (int, enum, fwd).
1194  * 3. Struct/union types deduplication.
1195  * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
1196  *    protos, and const/volatile/restrict modifiers).
1197  * 5. Types compaction.
1198  * 6. Types remapping.
1199  *
1200  * Algorithm determines canonical type descriptor, which is a single
1201  * representative type for each truly unique type. This canonical type is the
1202  * one that will go into final deduplicated BTF type information. For
1203  * struct/unions, it is also the type that algorithm will merge additional type
1204  * information into (while resolving FWDs), as it discovers it from data in
1205  * other CUs. Each input BTF type eventually gets either mapped to itself, if
1206  * that type is canonical, or to some other type, if that type is equivalent
1207  * and was chosen as canonical representative. This mapping is stored in
1208  * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
1209  * FWD type got resolved to.
1210  *
1211  * To facilitate fast discovery of canonical types, we also maintain canonical
1212  * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
1213  * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
1214  * that match that signature. With sufficiently good choice of type signature
1215  * hashing function, we can limit the number of canonical types for each unique
1216  * type signature to a very small number, allowing us to find the canonical type
1217  * for any duplicated type very quickly.
1218  *
1219  * Struct/union deduplication is the most critical part and algorithm for
1220  * Struct/union deduplication is the most critical part and the algorithm for
1221  * deduplicating structs/unions is described in greater detail in comments for
1222  */
1223 int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
1224 	       const struct btf_dedup_opts *opts)
1225 {
1226 	struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
1227 	int err;
1228 
1229 	if (IS_ERR(d)) {
1230 		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
1231 		return -EINVAL;
1232 	}
1233 
1234 	err = btf_dedup_strings(d);
1235 	if (err < 0) {
1236 		pr_debug("btf_dedup_strings failed:%d\n", err);
1237 		goto done;
1238 	}
1239 	err = btf_dedup_prim_types(d);
1240 	if (err < 0) {
1241 		pr_debug("btf_dedup_prim_types failed:%d\n", err);
1242 		goto done;
1243 	}
1244 	err = btf_dedup_struct_types(d);
1245 	if (err < 0) {
1246 		pr_debug("btf_dedup_struct_types failed:%d\n", err);
1247 		goto done;
1248 	}
1249 	err = btf_dedup_ref_types(d);
1250 	if (err < 0) {
1251 		pr_debug("btf_dedup_ref_types failed:%d\n", err);
1252 		goto done;
1253 	}
1254 	err = btf_dedup_compact_types(d);
1255 	if (err < 0) {
1256 		pr_debug("btf_dedup_compact_types failed:%d\n", err);
1257 		goto done;
1258 	}
1259 	err = btf_dedup_remap_types(d);
1260 	if (err < 0) {
1261 		pr_debug("btf_dedup_remap_types failed:%d\n", err);
1262 		goto done;
1263 	}
1264 
1265 done:
1266 	btf_dedup_free(d);
1267 	return err;
1268 }
1269 
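/*
 * Illustrative usage sketch (editorial addition, not from upstream btf.c): a
 * typical caller parses .BTF/.BTF.ext out of an ELF object, dedups the types
 * in place with default options (NULL opts), and then loads the result into
 * the kernel. "prog.o" is a hypothetical path; error handling is abbreviated.
 *
 *	struct btf_ext *btf_ext = NULL;
 *	struct btf *btf;
 *	int err;
 *
 *	btf = btf__parse_elf("prog.o", &btf_ext);
 *	if (IS_ERR(btf))
 *		return PTR_ERR(btf);
 *
 *	err = btf__dedup(btf, btf_ext, NULL);
 *	if (!err)
 *		err = btf__load(btf);
 *
 *	btf_ext__free(btf_ext);
 *	btf__free(btf);
 *	return err;
 */
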
1270 #define BTF_UNPROCESSED_ID ((__u32)-1)
1271 #define BTF_IN_PROGRESS_ID ((__u32)-2)
1272 
1273 struct btf_dedup {
1274 	/* .BTF section to be deduped in-place */
1275 	struct btf *btf;
1276 	/*
1277 	 * Optional .BTF.ext section. When provided, any strings referenced
1278 	 * from it will be taken into account when deduping strings
1279 	 */
1280 	struct btf_ext *btf_ext;
1281 	/*
1282 	 * This is a map from any type's signature hash to a list of possible
1283 	 * canonical representative type candidates. Hash collisions are
1284 	 * ignored, so even types of various kinds can share same list of
1285 	 * candidates, which is fine because we rely on subsequent
1286 	 * btf_xxx_equal() checks to authoritatively verify type equality.
1287 	 */
1288 	struct hashmap *dedup_table;
1289 	/* Canonical types map */
1290 	__u32 *map;
1291 	/* Hypothetical mapping, used during type graph equivalence checks */
1292 	__u32 *hypot_map;
1293 	__u32 *hypot_list;
1294 	size_t hypot_cnt;
1295 	size_t hypot_cap;
1296 	/* Various option modifying behavior of algorithm */
1297 	struct btf_dedup_opts opts;
1298 };
1299 
1300 struct btf_str_ptr {
1301 	const char *str;
1302 	__u32 new_off;
1303 	bool used;
1304 };
1305 
1306 struct btf_str_ptrs {
1307 	struct btf_str_ptr *ptrs;
1308 	const char *data;
1309 	__u32 cnt;
1310 	__u32 cap;
1311 };
1312 
1313 static long hash_combine(long h, long value)
1314 {
1315 	return h * 31 + value;
1316 }
1317 
1318 #define for_each_dedup_cand(d, node, hash) \
1319 	hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
1320 
1321 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
1322 {
1323 	return hashmap__append(d->dedup_table,
1324 			       (void *)hash, (void *)(long)type_id);
1325 }
1326 
1327 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
1328 				   __u32 from_id, __u32 to_id)
1329 {
1330 	if (d->hypot_cnt == d->hypot_cap) {
1331 		__u32 *new_list;
1332 
1333 		d->hypot_cap += max(16, d->hypot_cap / 2);
1334 		new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
1335 		if (!new_list)
1336 			return -ENOMEM;
1337 		d->hypot_list = new_list;
1338 	}
1339 	d->hypot_list[d->hypot_cnt++] = from_id;
1340 	d->hypot_map[from_id] = to_id;
1341 	return 0;
1342 }
1343 
1344 static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
1345 {
1346 	int i;
1347 
1348 	for (i = 0; i < d->hypot_cnt; i++)
1349 		d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
1350 	d->hypot_cnt = 0;
1351 }
1352 
1353 static void btf_dedup_free(struct btf_dedup *d)
1354 {
1355 	hashmap__free(d->dedup_table);
1356 	d->dedup_table = NULL;
1357 
1358 	free(d->map);
1359 	d->map = NULL;
1360 
1361 	free(d->hypot_map);
1362 	d->hypot_map = NULL;
1363 
1364 	free(d->hypot_list);
1365 	d->hypot_list = NULL;
1366 
1367 	free(d);
1368 }
1369 
1370 static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
1371 {
1372 	return (size_t)key;
1373 }
1374 
1375 static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
1376 {
1377 	return 0;
1378 }
1379 
1380 static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
1381 {
1382 	return k1 == k2;
1383 }
1384 
1385 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1386 				       const struct btf_dedup_opts *opts)
1387 {
1388 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
1389 	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
1390 	int i, err = 0;
1391 
1392 	if (!d)
1393 		return ERR_PTR(-ENOMEM);
1394 
1395 	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
1396 	/* dedup_table_size is now used only to force collisions in tests */
1397 	if (opts && opts->dedup_table_size == 1)
1398 		hash_fn = btf_dedup_collision_hash_fn;
1399 
1400 	d->btf = btf;
1401 	d->btf_ext = btf_ext;
1402 
1403 	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
1404 	if (IS_ERR(d->dedup_table)) {
1405 		err = PTR_ERR(d->dedup_table);
1406 		d->dedup_table = NULL;
1407 		goto done;
1408 	}
1409 
1410 	d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1411 	if (!d->map) {
1412 		err = -ENOMEM;
1413 		goto done;
1414 	}
1415 	/* special BTF "void" type is made canonical immediately */
1416 	d->map[0] = 0;
1417 	for (i = 1; i <= btf->nr_types; i++) {
1418 		struct btf_type *t = d->btf->types[i];
1419 
1420 		/* VAR and DATASEC are never deduped and are self-canonical */
1421 		if (btf_is_var(t) || btf_is_datasec(t))
1422 			d->map[i] = i;
1423 		else
1424 			d->map[i] = BTF_UNPROCESSED_ID;
1425 	}
1426 
1427 	d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1428 	if (!d->hypot_map) {
1429 		err = -ENOMEM;
1430 		goto done;
1431 	}
1432 	for (i = 0; i <= btf->nr_types; i++)
1433 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
1434 
1435 done:
1436 	if (err) {
1437 		btf_dedup_free(d);
1438 		return ERR_PTR(err);
1439 	}
1440 
1441 	return d;
1442 }
1443 
1444 typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
1445 
1446 /*
1447  * Iterate over all possible places in .BTF and .BTF.ext that can reference
1448  * string and pass pointer to it to a provided callback `fn`.
1449  */
1450 static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
1451 {
1452 	void *line_data_cur, *line_data_end;
1453 	int i, j, r, rec_size;
1454 	struct btf_type *t;
1455 
1456 	for (i = 1; i <= d->btf->nr_types; i++) {
1457 		t = d->btf->types[i];
1458 		r = fn(&t->name_off, ctx);
1459 		if (r)
1460 			return r;
1461 
1462 		switch (btf_kind(t)) {
1463 		case BTF_KIND_STRUCT:
1464 		case BTF_KIND_UNION: {
1465 			struct btf_member *m = btf_members(t);
1466 			__u16 vlen = btf_vlen(t);
1467 
1468 			for (j = 0; j < vlen; j++) {
1469 				r = fn(&m->name_off, ctx);
1470 				if (r)
1471 					return r;
1472 				m++;
1473 			}
1474 			break;
1475 		}
1476 		case BTF_KIND_ENUM: {
1477 			struct btf_enum *m = btf_enum(t);
1478 			__u16 vlen = btf_vlen(t);
1479 
1480 			for (j = 0; j < vlen; j++) {
1481 				r = fn(&m->name_off, ctx);
1482 				if (r)
1483 					return r;
1484 				m++;
1485 			}
1486 			break;
1487 		}
1488 		case BTF_KIND_FUNC_PROTO: {
1489 			struct btf_param *m = btf_params(t);
1490 			__u16 vlen = btf_vlen(t);
1491 
1492 			for (j = 0; j < vlen; j++) {
1493 				r = fn(&m->name_off, ctx);
1494 				if (r)
1495 					return r;
1496 				m++;
1497 			}
1498 			break;
1499 		}
1500 		default:
1501 			break;
1502 		}
1503 	}
1504 
1505 	if (!d->btf_ext)
1506 		return 0;
1507 
1508 	line_data_cur = d->btf_ext->line_info.info;
1509 	line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
1510 	rec_size = d->btf_ext->line_info.rec_size;
1511 
1512 	while (line_data_cur < line_data_end) {
1513 		struct btf_ext_info_sec *sec = line_data_cur;
1514 		struct bpf_line_info_min *line_info;
1515 		__u32 num_info = sec->num_info;
1516 
1517 		r = fn(&sec->sec_name_off, ctx);
1518 		if (r)
1519 			return r;
1520 
1521 		line_data_cur += sizeof(struct btf_ext_info_sec);
1522 		for (i = 0; i < num_info; i++) {
1523 			line_info = line_data_cur;
1524 			r = fn(&line_info->file_name_off, ctx);
1525 			if (r)
1526 				return r;
1527 			r = fn(&line_info->line_off, ctx);
1528 			if (r)
1529 				return r;
1530 			line_data_cur += rec_size;
1531 		}
1532 	}
1533 
1534 	return 0;
1535 }
1536 
1537 static int str_sort_by_content(const void *a1, const void *a2)
1538 {
1539 	const struct btf_str_ptr *p1 = a1;
1540 	const struct btf_str_ptr *p2 = a2;
1541 
1542 	return strcmp(p1->str, p2->str);
1543 }
1544 
1545 static int str_sort_by_offset(const void *a1, const void *a2)
1546 {
1547 	const struct btf_str_ptr *p1 = a1;
1548 	const struct btf_str_ptr *p2 = a2;
1549 
1550 	if (p1->str != p2->str)
1551 		return p1->str < p2->str ? -1 : 1;
1552 	return 0;
1553 }
1554 
1555 static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
1556 {
1557 	const struct btf_str_ptr *p = pelem;
1558 
1559 	if (str_ptr != p->str)
1560 		return (const char *)str_ptr < p->str ? -1 : 1;
1561 	return 0;
1562 }
1563 
1564 static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
1565 {
1566 	struct btf_str_ptrs *strs;
1567 	struct btf_str_ptr *s;
1568 
1569 	if (*str_off_ptr == 0)
1570 		return 0;
1571 
1572 	strs = ctx;
1573 	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1574 		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1575 	if (!s)
1576 		return -EINVAL;
1577 	s->used = true;
1578 	return 0;
1579 }
1580 
1581 static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
1582 {
1583 	struct btf_str_ptrs *strs;
1584 	struct btf_str_ptr *s;
1585 
1586 	if (*str_off_ptr == 0)
1587 		return 0;
1588 
1589 	strs = ctx;
1590 	s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1591 		    sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1592 	if (!s)
1593 		return -EINVAL;
1594 	*str_off_ptr = s->new_off;
1595 	return 0;
1596 }
1597 
1598 /*
1599  * Dedup string and filter out those that are not referenced from either .BTF
1600  * or .BTF.ext (if provided) sections.
1601  *
1602  * This is done by building index of all strings in BTF's string section,
1603  * then iterating over all entities that can reference strings (e.g., type
1604  * names, struct field names, .BTF.ext line info, etc) and marking corresponding
1605  * strings as used. After that all used strings are deduped and compacted into
1606  * sequential blob of memory and new offsets are calculated. Then all the string
1607  * references are iterated again and rewritten using new offsets.
1608  */
1609 static int btf_dedup_strings(struct btf_dedup *d)
1610 {
1611 	const struct btf_header *hdr = d->btf->hdr;
1612 	char *start = (char *)d->btf->nohdr_data + hdr->str_off;
1613 	char *end = start + d->btf->hdr->str_len;
1614 	char *p = start, *tmp_strs = NULL;
1615 	struct btf_str_ptrs strs = {
1616 		.cnt = 0,
1617 		.cap = 0,
1618 		.ptrs = NULL,
1619 		.data = start,
1620 	};
1621 	int i, j, err = 0, grp_idx;
1622 	bool grp_used;
1623 
1624 	/* build index of all strings */
1625 	while (p < end) {
1626 		if (strs.cnt + 1 > strs.cap) {
1627 			struct btf_str_ptr *new_ptrs;
1628 
1629 			strs.cap += max(strs.cnt / 2, 16);
1630 			new_ptrs = realloc(strs.ptrs,
1631 					   sizeof(strs.ptrs[0]) * strs.cap);
1632 			if (!new_ptrs) {
1633 				err = -ENOMEM;
1634 				goto done;
1635 			}
1636 			strs.ptrs = new_ptrs;
1637 		}
1638 
1639 		strs.ptrs[strs.cnt].str = p;
1640 		strs.ptrs[strs.cnt].used = false;
1641 
1642 		p += strlen(p) + 1;
1643 		strs.cnt++;
1644 	}
1645 
1646 	/* temporary storage for deduplicated strings */
1647 	tmp_strs = malloc(d->btf->hdr->str_len);
1648 	if (!tmp_strs) {
1649 		err = -ENOMEM;
1650 		goto done;
1651 	}
1652 
1653 	/* mark all used strings */
1654 	strs.ptrs[0].used = true;
1655 	err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
1656 	if (err)
1657 		goto done;
1658 
1659 	/* sort strings by content, so that we can identify duplicates */
1660 	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
1661 
1662 	/*
1663 	 * iterate groups of equal strings and if any instance in a group was
1664 	 * referenced, emit single instance and remember new offset
1665 	 */
1666 	p = tmp_strs;
1667 	grp_idx = 0;
1668 	grp_used = strs.ptrs[0].used;
1669 	/* iterate past end to avoid code duplication after loop */
1670 	for (i = 1; i <= strs.cnt; i++) {
1671 		/*
1672 		 * when i == strs.cnt, we want to skip string comparison and go
1673 		 * straight to handling last group of strings (otherwise we'd
1674 		 * need to handle last group after the loop w/ duplicated code)
1675 		 */
1676 		if (i < strs.cnt &&
1677 		    !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
1678 			grp_used = grp_used || strs.ptrs[i].used;
1679 			continue;
1680 		}
1681 
1682 		/*
1683 		 * this check would have been required after the loop to handle
1684 		 * last group of strings, but due to <= condition in a loop
1685 		 * we avoid that duplication
1686 		 */
1687 		if (grp_used) {
1688 			int new_off = p - tmp_strs;
1689 			__u32 len = strlen(strs.ptrs[grp_idx].str);
1690 
1691 			memmove(p, strs.ptrs[grp_idx].str, len + 1);
1692 			for (j = grp_idx; j < i; j++)
1693 				strs.ptrs[j].new_off = new_off;
1694 			p += len + 1;
1695 		}
1696 
1697 		if (i < strs.cnt) {
1698 			grp_idx = i;
1699 			grp_used = strs.ptrs[i].used;
1700 		}
1701 	}
1702 
1703 	/* replace original strings with deduped ones */
1704 	d->btf->hdr->str_len = p - tmp_strs;
1705 	memmove(start, tmp_strs, d->btf->hdr->str_len);
1706 	end = start + d->btf->hdr->str_len;
1707 
1708 	/* restore original order for further binary search lookups */
1709 	qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
1710 
1711 	/* remap string offsets */
1712 	err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
1713 	if (err)
1714 		goto done;
1715 
1716 	d->btf->hdr->str_len = end - start;
1717 
1718 done:
1719 	free(tmp_strs);
1720 	free(strs.ptrs);
1721 	return err;
1722 }
1723 
1724 static long btf_hash_common(struct btf_type *t)
1725 {
1726 	long h;
1727 
1728 	h = hash_combine(0, t->name_off);
1729 	h = hash_combine(h, t->info);
1730 	h = hash_combine(h, t->size);
1731 	return h;
1732 }
1733 
1734 static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
1735 {
1736 	return t1->name_off == t2->name_off &&
1737 	       t1->info == t2->info &&
1738 	       t1->size == t2->size;
1739 }
1740 
1741 /* Calculate type signature hash of INT. */
1742 static long btf_hash_int(struct btf_type *t)
1743 {
1744 	__u32 info = *(__u32 *)(t + 1);
1745 	long h;
1746 
1747 	h = btf_hash_common(t);
1748 	h = hash_combine(h, info);
1749 	return h;
1750 }
1751 
1752 /* Check structural equality of two INTs. */
1753 static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
1754 {
1755 	__u32 info1, info2;
1756 
1757 	if (!btf_equal_common(t1, t2))
1758 		return false;
1759 	info1 = *(__u32 *)(t1 + 1);
1760 	info2 = *(__u32 *)(t2 + 1);
1761 	return info1 == info2;
1762 }
1763 
1764 /* Calculate type signature hash of ENUM. */
1765 static long btf_hash_enum(struct btf_type *t)
1766 {
1767 	long h;
1768 
1769 	/* don't hash vlen and enum members to support enum fwd resolving */
1770 	h = hash_combine(0, t->name_off);
1771 	h = hash_combine(h, t->info & ~0xffff);
1772 	h = hash_combine(h, t->size);
1773 	return h;
1774 }
1775 
1776 /* Check structural equality of two ENUMs. */
1777 static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
1778 {
1779 	const struct btf_enum *m1, *m2;
1780 	__u16 vlen;
1781 	int i;
1782 
1783 	if (!btf_equal_common(t1, t2))
1784 		return false;
1785 
1786 	vlen = btf_vlen(t1);
1787 	m1 = btf_enum(t1);
1788 	m2 = btf_enum(t2);
1789 	for (i = 0; i < vlen; i++) {
1790 		if (m1->name_off != m2->name_off || m1->val != m2->val)
1791 			return false;
1792 		m1++;
1793 		m2++;
1794 	}
1795 	return true;
1796 }
1797 
1798 static inline bool btf_is_enum_fwd(struct btf_type *t)
1799 {
1800 	return btf_is_enum(t) && btf_vlen(t) == 0;
1801 }
1802 
1803 static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
1804 {
1805 	if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
1806 		return btf_equal_enum(t1, t2);
1807 	/* ignore vlen when comparing */
1808 	return t1->name_off == t2->name_off &&
1809 	       (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
1810 	       t1->size == t2->size;
1811 }
1812 
1813 /*
1814  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
1815  * as referenced type IDs equivalence is established separately during type
1816  * graph equivalence check algorithm.
1817  */
1818 static long btf_hash_struct(struct btf_type *t)
1819 {
1820 	const struct btf_member *member = btf_members(t);
1821 	__u32 vlen = btf_vlen(t);
1822 	long h = btf_hash_common(t);
1823 	int i;
1824 
1825 	for (i = 0; i < vlen; i++) {
1826 		h = hash_combine(h, member->name_off);
1827 		h = hash_combine(h, member->offset);
1828 		/* no hashing of referenced type ID, it can be unresolved yet */
1829 		member++;
1830 	}
1831 	return h;
1832 }
1833 
1834 /*
1835  * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type
1836  * IDs. This check is performed during type graph equivalence check and
1837  * referenced types equivalence is checked separately.
1838  */
1839 static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
1840 {
1841 	const struct btf_member *m1, *m2;
1842 	__u16 vlen;
1843 	int i;
1844 
1845 	if (!btf_equal_common(t1, t2))
1846 		return false;
1847 
1848 	vlen = btf_vlen(t1);
1849 	m1 = btf_members(t1);
1850 	m2 = btf_members(t2);
1851 	for (i = 0; i < vlen; i++) {
1852 		if (m1->name_off != m2->name_off || m1->offset != m2->offset)
1853 			return false;
1854 		m1++;
1855 		m2++;
1856 	}
1857 	return true;
1858 }
1859 
1860 /*
1861  * Calculate type signature hash of ARRAY, including referenced type IDs,
1862  * under assumption that they were already resolved to canonical type IDs and
1863  * are not going to change.
1864  */
1865 static long btf_hash_array(struct btf_type *t)
1866 {
1867 	const struct btf_array *info = btf_array(t);
1868 	long h = btf_hash_common(t);
1869 
1870 	h = hash_combine(h, info->type);
1871 	h = hash_combine(h, info->index_type);
1872 	h = hash_combine(h, info->nelems);
1873 	return h;
1874 }
1875 
1876 /*
1877  * Check exact equality of two ARRAYs, taking into account referenced
1878  * type IDs, under assumption that they were already resolved to canonical
1879  * type IDs and are not going to change.
1880  * This function is called during reference types deduplication to compare
1881  * ARRAY to potential canonical representative.
1882  */
1883 static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
1884 {
1885 	const struct btf_array *info1, *info2;
1886 
1887 	if (!btf_equal_common(t1, t2))
1888 		return false;
1889 
1890 	info1 = btf_array(t1);
1891 	info2 = btf_array(t2);
1892 	return info1->type == info2->type &&
1893 	       info1->index_type == info2->index_type &&
1894 	       info1->nelems == info2->nelems;
1895 }
1896 
1897 /*
1898  * Check structural compatibility of two ARRAYs, ignoring referenced type
1899  * IDs. This check is performed during type graph equivalence check and
1900  * referenced types equivalence is checked separately.
1901  */
1902 static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
1903 {
1904 	if (!btf_equal_common(t1, t2))
1905 		return false;
1906 
1907 	return btf_array(t1)->nelems == btf_array(t2)->nelems;
1908 }
1909 
1910 /*
1911  * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
1912  * under assumption that they were already resolved to canonical type IDs and
1913  * are not going to change.
1914  */
1915 static long btf_hash_fnproto(struct btf_type *t)
1916 {
1917 	const struct btf_param *member = btf_params(t);
1918 	__u16 vlen = btf_vlen(t);
1919 	long h = btf_hash_common(t);
1920 	int i;
1921 
1922 	for (i = 0; i < vlen; i++) {
1923 		h = hash_combine(h, member->name_off);
1924 		h = hash_combine(h, member->type);
1925 		member++;
1926 	}
1927 	return h;
1928 }
1929 
1930 /*
1931  * Check exact equality of two FUNC_PROTOs, taking into account referenced
1932  * type IDs, under assumption that they were already resolved to canonical
1933  * type IDs and are not going to change.
1934  * This function is called during reference types deduplication to compare
1935  * FUNC_PROTO to potential canonical representative.
1936  */
1937 static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
1938 {
1939 	const struct btf_param *m1, *m2;
1940 	__u16 vlen;
1941 	int i;
1942 
1943 	if (!btf_equal_common(t1, t2))
1944 		return false;
1945 
1946 	vlen = btf_vlen(t1);
1947 	m1 = btf_params(t1);
1948 	m2 = btf_params(t2);
1949 	for (i = 0; i < vlen; i++) {
1950 		if (m1->name_off != m2->name_off || m1->type != m2->type)
1951 			return false;
1952 		m1++;
1953 		m2++;
1954 	}
1955 	return true;
1956 }
1957 
1958 /*
1959  * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
1960  * IDs. This check is performed during type graph equivalence check and
1961  * referenced types equivalence is checked separately.
1962  */
1963 static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
1964 {
1965 	const struct btf_param *m1, *m2;
1966 	__u16 vlen;
1967 	int i;
1968 
1969 	/* skip return type ID */
1970 	if (t1->name_off != t2->name_off || t1->info != t2->info)
1971 		return false;
1972 
1973 	vlen = btf_vlen(t1);
1974 	m1 = btf_params(t1);
1975 	m2 = btf_params(t2);
1976 	for (i = 0; i < vlen; i++) {
1977 		if (m1->name_off != m2->name_off)
1978 			return false;
1979 		m1++;
1980 		m2++;
1981 	}
1982 	return true;
1983 }
1984 
1985 /*
1986  * Deduplicate primitive types, which can't reference other types, by calculating
1987  * their type signature hash and comparing it with any possible canonical
1988  * candidate. If no canonical candidate matches, the type itself is marked as
1989  * canonical and is added into `btf_dedup->dedup_table` as another candidate.
1990  */
1991 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
1992 {
1993 	struct btf_type *t = d->btf->types[type_id];
1994 	struct hashmap_entry *hash_entry;
1995 	struct btf_type *cand;
1996 	/* if we don't find equivalent type, then we are canonical */
1997 	__u32 new_id = type_id;
1998 	__u32 cand_id;
1999 	long h;
2000 
2001 	switch (btf_kind(t)) {
2002 	case BTF_KIND_CONST:
2003 	case BTF_KIND_VOLATILE:
2004 	case BTF_KIND_RESTRICT:
2005 	case BTF_KIND_PTR:
2006 	case BTF_KIND_TYPEDEF:
2007 	case BTF_KIND_ARRAY:
2008 	case BTF_KIND_STRUCT:
2009 	case BTF_KIND_UNION:
2010 	case BTF_KIND_FUNC:
2011 	case BTF_KIND_FUNC_PROTO:
2012 	case BTF_KIND_VAR:
2013 	case BTF_KIND_DATASEC:
2014 		return 0;
2015 
2016 	case BTF_KIND_INT:
2017 		h = btf_hash_int(t);
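		/* The dedup table packs candidate type IDs into the hashmap's
		 * pointer-sized value, hence the (__u32)(long) casts below.
		 */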
2018 		for_each_dedup_cand(d, hash_entry, h) {
2019 			cand_id = (__u32)(long)hash_entry->value;
2020 			cand = d->btf->types[cand_id];
2021 			if (btf_equal_int(t, cand)) {
2022 				new_id = cand_id;
2023 				break;
2024 			}
2025 		}
2026 		break;
2027 
2028 	case BTF_KIND_ENUM:
2029 		h = btf_hash_enum(t);
2030 		for_each_dedup_cand(d, hash_entry, h) {
2031 			cand_id = (__u32)(long)hash_entry->value;
2032 			cand = d->btf->types[cand_id];
2033 			if (btf_equal_enum(t, cand)) {
2034 				new_id = cand_id;
2035 				break;
2036 			}
2037 			if (d->opts.dont_resolve_fwds)
2038 				continue;
2039 			if (btf_compat_enum(t, cand)) {
2040 				if (btf_is_enum_fwd(t)) {
2041 					/* resolve fwd to full enum */
2042 					new_id = cand_id;
2043 					break;
2044 				}
2045 				/* resolve canonical enum fwd to full enum */
2046 				d->map[cand_id] = type_id;
2047 			}
2048 		}
2049 		break;
2050 
2051 	case BTF_KIND_FWD:
2052 		h = btf_hash_common(t);
2053 		for_each_dedup_cand(d, hash_entry, h) {
2054 			cand_id = (__u32)(long)hash_entry->value;
2055 			cand = d->btf->types[cand_id];
2056 			if (btf_equal_common(t, cand)) {
2057 				new_id = cand_id;
2058 				break;
2059 			}
2060 		}
2061 		break;
2062 
2063 	default:
2064 		return -EINVAL;
2065 	}
2066 
2067 	d->map[type_id] = new_id;
2068 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2069 		return -ENOMEM;
2070 
2071 	return 0;
2072 }
2073 
2074 static int btf_dedup_prim_types(struct btf_dedup *d)
2075 {
2076 	int i, err;
2077 
2078 	for (i = 1; i <= d->btf->nr_types; i++) {
2079 		err = btf_dedup_prim_type(d, i);
2080 		if (err)
2081 			return err;
2082 	}
2083 	return 0;
2084 }
2085 
2086 /*
2087  * Check whether the type is already mapped to a canonical one (possibly itself).
2088  */
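/*
 * Unmapped entries hold sentinel values (BTF_UNPROCESSED_ID/BTF_IN_PROGRESS_ID)
 * that are greater than BTF_MAX_NR_TYPES, hence the <= comparison below.
 */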
2089 static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
2090 {
2091 	return d->map[type_id] <= BTF_MAX_NR_TYPES;
2092 }
2093 
2094 /*
2095  * Resolve type ID into its canonical type ID, if any; otherwise return original
2096  * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
2097  * STRUCT/UNION link and resolve it into canonical type ID as well.
2098  */
2099 static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
2100 {
2101 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2102 		type_id = d->map[type_id];
2103 	return type_id;
2104 }
2105 
2106 /*
2107  * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
2108  * type ID.
2109  */
2110 static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
2111 {
2112 	__u32 orig_type_id = type_id;
2113 
2114 	if (!btf_is_fwd(d->btf->types[type_id]))
2115 		return type_id;
2116 
2117 	while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
2118 		type_id = d->map[type_id];
2119 
2120 	if (!btf_is_fwd(d->btf->types[type_id]))
2121 		return type_id;
2122 
2123 	return orig_type_id;
2124 }
2125 
2126 
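/*
 * For BTF_KIND_FWD, the kind flag distinguishes a forward-declared union
 * (kflag set) from a forward-declared struct (kflag clear).
 */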
2127 static inline __u16 btf_fwd_kind(struct btf_type *t)
2128 {
2129 	return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
2130 }
2131 
2132 /*
2133  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
2134  * call it "candidate graph" in this description for brevity) to a type graph
2135  * formed by (potential) canonical struct/union ("canonical graph" for brevity
2136  * here, though keep in mind that not all types in canonical graph are
2137  * necessarily canonical representatives themselves, some of them might be
2138  * duplicates or their uniqueness might not have been established yet).
2139  * Returns:
2140  *  - >0, if type graphs are equivalent;
2141  *  -  0, if not equivalent;
2142  *  - <0, on error.
2143  *
2144  * Algorithm performs side-by-side DFS traversal of both type graphs and checks
2145  * equivalence of BTF types at each step. If at any point BTF types in candidate
2146  * and canonical graphs are not compatible structurally, whole graphs are
2147  * incompatible. If types are structurally equivalent (i.e., all information
2148  * except referenced type IDs is exactly the same), a mapping from `canon_id` to
2149  * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
2150  * If a type references other types, then those referenced types are checked
2151  * for equivalence recursively.
2152  *
2153  * During DFS traversal, if we find that for current `canon_id` type we
2154  * already have some mapping in hypothetical map, we check for two possible
2155  * situations:
2156  *   - `canon_id` is mapped to exactly the same type as `cand_id`. This will
2157  *     happen when type graphs have cycles. In this case we assume those two
2158  *     types are equivalent.
2159  *   - `canon_id` is mapped to a different type. This is a contradiction in
2160  *     our hypothetical mapping, because the same type in the canonical graph
2161  *     would correspond to two different types in the candidate graph, which
2162  *     shouldn't happen for equivalent type graphs. This condition terminates
2163  *     the equivalence check with a negative result.
2164  *
2165  * If type graph traversal exhausts the types to check and finds no contradiction,
2166  * then type graphs are equivalent.
2167  *
2168  * When checking types for equivalence, there is one special case: FWD types.
2169  * If FWD type resolution is allowed and one of the types (either from canonical
2170  * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
2171  * flag) and their names match, hypothetical mapping is updated to point from
2172  * FWD to STRUCT/UNION. If the graphs are successfully determined to be equivalent,
2173  * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently.
2174  *
2175  * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
2176  * if there are two identically named (or anonymous) structs/unions that are
2177  * structurally compatible, one of which has a FWD field, while the other's
2178  * corresponding field is a concrete STRUCT/UNION, but according to the C
2179  * sources they are different structs/unions referencing different types with
2180  * the same name. This is extremely unlikely to happen, but the btf_dedup API
2181  * allows disabling FWD resolution if this logic causes problems.
2182  *
2183  * Doing FWD resolution means that both candidate and/or canonical graphs can
2184  * consist of portions of the type graph that come from multiple compilation
2185  * units. This is because types within a single compilation unit are always
2186  * deduplicated and FWDs are already resolved, if the referenced struct/union
2187  * definition is available. So, if we had an unresolved FWD and found a
2188  * corresponding STRUCT/UNION, they must come from different compilation units.
2189  * This consequently means that when we "link" a FWD to its corresponding
2190  * STRUCT/UNION, the type graph will likely have at least two different BTF
2191  * types that describe the same type (e.g., most probably there will be two
2192  * different BTF types for the same 'int' primitive type) and could even have
2193  * "overlapping" parts of the type graph that describe the same subset of types.
2194  *
2195  * This in turn means that our assumption that each type in canonical graph
2196  * must correspond to exactly one type in candidate graph might not hold
2197  * anymore and will make it harder to detect contradictions using hypothetical
2198  * map. To handle this problem, we follow FWD -> STRUCT/UNION resolution only
2199  * in the canonical graph; FWDs in candidate graphs are never
2200  * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
2201  * that can occur:
2202  *   - Both types in canonical and candidate graphs are FWDs. If they are
2203  *     structurally equivalent, then they can either be both resolved to the
2204  *     same STRUCT/UNION or not resolved at all. In both cases they are
2205  *     equivalent and there is no need to resolve FWD on candidate side.
2206  *   - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
2207  *     so there is nothing to resolve either; the algorithm checks equivalence anyway.
2208  *   - Type in canonical graph is FWD, while type in candidate is concrete
2209  *     STRUCT/UNION. In this case candidate graph comes from single compilation
2210  *     unit, so there is exactly one BTF type for each unique C type. After
2211  *     resolving FWD into STRUCT/UNION, there might be more than one BTF type
2212  *     in canonical graph mapping to single BTF type in candidate graph, but
2213  *     because hypothetical mapping maps from canonical to candidate types, it's
2214  *     alright, and we still maintain the property of having single `canon_id`
2215  *     mapping to single `cand_id` (there could be two different `canon_id`
2216  *     mapped to the same `cand_id`, but it's not contradictory).
2217  *   - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
2218  *     graph is FWD. In this case we are just going to check compatibility of
2219  *     STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
2220  *     assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
2221  *     a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
2222  *     turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
2223  *     canonical graph.
2224  */
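/*
 * Example: two compilation units both define
 *
 *     struct list_head { struct list_head *next; };
 *
 * Comparing candidate struct C against canonical struct N first records
 * N -> C in the hypothetical map, then recurses through the 'next' member:
 * PTR(N') vs PTR(C') records N' -> C', and the nested struct comparison hits
 * the already-recorded N -> C entry and returns 1 instead of recursing
 * forever. Reaching N again with a hypothetical mapping to anything other
 * than C would be the contradiction described above and would return 0.
 */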
2225 static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2226 			      __u32 canon_id)
2227 {
2228 	struct btf_type *cand_type;
2229 	struct btf_type *canon_type;
2230 	__u32 hypot_type_id;
2231 	__u16 cand_kind;
2232 	__u16 canon_kind;
2233 	int i, eq;
2234 
2235 	/* if both resolve to the same canonical, they must be equivalent */
2236 	if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
2237 		return 1;
2238 
2239 	canon_id = resolve_fwd_id(d, canon_id);
2240 
2241 	hypot_type_id = d->hypot_map[canon_id];
2242 	if (hypot_type_id <= BTF_MAX_NR_TYPES)
2243 		return hypot_type_id == cand_id;
2244 
2245 	if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
2246 		return -ENOMEM;
2247 
2248 	cand_type = d->btf->types[cand_id];
2249 	canon_type = d->btf->types[canon_id];
2250 	cand_kind = btf_kind(cand_type);
2251 	canon_kind = btf_kind(canon_type);
2252 
2253 	if (cand_type->name_off != canon_type->name_off)
2254 		return 0;
2255 
2256 	/* FWD <--> STRUCT/UNION equivalence check, if enabled */
2257 	if (!d->opts.dont_resolve_fwds
2258 	    && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
2259 	    && cand_kind != canon_kind) {
2260 		__u16 real_kind;
2261 		__u16 fwd_kind;
2262 
2263 		if (cand_kind == BTF_KIND_FWD) {
2264 			real_kind = canon_kind;
2265 			fwd_kind = btf_fwd_kind(cand_type);
2266 		} else {
2267 			real_kind = cand_kind;
2268 			fwd_kind = btf_fwd_kind(canon_type);
2269 		}
2270 		return fwd_kind == real_kind;
2271 	}
2272 
2273 	if (cand_kind != canon_kind)
2274 		return 0;
2275 
2276 	switch (cand_kind) {
2277 	case BTF_KIND_INT:
2278 		return btf_equal_int(cand_type, canon_type);
2279 
2280 	case BTF_KIND_ENUM:
2281 		if (d->opts.dont_resolve_fwds)
2282 			return btf_equal_enum(cand_type, canon_type);
2283 		else
2284 			return btf_compat_enum(cand_type, canon_type);
2285 
2286 	case BTF_KIND_FWD:
2287 		return btf_equal_common(cand_type, canon_type);
2288 
2289 	case BTF_KIND_CONST:
2290 	case BTF_KIND_VOLATILE:
2291 	case BTF_KIND_RESTRICT:
2292 	case BTF_KIND_PTR:
2293 	case BTF_KIND_TYPEDEF:
2294 	case BTF_KIND_FUNC:
2295 		if (cand_type->info != canon_type->info)
2296 			return 0;
2297 		return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2298 
2299 	case BTF_KIND_ARRAY: {
2300 		const struct btf_array *cand_arr, *canon_arr;
2301 
2302 		if (!btf_compat_array(cand_type, canon_type))
2303 			return 0;
2304 		cand_arr = btf_array(cand_type);
2305 		canon_arr = btf_array(canon_type);
2306 		eq = btf_dedup_is_equiv(d,
2307 			cand_arr->index_type, canon_arr->index_type);
2308 		if (eq <= 0)
2309 			return eq;
2310 		return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
2311 	}
2312 
2313 	case BTF_KIND_STRUCT:
2314 	case BTF_KIND_UNION: {
2315 		const struct btf_member *cand_m, *canon_m;
2316 		__u16 vlen;
2317 
2318 		if (!btf_shallow_equal_struct(cand_type, canon_type))
2319 			return 0;
2320 		vlen = btf_vlen(cand_type);
2321 		cand_m = btf_members(cand_type);
2322 		canon_m = btf_members(canon_type);
2323 		for (i = 0; i < vlen; i++) {
2324 			eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
2325 			if (eq <= 0)
2326 				return eq;
2327 			cand_m++;
2328 			canon_m++;
2329 		}
2330 
2331 		return 1;
2332 	}
2333 
2334 	case BTF_KIND_FUNC_PROTO: {
2335 		const struct btf_param *cand_p, *canon_p;
2336 		__u16 vlen;
2337 
2338 		if (!btf_compat_fnproto(cand_type, canon_type))
2339 			return 0;
2340 		eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2341 		if (eq <= 0)
2342 			return eq;
2343 		vlen = btf_vlen(cand_type);
2344 		cand_p = btf_params(cand_type);
2345 		canon_p = btf_params(canon_type);
2346 		for (i = 0; i < vlen; i++) {
2347 			eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
2348 			if (eq <= 0)
2349 				return eq;
2350 			cand_p++;
2351 			canon_p++;
2352 		}
2353 		return 1;
2354 	}
2355 
2356 	default:
2357 		return -EINVAL;
2358 	}
2359 	return 0;
2360 }
2361 
2362 /*
2363  * Use hypothetical mapping, produced by successful type graph equivalence
2364  * check, to augment existing struct/union canonical mapping, where possible.
2365  *
2366  * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
2367  * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
2368  * it doesn't matter if FWD type was part of canonical graph or candidate one,
2369  * we are recording the mapping anyway. As opposed to carefulness required
2370  * for struct/union correspondence mapping (described below), for FWD resolution
2371  * it's not important, as by the time the FWD type (a reference type) is
2372  * deduplicated, all structs/unions will already have been deduped anyway.
2373  *
2374  * Recording STRUCT/UNION mapping is purely a performance optimization and is
2375  * not required for correctness. It needs to be done carefully to ensure that
2376  * struct/union from candidate's type graph is not mapped into corresponding
2377  * struct/union from canonical type graph that itself hasn't been resolved into
2378  * canonical representative. The only guarantee we have is that canonical
2379  * struct/union was determined as canonical and that won't change. But any
2380  * types referenced through that struct/union fields could have been not yet
2381  * resolved, so in case like that it's too early to establish any kind of
2382  * correspondence between structs/unions.
2383  *
2384  * No canonical correspondence is derived for primitive types (they are
2385  * already deduplicated completely anyway) or reference types (they rely on
2386  * stability of struct/union canonical relationship for equivalence checks).
2387  */
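/*
 * Example: if the equivalence check recorded a correspondence between FWD
 * "struct foo" and a concrete "struct foo" (in either graph), the loop below
 * sets d->map[fwd_id] to the concrete struct's ID, so any later
 * resolve_type_id()/resolve_fwd_id() on the FWD lands on the concrete struct
 * (and, eventually, on its canonical representative).
 */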
2388 static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
2389 {
2390 	__u32 cand_type_id, targ_type_id;
2391 	__u16 t_kind, c_kind;
2392 	__u32 t_id, c_id;
2393 	int i;
2394 
2395 	for (i = 0; i < d->hypot_cnt; i++) {
2396 		cand_type_id = d->hypot_list[i];
2397 		targ_type_id = d->hypot_map[cand_type_id];
2398 		t_id = resolve_type_id(d, targ_type_id);
2399 		c_id = resolve_type_id(d, cand_type_id);
2400 		t_kind = btf_kind(d->btf->types[t_id]);
2401 		c_kind = btf_kind(d->btf->types[c_id]);
2402 		/*
2403 		 * Resolve FWD into STRUCT/UNION.
2404 		 * It's ok to resolve FWD into STRUCT/UNION that's not yet
2405 		 * mapped to canonical representative (as opposed to
2406 		 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
2407 		 * eventually that struct is going to be mapped and all resolved
2408 		 * FWDs will automatically resolve to correct canonical
2409 		 * representative. This will happen before ref type deduping,
2410 		 * which critically depends on the stability of these mappings. This
2411 		 * stability is not a requirement for STRUCT/UNION equivalence
2412 		 * checks, though.
2413 		 */
2414 		if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
2415 			d->map[c_id] = t_id;
2416 		else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
2417 			d->map[t_id] = c_id;
2418 
2419 		if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
2420 		    c_kind != BTF_KIND_FWD &&
2421 		    is_type_mapped(d, c_id) &&
2422 		    !is_type_mapped(d, t_id)) {
2423 			/*
2424 			 * as a perf optimization, we can map struct/union
2425 			 * that's part of type graph we just verified for
2426 			 * equivalence. We can do that for struct/union that has
2427 			 * canonical representative only, though.
2428 			 */
2429 			d->map[t_id] = c_id;
2430 		}
2431 	}
2432 }
2433 
2434 /*
2435  * Deduplicate struct/union types.
2436  *
2437  * For each struct/union type its type signature hash is calculated, taking
2438  * into account type's name, size, number, order and names of fields, but
2439  * ignoring type IDs referenced from fields, because they might not be deduped
2440  * completely until after reference types deduplication phase. This type hash
2441  * is used to iterate over all potential canonical types, sharing same hash.
2442  * For each canonical candidate we check whether type graphs that they form
2443  * (through referenced types in fields and so on) are equivalent using algorithm
2444  * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
2445  * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
2446  * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
2447  * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
2448  * potentially map other structs/unions to their canonical representatives,
2449  * if such relationship hasn't yet been established. This speeds up algorithm
2450  * by eliminating some of the duplicate work.
2451  *
2452  * If no matching canonical representative was found, struct/union is marked
2453  * as canonical for itself and is added into btf_dedup->dedup_table hash map
2454  * for further lookups.
2455  */
2456 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
2457 {
2458 	struct btf_type *cand_type, *t;
2459 	struct hashmap_entry *hash_entry;
2460 	/* if we don't find equivalent type, then we are canonical */
2461 	__u32 new_id = type_id;
2462 	__u16 kind;
2463 	long h;
2464 
2465 	/* already deduped or is in process of deduping (loop detected) */
2466 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2467 		return 0;
2468 
2469 	t = d->btf->types[type_id];
2470 	kind = btf_kind(t);
2471 
2472 	if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
2473 		return 0;
2474 
2475 	h = btf_hash_struct(t);
2476 	for_each_dedup_cand(d, hash_entry, h) {
2477 		__u32 cand_id = (__u32)(long)hash_entry->value;
2478 		int eq;
2479 
2480 		/*
2481 		 * Even though btf_dedup_is_equiv() checks for
2482 		 * btf_shallow_equal_struct() internally when checking two
2483 		 * structs (unions) for equivalence, we need to guard here
2484 		 * from picking matching FWD type as a dedup candidate.
2485 		 * This can happen due to hash collision. In such case just
2486 		 * relying on btf_dedup_is_equiv() would lead to potentially
2487 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
2488 		 * FWD and compatible STRUCT/UNION are considered equivalent.
2489 		 */
2490 		cand_type = d->btf->types[cand_id];
2491 		if (!btf_shallow_equal_struct(t, cand_type))
2492 			continue;
2493 
2494 		btf_dedup_clear_hypot_map(d);
2495 		eq = btf_dedup_is_equiv(d, type_id, cand_id);
2496 		if (eq < 0)
2497 			return eq;
2498 		if (!eq)
2499 			continue;
2500 		new_id = cand_id;
2501 		btf_dedup_merge_hypot_map(d);
2502 		break;
2503 	}
2504 
2505 	d->map[type_id] = new_id;
2506 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2507 		return -ENOMEM;
2508 
2509 	return 0;
2510 }
2511 
2512 static int btf_dedup_struct_types(struct btf_dedup *d)
2513 {
2514 	int i, err;
2515 
2516 	for (i = 1; i <= d->btf->nr_types; i++) {
2517 		err = btf_dedup_struct_type(d, i);
2518 		if (err)
2519 			return err;
2520 	}
2521 	return 0;
2522 }
2523 
2524 /*
2525  * Deduplicate reference type.
2526  *
2527  * Once all primitive and struct/union types got deduplicated, we can easily
2528  * deduplicate all other (reference) BTF types. This is done in two steps:
2529  *
2530  * 1. Resolve all referenced type IDs into their canonical type IDs. This
2531  * resolution can be done either immediately for primitive or struct/union types
2532  * (because they were deduped in previous two phases) or recursively for
2533  * reference types. Recursion will always terminate at either primitive or
2534  * struct/union type, at which point we can "unwind" chain of reference types
2535  * one by one. There is no danger of encountering cycles because in the C type
2536  * system the only way to form a type cycle is through a struct/union, so any chain
2537  * of reference types, even those taking part in a type cycle, will inevitably
2538  * reach struct/union at some point.
2539  *
2540  * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
2541  * becomes "stable", in the sense that no further deduplication will cause
2542  * any changes to it. With that, it's now possible to calculate type's signature
2543  * hash (this time taking into account referenced type IDs) and loop over all
2544  * potential canonical representatives. If no match was found, current type
2545  * will become canonical representative of itself and will be added into
2546  * btf_dedup->dedup_table as another possible canonical representative.
2547  */
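/*
 * Example: for a chain PTR -> CONST -> STRUCT, btf_dedup_ref_type() recurses
 * down to the STRUCT, which is already mapped to its canonical ID by the
 * struct/union phase. Unwinding, the CONST's referenced type ID is rewritten
 * to that canonical STRUCT ID, the CONST is hashed and deduped, and only then
 * is the PTR's referenced type ID rewritten and the PTR itself deduped.
 */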
2548 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
2549 {
2550 	struct hashmap_entry *hash_entry;
2551 	__u32 new_id = type_id, cand_id;
2552 	struct btf_type *t, *cand;
2553 	/* if we don't find equivalent type, then we are representative type */
2554 	int ref_type_id;
2555 	long h;
2556 
2557 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
2558 		return -ELOOP;
2559 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
2560 		return resolve_type_id(d, type_id);
2561 
2562 	t = d->btf->types[type_id];
2563 	d->map[type_id] = BTF_IN_PROGRESS_ID;
2564 
2565 	switch (btf_kind(t)) {
2566 	case BTF_KIND_CONST:
2567 	case BTF_KIND_VOLATILE:
2568 	case BTF_KIND_RESTRICT:
2569 	case BTF_KIND_PTR:
2570 	case BTF_KIND_TYPEDEF:
2571 	case BTF_KIND_FUNC:
2572 		ref_type_id = btf_dedup_ref_type(d, t->type);
2573 		if (ref_type_id < 0)
2574 			return ref_type_id;
2575 		t->type = ref_type_id;
2576 
2577 		h = btf_hash_common(t);
2578 		for_each_dedup_cand(d, hash_entry, h) {
2579 			cand_id = (__u32)(long)hash_entry->value;
2580 			cand = d->btf->types[cand_id];
2581 			if (btf_equal_common(t, cand)) {
2582 				new_id = cand_id;
2583 				break;
2584 			}
2585 		}
2586 		break;
2587 
2588 	case BTF_KIND_ARRAY: {
2589 		struct btf_array *info = btf_array(t);
2590 
2591 		ref_type_id = btf_dedup_ref_type(d, info->type);
2592 		if (ref_type_id < 0)
2593 			return ref_type_id;
2594 		info->type = ref_type_id;
2595 
2596 		ref_type_id = btf_dedup_ref_type(d, info->index_type);
2597 		if (ref_type_id < 0)
2598 			return ref_type_id;
2599 		info->index_type = ref_type_id;
2600 
2601 		h = btf_hash_array(t);
2602 		for_each_dedup_cand(d, hash_entry, h) {
2603 			cand_id = (__u32)(long)hash_entry->value;
2604 			cand = d->btf->types[cand_id];
2605 			if (btf_equal_array(t, cand)) {
2606 				new_id = cand_id;
2607 				break;
2608 			}
2609 		}
2610 		break;
2611 	}
2612 
2613 	case BTF_KIND_FUNC_PROTO: {
2614 		struct btf_param *param;
2615 		__u16 vlen;
2616 		int i;
2617 
2618 		ref_type_id = btf_dedup_ref_type(d, t->type);
2619 		if (ref_type_id < 0)
2620 			return ref_type_id;
2621 		t->type = ref_type_id;
2622 
2623 		vlen = btf_vlen(t);
2624 		param = btf_params(t);
2625 		for (i = 0; i < vlen; i++) {
2626 			ref_type_id = btf_dedup_ref_type(d, param->type);
2627 			if (ref_type_id < 0)
2628 				return ref_type_id;
2629 			param->type = ref_type_id;
2630 			param++;
2631 		}
2632 
2633 		h = btf_hash_fnproto(t);
2634 		for_each_dedup_cand(d, hash_entry, h) {
2635 			cand_id = (__u32)(long)hash_entry->value;
2636 			cand = d->btf->types[cand_id];
2637 			if (btf_equal_fnproto(t, cand)) {
2638 				new_id = cand_id;
2639 				break;
2640 			}
2641 		}
2642 		break;
2643 	}
2644 
2645 	default:
2646 		return -EINVAL;
2647 	}
2648 
2649 	d->map[type_id] = new_id;
2650 	if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2651 		return -ENOMEM;
2652 
2653 	return new_id;
2654 }
2655 
2656 static int btf_dedup_ref_types(struct btf_dedup *d)
2657 {
2658 	int i, err;
2659 
2660 	for (i = 1; i <= d->btf->nr_types; i++) {
2661 		err = btf_dedup_ref_type(d, i);
2662 		if (err < 0)
2663 			return err;
2664 	}
2665 	/* we won't need d->dedup_table anymore */
2666 	hashmap__free(d->dedup_table);
2667 	d->dedup_table = NULL;
2668 	return 0;
2669 }
2670 
2671 /*
2672  * Compact types.
2673  *
2674  * After we established for each type its corresponding canonical representative
2675  * type, we can now eliminate types that are not canonical and leave only
2676  * canonical ones laid out sequentially in memory by copying them over
2677  * duplicates. During compaction btf_dedup->hypot_map array is reused to store
2678  * a map from original type ID to a new compacted type ID, which will be used
2679  * during the next phase to "fix up" type IDs referenced from struct/union and
2680  * reference types.
2681  */
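/*
 * Example: with d->map = {1:1, 2:1, 3:3, 4:3} (types 2 and 4 are duplicates
 * of 1 and 3), only types 1 and 3 are kept and copied to the front of the
 * type section; hypot_map then records 1 -> 1 and 3 -> 2, so the remapping
 * phase can rewrite any reference to type 3 (or 4) into the compacted ID 2.
 */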
2682 static int btf_dedup_compact_types(struct btf_dedup *d)
2683 {
2684 	struct btf_type **new_types;
2685 	__u32 next_type_id = 1;
2686 	char *types_start, *p;
2687 	int i, len;
2688 
2689 	/* we are going to reuse hypot_map to store compaction remapping */
2690 	d->hypot_map[0] = 0;
2691 	for (i = 1; i <= d->btf->nr_types; i++)
2692 		d->hypot_map[i] = BTF_UNPROCESSED_ID;
2693 
2694 	types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
2695 	p = types_start;
2696 
2697 	for (i = 1; i <= d->btf->nr_types; i++) {
2698 		if (d->map[i] != i)
2699 			continue;
2700 
2701 		len = btf_type_size(d->btf->types[i]);
2702 		if (len < 0)
2703 			return len;
2704 
2705 		memmove(p, d->btf->types[i], len);
2706 		d->hypot_map[i] = next_type_id;
2707 		d->btf->types[next_type_id] = (struct btf_type *)p;
2708 		p += len;
2709 		next_type_id++;
2710 	}
2711 
2712 	/* shrink struct btf's internal types index and update btf_header */
2713 	d->btf->nr_types = next_type_id - 1;
2714 	d->btf->types_size = d->btf->nr_types;
2715 	d->btf->hdr->type_len = p - types_start;
2716 	new_types = realloc(d->btf->types,
2717 			    (1 + d->btf->nr_types) * sizeof(struct btf_type *));
2718 	if (!new_types)
2719 		return -ENOMEM;
2720 	d->btf->types = new_types;
2721 
2722 	/* make sure string section follows type information without gaps */
2723 	d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
2724 	memmove(p, d->btf->strings, d->btf->hdr->str_len);
2725 	d->btf->strings = p;
2726 	p += d->btf->hdr->str_len;
2727 
2728 	d->btf->data_size = p - (char *)d->btf->data;
2729 	return 0;
2730 }
2731 
2732 /*
2733  * Figure out final (deduplicated and compacted) type ID for provided original
2734  * `type_id` by first resolving it into corresponding canonical type ID and
2735  * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
2736  * which is populated during compaction phase.
2737  */
2738 static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
2739 {
2740 	__u32 resolved_type_id, new_type_id;
2741 
2742 	resolved_type_id = resolve_type_id(d, type_id);
2743 	new_type_id = d->hypot_map[resolved_type_id];
2744 	if (new_type_id > BTF_MAX_NR_TYPES)
2745 		return -EINVAL;
2746 	return new_type_id;
2747 }
2748 
2749 /*
2750  * Remap referenced type IDs into deduped type IDs.
2751  *
2752  * After BTF types are deduplicated and compacted, their final type IDs may
2753  * differ from original ones. The map from original to a corresponding
2754  * deduped type ID is stored in btf_dedup->hypot_map and is populated during
2755  * the compaction phase. During the remapping phase we rewrite all type IDs
2756  * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
2757  * their final deduped type IDs.
2758  */
2759 static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
2760 {
2761 	struct btf_type *t = d->btf->types[type_id];
2762 	int i, r;
2763 
2764 	switch (btf_kind(t)) {
2765 	case BTF_KIND_INT:
2766 	case BTF_KIND_ENUM:
2767 		break;
2768 
2769 	case BTF_KIND_FWD:
2770 	case BTF_KIND_CONST:
2771 	case BTF_KIND_VOLATILE:
2772 	case BTF_KIND_RESTRICT:
2773 	case BTF_KIND_PTR:
2774 	case BTF_KIND_TYPEDEF:
2775 	case BTF_KIND_FUNC:
2776 	case BTF_KIND_VAR:
2777 		r = btf_dedup_remap_type_id(d, t->type);
2778 		if (r < 0)
2779 			return r;
2780 		t->type = r;
2781 		break;
2782 
2783 	case BTF_KIND_ARRAY: {
2784 		struct btf_array *arr_info = btf_array(t);
2785 
2786 		r = btf_dedup_remap_type_id(d, arr_info->type);
2787 		if (r < 0)
2788 			return r;
2789 		arr_info->type = r;
2790 		r = btf_dedup_remap_type_id(d, arr_info->index_type);
2791 		if (r < 0)
2792 			return r;
2793 		arr_info->index_type = r;
2794 		break;
2795 	}
2796 
2797 	case BTF_KIND_STRUCT:
2798 	case BTF_KIND_UNION: {
2799 		struct btf_member *member = btf_members(t);
2800 		__u16 vlen = btf_vlen(t);
2801 
2802 		for (i = 0; i < vlen; i++) {
2803 			r = btf_dedup_remap_type_id(d, member->type);
2804 			if (r < 0)
2805 				return r;
2806 			member->type = r;
2807 			member++;
2808 		}
2809 		break;
2810 	}
2811 
2812 	case BTF_KIND_FUNC_PROTO: {
2813 		struct btf_param *param = btf_params(t);
2814 		__u16 vlen = btf_vlen(t);
2815 
2816 		r = btf_dedup_remap_type_id(d, t->type);
2817 		if (r < 0)
2818 			return r;
2819 		t->type = r;
2820 
2821 		for (i = 0; i < vlen; i++) {
2822 			r = btf_dedup_remap_type_id(d, param->type);
2823 			if (r < 0)
2824 				return r;
2825 			param->type = r;
2826 			param++;
2827 		}
2828 		break;
2829 	}
2830 
2831 	case BTF_KIND_DATASEC: {
2832 		struct btf_var_secinfo *var = btf_var_secinfos(t);
2833 		__u16 vlen = btf_vlen(t);
2834 
2835 		for (i = 0; i < vlen; i++) {
2836 			r = btf_dedup_remap_type_id(d, var->type);
2837 			if (r < 0)
2838 				return r;
2839 			var->type = r;
2840 			var++;
2841 		}
2842 		break;
2843 	}
2844 
2845 	default:
2846 		return -EINVAL;
2847 	}
2848 
2849 	return 0;
2850 }
2851 
2852 static int btf_dedup_remap_types(struct btf_dedup *d)
2853 {
2854 	int i, r;
2855 
2856 	for (i = 1; i <= d->btf->nr_types; i++) {
2857 		r = btf_dedup_remap_type(d, i);
2858 		if (r < 0)
2859 			return r;
2860 	}
2861 	return 0;
2862 }
2863